Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (170 commits)
  [SCSI] scsi_dh_rdac: Add MD36xxf into device list
  [SCSI] scsi_debug: add consecutive medium errors
  [SCSI] libsas: fix ata list corruption issue
  [SCSI] hpsa: export resettable host attribute
  [SCSI] hpsa: move device attributes to avoid forward declarations
  [SCSI] scsi_debug: Logical Block Provisioning (SBC3r26)
  [SCSI] sd: Logical Block Provisioning update
  [SCSI] Include protection operation in SCSI command trace
  [SCSI] hpsa: fix incorrect PCI IDs and add two new ones (2nd try)
  [SCSI] target: Fix volume size misreporting for volumes > 2TB
  [SCSI] bnx2fc: Broadcom FCoE offload driver
  [SCSI] fcoe: fix broken fcoe interface reset
  [SCSI] fcoe: precedence bug in fcoe_filter_frames()
  [SCSI] libfcoe: Remove stale fcoe-netdev entries
  [SCSI] libfcoe: Move FCOE_MTU definition from fcoe.h to libfcoe.h
  [SCSI] libfc: introduce __fc_fill_fc_hdr that accepts fc_hdr as an argument
  [SCSI] fcoe, libfc: initialize EM anchors list and then update npiv EMs
  [SCSI] Revert "[SCSI] libfc: fix exchange being deleted when the abort itself is timed out"
  [SCSI] libfc: Fixing a memory leak when destroying an interface
  [SCSI] megaraid_sas: Version and Changelog update
  ...

Fix up trivial conflicts due to whitespace differences in
drivers/scsi/libsas/{sas_ata.c,sas_scsi_host.c}
Linus Torvalds, 14 years ago
parent commit c55d267de2
100 changed files with 13944 additions and 1857 deletions
  1. Documentation/scsi/ChangeLog.megaraid_sas (+23 -0)
  2. Documentation/scsi/hpsa.txt (+23 -0)
  3. Documentation/scsi/scsi_mid_low_api.txt (+13 -1)
  4. MAINTAINERS (+1 -2)
  5. block/blk-core.c (+20 -3)
  6. drivers/infiniband/ulp/iser/iscsi_iser.c (+26 -0)
  7. drivers/md/dm-mpath.c (+10 -12)
  8. drivers/message/fusion/lsi/mpi_cnfg.h (+1 -0)
  9. drivers/message/fusion/lsi/mpi_ioc.h (+1 -0)
  10. drivers/message/fusion/mptbase.c (+6 -1)
  11. drivers/message/fusion/mptctl.c (+3 -1)
  12. drivers/message/fusion/mptsas.c (+5 -2)
  13. drivers/net/bnx2x/bnx2x_main.c (+0 -7)
  14. drivers/s390/scsi/zfcp_aux.c (+29 -51)
  15. drivers/s390/scsi/zfcp_def.h (+2 -13)
  16. drivers/s390/scsi/zfcp_erp.c (+3 -1)
  17. drivers/s390/scsi/zfcp_ext.h (+6 -3)
  18. drivers/s390/scsi/zfcp_fc.c (+214 -119)
  19. drivers/s390/scsi/zfcp_fc.h (+66 -58)
  20. drivers/s390/scsi/zfcp_fsf.c (+15 -12)
  21. drivers/s390/scsi/zfcp_scsi.c (+39 -32)
  22. drivers/scsi/Kconfig (+1 -0)
  23. drivers/scsi/Makefile (+1 -0)
  24. drivers/scsi/NCR5380.c (+1 -2)
  25. drivers/scsi/arcmsr/arcmsr_hba.c (+2 -2)
  26. drivers/scsi/be2iscsi/be_iscsi.c (+5 -13)
  27. drivers/scsi/be2iscsi/be_iscsi.h (+2 -2)
  28. drivers/scsi/be2iscsi/be_main.c (+2 -1)
  29. drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h (+1080 -0)
  30. drivers/scsi/bnx2fc/Kconfig (+11 -0)
  31. drivers/scsi/bnx2fc/Makefile (+3 -0)
  32. drivers/scsi/bnx2fc/bnx2fc.h (+511 -0)
  33. drivers/scsi/bnx2fc/bnx2fc_constants.h (+206 -0)
  34. drivers/scsi/bnx2fc/bnx2fc_debug.h (+70 -0)
  35. drivers/scsi/bnx2fc/bnx2fc_els.c (+515 -0)
  36. drivers/scsi/bnx2fc/bnx2fc_fcoe.c (+2535 -0)
  37. drivers/scsi/bnx2fc/bnx2fc_hwi.c (+1868 -0)
  38. drivers/scsi/bnx2fc/bnx2fc_io.c (+1833 -0)
  39. drivers/scsi/bnx2fc/bnx2fc_tgt.c (+844 -0)
  40. drivers/scsi/bnx2i/bnx2i.h (+3 -1)
  41. drivers/scsi/bnx2i/bnx2i_hwi.c (+122 -3)
  42. drivers/scsi/bnx2i/bnx2i_init.c (+23 -6)
  43. drivers/scsi/bnx2i/bnx2i_iscsi.c (+28 -25)
  44. drivers/scsi/cxgbi/cxgb3i/cxgb3i.c (+10 -46)
  45. drivers/scsi/cxgbi/cxgb3i/cxgb3i.h (+15 -4)
  46. drivers/scsi/cxgbi/cxgb4i/cxgb4i.c (+3 -4)
  47. drivers/scsi/cxgbi/libcxgbi.c (+27 -39)
  48. drivers/scsi/cxgbi/libcxgbi.h (+1 -4)
  49. drivers/scsi/device_handler/scsi_dh.c (+37 -75)
  50. drivers/scsi/device_handler/scsi_dh_alua.c (+11 -7)
  51. drivers/scsi/device_handler/scsi_dh_emc.c (+1 -1)
  52. drivers/scsi/device_handler/scsi_dh_hp_sw.c (+4 -3)
  53. drivers/scsi/device_handler/scsi_dh_rdac.c (+13 -13)
  54. drivers/scsi/fcoe/Makefile (+2 -0)
  55. drivers/scsi/fcoe/fcoe.c (+194 -383)
  56. drivers/scsi/fcoe/fcoe.h (+3 -47)
  57. drivers/scsi/fcoe/fcoe_ctlr.c (+7 -33)
  58. drivers/scsi/fcoe/fcoe_transport.c (+770 -0)
  59. drivers/scsi/fcoe/libfcoe.h (+31 -0)
  60. drivers/scsi/fnic/fnic.h (+1 -1)
  61. drivers/scsi/fnic/vnic_dev.c (+1 -1)
  62. drivers/scsi/hpsa.c (+326 -253)
  63. drivers/scsi/hpsa.h (+7 -2)
  64. drivers/scsi/hpsa_cmd.h (+3 -1)
  65. drivers/scsi/ipr.c (+6 -3)
  66. drivers/scsi/iscsi_tcp.c (+67 -67)
  67. drivers/scsi/iscsi_tcp.h (+4 -0)
  68. drivers/scsi/libfc/fc_exch.c (+82 -29)
  69. drivers/scsi/libfc/fc_fcp.c (+15 -24)
  70. drivers/scsi/libfc/fc_libfc.c (+120 -0)
  71. drivers/scsi/libfc/fc_libfc.h (+14 -0)
  72. drivers/scsi/libfc/fc_lport.c (+60 -9)
  73. drivers/scsi/libfc/fc_npiv.c (+2 -8)
  74. drivers/scsi/libfc/fc_rport.c (+155 -36)
  75. drivers/scsi/libiscsi.c (+41 -3)
  76. drivers/scsi/libsas/Kconfig (+0 -8)
  77. drivers/scsi/libsas/Makefile (+0 -4)
  78. drivers/scsi/libsas/sas_ata.c (+22 -58)
  79. drivers/scsi/libsas/sas_dump.c (+0 -4)
  80. drivers/scsi/libsas/sas_dump.h (+0 -12)
  81. drivers/scsi/libsas/sas_expander.c (+5 -0)
  82. drivers/scsi/libsas/sas_internal.h (+1 -5)
  83. drivers/scsi/libsas/sas_scsi_host.c (+1 -2)
  84. drivers/scsi/lpfc/lpfc.h (+13 -2)
  85. drivers/scsi/lpfc/lpfc_attr.c (+93 -30)
  86. drivers/scsi/lpfc/lpfc_crtn.h (+6 -3)
  87. drivers/scsi/lpfc/lpfc_ct.c (+49 -0)
  88. drivers/scsi/lpfc/lpfc_debugfs.c (+882 -55)
  89. drivers/scsi/lpfc/lpfc_debugfs.h (+59 -1)
  90. drivers/scsi/lpfc/lpfc_els.c (+163 -10)
  91. drivers/scsi/lpfc/lpfc_hbadisc.c (+14 -4)
  92. drivers/scsi/lpfc/lpfc_hw.h (+10 -1)
  93. drivers/scsi/lpfc/lpfc_hw4.h (+100 -4)
  94. drivers/scsi/lpfc/lpfc_init.c (+101 -33)
  95. drivers/scsi/lpfc/lpfc_mbox.c (+10 -9)
  96. drivers/scsi/lpfc/lpfc_nportdisc.c (+8 -3)
  97. drivers/scsi/lpfc/lpfc_scsi.c (+85 -28)
  98. drivers/scsi/lpfc/lpfc_sli.c (+108 -109)
  99. drivers/scsi/lpfc/lpfc_sli4.h (+7 -1)
  100. drivers/scsi/lpfc/lpfc_version.h (+2 -2)

+ 23 - 0
Documentation/scsi/ChangeLog.megaraid_sas

@@ -1,3 +1,26 @@
+Release Date    : Thu. Feb 24, 2011 17:00:00 PST 2011 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Adam Radford
+Current Version : 00.00.05.34-rc1
+Old Version     : 00.00.05.29-rc1
+    1. Fix some failure gotos from megasas_probe_one(), etc.
+    2. Add missing check_and_restore_queue_depth() call in
+       complete_cmd_fusion().
+    3. Enable MSI-X before calling megasas_init_fw().
+    4. Call tasklet_schedule() even if outbound_intr_status == 0 for MFI based
+       boards in MSI-X mode.
+    5. Fix megasas_probe_one() to clear PCI_MSIX_FLAGS_ENABLE in msi control
+       register in kdump kernel.
+    6. Fix megasas_get_cmd() to only print "Command pool empty" if
+       megasas_dbg_lvl is set.
+    7. Fix megasas_build_dcdb_fusion() to not filter by TYPE_DISK.
+    8. Fix megasas_build_dcdb_fusion() to use io_request->LUN[1] field.
+    9. Add MR_EVT_CFG_CLEARED to megasas_aen_polling().
+    10. Fix tasklet_init() in megasas_init_fw() to use instancet->tasklet.
+    11. Fix fault state handling in megasas_transition_to_ready().
+    12. Fix max_sectors setting for IEEE SGL's.
+    13. Fix iMR OCR support to work correctly.
+-------------------------------------------------------------------------------
 Release Date    : Tues.  Dec 14, 2010 17:00:00 PST 2010 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Adam Radford

+ 23 - 0
Documentation/scsi/hpsa.txt

@@ -28,6 +28,12 @@ boot parameter "hpsa_allow_any=1" is specified, however these are not tested
 nor supported by HP with this driver.  For older Smart Arrays, the cciss
 driver should still be used.
 
+The "hpsa_simple_mode=1" boot parameter may be used to prevent the driver from
+putting the controller into "performant" mode.  The difference is that with simple
+mode, each command completion requires an interrupt, while with "performant mode"
+(the default, and ordinarily better performing) it is possible to have multiple
+command completions indicated by a single interrupt.
+
 HPSA specific entries in /sys
 -----------------------------
 
@@ -39,6 +45,8 @@ HPSA specific entries in /sys
 
   /sys/class/scsi_host/host*/rescan
   /sys/class/scsi_host/host*/firmware_revision
+  /sys/class/scsi_host/host*/resettable
+  /sys/class/scsi_host/host*/transport_mode
 
   the host "rescan" attribute is a write only attribute.  Writing to this
   attribute will cause the driver to scan for new, changed, or removed devices
@@ -55,6 +63,21 @@ HPSA specific entries in /sys
 	root@host:/sys/class/scsi_host/host4# cat firmware_revision
 	7.14
 
+  The transport_mode indicates whether the controller is in "performant"
+  or "simple" mode.  This is controlled by the "hpsa_simple_mode" module
+  parameter.
+
+  The "resettable" read-only attribute indicates whether a particular
+  controller is able to honor the "reset_devices" kernel parameter.  If the
+  device is resettable, this file will contain a "1", otherwise, a "0".  This
+  parameter is used by kdump, for example, to reset the controller at driver
+  load time to eliminate any outstanding commands on the controller and get the
+  controller into a known state so that the kdump initiated i/o will work right
+  and not be disrupted in any way by stale commands or other stale state
+  remaining on the controller from the previous kernel.  This attribute enables
+  kexec tools to warn the user if they attempt to designate a device which is
+  unable to honor the reset_devices kernel parameter as a dump device.
+
   HPSA specific disk attributes:
   ------------------------------
 

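For quick verification, the two attributes added above read back in the same
way as the existing firmware_revision example; a session on a resettable
controller running in the default performant mode might look like this (the
host number and values are illustrative and depend on the controller):

	root@host:/sys/class/scsi_host/host4# cat resettable
	1
	root@host:/sys/class/scsi_host/host4# cat transport_mode
	performant
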
+ 13 - 1
Documentation/scsi/scsi_mid_low_api.txt

@@ -1343,7 +1343,7 @@ Members of interest:
                    underruns (overruns should be rare). If possible an LLD
                    should set 'resid' prior to invoking 'done'. The most
                    interesting case is data transfers from a SCSI target
-                   device device (i.e. READs) that underrun. 
+                   device (e.g. READs) that underrun.
     underflow    - LLD should place (DID_ERROR << 16) in 'result' if
                    actual number of bytes transferred is less than this
                    figure. Not many LLDs implement this check and some that
@@ -1351,6 +1351,18 @@ Members of interest:
                    report a DID_ERROR. Better for an LLD to implement
                    'resid'.
 
+It is recommended that a LLD set 'resid' on data transfers from a SCSI
+target device (e.g. READs). It is especially important that 'resid' is set
+when such data transfers have sense keys of MEDIUM ERROR and HARDWARE ERROR
+(and possibly RECOVERED ERROR). In these cases if a LLD is in doubt how much
+data has been received then the safest approach is to indicate no bytes have
+been received. For example: to indicate that no valid data has been received
+a LLD might use these helpers:
+    scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
+where 'SCpnt' is a pointer to a scsi_cmnd object. To indicate that only three
+512-byte blocks have been received, 'resid' could be set like this:
+    scsi_set_resid(SCpnt, scsi_bufflen(SCpnt) - (3 * 512));
+
 The scsi_cmnd structure is defined in include/scsi/scsi_cmnd.h
 
 

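The 'resid' guidance added above is easiest to see in a concrete completion
path. A minimal sketch of how an LLD might apply it on a MEDIUM ERROR or
HARDWARE ERROR sense key (the driver function is hypothetical; scsi_set_resid(),
scsi_bufflen() and the sense-key constants are existing kernel interfaces):

	/* Sketch: when the received byte count is in doubt, claim nothing
	 * was transferred before invoking 'done'. */
	static void lld_complete_cmd(struct scsi_cmnd *SCpnt, u8 sense_key)
	{
		if (sense_key == MEDIUM_ERROR || sense_key == HARDWARE_ERROR)
			scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
		SCpnt->scsi_done(SCpnt);
	}
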
+ 1 - 2
MAINTAINERS

@@ -5359,8 +5359,7 @@ S:	Supported
 F:	drivers/s390/crypto/
 
 S390 ZFCP DRIVER
-M:	Christof Schmitt <christof.schmitt@de.ibm.com>
-M:	Swen Schillig <swen@vnet.ibm.com>
+M:	Steffen Maier <maier@linux.vnet.ibm.com>
 M:	linux390@de.ibm.com
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/

+ 20 - 3
block/blk-core.c

@@ -2045,9 +2045,26 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 
 	if (error && req->cmd_type == REQ_TYPE_FS &&
 	    !(req->cmd_flags & REQ_QUIET)) {
-		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
-				req->rq_disk ? req->rq_disk->disk_name : "?",
-				(unsigned long long)blk_rq_pos(req));
+		char *error_type;
+
+		switch (error) {
+		case -ENOLINK:
+			error_type = "recoverable transport";
+			break;
+		case -EREMOTEIO:
+			error_type = "critical target";
+			break;
+		case -EBADE:
+			error_type = "critical nexus";
+			break;
+		case -EIO:
+		default:
+			error_type = "I/O";
+			break;
+		}
+		printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
+		       error_type, req->rq_disk ? req->rq_disk->disk_name : "?",
+		       (unsigned long long)blk_rq_pos(req));
 	}
 
 	blk_account_io_completion(req, nr_bytes);

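With this change the block layer's log line names the failure class instead
of reporting everything as a plain I/O error. For a request failed with
-EREMOTEIO, the printk above would produce a message of the form (device name
and sector are illustrative):

	end_request: critical target error, dev sdb, sector 71245094
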
+ 26 - 0
drivers/infiniband/ulp/iser/iscsi_iser.c

@@ -532,6 +532,29 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
 	stats->custom[3].value = conn->fmr_unalign_cnt;
 }
 
+static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
+				   enum iscsi_param param, char *buf)
+{
+	struct iser_conn *ib_conn = ep->dd_data;
+	int len;
+
+	switch (param) {
+	case ISCSI_PARAM_CONN_PORT:
+	case ISCSI_PARAM_CONN_ADDRESS:
+		if (!ib_conn || !ib_conn->cma_id)
+			return -ENOTCONN;
+
+		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+					&ib_conn->cma_id->route.addr.dst_addr,
+					param, buf);
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	return len;
+}
+
 static struct iscsi_endpoint *
 iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
 		      int non_blocking)
@@ -637,6 +660,8 @@ static struct iscsi_transport iscsi_iser_transport = {
 				  ISCSI_MAX_BURST |
 				  ISCSI_PDU_INORDER_EN |
 				  ISCSI_DATASEQ_INORDER_EN |
+				  ISCSI_CONN_PORT |
+				  ISCSI_CONN_ADDRESS |
 				  ISCSI_EXP_STATSN |
 				  ISCSI_PERSISTENT_PORT |
 				  ISCSI_PERSISTENT_ADDRESS |
@@ -659,6 +684,7 @@ static struct iscsi_transport iscsi_iser_transport = {
 	.destroy_conn           = iscsi_iser_conn_destroy,
 	.set_param              = iscsi_iser_set_param,
 	.get_conn_param		= iscsi_conn_get_param,
+	.get_ep_param		= iscsi_iser_get_ep_param,
 	.get_session_param	= iscsi_session_get_param,
 	.start_conn             = iscsi_iser_conn_start,
 	.stop_conn              = iscsi_iser_conn_stop,

+ 10 - 12
drivers/md/dm-mpath.c

@@ -1283,24 +1283,22 @@ static int do_end_io(struct multipath *m, struct request *clone,
 	if (!error && !clone->errors)
 		return 0;	/* I/O complete */
 
-	if (error == -EOPNOTSUPP)
-		return error;
-
-	if (clone->cmd_flags & REQ_DISCARD)
-		/*
-		 * Pass all discard request failures up.
-		 * FIXME: only fail_path if the discard failed due to a
-		 * transport problem.  This requires precise understanding
-		 * of the underlying failure (e.g. the SCSI sense).
-		 */
+	if (error == -EOPNOTSUPP || error == -EREMOTEIO)
 		return error;
 
 	if (mpio->pgpath)
 		fail_path(mpio->pgpath);
 
 	spin_lock_irqsave(&m->lock, flags);
-	if (!m->nr_valid_paths && !m->queue_if_no_path && !__must_push_back(m))
-		r = -EIO;
+	if (!m->nr_valid_paths) {
+		if (!m->queue_if_no_path) {
+			if (!__must_push_back(m))
+				r = -EIO;
+		} else {
+			if (error == -EBADE)
+				r = error;
+		}
+	}
 	spin_unlock_irqrestore(&m->lock, flags);
 
 	return r;

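Read together with the blk-core classification above, do_end_io() now reacts
to the decoded error roughly as follows (a summary of the hunk above, not new
behavior):

	error         path failed?   result
	-EOPNOTSUPP   no             passed up unchanged
	-EREMOTEIO    no             passed up unchanged (critical target error)
	-EBADE        yes            passed up even when queue_if_no_path is set
	other         yes            requeued, or -EIO once no valid path remains
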
+ 1 - 0
drivers/message/fusion/lsi/mpi_cnfg.h

@@ -2593,6 +2593,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
 #define MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE              (0x03)
 #define MPI_SAS_IOUNIT0_RATE_1_5                            (0x08)
 #define MPI_SAS_IOUNIT0_RATE_3_0                            (0x09)
+#define MPI_SAS_IOUNIT0_RATE_6_0                            (0x0A)
 
 /* see mpi_sas.h for values for SAS IO Unit Page 0 ControllerPhyDeviceInfo values */
 

+ 1 - 0
drivers/message/fusion/lsi/mpi_ioc.h

@@ -841,6 +841,7 @@ typedef struct _EVENT_DATA_SAS_PHY_LINK_STATUS
 #define MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE         (0x03)
 #define MPI_EVENT_SAS_PLS_LR_RATE_1_5                       (0x08)
 #define MPI_EVENT_SAS_PLS_LR_RATE_3_0                       (0x09)
+#define MPI_EVENT_SAS_PLS_LR_RATE_6_0                       (0x0A)
 
 /* SAS Discovery Event data */
 

+ 6 - 1
drivers/message/fusion/mptbase.c

@@ -7418,7 +7418,12 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
 		case MPI_EVENT_SAS_PLS_LR_RATE_3_0:
 			snprintf(evStr, EVENT_DESCR_STR_SZ,
 			   "SAS PHY Link Status: Phy=%d:"
-			   " Rate 3.0 Gpbs",PhyNumber);
+			   " Rate 3.0 Gbps", PhyNumber);
+			break;
+		case MPI_EVENT_SAS_PLS_LR_RATE_6_0:
+			snprintf(evStr, EVENT_DESCR_STR_SZ,
+			   "SAS PHY Link Status: Phy=%d:"
+			   " Rate 6.0 Gbps", PhyNumber);
 			break;
 		default:
 			snprintf(evStr, EVENT_DESCR_STR_SZ,

+ 3 - 1
drivers/message/fusion/mptctl.c

@@ -1314,8 +1314,10 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
 	else
 		karg->adapterType = MPT_IOCTL_INTERFACE_SCSI;
 
-	if (karg->hdr.port > 1)
+	if (karg->hdr.port > 1) {
+		kfree(karg);
 		return -EINVAL;
 		return -EINVAL;
+	}
 	port = karg->hdr.port;
 
 	karg->port = port;

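The hunk above plugs a memory leak by freeing karg before the added early
return. A common alternative shape in kernel code, when several exit points
share the same cleanup, is a goto-based unwind; a generic sketch, not the
actual mptctl code (the function and KARG_SIZE are placeholders):

	static int example_ioctl(unsigned int port)
	{
		int rc = 0;
		void *karg = kmalloc(KARG_SIZE, GFP_KERNEL);

		if (!karg)
			return -ENOMEM;
		if (port > 1) {
			rc = -EINVAL;
			goto out_free;	/* every error path funnels here */
		}
		/* normal processing would go here */
	out_free:
		kfree(karg);
		return rc;
	}
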
+ 5 - 2
drivers/message/fusion/mptsas.c

@@ -1973,7 +1973,6 @@ static struct scsi_host_template mptsas_driver_template = {
 	.change_queue_depth 		= mptscsih_change_queue_depth,
 	.eh_abort_handler		= mptscsih_abort,
 	.eh_device_reset_handler	= mptscsih_dev_reset,
-	.eh_bus_reset_handler		= mptscsih_bus_reset,
 	.eh_host_reset_handler		= mptscsih_host_reset,
 	.bios_param			= mptscsih_bios_param,
 	.can_queue			= MPT_SAS_CAN_QUEUE,
@@ -3063,6 +3062,9 @@ static int mptsas_probe_one_phy(struct device *dev,
 	case MPI_SAS_IOUNIT0_RATE_3_0:
 		phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
 		break;
+	case MPI_SAS_IOUNIT0_RATE_6_0:
+		phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
+		break;
 	case MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE:
 	case MPI_SAS_IOUNIT0_RATE_UNKNOWN:
 	default:
@@ -3691,7 +3693,8 @@ mptsas_send_link_status_event(struct fw_event_work *fw_event)
 	}
 
 	if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 ||
-	    link_rate == MPI_SAS_IOUNIT0_RATE_3_0) {
+	    link_rate == MPI_SAS_IOUNIT0_RATE_3_0 ||
+	    link_rate == MPI_SAS_IOUNIT0_RATE_6_0) {
 
 		if (!port_info) {
 			if (ioc->old_sas_discovery_protocal) {

+ 0 - 7
drivers/net/bnx2x/bnx2x_main.c

@@ -145,13 +145,6 @@ static struct {
 	{ "Broadcom NetXtreme II BCM57712E XGb" }
 };
 
-#ifndef PCI_DEVICE_ID_NX2_57712
-#define PCI_DEVICE_ID_NX2_57712		0x1662
-#endif
-#ifndef PCI_DEVICE_ID_NX2_57712E
-#define PCI_DEVICE_ID_NX2_57712E	0x1663
-#endif
-
 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },

+ 29 - 51
drivers/s390/scsi/zfcp_aux.c

@@ -122,36 +122,21 @@ static int __init zfcp_module_init(void)
 {
 	int retval = -ENOMEM;
 
-	zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn",
-					sizeof(struct zfcp_fc_gpn_ft_req));
-	if (!zfcp_data.gpn_ft_cache)
-		goto out;
-
-	zfcp_data.qtcb_cache = zfcp_cache_hw_align("zfcp_qtcb",
-					sizeof(struct fsf_qtcb));
-	if (!zfcp_data.qtcb_cache)
+	zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb",
+						  sizeof(struct fsf_qtcb));
+	if (!zfcp_fsf_qtcb_cache)
 		goto out_qtcb_cache;
 
-	zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr",
-					sizeof(struct fsf_status_read_buffer));
-	if (!zfcp_data.sr_buffer_cache)
-		goto out_sr_cache;
+	zfcp_fc_req_cache = zfcp_cache_hw_align("zfcp_fc_req",
+						sizeof(struct zfcp_fc_req));
+	if (!zfcp_fc_req_cache)
+		goto out_fc_cache;
 
-	zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid",
-					sizeof(struct zfcp_fc_gid_pn));
-	if (!zfcp_data.gid_pn_cache)
-		goto out_gid_cache;
-
-	zfcp_data.adisc_cache = zfcp_cache_hw_align("zfcp_adisc",
-					sizeof(struct zfcp_fc_els_adisc));
-	if (!zfcp_data.adisc_cache)
-		goto out_adisc_cache;
-
-	zfcp_data.scsi_transport_template =
+	zfcp_scsi_transport_template =
 		fc_attach_transport(&zfcp_transport_functions);
-	if (!zfcp_data.scsi_transport_template)
+	if (!zfcp_scsi_transport_template)
 		goto out_transport;
-	scsi_transport_reserve_device(zfcp_data.scsi_transport_template,
+	scsi_transport_reserve_device(zfcp_scsi_transport_template,
 				      sizeof(struct zfcp_scsi_dev));
 
 
@@ -175,18 +160,12 @@ static int __init zfcp_module_init(void)
 out_ccw_register:
 	misc_deregister(&zfcp_cfdc_misc);
 out_misc:
-	fc_release_transport(zfcp_data.scsi_transport_template);
+	fc_release_transport(zfcp_scsi_transport_template);
 out_transport:
-	kmem_cache_destroy(zfcp_data.adisc_cache);
-out_adisc_cache:
-	kmem_cache_destroy(zfcp_data.gid_pn_cache);
-out_gid_cache:
-	kmem_cache_destroy(zfcp_data.sr_buffer_cache);
-out_sr_cache:
-	kmem_cache_destroy(zfcp_data.qtcb_cache);
+	kmem_cache_destroy(zfcp_fc_req_cache);
+out_fc_cache:
+	kmem_cache_destroy(zfcp_fsf_qtcb_cache);
 out_qtcb_cache:
-	kmem_cache_destroy(zfcp_data.gpn_ft_cache);
-out:
 	return retval;
 }
 
@@ -196,12 +175,9 @@ static void __exit zfcp_module_exit(void)
 {
 	ccw_driver_unregister(&zfcp_ccw_driver);
 	misc_deregister(&zfcp_cfdc_misc);
-	fc_release_transport(zfcp_data.scsi_transport_template);
-	kmem_cache_destroy(zfcp_data.adisc_cache);
-	kmem_cache_destroy(zfcp_data.gid_pn_cache);
-	kmem_cache_destroy(zfcp_data.sr_buffer_cache);
-	kmem_cache_destroy(zfcp_data.qtcb_cache);
-	kmem_cache_destroy(zfcp_data.gpn_ft_cache);
+	fc_release_transport(zfcp_scsi_transport_template);
+	kmem_cache_destroy(zfcp_fc_req_cache);
+	kmem_cache_destroy(zfcp_fsf_qtcb_cache);
 }
 
 module_exit(zfcp_module_exit);
@@ -260,18 +236,18 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
 		return -ENOMEM;
 
 	adapter->pool.qtcb_pool =
-		mempool_create_slab_pool(4, zfcp_data.qtcb_cache);
+		mempool_create_slab_pool(4, zfcp_fsf_qtcb_cache);
 	if (!adapter->pool.qtcb_pool)
 		return -ENOMEM;
 
-	adapter->pool.status_read_data =
-		mempool_create_slab_pool(FSF_STATUS_READS_RECOM,
-					 zfcp_data.sr_buffer_cache);
-	if (!adapter->pool.status_read_data)
+	BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE);
+	adapter->pool.sr_data =
+		mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0);
+	if (!adapter->pool.sr_data)
 		return -ENOMEM;
 
 	adapter->pool.gid_pn =
-		mempool_create_slab_pool(1, zfcp_data.gid_pn_cache);
+		mempool_create_slab_pool(1, zfcp_fc_req_cache);
 	if (!adapter->pool.gid_pn)
 		return -ENOMEM;
 
@@ -290,8 +266,8 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
 		mempool_destroy(adapter->pool.qtcb_pool);
 	if (adapter->pool.status_read_req)
 		mempool_destroy(adapter->pool.status_read_req);
-	if (adapter->pool.status_read_data)
-		mempool_destroy(adapter->pool.status_read_data);
+	if (adapter->pool.sr_data)
+		mempool_destroy(adapter->pool.sr_data);
 	if (adapter->pool.gid_pn)
 		mempool_destroy(adapter->pool.gid_pn);
 }
@@ -386,6 +362,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 
 	INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
 	INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
+	INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update);
 
 	if (zfcp_qdio_setup(adapter))
 		goto failed;
@@ -437,7 +414,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 	adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
 	adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
 
-	if (!zfcp_adapter_scsi_register(adapter))
+	if (!zfcp_scsi_adapter_register(adapter))
 		return adapter;
 
 failed:
@@ -451,10 +428,11 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
 
 	cancel_work_sync(&adapter->scan_work);
 	cancel_work_sync(&adapter->stat_work);
+	cancel_work_sync(&adapter->ns_up_work);
 	zfcp_destroy_adapter_work_queue(adapter);
 
 	zfcp_fc_wka_ports_force_offline(adapter->gs);
-	zfcp_adapter_scsi_unregister(adapter);
+	zfcp_scsi_adapter_unregister(adapter);
 	sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
 
 	zfcp_erp_thread_kill(adapter);

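One detail worth noting in the hunks above: the status-read buffers no longer
come from a dedicated slab cache but from a page pool, with the BUILD_BUG_ON()
proving that a struct fsf_status_read_buffer fits into a single page. Reduced
to a generic sketch (the struct name is a placeholder; the mempool calls are
the real API):

	/* before: mempool backed by a driver-private kmem_cache */
	pool = mempool_create_slab_pool(min_nr, my_cache);

	/* after: mempool handing out order-0 pages, no cache to maintain */
	BUILD_BUG_ON(sizeof(struct my_buffer) > PAGE_SIZE);
	pool = mempool_create_page_pool(min_nr, 0);
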
+ 2 - 13
drivers/s390/scsi/zfcp_def.h

@@ -89,7 +89,6 @@ struct zfcp_reqlist;
 #define ZFCP_STATUS_LUN_READONLY		0x00000008
 
 /* FSF request status (this does not have a common part) */
-#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT	0x00000002
 #define ZFCP_STATUS_FSFREQ_ERROR		0x00000008
 #define ZFCP_STATUS_FSFREQ_CLEANUP		0x00000010
 #define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED	0x00000040
@@ -108,7 +107,7 @@ struct zfcp_adapter_mempool {
 	mempool_t *scsi_req;
 	mempool_t *scsi_abort;
 	mempool_t *status_read_req;
-	mempool_t *status_read_data;
+	mempool_t *sr_data;
 	mempool_t *gid_pn;
 	mempool_t *qtcb_pool;
 };
@@ -190,6 +189,7 @@ struct zfcp_adapter {
 	struct fsf_qtcb_bottom_port *stats_reset_data;
 	unsigned long		stats_reset;
 	struct work_struct	scan_work;
+	struct work_struct	ns_up_work;
 	struct service_level	service_level;
 	struct workqueue_struct	*work_queue;
 	struct device_dma_parameters dma_parms;
@@ -314,15 +314,4 @@ struct zfcp_fsf_req {
 	void			(*handler)(struct zfcp_fsf_req *);
 };
 
-/* driver data */
-struct zfcp_data {
-	struct scsi_host_template scsi_host_template;
-	struct scsi_transport_template *scsi_transport_template;
-	struct kmem_cache	*gpn_ft_cache;
-	struct kmem_cache	*qtcb_cache;
-	struct kmem_cache	*sr_buffer_cache;
-	struct kmem_cache	*gid_pn_cache;
-	struct kmem_cache	*adisc_cache;
-};
-
 #endif /* ZFCP_DEF_H */

+ 3 - 1
drivers/s390/scsi/zfcp_erp.c

@@ -732,7 +732,7 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
 	if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
 		return ZFCP_ERP_FAILED;
 
-	if (mempool_resize(act->adapter->pool.status_read_data,
+	if (mempool_resize(act->adapter->pool.sr_data,
 			   act->adapter->stat_read_buf_num, GFP_KERNEL))
 		return ZFCP_ERP_FAILED;
 
@@ -1231,8 +1231,10 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
 		if (result == ZFCP_ERP_SUCCEEDED) {
 			register_service_level(&adapter->service_level);
 			queue_work(adapter->work_queue, &adapter->scan_work);
+			queue_work(adapter->work_queue, &adapter->ns_up_work);
 		} else
 			unregister_service_level(&adapter->service_level);
+
 		kref_put(&adapter->ref, zfcp_adapter_release);
 		break;
 	}

+ 6 - 3
drivers/s390/scsi/zfcp_ext.h

@@ -80,6 +80,7 @@ extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
 extern void zfcp_erp_timeout_handler(unsigned long);
 
 /* zfcp_fc.c */
+extern struct kmem_cache *zfcp_fc_req_cache;
 extern void zfcp_fc_enqueue_event(struct zfcp_adapter *,
 				enum fc_host_event_code event_code, u32);
 extern void zfcp_fc_post_event(struct work_struct *);
@@ -95,8 +96,10 @@ extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
 extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
 extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
 extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
+extern void zfcp_fc_sym_name_update(struct work_struct *);
 
 /* zfcp_fsf.c */
+extern struct kmem_cache *zfcp_fsf_qtcb_cache;
 extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
 extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
 extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
@@ -139,9 +142,9 @@ extern struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *,
 					     struct qdio_buffer *);
 
 /* zfcp_scsi.c */
-extern struct zfcp_data zfcp_data;
-extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
-extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
+extern struct scsi_transport_template *zfcp_scsi_transport_template;
+extern int zfcp_scsi_adapter_register(struct zfcp_adapter *);
+extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *);
 extern struct fc_function_template zfcp_transport_functions;
 extern void zfcp_scsi_rport_work(struct work_struct *);
 extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);

+ 214 - 119
drivers/s390/scsi/zfcp_fc.c

@@ -11,11 +11,14 @@
 
 #include <linux/types.h>
 #include <linux/slab.h>
+#include <linux/utsname.h>
 #include <scsi/fc/fc_els.h>
 #include <scsi/libfc.h>
 #include "zfcp_ext.h"
 #include "zfcp_fc.h"
 
+struct kmem_cache *zfcp_fc_req_cache;
+
 static u32 zfcp_fc_rscn_range_mask[] = {
 	[ELS_ADDR_FMT_PORT]		= 0xFFFFFF,
 	[ELS_ADDR_FMT_AREA]		= 0xFFFF00,
@@ -260,24 +263,18 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
 		zfcp_fc_incoming_rscn(fsf_req);
 }
 
-static void zfcp_fc_ns_gid_pn_eval(void *data)
+static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req)
 {
-	struct zfcp_fc_gid_pn *gid_pn = data;
-	struct zfcp_fsf_ct_els *ct = &gid_pn->ct;
-	struct zfcp_fc_gid_pn_req *gid_pn_req = sg_virt(ct->req);
-	struct zfcp_fc_gid_pn_resp *gid_pn_resp = sg_virt(ct->resp);
-	struct zfcp_port *port = gid_pn->port;
+	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+	struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
 
-	if (ct->status)
+	if (ct_els->status)
 		return;
-	if (gid_pn_resp->ct_hdr.ct_cmd != FC_FS_ACC)
+	if (gid_pn_rsp->ct_hdr.ct_cmd != FC_FS_ACC)
 		return;
 
-	/* paranoia */
-	if (gid_pn_req->gid_pn.fn_wwpn != port->wwpn)
-		return;
 	/* looks like a valid d_id */
-	port->d_id = ntoh24(gid_pn_resp->gid_pn.fp_fid);
+	ct_els->port->d_id = ntoh24(gid_pn_rsp->gid_pn.fp_fid);
 }
 
 static void zfcp_fc_complete(void *data)
@@ -285,69 +282,73 @@ static void zfcp_fc_complete(void *data)
 	complete(data);
 }
 
+static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size)
+{
+	ct_hdr->ct_rev = FC_CT_REV;
+	ct_hdr->ct_fs_type = FC_FST_DIR;
+	ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE;
+	ct_hdr->ct_cmd = cmd;
+	ct_hdr->ct_mr_size = mr_size / 4;
+}
+
 static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
-				     struct zfcp_fc_gid_pn *gid_pn)
+				     struct zfcp_fc_req *fc_req)
 {
 	struct zfcp_adapter *adapter = port->adapter;
 	DECLARE_COMPLETION_ONSTACK(completion);
+	struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req;
+	struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
 	int ret;
 
 	/* setup parameters for send generic command */
-	gid_pn->port = port;
-	gid_pn->ct.handler = zfcp_fc_complete;
-	gid_pn->ct.handler_data = &completion;
-	gid_pn->ct.req = &gid_pn->sg_req;
-	gid_pn->ct.resp = &gid_pn->sg_resp;
-	sg_init_one(&gid_pn->sg_req, &gid_pn->gid_pn_req,
-		    sizeof(struct zfcp_fc_gid_pn_req));
-	sg_init_one(&gid_pn->sg_resp, &gid_pn->gid_pn_resp,
-		    sizeof(struct zfcp_fc_gid_pn_resp));
-
-	/* setup nameserver request */
-	gid_pn->gid_pn_req.ct_hdr.ct_rev = FC_CT_REV;
-	gid_pn->gid_pn_req.ct_hdr.ct_fs_type = FC_FST_DIR;
-	gid_pn->gid_pn_req.ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
-	gid_pn->gid_pn_req.ct_hdr.ct_options = 0;
-	gid_pn->gid_pn_req.ct_hdr.ct_cmd = FC_NS_GID_PN;
-	gid_pn->gid_pn_req.ct_hdr.ct_mr_size = ZFCP_FC_CT_SIZE_PAGE / 4;
-	gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn;
-
-	ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct,
+	fc_req->ct_els.port = port;
+	fc_req->ct_els.handler = zfcp_fc_complete;
+	fc_req->ct_els.handler_data = &completion;
+	fc_req->ct_els.req = &fc_req->sg_req;
+	fc_req->ct_els.resp = &fc_req->sg_rsp;
+	sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req));
+	sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp));
+
+	zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr,
+			   FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE);
+	gid_pn_req->gid_pn.fn_wwpn = port->wwpn;
+
+	ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els,
 			       adapter->pool.gid_pn_req,
 			       ZFCP_FC_CTELS_TMO);
 	if (!ret) {
 		wait_for_completion(&completion);
-		zfcp_fc_ns_gid_pn_eval(gid_pn);
+		zfcp_fc_ns_gid_pn_eval(fc_req);
 	}
 	return ret;
 }
 
 /**
- * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request
+ * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
  * @port: port where GID_PN request is needed
  * return: -ENOMEM on error, 0 otherwise
  */
 static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
 {
 	int ret;
-	struct zfcp_fc_gid_pn *gid_pn;
+	struct zfcp_fc_req *fc_req;
 	struct zfcp_adapter *adapter = port->adapter;
 
-	gid_pn = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
-	if (!gid_pn)
+	fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
+	if (!fc_req)
 		return -ENOMEM;
 
-	memset(gid_pn, 0, sizeof(*gid_pn));
+	memset(fc_req, 0, sizeof(*fc_req));
 
 	ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
 	if (ret)
 		goto out;
 
-	ret = zfcp_fc_ns_gid_pn_request(port, gid_pn);
+	ret = zfcp_fc_ns_gid_pn_request(port, fc_req);
 
 	zfcp_fc_wka_port_put(&adapter->gs->ds);
 out:
-	mempool_free(gid_pn, adapter->pool.gid_pn);
+	mempool_free(fc_req, adapter->pool.gid_pn);
 	return ret;
 }
 
@@ -419,11 +420,11 @@ void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
 
 static void zfcp_fc_adisc_handler(void *data)
 {
-	struct zfcp_fc_els_adisc *adisc = data;
-	struct zfcp_port *port = adisc->els.port;
-	struct fc_els_adisc *adisc_resp = &adisc->adisc_resp;
+	struct zfcp_fc_req *fc_req = data;
+	struct zfcp_port *port = fc_req->ct_els.port;
+	struct fc_els_adisc *adisc_resp = &fc_req->u.adisc.rsp;
 
-	if (adisc->els.status) {
+	if (fc_req->ct_els.status) {
 		/* request rejected or timed out */
 		zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
 					    "fcadh_1");
@@ -445,42 +446,42 @@ static void zfcp_fc_adisc_handler(void *data)
  out:
 	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
 	put_device(&port->dev);
-	kmem_cache_free(zfcp_data.adisc_cache, adisc);
+	kmem_cache_free(zfcp_fc_req_cache, fc_req);
 }
 
 static int zfcp_fc_adisc(struct zfcp_port *port)
 {
-	struct zfcp_fc_els_adisc *adisc;
+	struct zfcp_fc_req *fc_req;
 	struct zfcp_adapter *adapter = port->adapter;
+	struct Scsi_Host *shost = adapter->scsi_host;
 	int ret;
 
-	adisc = kmem_cache_zalloc(zfcp_data.adisc_cache, GFP_ATOMIC);
-	if (!adisc)
+	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
+	if (!fc_req)
 		return -ENOMEM;
 
-	adisc->els.port = port;
-	adisc->els.req = &adisc->req;
-	adisc->els.resp = &adisc->resp;
-	sg_init_one(adisc->els.req, &adisc->adisc_req,
+	fc_req->ct_els.port = port;
+	fc_req->ct_els.req = &fc_req->sg_req;
+	fc_req->ct_els.resp = &fc_req->sg_rsp;
+	sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req,
 		    sizeof(struct fc_els_adisc));
-	sg_init_one(adisc->els.resp, &adisc->adisc_resp,
+	sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp,
 		    sizeof(struct fc_els_adisc));
 
-	adisc->els.handler = zfcp_fc_adisc_handler;
-	adisc->els.handler_data = adisc;
+	fc_req->ct_els.handler = zfcp_fc_adisc_handler;
+	fc_req->ct_els.handler_data = fc_req;
 
 	/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
 	   without FC-AL-2 capability, so we don't set it */
-	adisc->adisc_req.adisc_wwpn = fc_host_port_name(adapter->scsi_host);
-	adisc->adisc_req.adisc_wwnn = fc_host_node_name(adapter->scsi_host);
-	adisc->adisc_req.adisc_cmd = ELS_ADISC;
-	hton24(adisc->adisc_req.adisc_port_id,
-	       fc_host_port_id(adapter->scsi_host));
+	fc_req->u.adisc.req.adisc_wwpn = fc_host_port_name(shost);
+	fc_req->u.adisc.req.adisc_wwnn = fc_host_node_name(shost);
+	fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
+	hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
 
-	ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els,
+	ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
 				ZFCP_FC_CTELS_TMO);
 	if (ret)
-		kmem_cache_free(zfcp_data.adisc_cache, adisc);
+		kmem_cache_free(zfcp_fc_req_cache, fc_req);
 
 	return ret;
 }
@@ -528,68 +529,42 @@ void zfcp_fc_test_link(struct zfcp_port *port)
 		put_device(&port->dev);
 }
 
-static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num)
+static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num)
 {
-	struct scatterlist *sg = &gpn_ft->sg_req;
-
-	kmem_cache_free(zfcp_data.gpn_ft_cache, sg_virt(sg));
-	zfcp_sg_free_table(gpn_ft->sg_resp, buf_num);
-
-	kfree(gpn_ft);
-}
+	struct zfcp_fc_req *fc_req;
 
-static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num)
-{
-	struct zfcp_fc_gpn_ft *gpn_ft;
-	struct zfcp_fc_gpn_ft_req *req;
-
-	gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL);
-	if (!gpn_ft)
+	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
+	if (!fc_req)
 		return NULL;
 
-	req = kmem_cache_zalloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
-	if (!req) {
-		kfree(gpn_ft);
-		gpn_ft = NULL;
-		goto out;
+	if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
+		kmem_cache_free(zfcp_fc_req_cache, fc_req);
+		return NULL;
 	}
-	sg_init_one(&gpn_ft->sg_req, req, sizeof(*req));
 
-	if (zfcp_sg_setup_table(gpn_ft->sg_resp, buf_num)) {
-		zfcp_free_sg_env(gpn_ft, buf_num);
-		gpn_ft = NULL;
-	}
-out:
-	return gpn_ft;
-}
+	sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req,
+		    sizeof(struct zfcp_fc_gpn_ft_req));
 
+	return fc_req;
+}
 
-static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
+static int zfcp_fc_send_gpn_ft(struct zfcp_fc_req *fc_req,
 			       struct zfcp_adapter *adapter, int max_bytes)
 {
-	struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
-	struct zfcp_fc_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
+	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+	struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req;
 	DECLARE_COMPLETION_ONSTACK(completion);
 	int ret;
 
-	/* prepare CT IU for GPN_FT */
-	req->ct_hdr.ct_rev = FC_CT_REV;
-	req->ct_hdr.ct_fs_type = FC_FST_DIR;
-	req->ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
-	req->ct_hdr.ct_options = 0;
-	req->ct_hdr.ct_cmd = FC_NS_GPN_FT;
-	req->ct_hdr.ct_mr_size = max_bytes / 4;
-	req->gpn_ft.fn_domain_id_scope = 0;
-	req->gpn_ft.fn_area_id_scope = 0;
+	zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes);
 	req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;
 
-	/* prepare zfcp_send_ct */
-	ct->handler = zfcp_fc_complete;
-	ct->handler_data = &completion;
-	ct->req = &gpn_ft->sg_req;
-	ct->resp = gpn_ft->sg_resp;
+	ct_els->handler = zfcp_fc_complete;
+	ct_els->handler_data = &completion;
+	ct_els->req = &fc_req->sg_req;
+	ct_els->resp = &fc_req->sg_rsp;
 
-	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL,
+	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
 			       ZFCP_FC_CTELS_TMO);
 	if (!ret)
 		wait_for_completion(&completion);
@@ -610,11 +585,11 @@ static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
 	list_move_tail(&port->list, lh);
 }
 
-static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
+static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
 			       struct zfcp_adapter *adapter, int max_entries)
 {
-	struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
-	struct scatterlist *sg = gpn_ft->sg_resp;
+	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+	struct scatterlist *sg = &fc_req->sg_rsp;
 	struct fc_ct_hdr *hdr = sg_virt(sg);
 	struct fc_gpn_ft_resp *acc = sg_virt(sg);
 	struct zfcp_port *port, *tmp;
@@ -623,7 +598,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
 	u32 d_id;
 	int ret = 0, x, last = 0;
 
-	if (ct->status)
+	if (ct_els->status)
 		return -EIO;
 
 	if (hdr->ct_cmd != FC_FS_ACC) {
@@ -687,7 +662,7 @@ void zfcp_fc_scan_ports(struct work_struct *work)
 	struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
 						    scan_work);
 	int ret, i;
-	struct zfcp_fc_gpn_ft *gpn_ft;
+	struct zfcp_fc_req *fc_req;
 	int chain, max_entries, buf_num, max_bytes;
 
 	chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
@@ -702,25 +677,145 @@ void zfcp_fc_scan_ports(struct work_struct *work)
 	if (zfcp_fc_wka_port_get(&adapter->gs->ds))
 		return;
 
-	gpn_ft = zfcp_alloc_sg_env(buf_num);
-	if (!gpn_ft)
+	fc_req = zfcp_alloc_sg_env(buf_num);
+	if (!fc_req)
 		goto out;
 
 	for (i = 0; i < 3; i++) {
-		ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes);
+		ret = zfcp_fc_send_gpn_ft(fc_req, adapter, max_bytes);
 		if (!ret) {
-			ret = zfcp_fc_eval_gpn_ft(gpn_ft, adapter, max_entries);
+			ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries);
 			if (ret == -EAGAIN)
 				ssleep(1);
 			else
 				break;
 		}
 	}
-	zfcp_free_sg_env(gpn_ft, buf_num);
+	zfcp_sg_free_table(&fc_req->sg_rsp, buf_num);
+	kmem_cache_free(zfcp_fc_req_cache, fc_req);
 out:
 	zfcp_fc_wka_port_put(&adapter->gs->ds);
 }
 
+static int zfcp_fc_gspn(struct zfcp_adapter *adapter,
+			struct zfcp_fc_req *fc_req)
+{
+	DECLARE_COMPLETION_ONSTACK(completion);
+	char devno[] = "DEVNO:";
+	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+	struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req;
+	struct zfcp_fc_gspn_rsp *gspn_rsp = &fc_req->u.gspn.rsp;
+	int ret;
+
+	zfcp_fc_ct_ns_init(&gspn_req->ct_hdr, FC_NS_GSPN_ID,
+			   FC_SYMBOLIC_NAME_SIZE);
+	hton24(gspn_req->gspn.fp_fid, fc_host_port_id(adapter->scsi_host));
+
+	sg_init_one(&fc_req->sg_req, gspn_req, sizeof(*gspn_req));
+	sg_init_one(&fc_req->sg_rsp, gspn_rsp, sizeof(*gspn_rsp));
+
+	ct_els->handler = zfcp_fc_complete;
+	ct_els->handler_data = &completion;
+	ct_els->req = &fc_req->sg_req;
+	ct_els->resp = &fc_req->sg_rsp;
+
+	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
+			       ZFCP_FC_CTELS_TMO);
+	if (ret)
+		return ret;
+
+	wait_for_completion(&completion);
+	if (ct_els->status)
+		return ct_els->status;
+
+	if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_NPIV &&
+	    !(strstr(gspn_rsp->gspn.fp_name, devno)))
+		snprintf(fc_host_symbolic_name(adapter->scsi_host),
+			 FC_SYMBOLIC_NAME_SIZE, "%s%s %s NAME: %s",
+			 gspn_rsp->gspn.fp_name, devno,
+			 dev_name(&adapter->ccw_device->dev),
+			 init_utsname()->nodename);
+	else
+		strlcpy(fc_host_symbolic_name(adapter->scsi_host),
+			gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE);
+
+	return 0;
+}
+
+static void zfcp_fc_rspn(struct zfcp_adapter *adapter,
+			 struct zfcp_fc_req *fc_req)
+{
+	DECLARE_COMPLETION_ONSTACK(completion);
+	struct Scsi_Host *shost = adapter->scsi_host;
+	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+	struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req;
+	struct fc_ct_hdr *rspn_rsp = &fc_req->u.rspn.rsp;
+	int ret, len;
+
+	zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID,
+			   FC_SYMBOLIC_NAME_SIZE);
+	hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost));
+	len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost),
+		      FC_SYMBOLIC_NAME_SIZE);
+	rspn_req->rspn.fr_name_len = len;
+
+	sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req));
+	sg_init_one(&fc_req->sg_rsp, rspn_rsp, sizeof(*rspn_rsp));
+
+	ct_els->handler = zfcp_fc_complete;
+	ct_els->handler_data = &completion;
+	ct_els->req = &fc_req->sg_req;
+	ct_els->resp = &fc_req->sg_rsp;
+
+	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
+			       ZFCP_FC_CTELS_TMO);
+	if (!ret)
+		wait_for_completion(&completion);
+}
+
+/**
+ * zfcp_fc_sym_name_update - Retrieve and update the symbolic port name
+ * @work: ns_up_work of the adapter where to update the symbolic port name
+ *
+ * Retrieve the current symbolic port name that may have been set by
+ * the hardware using the GSPN request and update the fc_host
+ * symbolic_name sysfs attribute. When running in NPIV mode (and hence
+ * the port name is unique for this system), update the symbolic port
+ * name to add Linux specific information and update the FC nameserver
+ * using the RSPN request.
+ */
+void zfcp_fc_sym_name_update(struct work_struct *work)
+{
+	struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
+						    ns_up_work);
+	int ret;
+	struct zfcp_fc_req *fc_req;
+
+	if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
+	    fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
+		return;
+
+	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
+	if (!fc_req)
+		return;
+
+	ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
+	if (ret)
+		goto out_free;
+
+	ret = zfcp_fc_gspn(adapter, fc_req);
+	if (ret || fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
+		goto out_ds_put;
+
+	memset(fc_req, 0, sizeof(*fc_req));
+	zfcp_fc_rspn(adapter, fc_req);
+
+out_ds_put:
+	zfcp_fc_wka_port_put(&adapter->gs->ds);
+out_free:
+	kmem_cache_free(zfcp_fc_req_cache, fc_req);
+}
+
 static void zfcp_fc_ct_els_job_handler(void *data)
 {
 	struct fc_bsg_job *job = data;

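The snprintf() in zfcp_fc_gspn() above composes, for NPIV ports whose
switch-reported name does not already carry a "DEVNO:" tag, a symbolic port
name of the form

	<name returned by GSPN_ID>DEVNO: <FCP device bus ID> NAME: <nodename>

which zfcp_fc_rspn() then registers with the fabric name server via RSPN_ID,
so an NPIV host can be identified from the fabric side.
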
+ 66 - 58
drivers/s390/scsi/zfcp_fc.h

@@ -64,32 +64,15 @@ struct zfcp_fc_gid_pn_req {
 } __packed;
 
 /**
- * struct zfcp_fc_gid_pn_resp - container for ct header plus gid_pn response
+ * struct zfcp_fc_gid_pn_rsp - container for ct header plus gid_pn response
  * @ct_hdr: FC GS common transport header
  * @gid_pn: GID_PN response
  */
-struct zfcp_fc_gid_pn_resp {
+struct zfcp_fc_gid_pn_rsp {
 	struct fc_ct_hdr	ct_hdr;
 	struct fc_gid_pn_resp	gid_pn;
 } __packed;
 
-/**
- * struct zfcp_fc_gid_pn - everything required in zfcp for gid_pn request
- * @ct: data passed to zfcp_fsf for issuing fsf request
- * @sg_req: scatterlist entry for request data
- * @sg_resp: scatterlist entry for response data
- * @gid_pn_req: GID_PN request data
- * @gid_pn_resp: GID_PN response data
- */
-struct zfcp_fc_gid_pn {
-	struct zfcp_fsf_ct_els ct;
-	struct scatterlist sg_req;
-	struct scatterlist sg_resp;
-	struct zfcp_fc_gid_pn_req gid_pn_req;
-	struct zfcp_fc_gid_pn_resp gid_pn_resp;
-	struct zfcp_port *port;
-};
-
 /**
  * struct zfcp_fc_gpn_ft - container for ct header plus gpn_ft request
  * @ct_hdr: FC GS common transport header
@@ -101,41 +84,72 @@ struct zfcp_fc_gpn_ft_req {
 } __packed;
 
 /**
- * struct zfcp_fc_gpn_ft_resp - container for ct header plus gpn_ft response
+ * struct zfcp_fc_gspn_req - container for ct header plus GSPN_ID request
  * @ct_hdr: FC GS common transport header
- * @gpn_ft: Array of gpn_ft response data to fill one memory page
+ * @gspn: GSPN_ID request
  */
-struct zfcp_fc_gpn_ft_resp {
+struct zfcp_fc_gspn_req {
 	struct fc_ct_hdr	ct_hdr;
-	struct fc_gpn_ft_resp	gpn_ft[ZFCP_FC_GPN_FT_ENT_PAGE];
+	struct fc_gid_pn_resp	gspn;
 } __packed;
 
 /**
- * struct zfcp_fc_gpn_ft - zfcp data for gpn_ft request
- * @ct: data passed to zfcp_fsf for issuing fsf request
- * @sg_req: scatter list entry for gpn_ft request
- * @sg_resp: scatter list entries for gpn_ft responses (per memory page)
+ * struct zfcp_fc_gspn_rsp - container for ct header plus GSPN_ID response
+ * @ct_hdr: FC GS common transport header
+ * @gspn: GSPN_ID response
+ * @name: The name string of the GSPN_ID response
  */
-struct zfcp_fc_gpn_ft {
-	struct zfcp_fsf_ct_els ct;
-	struct scatterlist sg_req;
-	struct scatterlist sg_resp[ZFCP_FC_GPN_FT_NUM_BUFS];
-};
+struct zfcp_fc_gspn_rsp {
+	struct fc_ct_hdr	ct_hdr;
+	struct fc_gspn_resp	gspn;
+	char			name[FC_SYMBOLIC_NAME_SIZE];
+} __packed;
 
 /**
- * struct zfcp_fc_els_adisc - everything required in zfcp for issuing ELS ADISC
- * @els: data required for issuing els fsf command
- * @req: scatterlist entry for ELS ADISC request
- * @resp: scatterlist entry for ELS ADISC response
- * @adisc_req: ELS ADISC request data
- * @adisc_resp: ELS ADISC response data
+ * struct zfcp_fc_rspn_req - container for ct header plus RSPN_ID request
+ * @ct_hdr: FC GS common transport header
+ * @rspn: RSPN_ID request
+ * @name: The name string of the RSPN_ID request
  */
-struct zfcp_fc_els_adisc {
-	struct zfcp_fsf_ct_els els;
-	struct scatterlist req;
-	struct scatterlist resp;
-	struct fc_els_adisc adisc_req;
-	struct fc_els_adisc adisc_resp;
+struct zfcp_fc_rspn_req {
+	struct fc_ct_hdr	ct_hdr;
+	struct fc_ns_rspn	rspn;
+	char			name[FC_SYMBOLIC_NAME_SIZE];
+} __packed;
+
+/**
+ * struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp
+ * @ct_els: data required for issuing fsf command
+ * @sg_req: scatterlist entry for request data
+ * @sg_rsp: scatterlist entry for response data
+ * @u: request specific data
+ */
+struct zfcp_fc_req {
+	struct zfcp_fsf_ct_els				ct_els;
+	struct scatterlist				sg_req;
+	struct scatterlist				sg_rsp;
+	union {
+		struct {
+			struct fc_els_adisc		req;
+			struct fc_els_adisc		rsp;
+		} adisc;
+		struct {
+			struct zfcp_fc_gid_pn_req	req;
+			struct zfcp_fc_gid_pn_rsp	rsp;
+		} gid_pn;
+		struct {
+			struct scatterlist sg_rsp2[ZFCP_FC_GPN_FT_NUM_BUFS - 1];
+			struct zfcp_fc_gpn_ft_req	req;
+		} gpn_ft;
+		struct {
+			struct zfcp_fc_gspn_req		req;
+			struct zfcp_fc_gspn_rsp		rsp;
+		} gspn;
+		struct {
+			struct zfcp_fc_rspn_req		req;
+			struct fc_ct_hdr		rsp;
+		} rspn;
+	} u;
 };
 
 /**
@@ -192,14 +206,21 @@ struct zfcp_fc_wka_ports {
  * zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd
  * @fcp: fcp_cmnd to setup
  * @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB
+ * @tm_flags: task management flags to setup task management command
  */
 static inline
-void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
+void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
+			 u8 tm_flags)
 {
 	char tag[2];
 
 	int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
 
+	if (unlikely(tm_flags)) {
+		fcp->fc_tm_flags = tm_flags;
+		return;
+	}
+
 	if (scsi_populate_tag_msg(scsi, tag)) {
 		switch (tag[0]) {
 		case MSG_ORDERED_TAG:
@@ -225,19 +246,6 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
 		fcp->fc_dl += fcp->fc_dl / scsi->device->sector_size * 8;
 }
 
-/**
- * zfcp_fc_fcp_tm - setup FCP command as task management command
- * @fcp: fcp_cmnd to setup
- * @dev: scsi_device where to send the task management command
- * @tm: task management flags to setup tm command
- */
-static inline
-void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags)
-{
-	int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun);
-	fcp->fc_tm_flags |= tm_flags;
-}
-
 /**
  * zfcp_fc_evap_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly
  * @fcp_rsp: FCP RSP IU to evaluate

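The consolidated struct zfcp_fc_req means every CT and ELS request now
follows one allocate-fill-map pattern. A sketch, assuming the kernel's
scatterlist helpers and using the ADISC union member as the example (the
helper name is hypothetical):

	static void example_adisc_setup(struct zfcp_fc_req *fc_req)
	{
		struct fc_els_adisc *req = &fc_req->u.adisc.req;
		struct fc_els_adisc *rsp = &fc_req->u.adisc.rsp;

		req->adisc_cmd = ELS_ADISC;
		/* map request and response buffers for the FSF layer */
		sg_init_one(&fc_req->sg_req, req, sizeof(*req));
		sg_init_one(&fc_req->sg_rsp, rsp, sizeof(*rsp));
	}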
+ 15 - 12
drivers/s390/scsi/zfcp_fsf.c

@@ -18,6 +18,8 @@
 #include "zfcp_qdio.h"
 #include "zfcp_qdio.h"
 #include "zfcp_reqlist.h"
 #include "zfcp_reqlist.h"
 
 
+struct kmem_cache *zfcp_fsf_qtcb_cache;
+
 static void zfcp_fsf_request_timeout_handler(unsigned long data)
 {
 	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
@@ -83,7 +85,7 @@ void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
 	}
 
 	if (likely(req->qtcb))
-		kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb);
+		kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
 	kfree(req);
 }
 
@@ -212,7 +214,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 
 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
 		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
-		mempool_free(sr_buf, adapter->pool.status_read_data);
+		mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
 		zfcp_fsf_req_free(req);
 		return;
 	}
@@ -265,7 +267,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 		break;
 	}
 
-	mempool_free(sr_buf, adapter->pool.status_read_data);
+	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
 	zfcp_fsf_req_free(req);
 
 	atomic_inc(&adapter->stat_miss);
@@ -628,7 +630,7 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
 	if (likely(pool))
 		qtcb = mempool_alloc(pool, GFP_ATOMIC);
 	else
-		qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC);
+		qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
 
 	if (unlikely(!qtcb))
 		return NULL;
@@ -723,6 +725,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 	struct zfcp_adapter *adapter = qdio->adapter;
 	struct zfcp_fsf_req *req;
 	struct fsf_status_read_buffer *sr_buf;
+	struct page *page;
 	int retval = -EIO;
 
 	spin_lock_irq(&qdio->req_q_lock);
@@ -736,11 +739,12 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 		goto out;
 	}
 
-	sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
-	if (!sr_buf) {
+	page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
+	if (!page) {
 		retval = -ENOMEM;
 		goto failed_buf;
 	}
+	sr_buf = page_address(page);
 	memset(sr_buf, 0, sizeof(*sr_buf));
 	req->data = sr_buf;
 
@@ -755,7 +759,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 
 failed_req_send:
 	req->data = NULL;
-	mempool_free(sr_buf, adapter->pool.status_read_data);
+	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
 failed_buf:
 	zfcp_dbf_hba_fsf_uss("fssr__1", req);
 	zfcp_fsf_req_free(req);
@@ -1552,7 +1556,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
 				  SBAL_FLAGS0_TYPE_READ,
 				  qdio->adapter->pool.erp_req);
 
-	if (unlikely(IS_ERR(req))) {
+	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
@@ -1605,7 +1609,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
 				  SBAL_FLAGS0_TYPE_READ,
 				  qdio->adapter->pool.erp_req);
 
-	if (unlikely(IS_ERR(req))) {
+	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
@@ -2206,7 +2210,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
 	zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction);
 
 	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
-	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
+	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
 
 	if (scsi_prot_sg_count(scsi_cmnd)) {
 		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
@@ -2284,7 +2288,6 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
 		goto out;
 	}
 
-	req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
 	req->data = scmnd;
 	req->handler = zfcp_fsf_fcp_task_mgmt_handler;
 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
@@ -2296,7 +2299,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 
 	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
-	zfcp_fc_fcp_tm(fcp_cmnd, scmnd->device, tm_flags);
+	zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);
 
 	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
 	if (!zfcp_fsf_req_send(req))

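The sr_data pool above now stores whole pages rather than buffers, so every
user converts at the boundary: page_address() after allocation, virt_to_page()
before release. A condensed sketch of that round trip (the example_* function
names are hypothetical):

	static struct fsf_status_read_buffer *example_get(mempool_t *pool)
	{
		struct page *page = mempool_alloc(pool, GFP_ATOMIC);

		return page ? page_address(page) : NULL;
	}

	static void example_put(mempool_t *pool,
				struct fsf_status_read_buffer *sr_buf)
	{
		mempool_free(virt_to_page(sr_buf), pool);
	}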
+ 39 - 32
drivers/s390/scsi/zfcp_scsi.c

@@ -292,7 +292,37 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
 	return SUCCESS;
 }
 
-int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
+struct scsi_transport_template *zfcp_scsi_transport_template;
+
+static struct scsi_host_template zfcp_scsi_host_template = {
+	.module			 = THIS_MODULE,
+	.name			 = "zfcp",
+	.queuecommand		 = zfcp_scsi_queuecommand,
+	.eh_abort_handler	 = zfcp_scsi_eh_abort_handler,
+	.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
+	.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
+	.eh_host_reset_handler	 = zfcp_scsi_eh_host_reset_handler,
+	.slave_alloc		 = zfcp_scsi_slave_alloc,
+	.slave_configure	 = zfcp_scsi_slave_configure,
+	.slave_destroy		 = zfcp_scsi_slave_destroy,
+	.change_queue_depth	 = zfcp_scsi_change_queue_depth,
+	.proc_name		 = "zfcp",
+	.can_queue		 = 4096,
+	.this_id		 = -1,
+	.sg_tablesize		 = ZFCP_QDIO_MAX_SBALES_PER_REQ,
+	.max_sectors		 = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
+	.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
+	.cmd_per_lun		 = 1,
+	.use_clustering		 = 1,
+	.shost_attrs		 = zfcp_sysfs_shost_attrs,
+	.sdev_attrs		 = zfcp_sysfs_sdev_attrs,
+};
+
+/**
+ * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
+ * @adapter: The zfcp adapter to register with the SCSI midlayer
+ */
+int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
 {
 	struct ccw_dev_id dev_id;
 
@@ -301,7 +331,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
 
 	ccw_device_get_id(adapter->ccw_device, &dev_id);
 	/* register adapter as SCSI host with mid layer of SCSI stack */
-	adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template,
+	adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
 					     sizeof (struct zfcp_adapter *));
 	if (!adapter->scsi_host) {
 		dev_err(&adapter->ccw_device->dev,
@@ -316,7 +346,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
 	adapter->scsi_host->max_channel = 0;
 	adapter->scsi_host->unique_id = dev_id.devno;
 	adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
-	adapter->scsi_host->transportt = zfcp_data.scsi_transport_template;
+	adapter->scsi_host->transportt = zfcp_scsi_transport_template;
 
 	adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
 
@@ -328,7 +358,11 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
 	return 0;
 }
 
-void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
+/**
+ * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
+ * @adapter: The zfcp adapter to unregister.
+ */
+void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
 {
 	struct Scsi_Host *shost;
 	struct zfcp_port *port;
@@ -346,8 +380,6 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
 	scsi_remove_host(shost);
 	scsi_host_put(shost);
 	adapter->scsi_host = NULL;
-
-	return;
 }
 
 static struct fc_host_statistics*
@@ -688,33 +720,8 @@ struct fc_function_template zfcp_transport_functions = {
 	/* no functions registered for following dynamic attributes but
 	   directly set by LLDD */
 	.show_host_port_type = 1,
+	.show_host_symbolic_name = 1,
 	.show_host_speed = 1,
 	.show_host_port_id = 1,
 	.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
 };
-
-struct zfcp_data zfcp_data = {
-	.scsi_host_template = {
-		.name			 = "zfcp",
-		.module			 = THIS_MODULE,
-		.proc_name		 = "zfcp",
-		.change_queue_depth	 = zfcp_scsi_change_queue_depth,
-		.slave_alloc		 = zfcp_scsi_slave_alloc,
-		.slave_configure	 = zfcp_scsi_slave_configure,
-		.slave_destroy		 = zfcp_scsi_slave_destroy,
-		.queuecommand		 = zfcp_scsi_queuecommand,
-		.eh_abort_handler	 = zfcp_scsi_eh_abort_handler,
-		.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
-		.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
-		.eh_host_reset_handler	 = zfcp_scsi_eh_host_reset_handler,
-		.can_queue		 = 4096,
-		.this_id		 = -1,
-		.sg_tablesize		 = ZFCP_QDIO_MAX_SBALES_PER_REQ,
-		.cmd_per_lun		 = 1,
-		.use_clustering		 = 1,
-		.sdev_attrs		 = zfcp_sysfs_sdev_attrs,
-		.max_sectors		 = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
-		.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
-		.shost_attrs		 = zfcp_sysfs_shost_attrs,
-	},
-};

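With the host template now a file-scope static, the adapter back-pointer
still travels through hostdata[0] exactly as before. A sketch of the lookup
side that handlers rely on (accessor name hypothetical):

	static struct zfcp_adapter *example_adapter(struct Scsi_Host *shost)
	{
		/* set in zfcp_scsi_adapter_register() above */
		return (struct zfcp_adapter *) shost->hostdata[0];
	}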
+ 1 - 0
drivers/scsi/Kconfig

@@ -381,6 +381,7 @@ config ISCSI_BOOT_SYSFS
 
 source "drivers/scsi/cxgbi/Kconfig"
 source "drivers/scsi/bnx2i/Kconfig"
+source "drivers/scsi/bnx2fc/Kconfig"
 source "drivers/scsi/be2iscsi/Kconfig"
 source "drivers/scsi/be2iscsi/Kconfig"
 
 
 config SGIWD93_SCSI
 config SGIWD93_SCSI

+ 1 - 0
drivers/scsi/Makefile

@@ -40,6 +40,7 @@ obj-$(CONFIG_LIBFC)		+= libfc/
 obj-$(CONFIG_LIBFCOE)		+= fcoe/
 obj-$(CONFIG_FCOE)		+= fcoe/
 obj-$(CONFIG_FCOE_FNIC)		+= fnic/
+obj-$(CONFIG_SCSI_BNX2X_FCOE)	+= libfc/ fcoe/ bnx2fc/
 obj-$(CONFIG_ISCSI_TCP) 	+= libiscsi.o	libiscsi_tcp.o iscsi_tcp.o
 obj-$(CONFIG_INFINIBAND_ISER) 	+= libiscsi.o
 obj-$(CONFIG_ISCSI_BOOT_SYSFS)	+= iscsi_boot_sysfs.o

+ 1 - 2
drivers/scsi/NCR5380.c

@@ -936,8 +936,7 @@ static void NCR5380_exit(struct Scsi_Host *instance)
 {
 	struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
 
-	cancel_delayed_work(&hostdata->coroutine);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&hostdata->coroutine);
 }
 
 /**

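The NCR5380 change collapses a cancel-plus-global-flush pair into one call:
cancel_delayed_work_sync() both cancels a pending timer and waits for a
running instance, so flushing the shared workqueue is no longer needed.
Sketch:

	/* before: cancel_delayed_work(&w); flush_scheduled_work(); */
	cancel_delayed_work_sync(&hostdata->coroutine);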
+ 2 - 2
drivers/scsi/arcmsr/arcmsr_hba.c

@@ -1020,7 +1020,7 @@ static void arcmsr_remove(struct pci_dev *pdev)
 	int poll_count = 0;
 	arcmsr_free_sysfs_attr(acb);
 	scsi_remove_host(host);
-	flush_scheduled_work();
+	flush_work_sync(&acb->arcmsr_do_message_isr_bh);
 	del_timer_sync(&acb->eternal_timer);
 	arcmsr_disable_outbound_ints(acb);
 	arcmsr_stop_adapter_bgrb(acb);
@@ -1066,7 +1066,7 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
 		(struct AdapterControlBlock *)host->hostdata;
 	del_timer_sync(&acb->eternal_timer);
 	arcmsr_disable_outbound_ints(acb);
-	flush_scheduled_work();
+	flush_work_sync(&acb->arcmsr_do_message_isr_bh);
 	arcmsr_stop_adapter_bgrb(acb);
 	arcmsr_flush_adapter_cache(acb);
 }

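The arcmsr conversion is the same idea for a plain work item: waiting only
on the driver's own work via flush_work_sync() avoids blocking on unrelated
users of the global workqueue during remove and shutdown. Sketch:

	/* before: flush_scheduled_work(); */
	flush_work_sync(&acb->arcmsr_do_message_isr_bh);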
+ 5 - 13
drivers/scsi/be2iscsi/be_iscsi.c

@@ -210,28 +210,20 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
 }
 
 /**
- * beiscsi_conn_get_param - get the iscsi parameter
- * @cls_conn: pointer to iscsi cls conn
+ * beiscsi_ep_get_param - get the iscsi parameter
+ * @ep: pointer to iscsi ep
  * @param: parameter type identifier
  * @buf: buffer pointer
  *
  * returns iscsi parameter
  */
-int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
 			   enum iscsi_param param, char *buf)
 {
-	struct beiscsi_endpoint *beiscsi_ep;
-	struct iscsi_conn *conn = cls_conn->dd_data;
-	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+	struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
 	int len = 0;
 
 	SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_param, param= %d\n", param);
-	beiscsi_ep = beiscsi_conn->ep;
-	if (!beiscsi_ep) {
-		SE_DEBUG(DBG_LVL_1,
-			 "In beiscsi_conn_get_param , no beiscsi_ep\n");
-		return -ENODEV;
-	}
 
 	switch (param) {
 	case ISCSI_PARAM_CONN_PORT:
@@ -244,7 +236,7 @@ int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
 			len = sprintf(buf, "%pI6\n", &beiscsi_ep->dst6_addr);
 		break;
 	default:
-		return iscsi_conn_get_param(cls_conn, param, buf);
+		return -ENOSYS;
 	}
 	return len;
 }

+ 2 - 2
drivers/scsi/be2iscsi/be_iscsi.h

@@ -48,8 +48,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
 		      struct iscsi_cls_conn *cls_conn,
 		      uint64_t transport_fd, int is_leading);
 
-int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
-			   enum iscsi_param param, char *buf);
+int beiscsi_ep_get_param(struct iscsi_endpoint *ep, enum iscsi_param param,
+			 char *buf);
 
 int beiscsi_get_host_param(struct Scsi_Host *shost,
 			   enum iscsi_host_param param, char *buf);

+ 2 - 1
drivers/scsi/be2iscsi/be_main.c

@@ -4384,7 +4384,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
 	.bind_conn = beiscsi_conn_bind,
 	.destroy_conn = iscsi_conn_teardown,
 	.set_param = beiscsi_set_param,
-	.get_conn_param = beiscsi_conn_get_param,
+	.get_conn_param = iscsi_conn_get_param,
 	.get_session_param = iscsi_session_get_param,
 	.get_host_param = beiscsi_get_host_param,
 	.start_conn = beiscsi_conn_start,
@@ -4395,6 +4395,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
 	.alloc_pdu = beiscsi_alloc_pdu,
 	.parse_pdu_itt = beiscsi_parse_pdu,
 	.get_stats = beiscsi_conn_get_stats,
+	.get_ep_param = beiscsi_ep_get_param,
 	.ep_connect = beiscsi_ep_connect,
 	.ep_poll = beiscsi_ep_poll,
 	.ep_disconnect = beiscsi_ep_disconnect,

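After the be2iscsi split, connection-scoped parameters go through the generic
iscsi_conn_get_param() while endpoint-scoped ones read dd_data straight off
the iscsi_endpoint. A trimmed sketch of the endpoint path, mirroring
beiscsi_ep_get_param() above (the dst_tcp_port field is not visible in this
hunk, so treat it as an assumption; IPv4 case only):

	static int example_ep_param(struct iscsi_endpoint *ep,
				    enum iscsi_param param, char *buf)
	{
		struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;

		if (param == ISCSI_PARAM_CONN_PORT)
			return sprintf(buf, "%d\n", beiscsi_ep->dst_tcp_port);
		return -ENOSYS;	/* everything else is not endpoint-scoped */
	}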
+ 1080 - 0
drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h

@@ -0,0 +1,1080 @@
+#ifndef __57XX_FCOE_HSI_LINUX_LE__
+#define __57XX_FCOE_HSI_LINUX_LE__
+
+/*
+ * common data for all protocols
+ */
+struct b577xx_doorbell_hdr {
+	u8 header;
+#define B577XX_DOORBELL_HDR_RX (0x1<<0)
+#define B577XX_DOORBELL_HDR_RX_SHIFT 0
+#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1)
+#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1
+#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2)
+#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2
+#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4)
+#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4
+};
+
+/*
+ * doorbell message sent to the chip
+ */
+struct b577xx_doorbell_set_prod {
+#if defined(__BIG_ENDIAN)
+	u16 prod;
+	u8 zero_fill1;
+	struct b577xx_doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+	struct b577xx_doorbell_hdr header;
+	u8 zero_fill1;
+	u16 prod;
+#endif
+};
+
+
+struct regpair {
+	__le32 lo;
+	__le32 hi;
+};
+
+
+/*
+ * Fixed size structure in order to plant it in Union structure
+ */
+struct fcoe_abts_rsp_union {
+	u32 r_ctl;
+	u32 abts_rsp_payload[7];
+};
+
+
+/*
+ * 4 regs size
+ */
+struct fcoe_bd_ctx {
+	u32 buf_addr_hi;
+	u32 buf_addr_lo;
+#if defined(__BIG_ENDIAN)
+	u16 rsrv0;
+	u16 buf_len;
+#elif defined(__LITTLE_ENDIAN)
+	u16 buf_len;
+	u16 rsrv0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 rsrv1;
+	u16 flags;
+#elif defined(__LITTLE_ENDIAN)
+	u16 flags;
+	u16 rsrv1;
+#endif
+};
+
+
+struct fcoe_cleanup_flow_info {
+#if defined(__BIG_ENDIAN)
+	u16 reserved1;
+	u16 task_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 task_id;
+	u16 reserved1;
+#endif
+	u32 reserved2[7];
+};
+
+
+struct fcoe_fcp_cmd_payload {
+	u32 opaque[8];
+};
+
+struct fcoe_fc_hdr {
+#if defined(__BIG_ENDIAN)
+	u8 cs_ctl;
+	u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+	u8 s_id[3];
+	u8 cs_ctl;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 r_ctl;
+	u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+	u8 d_id[3];
+	u8 r_ctl;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 seq_id;
+	u8 df_ctl;
+	u16 seq_cnt;
+#elif defined(__LITTLE_ENDIAN)
+	u16 seq_cnt;
+	u8 df_ctl;
+	u8 seq_id;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 type;
+	u8 f_ctl[3];
+#elif defined(__LITTLE_ENDIAN)
+	u8 f_ctl[3];
+	u8 type;
+#endif
+	u32 parameters;
+#if defined(__BIG_ENDIAN)
+	u16 ox_id;
+	u16 rx_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rx_id;
+	u16 ox_id;
+#endif
+};
+
+struct fcoe_fc_frame {
+	struct fcoe_fc_hdr fc_hdr;
+	u32 reserved0[2];
+};
+
+union fcoe_cmd_flow_info {
+	struct fcoe_fcp_cmd_payload fcp_cmd_payload;
+	struct fcoe_fc_frame mp_fc_frame;
+};
+
+
+
+struct fcoe_fcp_rsp_flags {
+	u8 flags;
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4)
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5)
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
+};
+
+
+struct fcoe_fcp_rsp_payload {
+	struct regpair reserved0;
+	u32 fcp_resid;
+#if defined(__BIG_ENDIAN)
+	u16 retry_delay_timer;
+	struct fcoe_fcp_rsp_flags fcp_flags;
+	u8 scsi_status_code;
+#elif defined(__LITTLE_ENDIAN)
+	u8 scsi_status_code;
+	struct fcoe_fcp_rsp_flags fcp_flags;
+	u16 retry_delay_timer;
+#endif
+	u32 fcp_rsp_len;
+	u32 fcp_sns_len;
+};
+
+
+/*
+ * Fixed size structure in order to plant it in Union structure
+ */
+struct fcoe_fcp_rsp_union {
+	struct fcoe_fcp_rsp_payload payload;
+	struct regpair reserved0;
+};
+
+
+struct fcoe_fcp_xfr_rdy_payload {
+	u32 burst_len;
+	u32 data_ro;
+};
+
+struct fcoe_read_flow_info {
+	struct fcoe_fc_hdr fc_data_in_hdr;
+	u32 reserved[2];
+};
+
+struct fcoe_write_flow_info {
+	struct fcoe_fc_hdr fc_data_out_hdr;
+	struct fcoe_fcp_xfr_rdy_payload fcp_xfr_payload;
+};
+
+union fcoe_rsp_flow_info {
+	struct fcoe_fcp_rsp_union fcp_rsp;
+	struct fcoe_abts_rsp_union abts_rsp;
+};
+
+/*
+ * 32 bytes used for general purposes
+ */
+union fcoe_general_task_ctx {
+	union fcoe_cmd_flow_info cmd_info;
+	struct fcoe_read_flow_info read_info;
+	struct fcoe_write_flow_info write_info;
+	union fcoe_rsp_flow_info rsp_info;
+	struct fcoe_cleanup_flow_info cleanup_info;
+	u32 comp_info[8];
+};
+
+
+/*
+ * FCoE KCQ CQE parameters
+ */
+union fcoe_kcqe_params {
+	u32 reserved0[4];
+};
+
+/*
+ * FCoE KCQ CQE
+ */
+struct fcoe_kcqe {
+	u32 fcoe_conn_id;
+	u32 completion_status;
+	u32 fcoe_conn_context_id;
+	union fcoe_kcqe_params params;
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define FCOE_KCQE_RESERVED0 (0x7<<0)
+#define FCOE_KCQE_RESERVED0_SHIFT 0
+#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
+#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
+#define FCOE_KCQE_LAYER_CODE (0x7<<4)
+#define FCOE_KCQE_LAYER_CODE_SHIFT 4
+#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
+#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+	u16 qe_self_seq;
+	u8 op_code;
+	u8 flags;
+#define FCOE_KCQE_RESERVED0 (0x7<<0)
+#define FCOE_KCQE_RESERVED0_SHIFT 0
+#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
+#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
+#define FCOE_KCQE_LAYER_CODE (0x7<<4)
+#define FCOE_KCQE_LAYER_CODE_SHIFT 4
+#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
+#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
+#endif
+};
+
+/*
+ * FCoE KWQE header
+ */
+struct fcoe_kwqe_header {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
+#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
+	u8 op_code;
+#elif defined(__LITTLE_ENDIAN)
+	u8 op_code;
+	u8 flags;
+#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
+#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
+#endif
+};
+
+/*
+ * FCoE firmware init request 1
+ */
+struct fcoe_kwqe_init1 {
+#if defined(__BIG_ENDIAN)
+	struct fcoe_kwqe_header hdr;
+	u16 num_tasks;
+#elif defined(__LITTLE_ENDIAN)
+	u16 num_tasks;
+	struct fcoe_kwqe_header hdr;
+#endif
+	u32 task_list_pbl_addr_lo;
+	u32 task_list_pbl_addr_hi;
+	u32 dummy_buffer_addr_lo;
+	u32 dummy_buffer_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u16 rq_num_wqes;
+	u16 sq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sq_num_wqes;
+	u16 rq_num_wqes;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 cq_num_wqes;
+	u16 rq_buffer_log_size;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rq_buffer_log_size;
+	u16 cq_num_wqes;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
+#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
+	u8 num_sessions_log;
+	u16 mtu;
+#elif defined(__LITTLE_ENDIAN)
+	u16 mtu;
+	u8 num_sessions_log;
+	u8 flags;
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
+#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
+#endif
+};
+
+/*
+ * FCoE firmware init request 2
+ */
+struct fcoe_kwqe_init2 {
+#if defined(__BIG_ENDIAN)
+	struct fcoe_kwqe_header hdr;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	struct fcoe_kwqe_header hdr;
+#endif
+	u32 hash_tbl_pbl_addr_lo;
+	u32 hash_tbl_pbl_addr_hi;
+	u32 t2_hash_tbl_addr_lo;
+	u32 t2_hash_tbl_addr_hi;
+	u32 t2_ptr_hash_tbl_addr_lo;
+	u32 t2_ptr_hash_tbl_addr_hi;
+	u32 free_list_count;
+};
+
+/*
+ * FCoE firmware init request 3
+ */
+struct fcoe_kwqe_init3 {
+#if defined(__BIG_ENDIAN)
+	struct fcoe_kwqe_header hdr;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	struct fcoe_kwqe_header hdr;
+#endif
+	u32 error_bit_map_lo;
+	u32 error_bit_map_hi;
+#if defined(__BIG_ENDIAN)
+	u8 reserved21[3];
+	u8 cached_session_enable;
+#elif defined(__LITTLE_ENDIAN)
+	u8 cached_session_enable;
+	u8 reserved21[3];
+#endif
+	u32 reserved2[4];
+};
+
+/*
+ * FCoE connection offload request 1
+ */
+struct fcoe_kwqe_conn_offload1 {
+#if defined(__BIG_ENDIAN)
+	struct fcoe_kwqe_header hdr;
+	u16 fcoe_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 fcoe_conn_id;
+	struct fcoe_kwqe_header hdr;
+#endif
+	u32 sq_addr_lo;
+	u32 sq_addr_hi;
+	u32 rq_pbl_addr_lo;
+	u32 rq_pbl_addr_hi;
+	u32 rq_first_pbe_addr_lo;
+	u32 rq_first_pbe_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u16 reserved0;
+	u16 rq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rq_prod;
+	u16 reserved0;
+#endif
+};
+
+/*
+ * FCoE connection offload request 2
+ */
+struct fcoe_kwqe_conn_offload2 {
+#if defined(__BIG_ENDIAN)
+	struct fcoe_kwqe_header hdr;
+	u16 tx_max_fc_pay_len;
+#elif defined(__LITTLE_ENDIAN)
+	u16 tx_max_fc_pay_len;
+	struct fcoe_kwqe_header hdr;
+#endif
+	u32 cq_addr_lo;
+	u32 cq_addr_hi;
+	u32 xferq_addr_lo;
+	u32 xferq_addr_hi;
+	u32 conn_db_addr_lo;
+	u32 conn_db_addr_hi;
+	u32 reserved1;
+};
+
+/*
+ * FCoE connection offload request 3
+ */
+struct fcoe_kwqe_conn_offload3 {
+#if defined(__BIG_ENDIAN)
+	struct fcoe_kwqe_header hdr;
+	u16 vlan_tag;
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
+#elif defined(__LITTLE_ENDIAN)
+	u16 vlan_tag;
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
+	struct fcoe_kwqe_header hdr;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 tx_max_conc_seqs_c3;
+	u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+	u8 s_id[3];
+	u8 tx_max_conc_seqs_c3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
+	u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+	u8 d_id[3];
+	u8 flags;
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
+#endif
+	u32 reserved;
+	u32 confq_first_pbe_addr_lo;
+	u32 confq_first_pbe_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u16 rx_max_fc_pay_len;
+	u16 tx_total_conc_seqs;
+#elif defined(__LITTLE_ENDIAN)
+	u16 tx_total_conc_seqs;
+	u16 rx_max_fc_pay_len;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 rx_open_seqs_exch_c3;
+	u8 rx_max_conc_seqs_c3;
+	u16 rx_total_conc_seqs;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rx_total_conc_seqs;
+	u8 rx_max_conc_seqs_c3;
+	u8 rx_open_seqs_exch_c3;
+#endif
+};
+
+/*
+ * FCoE connection offload request 4
+ */
+struct fcoe_kwqe_conn_offload4 {
+#if defined(__BIG_ENDIAN)
+	struct fcoe_kwqe_header hdr;
+	u8 reserved2;
+	u8 e_d_tov_timer_val;
+#elif defined(__LITTLE_ENDIAN)
+	u8 e_d_tov_timer_val;
+	u8 reserved2;
+	struct fcoe_kwqe_header hdr;
+#endif
+	u8 src_mac_addr_lo32[4];
+#if defined(__BIG_ENDIAN)
+	u8 dst_mac_addr_hi16[2];
+	u8 src_mac_addr_hi16[2];
+#elif defined(__LITTLE_ENDIAN)
+	u8 src_mac_addr_hi16[2];
+	u8 dst_mac_addr_hi16[2];
+#endif
+	u8 dst_mac_addr_lo32[4];
+	u32 lcq_addr_lo;
+	u32 lcq_addr_hi;
+	u32 confq_pbl_base_addr_lo;
+	u32 confq_pbl_base_addr_hi;
+};
+
+/*
+ * FCoE connection enable request
+ */
+struct fcoe_kwqe_conn_enable_disable {
+#if defined(__BIG_ENDIAN)
+	struct fcoe_kwqe_header hdr;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	struct fcoe_kwqe_header hdr;
+#endif
+	u8 src_mac_addr_lo32[4];
+#if defined(__BIG_ENDIAN)
+	u16 vlan_tag;
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
+	u8 src_mac_addr_hi16[2];
+#elif defined(__LITTLE_ENDIAN)
+	u8 src_mac_addr_hi16[2];
+	u16 vlan_tag;
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
+#endif
+	u8 dst_mac_addr_lo32[4];
+#if defined(__BIG_ENDIAN)
+	u16 reserved1;
+	u8 dst_mac_addr_hi16[2];
+#elif defined(__LITTLE_ENDIAN)
+	u8 dst_mac_addr_hi16[2];
+	u16 reserved1;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 vlan_flag;
+	u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+	u8 s_id[3];
+	u8 vlan_flag;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 reserved3;
+	u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+	u8 d_id[3];
+	u8 reserved3;
+#endif
+	u32 context_id;
+	u32 conn_id;
+	u32 reserved4;
+};
+
+/*
+ * FCoE connection destroy request
+ */
+struct fcoe_kwqe_conn_destroy {
+#if defined(__BIG_ENDIAN)
+	struct fcoe_kwqe_header hdr;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	struct fcoe_kwqe_header hdr;
+#endif
+	u32 context_id;
+	u32 conn_id;
+	u32 reserved1[5];
+};
+
+/*
+ * FCoE destroy request
+ */
+struct fcoe_kwqe_destroy {
+#if defined(__BIG_ENDIAN)
+	struct fcoe_kwqe_header hdr;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	struct fcoe_kwqe_header hdr;
+#endif
+	u32 reserved1[7];
+};
+
+/*
+ * FCoE statistics request
+ */
+struct fcoe_kwqe_stat {
+#if defined(__BIG_ENDIAN)
+	struct fcoe_kwqe_header hdr;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	struct fcoe_kwqe_header hdr;
+#endif
+	u32 stat_params_addr_lo;
+	u32 stat_params_addr_hi;
+	u32 reserved1[5];
+};
+
+/*
+ * FCoE KWQ WQE
+ */
+union fcoe_kwqe {
+	struct fcoe_kwqe_init1 init1;
+	struct fcoe_kwqe_init2 init2;
+	struct fcoe_kwqe_init3 init3;
+	struct fcoe_kwqe_conn_offload1 conn_offload1;
+	struct fcoe_kwqe_conn_offload2 conn_offload2;
+	struct fcoe_kwqe_conn_offload3 conn_offload3;
+	struct fcoe_kwqe_conn_offload4 conn_offload4;
+	struct fcoe_kwqe_conn_enable_disable conn_enable_disable;
+	struct fcoe_kwqe_conn_destroy conn_destroy;
+	struct fcoe_kwqe_destroy destroy;
+	struct fcoe_kwqe_stat statistics;
+};
+
+struct fcoe_mul_sges_ctx {
+	struct regpair cur_sge_addr;
+#if defined(__BIG_ENDIAN)
+	u8 sgl_size;
+	u8 cur_sge_idx;
+	u16 cur_sge_off;
+#elif defined(__LITTLE_ENDIAN)
+	u16 cur_sge_off;
+	u8 cur_sge_idx;
+	u8 sgl_size;
+#endif
+};
+
+struct fcoe_s_stat_ctx {
+	u8 flags;
+#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0)
+#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1)
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2)
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3)
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3
+#define FCOE_S_STAT_CTX_P_RJT (0x1<<4)
+#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4
+#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5)
+#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5
+#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6)
+#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6
+};
+
+struct fcoe_seq_ctx {
+#if defined(__BIG_ENDIAN)
+	u16 low_seq_cnt;
+	struct fcoe_s_stat_ctx s_stat;
+	u8 seq_id;
+#elif defined(__LITTLE_ENDIAN)
+	u8 seq_id;
+	struct fcoe_s_stat_ctx s_stat;
+	u16 low_seq_cnt;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 err_seq_cnt;
+	u16 high_seq_cnt;
+#elif defined(__LITTLE_ENDIAN)
+	u16 high_seq_cnt;
+	u16 err_seq_cnt;
+#endif
+	u32 low_exp_ro;
+	u32 high_exp_ro;
+};
+
+
+struct fcoe_single_sge_ctx {
+	struct regpair cur_buf_addr;
+#if defined(__BIG_ENDIAN)
+	u16 reserved0;
+	u16 cur_buf_rem;
+#elif defined(__LITTLE_ENDIAN)
+	u16 cur_buf_rem;
+	u16 reserved0;
+#endif
+};
+
+union fcoe_sgl_ctx {
+	struct fcoe_single_sge_ctx single_sge;
+	struct fcoe_mul_sges_ctx mul_sges;
+};
+
+
+
+/*
+ * FCoE SQ element
+ */
+struct fcoe_sqe {
+	u16 wqe;
+#define FCOE_SQE_TASK_ID (0x7FFF<<0)
+#define FCOE_SQE_TASK_ID_SHIFT 0
+#define FCOE_SQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_SQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+
+struct fcoe_task_ctx_entry_tx_only {
+	union fcoe_sgl_ctx sgl_ctx;
+};
+
+struct fcoe_task_ctx_entry_txwr_rxrd {
+#if defined(__BIG_ENDIAN)
+	u16 verify_tx_seq;
+	u8 init_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
+	u8 tx_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
+#elif defined(__LITTLE_ENDIAN)
+	u8 tx_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
+	u8 init_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
+	u16 verify_tx_seq;
+#endif
+};
+
+/*
+ * Common section. Both TX and RX processing might write and read from it in
+ * different flows
+ */
+struct fcoe_task_ctx_entry_tx_rx_cmn {
+	u32 data_2_trns;
+	union fcoe_general_task_ctx general;
+#if defined(__BIG_ENDIAN)
+	u16 tx_low_seq_cnt;
+	struct fcoe_s_stat_ctx tx_s_stat;
+	u8 tx_seq_id;
+#elif defined(__LITTLE_ENDIAN)
+	u8 tx_seq_id;
+	struct fcoe_s_stat_ctx tx_s_stat;
+	u16 tx_low_seq_cnt;
+#endif
+	u32 common_flags;
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID (0xFFFFFF<<0)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID (0x1<<24)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT 24
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT (0x1<<25)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT_SHIFT 25
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER (0x1<<26)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER_SHIFT 26
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF (0x1<<27)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF_SHIFT 27
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME (0x1<<28)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT 28
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV (0x7<<29)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV_SHIFT 29
+};
+
+struct fcoe_task_ctx_entry_rxwr_txrd {
+#if defined(__BIG_ENDIAN)
+	u16 rx_id;
+	u16 rx_flags;
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
+#elif defined(__LITTLE_ENDIAN)
+	u16 rx_flags;
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
+	u16 rx_id;
+#endif
+};
+
+struct fcoe_task_ctx_entry_rx_only {
+	struct fcoe_seq_ctx seq_ctx;
+	struct fcoe_seq_ctx ooo_seq_ctx;
+	u32 rsrv3;
+	union fcoe_sgl_ctx sgl_ctx;
+};
+
+struct fcoe_task_ctx_entry {
+	struct fcoe_task_ctx_entry_tx_only tx_wr_only;
+	struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd;
+	struct fcoe_task_ctx_entry_tx_rx_cmn cmn;
+	struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd;
+	struct fcoe_task_ctx_entry_rx_only rx_wr_only;
+	u32 reserved[4];
+};
+
+
+/*
+ * FCoE XFRQ element
+ */
+struct fcoe_xfrqe {
+	u16 wqe;
+#define FCOE_XFRQE_TASK_ID (0x7FFF<<0)
+#define FCOE_XFRQE_TASK_ID_SHIFT 0
+#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * FCoE CONFQ element
+ */
+struct fcoe_confqe {
+#if defined(__BIG_ENDIAN)
+	u16 rx_id;
+	u16 ox_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 ox_id;
+	u16 rx_id;
+#endif
+	u32 param;
+};
+
+
+/*
+ * FCoE connection data base
+ */
+struct fcoe_conn_db {
+#if defined(__BIG_ENDIAN)
+	u16 rsrv0;
+	u16 rq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rq_prod;
+	u16 rsrv0;
+#endif
+	u32 rsrv1;
+	struct regpair cq_arm;
+};
+
+
+/*
+ * FCoE CQ element
+ */
+struct fcoe_cqe {
+	u16 wqe;
+#define FCOE_CQE_CQE_INFO (0x3FFF<<0)
+#define FCOE_CQE_CQE_INFO_SHIFT 0
+#define FCOE_CQE_CQE_TYPE (0x1<<14)
+#define FCOE_CQE_CQE_TYPE_SHIFT 14
+#define FCOE_CQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_CQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * FCoE error/warning reporting entry
+ */
+struct fcoe_err_report_entry {
+	u32 err_warn_bitmap_lo;
+	u32 err_warn_bitmap_hi;
+	u32 tx_buf_off;
+	u32 rx_buf_off;
+	struct fcoe_fc_hdr fc_hdr;
+};
+
+
+/*
+ * FCoE hash table entry (32 bytes)
+ */
+struct fcoe_hash_table_entry {
+#if defined(__BIG_ENDIAN)
+	u8 d_id_0;
+	u8 s_id_2;
+	u8 s_id_1;
+	u8 s_id_0;
+#elif defined(__LITTLE_ENDIAN)
+	u8 s_id_0;
+	u8 s_id_1;
+	u8 s_id_2;
+	u8 d_id_0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 dst_mac_addr_hi;
+	u8 d_id_2;
+	u8 d_id_1;
+#elif defined(__LITTLE_ENDIAN)
+	u8 d_id_1;
+	u8 d_id_2;
+	u16 dst_mac_addr_hi;
+#endif
+	u32 dst_mac_addr_lo;
+#if defined(__BIG_ENDIAN)
+	u16 vlan_id;
+	u16 src_mac_addr_hi;
+#elif defined(__LITTLE_ENDIAN)
+	u16 src_mac_addr_hi;
+	u16 vlan_id;
+#endif
+	u32 src_mac_addr_lo;
+#if defined(__BIG_ENDIAN)
+	u16 reserved1;
+	u8 reserved0;
+	u8 vlan_flag;
+#elif defined(__LITTLE_ENDIAN)
+	u8 vlan_flag;
+	u8 reserved0;
+	u16 reserved1;
+#endif
+	u32 reserved2;
+	u32 field_id;
+#define FCOE_HASH_TABLE_ENTRY_CID (0xFFFFFF<<0)
+#define FCOE_HASH_TABLE_ENTRY_CID_SHIFT 0
+#define FCOE_HASH_TABLE_ENTRY_RESERVED3 (0x7F<<24)
+#define FCOE_HASH_TABLE_ENTRY_RESERVED3_SHIFT 24
+#define FCOE_HASH_TABLE_ENTRY_VALID (0x1<<31)
+#define FCOE_HASH_TABLE_ENTRY_VALID_SHIFT 31
+};
+
+/*
+ * FCoE pending work request CQE
+ */
+struct fcoe_pend_wq_cqe {
+	u16 wqe;
+#define FCOE_PEND_WQ_CQE_TASK_ID (0x3FFF<<0)
+#define FCOE_PEND_WQ_CQE_TASK_ID_SHIFT 0
+#define FCOE_PEND_WQ_CQE_CQE_TYPE (0x1<<14)
+#define FCOE_PEND_WQ_CQE_CQE_TYPE_SHIFT 14
+#define FCOE_PEND_WQ_CQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_PEND_WQ_CQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * FCoE RX statistics parameters section#0
+ */
+struct fcoe_rx_stat_params_section0 {
+	u32 fcoe_ver_cnt;
+	u32 fcoe_rx_pkt_cnt;
+	u32 fcoe_rx_byte_cnt;
+	u32 fcoe_rx_drop_pkt_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#1
+ */
+struct fcoe_rx_stat_params_section1 {
+	u32 fc_crc_cnt;
+	u32 eofa_del_cnt;
+	u32 miss_frame_cnt;
+	u32 seq_timeout_cnt;
+	u32 drop_seq_cnt;
+	u32 fcoe_rx_drop_pkt_cnt;
+	u32 fcp_rx_pkt_cnt;
+	u32 reserved0;
+};
+
+
+/*
+ * FCoE TX statistics parameters
+ */
+struct fcoe_tx_stat_params {
+	u32 fcoe_tx_pkt_cnt;
+	u32 fcoe_tx_byte_cnt;
+	u32 fcp_tx_pkt_cnt;
+	u32 reserved0;
+};
+
+/*
+ * FCoE statistics parameters
+ */
+struct fcoe_statistics_params {
+	struct fcoe_tx_stat_params tx_stat;
+	struct fcoe_rx_stat_params_section0 rx_stat0;
+	struct fcoe_rx_stat_params_section1 rx_stat1;
+};
+
+
+/*
+ * FCoE t2 hash table entry (64 bytes)
+ */
+struct fcoe_t2_hash_table_entry {
+	struct fcoe_hash_table_entry data;
+	struct regpair next;
+	struct regpair reserved0[3];
+};
+
+/*
+ * FCoE unsolicited CQE
+ */
+struct fcoe_unsolicited_cqe {
+	u16 wqe;
+#define FCOE_UNSOLICITED_CQE_SUBTYPE (0x3<<0)
+#define FCOE_UNSOLICITED_CQE_SUBTYPE_SHIFT 0
+#define FCOE_UNSOLICITED_CQE_PKT_LEN (0xFFF<<2)
+#define FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT 2
+#define FCOE_UNSOLICITED_CQE_CQE_TYPE (0x1<<14)
+#define FCOE_UNSOLICITED_CQE_CQE_TYPE_SHIFT 14
+#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+
+#endif /* __57XX_FCOE_HSI_LINUX_LE__ */

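All of the paired MASK/SHIFT #defines in this header are meant to be used
the same way: mask first, then shift. For example, to pull the layer code
out of a completion entry's flags byte (illustrative helper, not part of
the header):

	static inline u8 example_kcqe_layer_code(const struct fcoe_kcqe *kcqe)
	{
		return (kcqe->flags & FCOE_KCQE_LAYER_CODE) >>
			FCOE_KCQE_LAYER_CODE_SHIFT;
	}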
+ 11 - 0
drivers/scsi/bnx2fc/Kconfig

@@ -0,0 +1,11 @@
+config SCSI_BNX2X_FCOE
+	tristate "Broadcom NetXtreme II FCoE support"
+	depends on PCI
+	select NETDEVICES
+	select NETDEV_1000
+	select LIBFC
+	select LIBFCOE
+	select CNIC
+	---help---
+	This driver supports FCoE offload for the Broadcom NetXtreme II
+	devices.

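Because the option is tristate and selects its libfc/libfcoe/cnic
dependencies, enabling it as a module needs only the one line in .config
(value illustrative):

	CONFIG_SCSI_BNX2X_FCOE=m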
+ 3 - 0
drivers/scsi/bnx2fc/Makefile

@@ -0,0 +1,3 @@
+obj-$(CONFIG_SCSI_BNX2X_FCOE) += bnx2fc.o
+
+bnx2fc-y := bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o

+ 511 - 0
drivers/scsi/bnx2fc/bnx2fc.h

@@ -0,0 +1,511 @@
+#ifndef _BNX2FC_H_
+#define _BNX2FC_H_
+/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/kthread.h>
+#include <linux/crc32.h>
+#include <linux/cpu.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/libfc.h>
+#include <scsi/libfcoe.h>
+#include <scsi/fc_encode.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fip.h>
+#include <scsi/fc/fc_fc2.h>
+#include <scsi/fc_frame.h>
+#include <scsi/fc/fc_fcoe.h>
+#include <scsi/fc/fc_fcp.h>
+
+#include "57xx_hsi_bnx2fc.h"
+#include "bnx2fc_debug.h"
+#include "../../net/cnic_if.h"
+#include "bnx2fc_constants.h"
+
+#define BNX2FC_NAME		"bnx2fc"
+#define BNX2FC_VERSION		"1.0.0"
+
+#define PFX			"bnx2fc: "
+
+#define BNX2X_DOORBELL_PCI_BAR		2
+
+#define BNX2FC_MAX_BD_LEN		0xffff
+#define BNX2FC_BD_SPLIT_SZ		0x8000
+#define BNX2FC_MAX_BDS_PER_CMD		256
+
+#define BNX2FC_SQ_WQES_MAX	256
+
+#define BNX2FC_SCSI_MAX_SQES	((3 * BNX2FC_SQ_WQES_MAX) / 8)
+#define BNX2FC_TM_MAX_SQES	((BNX2FC_SQ_WQES_MAX) / 2)
+#define BNX2FC_ELS_MAX_SQES	(BNX2FC_TM_MAX_SQES - 1)
+
+#define BNX2FC_RQ_WQES_MAX	16
+#define BNX2FC_CQ_WQES_MAX	(BNX2FC_SQ_WQES_MAX + BNX2FC_RQ_WQES_MAX)
+
+#define BNX2FC_NUM_MAX_SESS	128
+#define BNX2FC_NUM_MAX_SESS_LOG	(ilog2(BNX2FC_NUM_MAX_SESS))
+
+#define BNX2FC_MAX_OUTSTANDING_CMNDS	4096
+#define BNX2FC_MIN_PAYLOAD		256
+#define BNX2FC_MAX_PAYLOAD		2048
+
+#define BNX2FC_RQ_BUF_SZ		256
+#define BNX2FC_RQ_BUF_LOG_SZ		(ilog2(BNX2FC_RQ_BUF_SZ))
+
+#define BNX2FC_SQ_WQE_SIZE		(sizeof(struct fcoe_sqe))
+#define BNX2FC_CQ_WQE_SIZE		(sizeof(struct fcoe_cqe))
+#define BNX2FC_RQ_WQE_SIZE		(BNX2FC_RQ_BUF_SZ)
+#define BNX2FC_XFERQ_WQE_SIZE		(sizeof(struct fcoe_xfrqe))
+#define BNX2FC_CONFQ_WQE_SIZE		(sizeof(struct fcoe_confqe))
+#define BNX2FC_5771X_DB_PAGE_SIZE	128
+
+#define BNX2FC_MAX_TASKS		BNX2FC_MAX_OUTSTANDING_CMNDS
+#define BNX2FC_TASK_SIZE		128
+#define	BNX2FC_TASKS_PER_PAGE		(PAGE_SIZE/BNX2FC_TASK_SIZE)
+#define BNX2FC_TASK_CTX_ARR_SZ		(BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE)
+
+#define BNX2FC_MAX_ROWS_IN_HASH_TBL	8
+#define BNX2FC_HASH_TBL_CHUNK_SIZE	(16 * 1024)
+
+#define BNX2FC_MAX_SEQS			255
+
+#define BNX2FC_READ			(1 << 1)
+#define BNX2FC_WRITE			(1 << 0)
+
+#define BNX2FC_MIN_XID			0
+#define BNX2FC_MAX_XID			(BNX2FC_MAX_OUTSTANDING_CMNDS - 1)
+#define FCOE_MIN_XID			(BNX2FC_MAX_OUTSTANDING_CMNDS)
+#define FCOE_MAX_XID		\
+			(BNX2FC_MAX_OUTSTANDING_CMNDS + (nr_cpu_ids * 256))
+#define BNX2FC_MAX_LUN			0xFFFF
+#define BNX2FC_MAX_FCP_TGT		256
+#define BNX2FC_MAX_CMD_LEN		16
+
+#define BNX2FC_TM_TIMEOUT		60	/* secs */
+#define BNX2FC_IO_TIMEOUT		20000UL	/* msecs */
+
+#define BNX2FC_WAIT_CNT			120
+#define BNX2FC_FW_TIMEOUT		(3 * HZ)
+
+#define PORT_MAX			2
+
+#define CMD_SCSI_STATUS(Cmnd)		((Cmnd)->SCp.Status)
+
+/* FC FCP Status */
+#define	FC_GOOD				0
+
+#define BNX2FC_RNID_HBA			0x7
+
+/* bnx2fc driver uses only one instance of fcoe_percpu_s */
+extern struct fcoe_percpu_s bnx2fc_global;
+
+extern struct workqueue_struct *bnx2fc_wq;
+
+struct bnx2fc_percpu_s {
+	struct task_struct *iothread;
+	struct list_head work_list;
+	spinlock_t fp_work_lock;
+};
+
+
+struct bnx2fc_hba {
+	struct list_head link;
+	struct cnic_dev *cnic;
+	struct pci_dev *pcidev;
+	struct net_device *netdev;
+	struct net_device *phys_dev;
+	unsigned long reg_with_cnic;
+		#define BNX2FC_CNIC_REGISTERED           1
+	struct packet_type fcoe_packet_type;
+	struct packet_type fip_packet_type;
+	struct bnx2fc_cmd_mgr *cmd_mgr;
+	struct workqueue_struct *timer_work_queue;
+	struct kref kref;
+	spinlock_t hba_lock;
+	struct mutex hba_mutex;
+	unsigned long adapter_state;
+		#define ADAPTER_STATE_UP		0
+		#define ADAPTER_STATE_GOING_DOWN	1
+		#define ADAPTER_STATE_LINK_DOWN		2
+		#define ADAPTER_STATE_READY		3
+	u32 flags;
+	unsigned long init_done;
+		#define BNX2FC_FW_INIT_DONE		0
+		#define BNX2FC_CTLR_INIT_DONE		1
+		#define BNX2FC_CREATE_DONE		2
+	struct fcoe_ctlr ctlr;
+	u8 vlan_enabled;
+	int vlan_id;
+	u32 next_conn_id;
+	struct fcoe_task_ctx_entry **task_ctx;
+	dma_addr_t *task_ctx_dma;
+	struct regpair *task_ctx_bd_tbl;
+	dma_addr_t task_ctx_bd_dma;
+
+	int hash_tbl_segment_count;
+	void **hash_tbl_segments;
+	void *hash_tbl_pbl;
+	dma_addr_t hash_tbl_pbl_dma;
+	struct fcoe_t2_hash_table_entry *t2_hash_tbl;
+	dma_addr_t t2_hash_tbl_dma;
+	char *t2_hash_tbl_ptr;
+	dma_addr_t t2_hash_tbl_ptr_dma;
+
+	char *dummy_buffer;
+	dma_addr_t dummy_buf_dma;
+
+	struct fcoe_statistics_params *stats_buffer;
+	dma_addr_t stats_buf_dma;
+
+	/*
+	 * PCI related info.
+	 */
+	u16 pci_did;
+	u16 pci_vid;
+	u16 pci_sdid;
+	u16 pci_svid;
+	u16 pci_func;
+	u16 pci_devno;
+
+	struct task_struct *l2_thread;
+
+	/* linkdown handling */
+	wait_queue_head_t shutdown_wait;
+	int wait_for_link_down;
+
+	/*destroy handling */
+	struct timer_list destroy_timer;
+	wait_queue_head_t destroy_wait;
+
+	/* Active list of offloaded sessions */
+	struct bnx2fc_rport *tgt_ofld_list[BNX2FC_NUM_MAX_SESS];
+	int num_ofld_sess;
+
+	/* statistics */
+	struct completion stat_req_done;
+};
+
+#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_hba, ctlr)
+
+struct bnx2fc_cmd_mgr {
+	struct bnx2fc_hba *hba;
+	u16 next_idx;
+	struct list_head *free_list;
+	spinlock_t *free_list_lock;
+	struct io_bdt **io_bdt_pool;
+	struct bnx2fc_cmd **cmds;
+};
+
+struct bnx2fc_rport {
+	struct fcoe_port *port;
+	struct fc_rport *rport;
+	struct fc_rport_priv *rdata;
+	void __iomem *ctx_base;
+#define DPM_TRIGER_TYPE		0x40
+	u32 fcoe_conn_id;
+	u32 context_id;
+	u32 sid;
+
+	unsigned long flags;
+#define BNX2FC_FLAG_SESSION_READY	0x1
+#define BNX2FC_FLAG_OFFLOADED		0x2
+#define BNX2FC_FLAG_DISABLED		0x3
+#define BNX2FC_FLAG_DESTROYED		0x4
+#define BNX2FC_FLAG_OFLD_REQ_CMPL	0x5
+#define BNX2FC_FLAG_DESTROY_CMPL	0x6
+#define BNX2FC_FLAG_CTX_ALLOC_FAILURE	0x7
+#define BNX2FC_FLAG_UPLD_REQ_COMPL	0x8
+#define BNX2FC_FLAG_EXPL_LOGO		0x9
+
+	u32 max_sqes;
+	u32 max_rqes;
+	u32 max_cqes;
+
+	struct fcoe_sqe *sq;
+	dma_addr_t sq_dma;
+	u16 sq_prod_idx;
+	u8 sq_curr_toggle_bit;
+	u32 sq_mem_size;
+
+	struct fcoe_cqe *cq;
+	dma_addr_t cq_dma;
+	u32 cq_cons_idx;
+	u8 cq_curr_toggle_bit;
+	u32 cq_mem_size;
+
+	void *rq;
+	dma_addr_t rq_dma;
+	u32 rq_prod_idx;
+	u32 rq_cons_idx;
+	u32 rq_mem_size;
+
+	void *rq_pbl;
+	dma_addr_t rq_pbl_dma;
+	u32 rq_pbl_size;
+
+	struct fcoe_xfrqe *xferq;
+	dma_addr_t xferq_dma;
+	u32 xferq_mem_size;
+
+	struct fcoe_confqe *confq;
+	dma_addr_t confq_dma;
+	u32 confq_mem_size;
+
+	void *confq_pbl;
+	dma_addr_t confq_pbl_dma;
+	u32 confq_pbl_size;
+
+	struct fcoe_conn_db *conn_db;
+	dma_addr_t conn_db_dma;
+	u32 conn_db_mem_size;
+
+	struct fcoe_sqe *lcq;
+	dma_addr_t lcq_dma;
+	u32 lcq_mem_size;
+
+	void *ofld_req[4];
+	dma_addr_t ofld_req_dma[4];
+	void *enbl_req;
+	dma_addr_t enbl_req_dma;
+
+	spinlock_t tgt_lock;
+	spinlock_t cq_lock;
+	atomic_t num_active_ios;
+	u32 flush_in_prog;
+	unsigned long work_time_slice;
+	unsigned long timestamp;
+	struct list_head free_task_list;
+	struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
+	atomic_t pi;
+	atomic_t ci;
+	struct list_head active_cmd_queue;
+	struct list_head els_queue;
+	struct list_head io_retire_queue;
+	struct list_head active_tm_queue;
+
+	struct timer_list ofld_timer;
+	wait_queue_head_t ofld_wait;
+
+	struct timer_list upld_timer;
+	wait_queue_head_t upld_wait;
+};
+
+struct bnx2fc_mp_req {
+	u8 tm_flags;
+
+	u32 req_len;
+	void *req_buf;
+	dma_addr_t req_buf_dma;
+	struct fcoe_bd_ctx *mp_req_bd;
+	dma_addr_t mp_req_bd_dma;
+	struct fc_frame_header req_fc_hdr;
+
+	u32 resp_len;
+	void *resp_buf;
+	dma_addr_t resp_buf_dma;
+	struct fcoe_bd_ctx *mp_resp_bd;
+	dma_addr_t mp_resp_bd_dma;
+	struct fc_frame_header resp_fc_hdr;
+};
+
+struct bnx2fc_els_cb_arg {
+	struct bnx2fc_cmd *aborted_io_req;
+	struct bnx2fc_cmd *io_req;
+	u16 l2_oxid;
+};
+
+/* bnx2fc command structure */
+struct bnx2fc_cmd {
+	struct list_head link;
+	u8 on_active_queue;
+	u8 on_tmf_queue;
+	u8 cmd_type;
+#define BNX2FC_SCSI_CMD		1
+#define BNX2FC_TASK_MGMT_CMD		2
+#define BNX2FC_ABTS			3
+#define BNX2FC_ELS			4
+#define BNX2FC_CLEANUP			5
+	u8 io_req_flags;
+	struct kref refcount;
+	struct fcoe_port *port;
+	struct bnx2fc_rport *tgt;
+	struct scsi_cmnd *sc_cmd;
+	struct bnx2fc_cmd_mgr *cmd_mgr;
+	struct bnx2fc_mp_req mp_req;
+	void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg);
+	struct bnx2fc_els_cb_arg *cb_arg;
+	struct delayed_work timeout_work; /* timer for ULP timeouts */
+	struct completion tm_done;
+	int wait_for_comp;
+	u16 xid;
+	struct fcoe_task_ctx_entry *task;
+	struct io_bdt *bd_tbl;
+	struct fcp_rsp *rsp;
+	size_t data_xfer_len;
+	unsigned long req_flags;
+#define BNX2FC_FLAG_ISSUE_RRQ		0x1
+#define BNX2FC_FLAG_ISSUE_ABTS		0x2
+#define BNX2FC_FLAG_ABTS_DONE		0x3
+#define BNX2FC_FLAG_TM_COMPL		0x4
+#define BNX2FC_FLAG_TM_TIMEOUT		0x5
+#define BNX2FC_FLAG_IO_CLEANUP		0x6
+#define BNX2FC_FLAG_RETIRE_OXID		0x7
+#define	BNX2FC_FLAG_EH_ABORT		0x8
+#define BNX2FC_FLAG_IO_COMPL		0x9
+#define BNX2FC_FLAG_ELS_DONE		0xa
+#define BNX2FC_FLAG_ELS_TIMEOUT		0xb
+	u32 fcp_resid;
+	u32 fcp_rsp_len;
+	u32 fcp_sns_len;
+	u8 cdb_status; /* SCSI IO status */
+	u8 fcp_status; /* FCP IO status */
+	u8 fcp_rsp_code;
+	u8 scsi_comp_flags;
+};
+
+struct io_bdt {
+	struct bnx2fc_cmd *io_req;
+	struct fcoe_bd_ctx *bd_tbl;
+	dma_addr_t bd_tbl_dma;
+	u16 bd_valid;
+};
+
+struct bnx2fc_work {
+	struct list_head list;
+	struct bnx2fc_rport *tgt;
+	u16 wqe;
+};
+struct bnx2fc_unsol_els {
+	struct fc_lport *lport;
+	struct fc_frame *fp;
+	struct work_struct unsol_els_work;
+};
+
+
+
+struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
+void bnx2fc_cmd_release(struct kref *ref);
+int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd);
+int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba);
+int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba);
+int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
+					struct bnx2fc_rport *tgt);
+int bnx2fc_send_session_disable_req(struct fcoe_port *port,
+				    struct bnx2fc_rport *tgt);
+int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
+					struct bnx2fc_rport *tgt);
+int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt);
+void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
+					u32 num_cqe);
+int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba);
+void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba);
+int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba);
+void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba);
+struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
+						u16 min_xid, u16 max_xid);
+void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr);
+void bnx2fc_get_link_state(struct bnx2fc_hba *hba);
+char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items);
+void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items);
+int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen);
+int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req);
+int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp);
+int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp);
+int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp);
+int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req);
+int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req);
+void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
+			  unsigned int timer_msec);
+int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req);
+void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
+			      struct fcoe_task_ctx_entry *task,
+			      u16 orig_xid);
+void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
+			 struct fcoe_task_ctx_entry *task);
+void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
+			     struct fcoe_task_ctx_entry *task);
+void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid);
+void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt);
+int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd);
+int bnx2fc_eh_host_reset(struct scsi_cmnd *sc_cmd);
+int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd);
+int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd);
+void bnx2fc_rport_event_handler(struct fc_lport *lport,
+				struct fc_rport_priv *rport,
+				enum fc_rport_event event);
+void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
+				   struct fcoe_task_ctx_entry *task,
+				   u8 num_rq);
+void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
+			       struct fcoe_task_ctx_entry *task,
+			       u8 num_rq);
+void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
+			       struct fcoe_task_ctx_entry *task,
+			       u8 num_rq);
+void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
+			     struct fcoe_task_ctx_entry *task,
+			     u8 num_rq);
+void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
+			      struct fcoe_task_ctx_entry *task,
+			      u8 num_rq);
+void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
+			   struct fcp_cmnd *fcp_cmnd);
+
+
+
+void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt);
+struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
+				      struct fc_frame *fp, unsigned int op,
+				      void (*resp)(struct fc_seq *,
+						   struct fc_frame *,
+						   void *),
+				      void *arg, u32 timeout);
+int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt);
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);
+struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
+					     u32 port_id);
+void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
+				   unsigned char *buf,
+				   u32 frame_len, u16 l2_oxid);
+int bnx2fc_send_stat_req(struct bnx2fc_hba *hba);
+
+#endif
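
A note on the flag definitions above: the BNX2FC_FLAG_* values in struct
bnx2fc_rport and struct bnx2fc_cmd are bit numbers, not bit masks; they
are meant for the set_bit()/test_bit() family, which is how the ELS and
session code later in this series uses them. A minimal sketch of the
intended usage (queue_io() is a hypothetical helper, not a driver
symbol):

	set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
	if (test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags) &&
	    !test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))
		queue_io(tgt);	/* hypothetical helper */
	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);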

+ 206 - 0
drivers/scsi/bnx2fc/bnx2fc_constants.h

@@ -0,0 +1,206 @@
+#ifndef __BNX2FC_CONSTANTS_H_
+#define __BNX2FC_CONSTANTS_H_
+
+/*
+ * This file defines HSI constants for the FCoE flows
+ */
+
+/* KWQ/KCQ FCoE layer code */
+#define FCOE_KWQE_LAYER_CODE   (7)
+
+/* KWQ (kernel work queue) request op codes */
+#define FCOE_KWQE_OPCODE_INIT1			(0)
+#define FCOE_KWQE_OPCODE_INIT2			(1)
+#define FCOE_KWQE_OPCODE_INIT3			(2)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN1	(3)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN2	(4)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN3	(5)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN4	(6)
+#define FCOE_KWQE_OPCODE_ENABLE_CONN	(7)
+#define FCOE_KWQE_OPCODE_DISABLE_CONN	(8)
+#define FCOE_KWQE_OPCODE_DESTROY_CONN	(9)
+#define FCOE_KWQE_OPCODE_DESTROY		(10)
+#define FCOE_KWQE_OPCODE_STAT			(11)
+
+/* KCQ (kernel completion queue) response op codes */
+#define FCOE_KCQE_OPCODE_INIT_FUNC				(0x10)
+#define FCOE_KCQE_OPCODE_DESTROY_FUNC			(0x11)
+#define FCOE_KCQE_OPCODE_STAT_FUNC				(0x12)
+#define FCOE_KCQE_OPCODE_OFFLOAD_CONN			(0x15)
+#define FCOE_KCQE_OPCODE_ENABLE_CONN			(0x16)
+#define FCOE_KCQE_OPCODE_DISABLE_CONN			(0x17)
+#define FCOE_KCQE_OPCODE_DESTROY_CONN			(0x18)
+#define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION  (0x20)
+#define FCOE_KCQE_OPCODE_FCOE_ERROR				(0x21)
+
+/* KCQ (kernel completion queue) completion status */
+#define FCOE_KCQE_COMPLETION_STATUS_SUCCESS				(0x0)
+#define FCOE_KCQE_COMPLETION_STATUS_ERROR				(0x1)
+#define FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE		(0x2)
+#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE	(0x3)
+#define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE	(0x4)
+#define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR			(0x5)
+
+/* Unsolicited CQE type */
+#define FCOE_UNSOLICITED_FRAME_CQE_TYPE			0
+#define FCOE_ERROR_DETECTION_CQE_TYPE			1
+#define FCOE_WARNING_DETECTION_CQE_TYPE			2
+
+/* Task context constants */
+/* Set after the driver has initialized the task, in case timer services
+ * are required */
+#define	FCOE_TASK_TX_STATE_INIT					0
+/* Updated by Xstorm after it starts processing the task, when timer services
+ * are required. If no timer facilities are required, the driver initializes
+ * the state to this value directly. */
+#define	FCOE_TASK_TX_STATE_NORMAL				1
+/* The task is under an abort procedure. Updated in order to stop processing
+ * of pending WQEs on this task */
+#define	FCOE_TASK_TX_STATE_ABORT				2
+/* For E_D_T_TOV timer expiration in Xstorm (Class 2 only) */
+#define	FCOE_TASK_TX_STATE_ERROR				3
+/* For REC_TOV timer expiration indication received from Xstorm */
+#define	FCOE_TASK_TX_STATE_WARNING				4
+/* For completed unsolicited task */
+#define	FCOE_TASK_TX_STATE_UNSOLICITED_COMPLETED		5
+/* For exchange cleanup request task */
+#define	FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP			6
+/* For sequence cleanup request task */
+#define	FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP			7
+/* Mark task as aborted and indicate that ABTS was not transmitted */
+#define	FCOE_TASK_TX_STATE_BEFORE_ABTS_TX			8
+/* Mark task as aborted and indicate that ABTS was transmitted */
+#define	FCOE_TASK_TX_STATE_AFTER_ABTS_TX			9
+/* For completion of the ABTS task. */
+#define	FCOE_TASK_TX_STATE_ABTS_TX_COMPLETED			10
+/* Mark task as aborted and indicate that Exchange cleanup was not transmitted
+ */
+#define	FCOE_TASK_TX_STATE_BEFORE_EXCHANGE_CLEANUP_TX		11
+/* Mark task as aborted and indicate that Exchange cleanup was transmitted */
+#define	FCOE_TASK_TX_STATE_AFTER_EXCHANGE_CLEANUP_TX		12
+
+#define	FCOE_TASK_RX_STATE_NORMAL				0
+#define	FCOE_TASK_RX_STATE_COMPLETED				1
+/* Obsolete: Intermediate completion (middle path with local completion) */
+#define	FCOE_TASK_RX_STATE_INTER_COMP				2
+/* For REC_TOV timer expiration indication received from Xstorm */
+#define	FCOE_TASK_RX_STATE_WARNING				3
+/* For E_D_T_TOV timer expiration in Ustorm */
+#define	FCOE_TASK_RX_STATE_ERROR				4
+/* ABTS ACC arrived; wait for local completion to finally complete the task. */
+#define	FCOE_TASK_RX_STATE_ABTS_ACC_ARRIVED			5
+/* Local completion arrived; wait for ABTS ACC to finally complete the task. */
+#define	FCOE_TASK_RX_STATE_ABTS_LOCAL_COMP_ARRIVED		6
+/* Special completion indication in case of task was aborted. */
+#define FCOE_TASK_RX_STATE_ABTS_COMPLETED			7
+/* Special completion indication in case of task was cleaned. */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED		8
+/* Special completion indication (for the task that requested the exchange
+ * cleanup) in case the cleaned task is not valid. */
+#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED		9
+/* Special completion indication (for the task that requested the sequence
+ * cleanup) in case the cleaned task was already returned to normal. */
+#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP		10
+/* Exchange cleanup arrived; wait until the xfer is handled to finally
+ * complete the task. */
+#define	FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_ARRIVED		11
+/* Xfer handled, wait for exchange cleanup to finally complete the task. */
+#define	FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_HANDLED_XFER	12
+
+#define	FCOE_TASK_TYPE_WRITE			0
+#define	FCOE_TASK_TYPE_READ				1
+#define	FCOE_TASK_TYPE_MIDPATH			2
+#define	FCOE_TASK_TYPE_UNSOLICITED		3
+#define	FCOE_TASK_TYPE_ABTS				4
+#define	FCOE_TASK_TYPE_EXCHANGE_CLEANUP	5
+#define	FCOE_TASK_TYPE_SEQUENCE_CLEANUP	6
+
+#define FCOE_TASK_DEV_TYPE_DISK			0
+#define FCOE_TASK_DEV_TYPE_TAPE			1
+
+#define FCOE_TASK_CLASS_TYPE_3			0
+#define FCOE_TASK_CLASS_TYPE_2			1
+
+/* Everest FCoE connection type */
+#define B577XX_FCOE_CONNECTION_TYPE		4
+
+/* Error codes for Error Reporting in fast path flows */
+/* XFER error codes */
+#define FCOE_ERROR_CODE_XFER_OOO_RO					0
+#define FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED				1
+#define FCOE_ERROR_CODE_XFER_NULL_BURST_LEN				2
+#define FCOE_ERROR_CODE_XFER_RO_GREATER_THAN_DATA2TRNS			3
+#define FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE			4
+#define FCOE_ERROR_CODE_XFER_TASK_TYPE_NOT_WRITE			5
+#define FCOE_ERROR_CODE_XFER_PEND_XFER_SET				6
+#define FCOE_ERROR_CODE_XFER_OPENED_SEQ					7
+#define FCOE_ERROR_CODE_XFER_FCTL					8
+
+/* FCP RSP error codes */
+#define FCOE_ERROR_CODE_FCP_RSP_BIDI_FLAGS_SET				9
+#define FCOE_ERROR_CODE_FCP_RSP_UNDERFLOW				10
+#define FCOE_ERROR_CODE_FCP_RSP_OVERFLOW				11
+#define FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD			12
+#define FCOE_ERROR_CODE_FCP_RSP_INVALID_SNS_FIELD			13
+#define FCOE_ERROR_CODE_FCP_RSP_INVALID_PAYLOAD_SIZE			14
+#define FCOE_ERROR_CODE_FCP_RSP_PEND_XFER_SET				15
+#define FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ				16
+#define FCOE_ERROR_CODE_FCP_RSP_FCTL					17
+#define FCOE_ERROR_CODE_FCP_RSP_LAST_SEQ_RESET				18
+#define FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET		19
+
+/* FCP DATA error codes */
+#define FCOE_ERROR_CODE_DATA_OOO_RO					20
+#define FCOE_ERROR_CODE_DATA_EXCEEDS_DEFINED_MAX_FRAME_SIZE		21
+#define FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS				22
+#define FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET			23
+#define FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET			24
+#define FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET				25
+#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET			26
+#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ			27
+#define FCOE_ERROR_CODE_DATA_FCTL					28
+
+/* Middle path error codes */
+#define FCOE_ERROR_CODE_MIDPATH_TYPE_NOT_ELS				29
+#define FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET			30
+#define FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET			31
+#define FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET			32
+#define FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET			33
+#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_FCTL				34
+#define FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY				35
+#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL				36
+
+/* ABTS error codes */
+#define FCOE_ERROR_CODE_ABTS_REPLY_F_CTL				37
+#define FCOE_ERROR_CODE_ABTS_REPLY_DDF_RCTL_FIELD			38
+#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_BLS_RCTL			39
+#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL			40
+#define FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH		41
+
+/* Common error codes */
+#define FCOE_ERROR_CODE_COMMON_MIDDLE_FRAME_WITH_PAD			42
+#define FCOE_ERROR_CODE_COMMON_SEQ_INIT_IN_TCE				43
+#define FCOE_ERROR_CODE_COMMON_FC_HDR_RX_ID_MISMATCH			44
+#define FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT			45
+#define FCOE_ERROR_CODE_COMMON_DATA_FC_HDR_FCP_TYPE_MISMATCH		46
+#define FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES			47
+#define FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR				48
+#define FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG			49
+#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED		50
+
+/* Unsolicited Rx error codes */
+#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_ELS			51
+#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_BLS			52
+#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_ELS				53
+#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_BLS				54
+#define FCOE_ERROR_CODE_UNSOLICITED_R_CTL				55
+
+#define FCOE_ERROR_CODE_RW_TASK_DDF_RCTL_INFO_FIELD			56
+#define FCOE_ERROR_CODE_RW_TASK_INVALID_RCTL				57
+#define FCOE_ERROR_CODE_RW_TASK_RCTL_GENERAL_MISMATCH			58
+
+/* Timer error codes */
+#define FCOE_ERROR_CODE_E_D_TOV_TIMER_EXPIRATION			60
+#define FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION			61
+
+
+#endif /* BNX2FC_CONSTANTS_H_ */
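
These FCOE_KCQE_OPCODE_* values are what the firmware hands back through
bnx2fc_indicate_kcqe() (declared in bnx2fc.h above). The real dispatcher
lives in bnx2fc_hwi.c, outside this hunk; the sketch below shows the
expected shape, assuming struct fcoe_kcqe (from 57xx_hsi_bnx2fc.h)
carries the opcode in an op_code field. The handle_*() helpers are
hypothetical names:

	u32 i;

	for (i = 0; i < num_cqe; i++) {
		struct fcoe_kcqe *kcqe = (struct fcoe_kcqe *)kcq[i];

		switch (kcqe->op_code) {
		case FCOE_KCQE_OPCODE_INIT_FUNC:
			handle_init_compl(hba, kcqe);	/* hypothetical */
			break;
		case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
			handle_ofld_compl(hba, kcqe);	/* hypothetical */
			break;
		case FCOE_KCQE_OPCODE_FCOE_ERROR:
			handle_fcoe_error(hba, kcqe);	/* hypothetical */
			break;
		default:
			break;
		}
	}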

+ 70 - 0
drivers/scsi/bnx2fc/bnx2fc_debug.h

@@ -0,0 +1,70 @@
+#ifndef __BNX2FC_DEBUG__
+#define __BNX2FC_DEBUG__
+
+/* Log level bit mask */
+#define LOG_IO		0x01	/* scsi cmd error, cleanup */
+#define LOG_TGT		0x02	/* Session setup, cleanup, etc. */
+#define LOG_HBA		0x04	/* lport events, link, MTU, etc. */
+#define LOG_ELS		0x08	/* ELS logs */
+#define LOG_MISC	0x10	/* FCoE L2 frame related logs */
+#define LOG_ALL		0xff	/* LOG all messages */
+
+extern unsigned int bnx2fc_debug_level;
+
+#define BNX2FC_CHK_LOGGING(LEVEL, CMD)					\
+	do {								\
+		if (unlikely(bnx2fc_debug_level & LEVEL))		\
+			do {						\
+				CMD;					\
+			} while (0);					\
+	} while (0)
+
+#define BNX2FC_ELS_DBG(fmt, arg...)					\
+	BNX2FC_CHK_LOGGING(LOG_ELS,					\
+			   printk(KERN_ALERT PFX fmt, ##arg))
+
+#define BNX2FC_MISC_DBG(fmt, arg...)					\
+	BNX2FC_CHK_LOGGING(LOG_MISC,					\
+			   printk(KERN_ALERT PFX fmt, ##arg))
+
+#define BNX2FC_IO_DBG(io_req, fmt, arg...)				\
+	do {								\
+		if (!io_req || !io_req->port || !io_req->port->lport ||	\
+		    !io_req->port->lport->host)				\
+			BNX2FC_CHK_LOGGING(LOG_IO,			\
+			   printk(KERN_ALERT PFX "NULL " fmt, ##arg));	\
+		else							\
+			BNX2FC_CHK_LOGGING(LOG_IO,			\
+			   shost_printk(KERN_ALERT,			\
+				   (io_req)->port->lport->host,		\
+				   PFX "xid:0x%x " fmt,			\
+				   (io_req)->xid, ##arg));		\
+	} while (0)
+
+#define BNX2FC_TGT_DBG(tgt, fmt, arg...)				\
+	do {								\
+		if (!tgt || !tgt->port || !tgt->port->lport ||		\
+		    !tgt->port->lport->host || !tgt->rport)		\
+			BNX2FC_CHK_LOGGING(LOG_TGT,			\
+			   printk(KERN_ALERT PFX "NULL " fmt, ##arg));	\
+		else							\
+			BNX2FC_CHK_LOGGING(LOG_TGT,			\
+			   shost_printk(KERN_ALERT,			\
+				   (tgt)->port->lport->host,		\
+				   PFX "port:%x " fmt,			\
+				   (tgt)->rport->port_id, ##arg));	\
+	} while (0)
+
+
+#define BNX2FC_HBA_DBG(lport, fmt, arg...)				\
+	do {								\
+		if (!lport || !lport->host)				\
+			BNX2FC_CHK_LOGGING(LOG_HBA,			\
+			   printk(KERN_ALERT PFX "NULL " fmt, ##arg));	\
+		else							\
+			BNX2FC_CHK_LOGGING(LOG_HBA,			\
+			   shost_printk(KERN_ALERT, lport->host,	\
+				   PFX fmt, ##arg));			\
+	} while (0)
+
+#endif
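
Because bnx2fc_debug_level is exported as the writable module parameter
debug_logging (see bnx2fc_fcoe.c later in this series), the log mask can
be changed at runtime through the usual module sysfs path. For example,
to enable only session logging (LOG_TGT):

	# echo 0x02 > /sys/module/bnx2fc/parameters/debug_logging

In code, each macro costs a single mask test when its level is disabled,
e.g.:

	BNX2FC_TGT_DBG(tgt, "session setup, conn_id = %d\n", conn_id);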

+ 515 - 0
drivers/scsi/bnx2fc/bnx2fc_els.c

@@ -0,0 +1,515 @@
+/*
+ * bnx2fc_els.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * This file contains helper routines that handle ELS requests
+ * and responses.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
+static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
+			     void *arg);
+static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
+			      void *arg);
+static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
+			void *data, u32 data_len,
+			void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
+			struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);
+
+static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
+{
+	struct bnx2fc_cmd *orig_io_req;
+	struct bnx2fc_cmd *rrq_req;
+	int rc = 0;
+
+	BUG_ON(!cb_arg);
+	rrq_req = cb_arg->io_req;
+	orig_io_req = cb_arg->aborted_io_req;
+	BUG_ON(!orig_io_req);
+	BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
+		   orig_io_req->xid, rrq_req->xid);
+
+	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+
+	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
+		/*
+		 * The ELS request timed out. Clean up the IO with the FW,
+		 * drop the completion, and remove it from active_cmd_queue.
+		 */
+		BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
+			   rrq_req->xid);
+
+		if (rrq_req->on_active_queue) {
+			list_del_init(&rrq_req->link);
+			rrq_req->on_active_queue = 0;
+			rc = bnx2fc_initiate_cleanup(rrq_req);
+			BUG_ON(rc);
+		}
+	}
+	kfree(cb_arg);
+}
+
+int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
+{
+	struct fc_els_rrq rrq;
+	struct bnx2fc_rport *tgt = aborted_io_req->tgt;
+	struct fc_lport *lport = tgt->rdata->local_port;
+	struct bnx2fc_els_cb_arg *cb_arg = NULL;
+	u32 sid = tgt->sid;
+	u32 r_a_tov = lport->r_a_tov;
+	unsigned long start = jiffies;
+	int rc;
+
+	BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
+		   aborted_io_req->xid);
+	memset(&rrq, 0, sizeof(rrq));
+
+	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
+	if (!cb_arg) {
+		printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
+		rc = -ENOMEM;
+		goto rrq_err;
+	}
+
+	cb_arg->aborted_io_req = aborted_io_req;
+
+	rrq.rrq_cmd = ELS_RRQ;
+	hton24(rrq.rrq_s_id, sid);
+	rrq.rrq_ox_id = htons(aborted_io_req->xid);
+	rrq.rrq_rx_id = htons(aborted_io_req->task->rx_wr_tx_rd.rx_id);
+
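+	/*
+	 * bnx2fc_initiate_els() can transiently fail with -ENOMEM (for
+	 * example while command slots/XIDs are exhausted), so keep
+	 * retrying for up to 10 seconds before giving up and dropping
+	 * the reference held on the original IO request.
+	 */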
+retry_rrq:
+	rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
+				 bnx2fc_rrq_compl, cb_arg,
+				 r_a_tov);
+	if (rc == -ENOMEM) {
+		if (time_after(jiffies, start + (10 * HZ))) {
+			BNX2FC_ELS_DBG("rrq Failed\n");
+			rc = FAILED;
+			goto rrq_err;
+		}
+		msleep(20);
+		goto retry_rrq;
+	}
+rrq_err:
+	if (rc) {
+		BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
+			aborted_io_req->xid);
+		kfree(cb_arg);
+		spin_lock_bh(&tgt->tgt_lock);
+		kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
+		spin_unlock_bh(&tgt->tgt_lock);
+	}
+	return rc;
+}
+
+static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
+{
+	struct bnx2fc_cmd *els_req;
+	struct bnx2fc_rport *tgt;
+	struct bnx2fc_mp_req *mp_req;
+	struct fc_frame_header *fc_hdr;
+	unsigned char *buf;
+	void *resp_buf;
+	u32 resp_len, hdr_len;
+	u16 l2_oxid;
+	int frame_len;
+	int rc = 0;
+
+	l2_oxid = cb_arg->l2_oxid;
+	BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);
+
+	els_req = cb_arg->io_req;
+	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
+		/*
+		 * The ELS request timed out. Clean up the IO with the FW and
+		 * drop the completion; libfc will handle the ELS timeout.
+		 */
+		if (els_req->on_active_queue) {
+			list_del_init(&els_req->link);
+			els_req->on_active_queue = 0;
+			rc = bnx2fc_initiate_cleanup(els_req);
+			BUG_ON(rc);
+		}
+		goto free_arg;
+	}
+
+	tgt = els_req->tgt;
+	mp_req = &(els_req->mp_req);
+	fc_hdr = &(mp_req->resp_fc_hdr);
+	resp_len = mp_req->resp_len;
+	resp_buf = mp_req->resp_buf;
+
+	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+	if (!buf) {
+		printk(KERN_ERR PFX "Unable to alloc mp buf\n");
+		goto free_arg;
+	}
+	hdr_len = sizeof(*fc_hdr);
+	if (hdr_len + resp_len > PAGE_SIZE) {
+		printk(KERN_ERR PFX "l2_els_compl: resp len is "
+				    "beyond page size\n");
+		goto free_buf;
+	}
+	memcpy(buf, fc_hdr, hdr_len);
+	memcpy(buf + hdr_len, resp_buf, resp_len);
+	frame_len = hdr_len + resp_len;
+
+	bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);
+
+free_buf:
+	kfree(buf);
+free_arg:
+	kfree(cb_arg);
+}
+
+int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
+{
+	struct fc_els_adisc *adisc;
+	struct fc_frame_header *fh;
+	struct bnx2fc_els_cb_arg *cb_arg;
+	struct fc_lport *lport = tgt->rdata->local_port;
+	u32 r_a_tov = lport->r_a_tov;
+	int rc;
+
+	fh = fc_frame_header_get(fp);
+	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+	if (!cb_arg) {
+		printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
+		return -ENOMEM;
+	}
+
+	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
+
+	BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
+	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
+	/* adisc is initialized by libfc */
+	rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
+				 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
+	if (rc)
+		kfree(cb_arg);
+	return rc;
+}
+
+int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
+{
+	struct fc_els_logo *logo;
+	struct fc_frame_header *fh;
+	struct bnx2fc_els_cb_arg *cb_arg;
+	struct fc_lport *lport = tgt->rdata->local_port;
+	u32 r_a_tov = lport->r_a_tov;
+	int rc;
+
+	fh = fc_frame_header_get(fp);
+	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+	if (!cb_arg) {
+		printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
+		return -ENOMEM;
+	}
+
+	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
+
+	BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
+	logo = fc_frame_payload_get(fp, sizeof(*logo));
+	/* logo is initialized by libfc */
+	rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
+				 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
+	if (rc)
+		kfree(cb_arg);
+	return rc;
+}
+
+int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
+{
+	struct fc_els_rls *rls;
+	struct fc_frame_header *fh;
+	struct bnx2fc_els_cb_arg *cb_arg;
+	struct fc_lport *lport = tgt->rdata->local_port;
+	u32 r_a_tov = lport->r_a_tov;
+	int rc;
+
+	fh = fc_frame_header_get(fp);
+	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+	if (!cb_arg) {
+		printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
+		return -ENOMEM;
+	}
+
+	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
+
+	rls = fc_frame_payload_get(fp, sizeof(*rls));
+	/* rls is initialized by libfc */
+	rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
+				  bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
+	if (rc)
+		kfree(cb_arg);
+	return rc;
+}
+
+static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
+			void *data, u32 data_len,
+			void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
+			struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
+{
+	struct fcoe_port *port = tgt->port;
+	struct bnx2fc_hba *hba = port->priv;
+	struct fc_rport *rport = tgt->rport;
+	struct fc_lport *lport = port->lport;
+	struct bnx2fc_cmd *els_req;
+	struct bnx2fc_mp_req *mp_req;
+	struct fc_frame_header *fc_hdr;
+	struct fcoe_task_ctx_entry *task;
+	struct fcoe_task_ctx_entry *task_page;
+	int rc = 0;
+	int task_idx, index;
+	u32 did, sid;
+	u16 xid;
+
+	rc = fc_remote_port_chkready(rport);
+	if (rc) {
+		printk(KERN_ALERT PFX "els 0x%x: rport not ready\n", op);
+		rc = -EINVAL;
+		goto els_err;
+	}
+	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+		printk(KERN_ALERT PFX "els 0x%x: link is not ready\n", op);
+		rc = -EINVAL;
+		goto els_err;
+	}
+	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) ||
+	     (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) {
+		printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
+		rc = -EINVAL;
+		goto els_err;
+	}
+	els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
+	if (!els_req) {
+		rc = -ENOMEM;
+		goto els_err;
+	}
+
+	els_req->sc_cmd = NULL;
+	els_req->port = port;
+	els_req->tgt = tgt;
+	els_req->cb_func = cb_func;
+	cb_arg->io_req = els_req;
+	els_req->cb_arg = cb_arg;
+
+	mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
+	rc = bnx2fc_init_mp_req(els_req);
+	if (rc == FAILED) {
+		printk(KERN_ALERT PFX "ELS MP request init failed\n");
+		spin_lock_bh(&tgt->tgt_lock);
+		kref_put(&els_req->refcount, bnx2fc_cmd_release);
+		spin_unlock_bh(&tgt->tgt_lock);
+		rc = -ENOMEM;
+		goto els_err;
+	} else {
+		/* rc SUCCESS */
+		rc = 0;
+	}
+
+	/* Set the data_xfer_len to the size of ELS payload */
+	mp_req->req_len = data_len;
+	els_req->data_xfer_len = mp_req->req_len;
+
+	/* Fill ELS Payload */
+	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
+		memcpy(mp_req->req_buf, data, data_len);
+	} else {
+		printk(KERN_ALERT PFX "Invalid ELS op 0x%x\n", op);
+		els_req->cb_func = NULL;
+		els_req->cb_arg = NULL;
+		spin_lock_bh(&tgt->tgt_lock);
+		kref_put(&els_req->refcount, bnx2fc_cmd_release);
+		spin_unlock_bh(&tgt->tgt_lock);
+		rc = -EINVAL;
+	}
+
+	if (rc)
+		goto els_err;
+
+	/* Fill FC header */
+	fc_hdr = &(mp_req->req_fc_hdr);
+
+	did = tgt->rport->port_id;
+	sid = tgt->sid;
+
+	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
+			   FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+			   FC_FC_SEQ_INIT, 0);
+
+	/* Obtain exchange id */
+	xid = els_req->xid;
+	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+	index = xid % BNX2FC_TASKS_PER_PAGE;
+
+	/* Initialize task context for this IO request */
+	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+	task = &(task_page[index]);
+	bnx2fc_init_mp_task(els_req, task);
+
+	spin_lock_bh(&tgt->tgt_lock);
+
+	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+		printk(KERN_ERR PFX "initiate_els.. session not ready\n");
+		els_req->cb_func = NULL;
+		els_req->cb_arg = NULL;
+		kref_put(&els_req->refcount, bnx2fc_cmd_release);
+		spin_unlock_bh(&tgt->tgt_lock);
+		return -EINVAL;
+	}
+
+	if (timer_msec)
+		bnx2fc_cmd_timer_set(els_req, timer_msec);
+	bnx2fc_add_2_sq(tgt, xid);
+
+	els_req->on_active_queue = 1;
+	list_add_tail(&els_req->link, &tgt->els_queue);
+
+	/* Ring doorbell */
+	bnx2fc_ring_doorbell(tgt);
+	spin_unlock_bh(&tgt->tgt_lock);
+
+els_err:
+	return rc;
+}
+
+void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
+			      struct fcoe_task_ctx_entry *task, u8 num_rq)
+{
+	struct bnx2fc_mp_req *mp_req;
+	struct fc_frame_header *fc_hdr;
+	u64 *hdr;
+	u64 *temp_hdr;
+
+	BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x, "
+			"cmd_type = %d\n", els_req->xid, els_req->cmd_type);
+
+	if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
+			     &els_req->req_flags)) {
+		BNX2FC_ELS_DBG("Timer context finished processing this "
+			   "els - 0x%x\n", els_req->xid);
+		/* This IO doesn't receive a cleanup completion */
+		kref_put(&els_req->refcount, bnx2fc_cmd_release);
+		return;
+	}
+
+	/* Cancel the timeout_work, as we received the response */
+	if (cancel_delayed_work(&els_req->timeout_work))
+		kref_put(&els_req->refcount,
+			 bnx2fc_cmd_release); /* drop timer hold */
+
+	if (els_req->on_active_queue) {
+		list_del_init(&els_req->link);
+		els_req->on_active_queue = 0;
+	}
+
+	mp_req = &(els_req->mp_req);
+	fc_hdr = &(mp_req->resp_fc_hdr);
+
+	hdr = (u64 *)fc_hdr;
+	temp_hdr = (u64 *)
+		&task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+	hdr[0] = cpu_to_be64(temp_hdr[0]);
+	hdr[1] = cpu_to_be64(temp_hdr[1]);
+	hdr[2] = cpu_to_be64(temp_hdr[2]);
+
+	mp_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;
+
+	/* Parse ELS response */
+	if ((els_req->cb_func) && (els_req->cb_arg)) {
+		els_req->cb_func(els_req->cb_arg);
+		els_req->cb_arg = NULL;
+	}
+
+	kref_put(&els_req->refcount, bnx2fc_cmd_release);
+}
+
+static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
+			      void *arg)
+{
+	struct fcoe_ctlr *fip = arg;
+	struct fc_exch *exch = fc_seq_exch(seq);
+	struct fc_lport *lport = exch->lp;
+	u8 *mac;
+	struct fc_frame_header *fh;
+	u8 op;
+
+	if (IS_ERR(fp))
+		goto done;
+
+	mac = fr_cb(fp)->granted_mac;
+	if (is_zero_ether_addr(mac)) {
+		fh = fc_frame_header_get(fp);
+		if (fh->fh_type != FC_TYPE_ELS) {
+			printk(KERN_ERR PFX "bnx2fc_flogi_resp:"
+				"fh_type != FC_TYPE_ELS\n");
+			fc_frame_free(fp);
+			return;
+		}
+		op = fc_frame_payload_op(fp);
+		if (lport->vport) {
+			if (op == ELS_LS_RJT) {
+				printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
+				fc_vport_terminate(lport->vport);
+				fc_frame_free(fp);
+				return;
+			}
+		}
+		if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
+			fc_frame_free(fp);
+			return;
+		}
+	}
+	fip->update_mac(lport, mac);
+done:
+	fc_lport_flogi_resp(seq, fp, lport);
+}
+
+static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
+			     void *arg)
+{
+	struct fcoe_ctlr *fip = arg;
+	struct fc_exch *exch = fc_seq_exch(seq);
+	struct fc_lport *lport = exch->lp;
+	static u8 zero_mac[ETH_ALEN] = { 0 };
+
+	if (!IS_ERR(fp))
+		fip->update_mac(lport, zero_mac);
+	fc_lport_logo_resp(seq, fp, lport);
+}
+
+struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
+				      struct fc_frame *fp, unsigned int op,
+				      void (*resp)(struct fc_seq *,
+						   struct fc_frame *,
+						   void *),
+				      void *arg, u32 timeout)
+{
+	struct fcoe_port *port = lport_priv(lport);
+	struct bnx2fc_hba *hba = port->priv;
+	struct fcoe_ctlr *fip = &hba->ctlr;
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+
+	switch (op) {
+	case ELS_FLOGI:
+	case ELS_FDISC:
+		return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
+				     fip, timeout);
+	case ELS_LOGO:
+		/* only hook onto fabric logouts, not port logouts */
+		if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
+			break;
+		return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
+				     fip, timeout);
+	}
+	return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
+}
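
bnx2fc_elsct_send() intercepts only FLOGI/FDISC and fabric LOGO so that
FIP can observe the granted MAC address; every other ELS/CT request is
passed straight through to fc_elsct_send(). It is presumably hooked into
the libfc template that bnx2fc_libfc_config() copies into each lport; a
sketch of that wiring (the actual initializer lives in bnx2fc_fcoe.c and
likely sets more ops):

	static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
		.elsct_send	= bnx2fc_elsct_send,
		/* remaining ops fall back to the libfc defaults */
	};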

+ 2535 - 0
drivers/scsi/bnx2fc/bnx2fc_fcoe.c

@@ -0,0 +1,2535 @@
+/* bnx2fc_fcoe.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * This file contains the code that interacts with libfc, libfcoe,
+ * cnic modules to create FCoE instances, send/receive non-offloaded
+ * FIP/FCoE packets, listen to link events etc.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
+static struct list_head adapter_list;
+static u32 adapter_count;
+static DEFINE_MUTEX(bnx2fc_dev_lock);
+DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
+
+#define DRV_MODULE_NAME		"bnx2fc"
+#define DRV_MODULE_VERSION	BNX2FC_VERSION
+#define DRV_MODULE_RELDATE	"Jan 25, 2011"
+
+
+static char version[] __devinitdata =
+		"Broadcom NetXtreme II FCoE Driver " DRV_MODULE_NAME \
+		" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+
+MODULE_AUTHOR("Bhanu Prakash Gollapudi <bprakash@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 FCoE Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define BNX2FC_MAX_QUEUE_DEPTH	256
+#define BNX2FC_MIN_QUEUE_DEPTH	32
+#define FCOE_WORD_TO_BYTE  4
+
+static struct scsi_transport_template	*bnx2fc_transport_template;
+static struct scsi_transport_template	*bnx2fc_vport_xport_template;
+
+struct workqueue_struct *bnx2fc_wq;
+
+/* bnx2fc needs only one instance of the fcoe_percpu_s structure. The IO
+ * threads are per-CPU, but there is only a single L2 thread.
+ */
+struct fcoe_percpu_s bnx2fc_global;
+DEFINE_SPINLOCK(bnx2fc_global_lock);
+
+static struct cnic_ulp_ops bnx2fc_cnic_cb;
+static struct libfc_function_template bnx2fc_libfc_fcn_templ;
+static struct scsi_host_template bnx2fc_shost_template;
+static struct fc_function_template bnx2fc_transport_function;
+static struct fc_function_template bnx2fc_vport_xport_function;
+static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
+static int bnx2fc_destroy(struct net_device *net_device);
+static int bnx2fc_enable(struct net_device *netdev);
+static int bnx2fc_disable(struct net_device *netdev);
+
+static void bnx2fc_recv_frame(struct sk_buff *skb);
+
+static void bnx2fc_start_disc(struct bnx2fc_hba *hba);
+static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
+static int bnx2fc_net_config(struct fc_lport *lp);
+static int bnx2fc_lport_config(struct fc_lport *lport);
+static int bnx2fc_em_config(struct fc_lport *lport);
+static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
+static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba);
+static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
+static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
+static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
+				  struct device *parent, int npiv);
+static void bnx2fc_destroy_work(struct work_struct *work);
+
+static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
+static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic);
+
+static int bnx2fc_fw_init(struct bnx2fc_hba *hba);
+static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba);
+
+static void bnx2fc_port_shutdown(struct fc_lport *lport);
+static void bnx2fc_stop(struct bnx2fc_hba *hba);
+static int __init bnx2fc_mod_init(void);
+static void __exit bnx2fc_mod_exit(void);
+
+unsigned int bnx2fc_debug_level;
+module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
+
+static int bnx2fc_cpu_callback(struct notifier_block *nfb,
+			     unsigned long action, void *hcpu);
+/* notification function for CPU hotplug events */
+static struct notifier_block bnx2fc_cpu_notifier = {
+	.notifier_call = bnx2fc_cpu_callback,
+};
+
+static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
+{
+	struct fcoe_percpu_s *bg;
+	struct fcoe_rcv_info *fr;
+	struct sk_buff_head *list;
+	struct sk_buff *skb, *next;
+	struct sk_buff *head;
+
+	bg = &bnx2fc_global;
+	spin_lock_bh(&bg->fcoe_rx_list.lock);
+	list = &bg->fcoe_rx_list;
+	head = list->next;
+	for (skb = head; skb != (struct sk_buff *)list;
+	     skb = next) {
+		next = skb->next;
+		fr = fcoe_dev_from_skb(skb);
+		if (fr->fr_dev == lp) {
+			__skb_unlink(skb, list);
+			kfree_skb(skb);
+		}
+	}
+	spin_unlock_bh(&bg->fcoe_rx_list.lock);
+}
+
+int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen)
+{
+	int rc;
+	spin_lock(&bnx2fc_global_lock);
+	rc = fcoe_get_paged_crc_eof(skb, tlen, &bnx2fc_global);
+	spin_unlock(&bnx2fc_global_lock);
+
+	return rc;
+}
+
+static void bnx2fc_abort_io(struct fc_lport *lport)
+{
+	/*
+	 * This function is a no-op for bnx2fc, but we do not want to
+	 * leave it NULL either, since libfc would otherwise fall back
+	 * to the default handler, fc_fcp_abort_io.
+	 */
+}
+
+static void bnx2fc_cleanup(struct fc_lport *lport)
+{
+	struct fcoe_port *port = lport_priv(lport);
+	struct bnx2fc_hba *hba = port->priv;
+	struct bnx2fc_rport *tgt;
+	int i;
+
+	BNX2FC_MISC_DBG("Entered %s\n", __func__);
+	mutex_lock(&hba->hba_mutex);
+	spin_lock_bh(&hba->hba_lock);
+	for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
+		tgt = hba->tgt_ofld_list[i];
+		if (tgt) {
+			/* Cleanup IOs belonging to requested vport */
+			if (tgt->port == port) {
+				spin_unlock_bh(&hba->hba_lock);
+				BNX2FC_TGT_DBG(tgt, "flush/cleanup\n");
+				bnx2fc_flush_active_ios(tgt);
+				spin_lock_bh(&hba->hba_lock);
+			}
+		}
+	}
+	spin_unlock_bh(&hba->hba_lock);
+	mutex_unlock(&hba->hba_mutex);
+}
+
+static int bnx2fc_xmit_l2_frame(struct bnx2fc_rport *tgt,
+			     struct fc_frame *fp)
+{
+	struct fc_rport_priv *rdata = tgt->rdata;
+	struct fc_frame_header *fh;
+	int rc = 0;
+
+	fh = fc_frame_header_get(fp);
+	BNX2FC_TGT_DBG(tgt, "Xmit L2 frame rport = 0x%x, oxid = 0x%x, "
+			"r_ctl = 0x%x\n", rdata->ids.port_id,
+			ntohs(fh->fh_ox_id), fh->fh_r_ctl);
+	if ((fh->fh_type == FC_TYPE_ELS) &&
+	    (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
+
+		switch (fc_frame_payload_op(fp)) {
+		case ELS_ADISC:
+			rc = bnx2fc_send_adisc(tgt, fp);
+			break;
+		case ELS_LOGO:
+			rc = bnx2fc_send_logo(tgt, fp);
+			break;
+		case ELS_RLS:
+			rc = bnx2fc_send_rls(tgt, fp);
+			break;
+		default:
+			break;
+		}
+	} else if ((fh->fh_type ==  FC_TYPE_BLS) &&
+	    (fh->fh_r_ctl == FC_RCTL_BA_ABTS))
+		BNX2FC_TGT_DBG(tgt, "ABTS frame\n");
+	else {
+		BNX2FC_TGT_DBG(tgt, "Send L2 frame type 0x%x "
+				"rctl 0x%x thru non-offload path\n",
+				fh->fh_type, fh->fh_r_ctl);
+		return -ENODEV;
+	}
+	if (rc)
+		return -ENOMEM;
+	else
+		return 0;
+}
+
+/**
+ * bnx2fc_xmit - bnx2fc's FCoE frame transmit function
+ *
+ * @lport:	the associated local port
+ * @fp:	the fc_frame to be transmitted
+ */
+static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
+{
+	struct ethhdr		*eh;
+	struct fcoe_crc_eof	*cp;
+	struct sk_buff		*skb;
+	struct fc_frame_header	*fh;
+	struct bnx2fc_hba	*hba;
+	struct fcoe_port	*port;
+	struct fcoe_hdr		*hp;
+	struct bnx2fc_rport	*tgt;
+	struct fcoe_dev_stats	*stats;
+	u8			sof, eof;
+	u32			crc;
+	unsigned int		hlen, tlen, elen;
+	int			wlen, rc = 0;
+
+	port = (struct fcoe_port *)lport_priv(lport);
+	hba = port->priv;
+
+	fh = fc_frame_header_get(fp);
+
+	skb = fp_skb(fp);
+	if (!lport->link_up) {
+		BNX2FC_HBA_DBG(lport, "bnx2fc_xmit link down\n");
+		kfree_skb(skb);
+		return 0;
+	}
+
+	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
+		if (!hba->ctlr.sel_fcf) {
+			BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
+			kfree_skb(skb);
+			return -EINVAL;
+		}
+		if (fcoe_ctlr_els_send(&hba->ctlr, lport, skb))
+			return 0;
+	}
+
+	sof = fr_sof(fp);
+	eof = fr_eof(fp);
+
+	/*
+	 * Snoop the frame header to check whether the frame is for an
+	 * offloaded session. tgt_ofld_list access is synchronized using
+	 * both the hba mutex and the hba lock; at least one of the two
+	 * must be held for read access.
+	 */
+
+	spin_lock_bh(&hba->hba_lock);
+	tgt = bnx2fc_tgt_lookup(port, ntoh24(fh->fh_d_id));
+	if (tgt && (test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
+		/* This frame is for offloaded session */
+		BNX2FC_HBA_DBG(lport, "xmit: Frame is for offloaded session "
+				"port_id = 0x%x\n", ntoh24(fh->fh_d_id));
+		spin_unlock_bh(&hba->hba_lock);
+		rc = bnx2fc_xmit_l2_frame(tgt, fp);
+		if (rc != -ENODEV) {
+			kfree_skb(skb);
+			return rc;
+		}
+	} else {
+		spin_unlock_bh(&hba->hba_lock);
+	}
+
+	elen = sizeof(struct ethhdr);
+	hlen = sizeof(struct fcoe_hdr);
+	tlen = sizeof(struct fcoe_crc_eof);
+	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
+
+	skb->ip_summed = CHECKSUM_NONE;
+	crc = fcoe_fc_crc(fp);
+
+	/* copy port crc and eof to the skb buff */
+	if (skb_is_nonlinear(skb)) {
+		skb_frag_t *frag;
+		if (bnx2fc_get_paged_crc_eof(skb, tlen)) {
+			kfree_skb(skb);
+			return -ENOMEM;
+		}
+		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
+		cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
+				+ frag->page_offset;
+	} else {
+		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
+	}
+
+	memset(cp, 0, sizeof(*cp));
+	cp->fcoe_eof = eof;
+	cp->fcoe_crc32 = cpu_to_le32(~crc);
+	if (skb_is_nonlinear(skb)) {
+		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
+		cp = NULL;
+	}
+
+	/* adjust skb network/transport offsets to match mac/fcoe/port */
+	skb_push(skb, elen + hlen);
+	skb_reset_mac_header(skb);
+	skb_reset_network_header(skb);
+	skb->mac_len = elen;
+	skb->protocol = htons(ETH_P_FCOE);
+	skb->dev = hba->netdev;
+
+	/* fill up mac and fcoe headers */
+	eh = eth_hdr(skb);
+	eh->h_proto = htons(ETH_P_FCOE);
+	if (hba->ctlr.map_dest)
+		fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
+	else
+		/* insert GW address */
+		memcpy(eh->h_dest, hba->ctlr.dest_addr, ETH_ALEN);
+
+	if (unlikely(hba->ctlr.flogi_oxid != FC_XID_UNKNOWN))
+		memcpy(eh->h_source, hba->ctlr.ctl_src_addr, ETH_ALEN);
+	else
+		memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
+
+	hp = (struct fcoe_hdr *)(eh + 1);
+	memset(hp, 0, sizeof(*hp));
+	if (FC_FCOE_VER)
+		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
+	hp->fcoe_sof = sof;
+
+	/* fcoe lso, mss is in max_payload which is non-zero for FCP data */
+	if (lport->seq_offload && fr_max_payload(fp)) {
+		skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
+		skb_shinfo(skb)->gso_size = fr_max_payload(fp);
+	} else {
+		skb_shinfo(skb)->gso_type = 0;
+		skb_shinfo(skb)->gso_size = 0;
+	}
+
+	/* update tx stats */
+	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+	stats->TxFrames++;
+	stats->TxWords += wlen;
+	put_cpu();
+
+	/* send down to lld */
+	fr_dev(fp) = lport;
+	if (port->fcoe_pending_queue.qlen)
+		fcoe_check_wait_queue(lport, skb);
+	else if (fcoe_start_io(skb))
+		fcoe_check_wait_queue(lport, skb);
+
+	return 0;
+}
+
+/**
+ * bnx2fc_rcv - This is bnx2fc's receive function called by NET_RX_SOFTIRQ
+ *
+ * @skb:	the receive socket buffer
+ * @dev:	associated net device
+ * @ptype:	context
+ * @olddev:	last device
+ *
+ * This function receives the packet, builds an FC frame, and passes it up
+ */
+static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
+		struct packet_type *ptype, struct net_device *olddev)
+{
+	struct fc_lport *lport;
+	struct bnx2fc_hba *hba;
+	struct fc_frame_header *fh;
+	struct fcoe_rcv_info *fr;
+	struct fcoe_percpu_s *bg;
+	unsigned short oxid;
+
+	hba = container_of(ptype, struct bnx2fc_hba, fcoe_packet_type);
+	lport = hba->ctlr.lp;
+
+	if (unlikely(lport == NULL)) {
+		printk(KERN_ALERT PFX "bnx2fc_rcv: lport is NULL\n");
+		goto err;
+	}
+
+	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
+		printk(KERN_ALERT PFX "bnx2fc_rcv: Wrong FC type frame\n");
+		goto err;
+	}
+
+	/*
+	 * Check for minimum frame length, and make sure required FCoE
+	 * and FC headers are pulled into the linear data area.
+	 */
+	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
+	    !pskb_may_pull(skb, FCOE_HEADER_LEN)))
+		goto err;
+
+	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
+	fh = (struct fc_frame_header *) skb_transport_header(skb);
+
+	oxid = ntohs(fh->fh_ox_id);
+
+	fr = fcoe_dev_from_skb(skb);
+	fr->fr_dev = lport;
+	fr->ptype = ptype;
+
+	bg = &bnx2fc_global;
+	spin_lock_bh(&bg->fcoe_rx_list.lock);
+
+	__skb_queue_tail(&bg->fcoe_rx_list, skb);
+	if (bg->fcoe_rx_list.qlen == 1)
+		wake_up_process(bg->thread);
+
+	spin_unlock_bh(&bg->fcoe_rx_list.lock);
+
+	return 0;
+err:
+	kfree_skb(skb);
+	return -1;
+}
+
+static int bnx2fc_l2_rcv_thread(void *arg)
+{
+	struct fcoe_percpu_s *bg = arg;
+	struct sk_buff *skb;
+
+	set_user_nice(current, -20);
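+	/*
+	 * Mark the thread TASK_INTERRUPTIBLE before checking the rx list
+	 * so that a wake_up_process() from bnx2fc_rcv() between the
+	 * check and schedule() is not lost.
+	 */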
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		schedule();
+		set_current_state(TASK_RUNNING);
+		spin_lock_bh(&bg->fcoe_rx_list.lock);
+		while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) {
+			spin_unlock_bh(&bg->fcoe_rx_list.lock);
+			bnx2fc_recv_frame(skb);
+			spin_lock_bh(&bg->fcoe_rx_list.lock);
+		}
+		spin_unlock_bh(&bg->fcoe_rx_list.lock);
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+	set_current_state(TASK_RUNNING);
+	return 0;
+}
+
+
+static void bnx2fc_recv_frame(struct sk_buff *skb)
+{
+	u32 fr_len;
+	struct fc_lport *lport;
+	struct fcoe_rcv_info *fr;
+	struct fcoe_dev_stats *stats;
+	struct fc_frame_header *fh;
+	struct fcoe_crc_eof crc_eof;
+	struct fc_frame *fp;
+	struct fc_lport *vn_port;
+	struct fcoe_port *port;
+	u8 *mac = NULL;
+	u8 *dest_mac = NULL;
+	struct fcoe_hdr *hp;
+
+	fr = fcoe_dev_from_skb(skb);
+	lport = fr->fr_dev;
+	if (unlikely(lport == NULL)) {
+		printk(KERN_ALERT PFX "Invalid lport struct\n");
+		kfree_skb(skb);
+		return;
+	}
+
+	if (skb_is_nonlinear(skb))
+		skb_linearize(skb);
+	mac = eth_hdr(skb)->h_source;
+	dest_mac = eth_hdr(skb)->h_dest;
+
+	/* Pull the header */
+	hp = (struct fcoe_hdr *) skb_network_header(skb);
+	fh = (struct fc_frame_header *) skb_transport_header(skb);
+	skb_pull(skb, sizeof(struct fcoe_hdr));
+	fr_len = skb->len - sizeof(struct fcoe_crc_eof);
+
+	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+	stats->RxFrames++;
+	stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+
+	fp = (struct fc_frame *)skb;
+	fc_frame_init(fp);
+	fr_dev(fp) = lport;
+	fr_sof(fp) = hp->fcoe_sof;
+	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
+		put_cpu();
+		kfree_skb(skb);
+		return;
+	}
+	fr_eof(fp) = crc_eof.fcoe_eof;
+	fr_crc(fp) = crc_eof.fcoe_crc32;
+	if (pskb_trim(skb, fr_len)) {
+		put_cpu();
+		kfree_skb(skb);
+		return;
+	}
+
+	fh = fc_frame_header_get(fp);
+
+	vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
+	if (vn_port) {
+		port = lport_priv(vn_port);
+		if (compare_ether_addr(port->data_src_addr, dest_mac)
+		    != 0) {
+			BNX2FC_HBA_DBG(lport, "fpma mismatch\n");
+			put_cpu();
+			kfree_skb(skb);
+			return;
+		}
+	}
+	if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
+	    fh->fh_type == FC_TYPE_FCP) {
+		/* Drop FCP data. We don't handle it in the L2 path */
+		put_cpu();
+		kfree_skb(skb);
+		return;
+	}
+	if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
+	    fh->fh_type == FC_TYPE_ELS) {
+		switch (fc_frame_payload_op(fp)) {
+		case ELS_LOGO:
+			if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
+				/* drop non-FIP LOGO */
+				put_cpu();
+				kfree_skb(skb);
+				return;
+			}
+			break;
+		}
+	}
+	if (le32_to_cpu(fr_crc(fp)) !=
+			~crc32(~0, skb->data, fr_len)) {
+		if (stats->InvalidCRCCount < 5)
+			printk(KERN_WARNING PFX "dropping frame with "
+			       "CRC error\n");
+		stats->InvalidCRCCount++;
+		put_cpu();
+		kfree_skb(skb);
+		return;
+	}
+	put_cpu();
+	fc_exch_recv(lport, fp);
+}
+
+/**
+ * bnx2fc_percpu_io_thread - thread per cpu for ios
+ *
+ * @arg:	ptr to bnx2fc_percpu_info structure
+ */
+int bnx2fc_percpu_io_thread(void *arg)
+{
+	struct bnx2fc_percpu_s *p = arg;
+	struct bnx2fc_work *work, *tmp;
+	LIST_HEAD(work_list);
+
+	set_user_nice(current, -20);
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		schedule();
+		set_current_state(TASK_RUNNING);
+		spin_lock_bh(&p->fp_work_lock);
+		while (!list_empty(&p->work_list)) {
+			list_splice_init(&p->work_list, &work_list);
+			spin_unlock_bh(&p->fp_work_lock);
+
+			list_for_each_entry_safe(work, tmp, &work_list, list) {
+				list_del_init(&work->list);
+				bnx2fc_process_cq_compl(work->tgt, work->wqe);
+				kfree(work);
+			}
+
+			spin_lock_bh(&p->fp_work_lock);
+		}
+		spin_unlock_bh(&p->fp_work_lock);
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+	set_current_state(TASK_RUNNING);
+
+	return 0;
+}
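+
+/*
+ * The producer half lives in bnx2fc_hwi.c, outside this hunk. It
+ * presumably allocates one struct bnx2fc_work per CQE and queues it to
+ * the current CPU's thread roughly as in the sketch below; the exact
+ * context and error handling may differ:
+ *
+ *	struct bnx2fc_percpu_s *fps = &per_cpu(bnx2fc_percpu, get_cpu());
+ *	struct bnx2fc_work *work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ *
+ *	INIT_LIST_HEAD(&work->list);
+ *	work->tgt = tgt;
+ *	work->wqe = wqe;
+ *	spin_lock_bh(&fps->fp_work_lock);
+ *	list_add_tail(&work->list, &fps->work_list);
+ *	if (fps->iothread)
+ *		wake_up_process(fps->iothread);
+ *	spin_unlock_bh(&fps->fp_work_lock);
+ *	put_cpu();
+ */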
+
+static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
+{
+	struct fc_host_statistics *bnx2fc_stats;
+	struct fc_lport *lport = shost_priv(shost);
+	struct fcoe_port *port = lport_priv(lport);
+	struct bnx2fc_hba *hba = port->priv;
+	struct fcoe_statistics_params *fw_stats;
+	int rc = 0;
+
+	fw_stats = (struct fcoe_statistics_params *)hba->stats_buffer;
+	if (!fw_stats)
+		return NULL;
+
+	bnx2fc_stats = fc_get_host_stats(shost);
+
+	init_completion(&hba->stat_req_done);
+	if (bnx2fc_send_stat_req(hba))
+		return bnx2fc_stats;
+	rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
+	if (!rc) {
+		BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
+		return bnx2fc_stats;
+	}
+	bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat1.fc_crc_cnt;
+	bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt;
+	bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4;
+	bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt;
+	bnx2fc_stats->rx_words += (fw_stats->rx_stat0.fcoe_rx_byte_cnt) / 4;
+
+	bnx2fc_stats->dumped_frames = 0;
+	bnx2fc_stats->lip_count = 0;
+	bnx2fc_stats->nos_count = 0;
+	bnx2fc_stats->loss_of_sync_count = 0;
+	bnx2fc_stats->loss_of_signal_count = 0;
+	bnx2fc_stats->prim_seq_protocol_err_count = 0;
+
+	return bnx2fc_stats;
+}
+
+static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
+{
+	struct fcoe_port *port = lport_priv(lport);
+	struct bnx2fc_hba *hba = port->priv;
+	struct Scsi_Host *shost = lport->host;
+	int rc = 0;
+
+	shost->max_cmd_len = BNX2FC_MAX_CMD_LEN;
+	shost->max_lun = BNX2FC_MAX_LUN;
+	shost->max_id = BNX2FC_MAX_FCP_TGT;
+	shost->max_channel = 0;
+	if (lport->vport)
+		shost->transportt = bnx2fc_vport_xport_template;
+	else
+		shost->transportt = bnx2fc_transport_template;
+
+	/* Add the new host to SCSI-ml */
+	rc = scsi_add_host(lport->host, dev);
+	if (rc) {
+		printk(KERN_ERR PFX "Error on scsi_add_host\n");
+		return rc;
+	}
+	if (!lport->vport)
+		fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
+	sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s",
+		BNX2FC_NAME, BNX2FC_VERSION,
+		hba->netdev->name);
+
+	return 0;
+}
+
+static int bnx2fc_mfs_update(struct fc_lport *lport)
+{
+	struct fcoe_port *port = lport_priv(lport);
+	struct bnx2fc_hba *hba = port->priv;
+	struct net_device *netdev = hba->netdev;
+	u32 mfs;
+	u32 max_mfs;
+
+	mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
+			     sizeof(struct fcoe_crc_eof));
+	max_mfs = BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header);
+	BNX2FC_HBA_DBG(lport, "mfs = %d, max_mfs = %d\n", mfs, max_mfs);
+	if (mfs > max_mfs)
+		mfs = max_mfs;
+
+	/* Adjust mfs to be a multiple of 256 bytes */
+	mfs = (((mfs - sizeof(struct fc_frame_header)) / BNX2FC_MIN_PAYLOAD) *
+			BNX2FC_MIN_PAYLOAD);
+	mfs = mfs + sizeof(struct fc_frame_header);
+
+	BNX2FC_HBA_DBG(lport, "Set MFS = %d\n", mfs);
+	if (fc_set_mfs(lport, mfs))
+		return -EINVAL;
+	return 0;
+}
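+
+/*
+ * Worked example for the rounding above, assuming BNX2FC_MIN_PAYLOAD is
+ * 256 and BNX2FC_MAX_PAYLOAD is 2048 (both defined in bnx2fc.h, outside
+ * this hunk), with sizeof(fcoe_hdr) + sizeof(fcoe_crc_eof) = 14 + 8 = 22
+ * bytes and a 24-byte FC frame header:
+ *
+ *	MTU 1500: mfs = 1500 - 22 = 1478 (below max_mfs = 2072), then
+ *	          ((1478 - 24) / 256) * 256 + 24 = 5 * 256 + 24 = 1304
+ *	MTU 9000: mfs = 9000 - 22 = 8978, clamped to max_mfs = 2072, then
+ *	          ((2072 - 24) / 256) * 256 + 24 = 8 * 256 + 24 = 2072
+ */
+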
+static void bnx2fc_link_speed_update(struct fc_lport *lport)
+{
+	struct fcoe_port *port = lport_priv(lport);
+	struct bnx2fc_hba *hba = port->priv;
+	struct net_device *netdev = hba->netdev;
+	struct ethtool_cmd ecmd = { ETHTOOL_GSET };
+
+	if (!dev_ethtool_get_settings(netdev, &ecmd)) {
+		lport->link_supported_speeds &=
+			~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+		if (ecmd.supported & (SUPPORTED_1000baseT_Half |
+				      SUPPORTED_1000baseT_Full))
+			lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
+		if (ecmd.supported & SUPPORTED_10000baseT_Full)
+			lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
+
+		if (ecmd.speed == SPEED_1000)
+			lport->link_speed = FC_PORTSPEED_1GBIT;
+		if (ecmd.speed == SPEED_10000)
+			lport->link_speed = FC_PORTSPEED_10GBIT;
+	}
+}
+
+static int bnx2fc_link_ok(struct fc_lport *lport)
+{
+	struct fcoe_port *port = lport_priv(lport);
+	struct bnx2fc_hba *hba = port->priv;
+	struct net_device *dev = hba->phys_dev;
+	int rc = 0;
+
+	if ((dev->flags & IFF_UP) && netif_carrier_ok(dev))
+		clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+	else {
+		set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+		rc = -1;
+	}
+	return rc;
+}
+
+/**
+ * bnx2fc_get_link_state - get network link state
+ *
+ * @hba:	adapter instance pointer
+ *
+ * updates adapter structure flag based on netdev state
+ */
+void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
+{
+	if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
+		set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+	else
+		clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+}
+
+static int bnx2fc_net_config(struct fc_lport *lport)
+{
+	struct bnx2fc_hba *hba;
+	struct fcoe_port *port;
+	u64 wwnn, wwpn;
+
+	port = lport_priv(lport);
+	hba = port->priv;
+
+	/* require support for get_pauseparam ethtool op. */
+	if (!hba->phys_dev->ethtool_ops ||
+	    !hba->phys_dev->ethtool_ops->get_pauseparam)
+		return -EOPNOTSUPP;
+
+	if (bnx2fc_mfs_update(lport))
+		return -EINVAL;
+
+	skb_queue_head_init(&port->fcoe_pending_queue);
+	port->fcoe_pending_queue_active = 0;
+	setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport);
+
+	bnx2fc_link_speed_update(lport);
+
+	if (!lport->vport) {
+		wwnn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 1, 0);
+		BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
+		fc_set_wwnn(lport, wwnn);
+
+		wwpn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 2, 0);
+		BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
+		fc_set_wwpn(lport, wwpn);
+	}
+
+	return 0;
+}
+
+static void bnx2fc_destroy_timer(unsigned long data)
+{
+	struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data;
+
+	BNX2FC_HBA_DBG(hba->ctlr.lp, "ERROR: bnx2fc_destroy_timer - "
+		   "destroy completion not received!\n");
+	hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
+	wake_up_interruptible(&hba->destroy_wait);
+}
+
+/**
+ * bnx2fc_indicate_netevent - Generic netdev event handler
+ *
+ * @context:	adapter structure pointer
+ * @event:	event type
+ *
+ * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN,NETDEV_CHANGE and
+ * NETDEV_CHANGE_MTU events
+ */
+static void bnx2fc_indicate_netevent(void *context, unsigned long event)
+{
+	struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
+	struct fc_lport *lport = hba->ctlr.lp;
+	struct fc_lport *vport;
+	u32 link_possible = 1;
+
+	if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+		BNX2FC_MISC_DBG("driver not ready. dev=%s event=%ld\n",
+			   hba->netdev->name, event);
+		return;
+	}
+
+	/*
+	 * ASSUMPTION:
+	 * indicate_netevent cannot be called from cnic unless bnx2fc
+	 * does register_device
+	 */
+	BUG_ON(!lport);
+
+	BNX2FC_HBA_DBG(lport, "enter netevent handler - dev=%s event=%ld\n",
+				hba->netdev->name, event);
+
+	switch (event) {
+	case NETDEV_UP:
+		BNX2FC_HBA_DBG(lport, "Port up, adapter_state = %ld\n",
+			hba->adapter_state);
+		if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+			printk(KERN_ERR "indicate_netevent: "
+					"adapter is not UP!!\n");
+		/* fall thru to update mfs if MTU has changed */
+	case NETDEV_CHANGEMTU:
+		BNX2FC_HBA_DBG(lport, "NETDEV_CHANGEMTU event\n");
+		bnx2fc_mfs_update(lport);
+		mutex_lock(&lport->lp_mutex);
+		list_for_each_entry(vport, &lport->vports, list)
+			bnx2fc_mfs_update(vport);
+		mutex_unlock(&lport->lp_mutex);
+		break;
+
+	case NETDEV_DOWN:
+		BNX2FC_HBA_DBG(lport, "Port down\n");
+		clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+		clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+		link_possible = 0;
+		break;
+
+	case NETDEV_GOING_DOWN:
+		BNX2FC_HBA_DBG(lport, "Port going down\n");
+		set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+		link_possible = 0;
+		break;
+
+	case NETDEV_CHANGE:
+		BNX2FC_HBA_DBG(lport, "NETDEV_CHANGE\n");
+		break;
+
+	default:
+		printk(KERN_ERR PFX "Unkonwn netevent %ld", event);
+		return;
+	}
+
+	bnx2fc_link_speed_update(lport);
+
+	if (link_possible && !bnx2fc_link_ok(lport)) {
+		printk(KERN_ERR "indicate_netevent: call ctlr_link_up\n");
+		fcoe_ctlr_link_up(&hba->ctlr);
+	} else {
+		printk(KERN_ERR "indicate_netevent: call ctlr_link_down\n");
+		if (fcoe_ctlr_link_down(&hba->ctlr)) {
+			clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+			mutex_lock(&lport->lp_mutex);
+			list_for_each_entry(vport, &lport->vports, list)
+				fc_host_port_type(vport->host) =
+							FC_PORTTYPE_UNKNOWN;
+			mutex_unlock(&lport->lp_mutex);
+			fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
+			per_cpu_ptr(lport->dev_stats,
+				    get_cpu())->LinkFailureCount++;
+			put_cpu();
+			fcoe_clean_pending_queue(lport);
+
+			init_waitqueue_head(&hba->shutdown_wait);
+			BNX2FC_HBA_DBG(lport, "indicate_netevent "
+					     "num_ofld_sess = %d\n",
+				   hba->num_ofld_sess);
+			hba->wait_for_link_down = 1;
+			BNX2FC_HBA_DBG(lport, "waiting for uploads to "
+					     "compl proc = %s\n",
+				   current->comm);
+			wait_event_interruptible(hba->shutdown_wait,
+						 (hba->num_ofld_sess == 0));
+			BNX2FC_HBA_DBG(lport, "wakeup - num_ofld_sess = %d\n",
+				hba->num_ofld_sess);
+			hba->wait_for_link_down = 0;
+
+			if (signal_pending(current))
+				flush_signals(current);
+		}
+	}
+}
+
+static int bnx2fc_libfc_config(struct fc_lport *lport)
+{
+
+	/* Set the function pointers set by bnx2fc driver */
+	memcpy(&lport->tt, &bnx2fc_libfc_fcn_templ,
+		sizeof(struct libfc_function_template));
+	fc_elsct_init(lport);
+	fc_exch_init(lport);
+	fc_rport_init(lport);
+	fc_disc_init(lport);
+	return 0;
+}
+
+static int bnx2fc_em_config(struct fc_lport *lport)
+{
+	struct fcoe_port *port = lport_priv(lport);
+	struct bnx2fc_hba *hba = port->priv;
+
+	if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID,
+				FCOE_MAX_XID, NULL)) {
+		printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
+		return -ENOMEM;
+	}
+
+	hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID,
+					    BNX2FC_MAX_XID);
+
+	if (!hba->cmd_mgr) {
+		printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
+		fc_exch_mgr_free(lport);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int bnx2fc_lport_config(struct fc_lport *lport)
+{
+	lport->link_up = 0;
+	lport->qfull = 0;
+	lport->max_retry_count = 3;
+	lport->max_rport_retry_count = 3;
+	lport->e_d_tov = 2 * 1000;
+	lport->r_a_tov = 10 * 1000;
+
+	/* REVISIT: enable when supporting tape devices
+	lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
+				FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
+	*/
+	lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS);
+	lport->does_npiv = 1;
+
+	memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen));
+	lport->rnid_gen.rnid_atype = BNX2FC_RNID_HBA;
+
+	/* alloc stats structure */
+	if (fc_lport_init_stats(lport))
+		return -ENOMEM;
+
+	/* Finish fc_lport configuration */
+	fc_lport_config(lport);
+
+	return 0;
+}
+
+/**
+ * bnx2fc_fip_recv - handle a received FIP frame.
+ *
+ * @skb: the received skb
+ * @dev: associated &net_device
+ * @ptype: the &packet_type structure which was used to register this handler.
+ * @orig_dev: original receive &net_device, in case @dev is a bond.
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
+			   struct packet_type *ptype,
+			   struct net_device *orig_dev)
+{
+	struct bnx2fc_hba *hba;
+	hba = container_of(ptype, struct bnx2fc_hba, fip_packet_type);
+	fcoe_ctlr_recv(&hba->ctlr, skb);
+	return 0;
+}
+
+/**
+ * bnx2fc_update_src_mac - Update the FCoE data-path source MAC address.
+ *
+ * @lport: libfc local port
+ * @addr:  unicast MAC address to use as the data source address
+ *
+ * Remember the address assigned by the FCoE controller; it is used as
+ * the source MAC of outgoing FCoE frames on this lport.
+ */
+static void bnx2fc_update_src_mac(struct fc_lport *lport, u8 *addr)
+{
+	struct fcoe_port *port = lport_priv(lport);
+
+	memcpy(port->data_src_addr, addr, ETH_ALEN);
+}
+
+/**
+ * bnx2fc_get_src_mac - return the ethernet source address for an lport
+ *
+ * @lport: libfc port
+ */
+static u8 *bnx2fc_get_src_mac(struct fc_lport *lport)
+{
+	struct fcoe_port *port;
+
+	port = (struct fcoe_port *)lport_priv(lport);
+	return port->data_src_addr;
+}
+
+/**
+ * bnx2fc_fip_send - send an Ethernet-encapsulated FIP frame.
+ *
+ * @fip: FCoE controller.
+ * @skb: FIP Packet.
+ */
+static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
+{
+	skb->dev = bnx2fc_from_ctlr(fip)->netdev;
+	dev_queue_xmit(skb);
+}
+
+static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
+{
+	struct Scsi_Host *shost = vport_to_shost(vport);
+	struct fc_lport *n_port = shost_priv(shost);
+	struct fcoe_port *port = lport_priv(n_port);
+	struct bnx2fc_hba *hba = port->priv;
+	struct net_device *netdev = hba->netdev;
+	struct fc_lport *vn_port;
+
+	if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+		printk(KERN_ERR PFX "vn ports cannot be created on"
+			"this hba\n");
+		return -EIO;
+	}
+	mutex_lock(&bnx2fc_dev_lock);
+	vn_port = bnx2fc_if_create(hba, &vport->dev, 1);
+	mutex_unlock(&bnx2fc_dev_lock);
+
+	if (IS_ERR(vn_port)) {
+		printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n",
+			netdev->name);
+		return -EIO;
+	}
+
+	if (disabled) {
+		fc_vport_set_state(vport, FC_VPORT_DISABLED);
+	} else {
+		vn_port->boot_time = jiffies;
+		fc_lport_init(vn_port);
+		fc_fabric_login(vn_port);
+		fc_vport_setlink(vn_port);
+	}
+	return 0;
+}
+
+static int bnx2fc_vport_destroy(struct fc_vport *vport)
+{
+	struct Scsi_Host *shost = vport_to_shost(vport);
+	struct fc_lport *n_port = shost_priv(shost);
+	struct fc_lport *vn_port = vport->dd_data;
+	struct fcoe_port *port = lport_priv(vn_port);
+
+	mutex_lock(&n_port->lp_mutex);
+	list_del(&vn_port->list);
+	mutex_unlock(&n_port->lp_mutex);
+	queue_work(bnx2fc_wq, &port->destroy_work);
+	return 0;
+}
+
+static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable)
+{
+	struct fc_lport *lport = vport->dd_data;
+
+	if (disable) {
+		fc_vport_set_state(vport, FC_VPORT_DISABLED);
+		fc_fabric_logoff(lport);
+	} else {
+		lport->boot_time = jiffies;
+		fc_fabric_login(lport);
+		fc_vport_setlink(lport);
+	}
+	return 0;
+}
+
+
+static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
+{
+	struct net_device *netdev = hba->netdev;
+	struct net_device *physdev = hba->phys_dev;
+	struct netdev_hw_addr *ha;
+	int sel_san_mac = 0;
+
+	/* bonding devices are not supported */
+	if ((netdev->priv_flags & IFF_MASTER_ALB) ||
+			(netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
+			(netdev->priv_flags & IFF_MASTER_8023AD)) {
+		return -EOPNOTSUPP;
+	}
+
+	/* setup Source MAC Address */
+	rcu_read_lock();
+	for_each_dev_addr(physdev, ha) {
+		BNX2FC_MISC_DBG("net_config: ha->type = %d, fip_mac = ",
+				ha->type);
+		printk(KERN_INFO "%2x:%2x:%2x:%2x:%2x:%2x\n", ha->addr[0],
+				ha->addr[1], ha->addr[2], ha->addr[3],
+				ha->addr[4], ha->addr[5]);
+
+		if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
+		    (is_valid_ether_addr(ha->addr))) {
+			memcpy(hba->ctlr.ctl_src_addr, ha->addr, ETH_ALEN);
+			sel_san_mac = 1;
+			BNX2FC_MISC_DBG("Found SAN MAC\n");
+		}
+	}
+	rcu_read_unlock();
+
+	if (!sel_san_mac)
+		return -ENODEV;
+
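+	/* register receive handlers for the FIP and FCoE ethertypes on
+	 * this netdev; the net core dispatches matching frames to them */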
+	hba->fip_packet_type.func = bnx2fc_fip_recv;
+	hba->fip_packet_type.type = htons(ETH_P_FIP);
+	hba->fip_packet_type.dev = netdev;
+	dev_add_pack(&hba->fip_packet_type);
+
+	hba->fcoe_packet_type.func = bnx2fc_rcv;
+	hba->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
+	hba->fcoe_packet_type.dev = netdev;
+	dev_add_pack(&hba->fcoe_packet_type);
+
+	return 0;
+}
+
+static int bnx2fc_attach_transport(void)
+{
+	bnx2fc_transport_template =
+		fc_attach_transport(&bnx2fc_transport_function);
+
+	if (bnx2fc_transport_template == NULL) {
+		printk(KERN_ERR PFX "Failed to attach FC transport\n");
+		return -ENODEV;
+	}
+
+	bnx2fc_vport_xport_template =
+		fc_attach_transport(&bnx2fc_vport_xport_function);
+	if (bnx2fc_vport_xport_template == NULL) {
+		printk(KERN_ERR PFX
+		       "Failed to attach FC transport for vport\n");
+		fc_release_transport(bnx2fc_transport_template);
+		bnx2fc_transport_template = NULL;
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static void bnx2fc_release_transport(void)
+{
+	fc_release_transport(bnx2fc_transport_template);
+	fc_release_transport(bnx2fc_vport_xport_template);
+	bnx2fc_transport_template = NULL;
+	bnx2fc_vport_xport_template = NULL;
+}
+
+static void bnx2fc_interface_release(struct kref *kref)
+{
+	struct bnx2fc_hba *hba;
+	struct net_device *netdev;
+	struct net_device *phys_dev;
+
+	hba = container_of(kref, struct bnx2fc_hba, kref);
+	BNX2FC_HBA_DBG(hba->ctlr.lp, "Interface is being released\n");
+
+	netdev = hba->netdev;
+	phys_dev = hba->phys_dev;
+
+	/* tear-down FIP controller */
+	if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done))
+		fcoe_ctlr_destroy(&hba->ctlr);
+
+	/* Free the command manager */
+	if (hba->cmd_mgr) {
+		bnx2fc_cmd_mgr_free(hba->cmd_mgr);
+		hba->cmd_mgr = NULL;
+	}
+	dev_put(netdev);
+	module_put(THIS_MODULE);
+}
+
+static inline void bnx2fc_interface_get(struct bnx2fc_hba *hba)
+{
+	kref_get(&hba->kref);
+}
+
+static inline void bnx2fc_interface_put(struct bnx2fc_hba *hba)
+{
+	kref_put(&hba->kref, bnx2fc_interface_release);
+}
+
+static void bnx2fc_interface_destroy(struct bnx2fc_hba *hba)
+{
+	bnx2fc_unbind_pcidev(hba);
+	kfree(hba);
+}
+
+/**
+ * bnx2fc_interface_create - create a new fcoe instance
+ *
+ * @cnic:	pointer to cnic device
+ *
+ * Creates a new FCoE instance on the given device, which includes
+ *	allocating the hba structure and the scsi_host and lport structures.
+ */
+static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic)
+{
+	struct bnx2fc_hba *hba;
+	int rc;
+
+	hba = kzalloc(sizeof(*hba), GFP_KERNEL);
+	if (!hba) {
+		printk(KERN_ERR PFX "Unable to allocate hba structure\n");
+		return NULL;
+	}
+	spin_lock_init(&hba->hba_lock);
+	mutex_init(&hba->hba_mutex);
+
+	hba->cnic = cnic;
+	rc = bnx2fc_bind_pcidev(hba);
+	if (rc)
+		goto bind_err;
+	hba->phys_dev = cnic->netdev;
+	/* will get overwritten after we do vlan discovery */
+	hba->netdev = hba->phys_dev;
+
+	init_waitqueue_head(&hba->shutdown_wait);
+	init_waitqueue_head(&hba->destroy_wait);
+
+	return hba;
+bind_err:
+	printk(KERN_ERR PFX "create_interface: bind error\n");
+	kfree(hba);
+	return NULL;
+}
+
+static int bnx2fc_interface_setup(struct bnx2fc_hba *hba,
+				  enum fip_state fip_mode)
+{
+	int rc = 0;
+	struct net_device *netdev = hba->netdev;
+	struct fcoe_ctlr *fip = &hba->ctlr;
+
+	dev_hold(netdev);
+	kref_init(&hba->kref);
+
+	hba->flags = 0;
+
+	/* Initialize FIP */
+	memset(fip, 0, sizeof(*fip));
+	fcoe_ctlr_init(fip, fip_mode);
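+	/* wire up the libfcoe controller callbacks: FIP frame transmit,
+	 * data-path source MAC update, and source MAC lookup */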
+	hba->ctlr.send = bnx2fc_fip_send;
+	hba->ctlr.update_mac = bnx2fc_update_src_mac;
+	hba->ctlr.get_src_addr = bnx2fc_get_src_mac;
+	set_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done);
+
+	rc = bnx2fc_netdev_setup(hba);
+	if (rc)
+		goto setup_err;
+
+	hba->next_conn_id = 0;
+
+	memset(hba->tgt_ofld_list, 0, sizeof(hba->tgt_ofld_list));
+	hba->num_ofld_sess = 0;
+
+	return 0;
+
+setup_err:
+	fcoe_ctlr_destroy(&hba->ctlr);
+	dev_put(netdev);
+	bnx2fc_interface_put(hba);
+	return rc;
+}
+
+/**
+ * bnx2fc_if_create - Create FCoE instance on a given interface
+ *
+ * @hba:	FCoE interface to create a local port on
+ * @parent:	Device pointer to be the parent in sysfs for the SCSI host
+ * @npiv:	Indicates whether the port is a vport
+ *
+ * Creates a fc_lport instance and a Scsi_Host instance and configures them.
+ *
+ * Returns:	Allocated fc_lport or an error pointer
+ */
+static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
+				  struct device *parent, int npiv)
+{
+	struct fc_lport		*lport = NULL;
+	struct fcoe_port	*port;
+	struct Scsi_Host	*shost;
+	struct fc_vport		*vport = dev_to_vport(parent);
+	int			rc = 0;
+
+	/* Allocate Scsi_Host structure */
+	if (!npiv) {
+		lport = libfc_host_alloc(&bnx2fc_shost_template,
+					  sizeof(struct fcoe_port));
+	} else {
+		lport = libfc_vport_create(vport,
+					   sizeof(struct fcoe_port));
+	}
+
+	if (!lport) {
+		printk(KERN_ERR PFX "could not allocate scsi host structure\n");
+		return NULL;
+	}
+	shost = lport->host;
+	port = lport_priv(lport);
+	port->lport = lport;
+	port->priv = hba;
+	INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
+
+	/* Configure fcoe_port */
+	rc = bnx2fc_lport_config(lport);
+	if (rc)
+		goto lp_config_err;
+
+	if (npiv) {
+		vport = dev_to_vport(parent);
+		printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n",
+			vport->node_name, vport->port_name);
+		fc_set_wwnn(lport, vport->node_name);
+		fc_set_wwpn(lport, vport->port_name);
+	}
+	/* Configure netdev and networking properties of the lport */
+	rc = bnx2fc_net_config(lport);
+	if (rc) {
+		printk(KERN_ERR PFX "Error on bnx2fc_net_config\n");
+		goto lp_config_err;
+	}
+
+	rc = bnx2fc_shost_config(lport, parent);
+	if (rc) {
+		printk(KERN_ERR PFX "Couldnt configure shost for %s\n",
+			hba->netdev->name);
+		goto lp_config_err;
+	}
+
+	/* Initialize the libfc library */
+	rc = bnx2fc_libfc_config(lport);
+	if (rc) {
+		printk(KERN_ERR PFX "Couldnt configure libfc\n");
+		goto shost_err;
+	}
+	fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
+
+	/* Allocate exchange manager */
+	if (!npiv) {
+		rc = bnx2fc_em_config(lport);
+		if (rc) {
+			printk(KERN_ERR PFX "Error on bnx2fc_em_config\n");
+			goto shost_err;
+		}
+	}
+
+	bnx2fc_interface_get(hba);
+	return lport;
+
+shost_err:
+	scsi_remove_host(shost);
+lp_config_err:
+	scsi_host_put(lport->host);
+	return NULL;
+}
+
+static void bnx2fc_netdev_cleanup(struct bnx2fc_hba *hba)
+{
+	/* Don't listen for Ethernet packets anymore */
+	__dev_remove_pack(&hba->fcoe_packet_type);
+	__dev_remove_pack(&hba->fip_packet_type);
+	synchronize_net();
+}
+
+static void bnx2fc_if_destroy(struct fc_lport *lport)
+{
+	struct fcoe_port *port = lport_priv(lport);
+	struct bnx2fc_hba *hba = port->priv;
+
+	BNX2FC_HBA_DBG(hba->ctlr.lp, "ENTERED bnx2fc_if_destroy\n");
+	/* Stop the transmit retry timer */
+	del_timer_sync(&port->timer);
+
+	/* Free existing transmit skbs */
+	fcoe_clean_pending_queue(lport);
+
+	bnx2fc_interface_put(hba);
+
+	/* Free queued packets for the receive thread */
+	bnx2fc_clean_rx_queue(lport);
+
+	/* Detach from scsi-ml */
+	fc_remove_host(lport->host);
+	scsi_remove_host(lport->host);
+
+	/*
+	 * Note that only the physical lport will have the exchange manager.
+	 * For vports, this function is a no-op.
+	 */
+	fc_exch_mgr_free(lport);
+
+	/* Free memory used by statistical counters */
+	fc_lport_free_stats(lport);
+
+	/* Release Scsi_Host */
+	scsi_host_put(lport->host);
+}
+
+/**
+ * bnx2fc_destroy - Destroy a bnx2fc FCoE interface
+ *
+ * @netdev: The net device whose FCoE interface is to be destroyed
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_destroy(struct net_device *netdev)
+{
+	struct bnx2fc_hba *hba = NULL;
+	struct net_device *phys_dev;
+	int rc = 0;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
+
+	mutex_lock(&bnx2fc_dev_lock);
+#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE
+	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+		rc = -ENODEV;
+		goto netdev_err;
+	}
+#endif
+	/* obtain physical netdev */
+	if (netdev->priv_flags & IFF_802_1Q_VLAN)
+		phys_dev = vlan_dev_real_dev(netdev);
+	else {
+		printk(KERN_ERR PFX "Not a vlan device\n");
+		rc = -ENODEV;
+		goto netdev_err;
+	}
+
+	hba = bnx2fc_hba_lookup(phys_dev);
+	if (!hba || !hba->ctlr.lp) {
+		rc = -ENODEV;
+		printk(KERN_ERR PFX "bnx2fc_destroy: hba or lport not found\n");
+		goto netdev_err;
+	}
+
+	if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+		printk(KERN_ERR PFX "bnx2fc_destroy: Create not called\n");
+		goto netdev_err;
+	}
+
+	bnx2fc_netdev_cleanup(hba);
+
+	bnx2fc_stop(hba);
+
+	bnx2fc_if_destroy(hba->ctlr.lp);
+
+	destroy_workqueue(hba->timer_work_queue);
+
+	if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
+		bnx2fc_fw_destroy(hba);
+
+	clear_bit(BNX2FC_CREATE_DONE, &hba->init_done);
+netdev_err:
+	mutex_unlock(&bnx2fc_dev_lock);
+	rtnl_unlock();
+	return rc;
+}
+
+static void bnx2fc_destroy_work(struct work_struct *work)
+{
+	struct fcoe_port *port;
+	struct fc_lport *lport;
+
+	port = container_of(work, struct fcoe_port, destroy_work);
+	lport = port->lport;
+
+	BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
+
+	bnx2fc_port_shutdown(lport);
+	rtnl_lock();
+	mutex_lock(&bnx2fc_dev_lock);
+	bnx2fc_if_destroy(lport);
+	mutex_unlock(&bnx2fc_dev_lock);
+	rtnl_unlock();
+}
+
+static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba)
+{
+	bnx2fc_free_fw_resc(hba);
+	bnx2fc_free_task_ctx(hba);
+}
+
+/**
+ * bnx2fc_bind_adapter_devices - binds bnx2fc adapter with the associated
+ *			pci structure
+ *
+ * @hba:		Adapter instance
+ */
+static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba)
+{
+	if (bnx2fc_setup_task_ctx(hba))
+		goto mem_err;
+
+	if (bnx2fc_setup_fw_resc(hba))
+		goto mem_err;
+
+	return 0;
+mem_err:
+	bnx2fc_unbind_adapter_devices(hba);
+	return -ENOMEM;
+}
+
+static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
+{
+	struct cnic_dev *cnic;
+
+	if (!hba->cnic) {
+		printk(KERN_ERR PFX "cnic is NULL\n");
+		return -ENODEV;
+	}
+	cnic = hba->cnic;
+	hba->pcidev = cnic->pcidev;
+	if (hba->pcidev)
+		pci_dev_get(hba->pcidev);
+
+	return 0;
+}
+
+static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
+{
+	if (hba->pcidev)
+		pci_dev_put(hba->pcidev);
+	hba->pcidev = NULL;
+}
+
+
+
+/**
+ * bnx2fc_ulp_start - cnic callback to initialize & start adapter instance
+ *
+ * @handle:	transport handle pointing to adapter structure
+ *
+ * This function maps adapter structure to pcidev structure and initiates
+ *	firmware handshake to enable/initialize on-chip FCoE components.
+ *	This bnx2fc - cnic interface API callback is used after the following
+ *	conditions are met:
+ *	a) the underlying network interface is up (marked by the NETDEV_UP
+ *		event from netdev)
+ *	b) the bnx2fc adapter structure is registered.
+ */
+static void bnx2fc_ulp_start(void *handle)
+{
+	struct bnx2fc_hba *hba = handle;
+	struct fc_lport *lport = hba->ctlr.lp;
+
+	BNX2FC_MISC_DBG("Entered %s\n", __func__);
+	mutex_lock(&bnx2fc_dev_lock);
+
+	if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
+		goto start_disc;
+
+	if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done))
+		bnx2fc_fw_init(hba);
+
+start_disc:
+	mutex_unlock(&bnx2fc_dev_lock);
+
+	BNX2FC_MISC_DBG("bnx2fc started.\n");
+
+	/* Kick off Fabric discovery*/
+	if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+		printk(KERN_ERR PFX "ulp_init: start discovery\n");
+		lport->tt.frame_send = bnx2fc_xmit;
+		bnx2fc_start_disc(hba);
+	}
+}
+
+static void bnx2fc_port_shutdown(struct fc_lport *lport)
+{
+	BNX2FC_MISC_DBG("Entered %s\n", __func__);
+	fc_fabric_logoff(lport);
+	fc_lport_destroy(lport);
+}
+
+static void bnx2fc_stop(struct bnx2fc_hba *hba)
+{
+	struct fc_lport *lport;
+	struct fc_lport *vport;
+
+	BNX2FC_MISC_DBG("ENTERED %s - init_done = %ld\n", __func__,
+		   hba->init_done);
+	if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done) &&
+	    test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+		lport = hba->ctlr.lp;
+		bnx2fc_port_shutdown(lport);
+		BNX2FC_HBA_DBG(lport, "bnx2fc_stop: waiting for %d "
+				"offloaded sessions\n",
+				hba->num_ofld_sess);
+		wait_event_interruptible(hba->shutdown_wait,
+					 (hba->num_ofld_sess == 0));
+		mutex_lock(&lport->lp_mutex);
+		list_for_each_entry(vport, &lport->vports, list)
+			fc_host_port_type(vport->host) = FC_PORTTYPE_UNKNOWN;
+		mutex_unlock(&lport->lp_mutex);
+		fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
+		fcoe_ctlr_link_down(&hba->ctlr);
+		fcoe_clean_pending_queue(lport);
+
+		mutex_lock(&hba->hba_mutex);
+		clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+		clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+
+		clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+		mutex_unlock(&hba->hba_mutex);
+	}
+}
+
+static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
+{
+#define BNX2FC_INIT_POLL_TIME		(1000 / HZ)
+	int rc = -1;
+	int i = HZ;
+
+	rc = bnx2fc_bind_adapter_devices(hba);
+	if (rc) {
+		printk(KERN_ALERT PFX
+			"bnx2fc_bind_adapter_devices failed - rc = %d\n", rc);
+		goto err_out;
+	}
+
+	rc = bnx2fc_send_fw_fcoe_init_msg(hba);
+	if (rc) {
+		printk(KERN_ALERT PFX
+			"bnx2fc_send_fw_fcoe_init_msg failed - rc = %d\n", rc);
+		goto err_unbind;
+	}
+
+	/*
+	 * Wait until the adapter init message is complete, and adapter
+	 * state is UP.
+	 */
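+	/* poll once per jiffy (1000/HZ ms), i.e. for about one second total */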
+	while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
+		msleep(BNX2FC_INIT_POLL_TIME);
+
+	if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) {
+		printk(KERN_ERR PFX "bnx2fc_start: %s failed to initialize.  "
+				"Ignoring...\n",
+				hba->cnic->netdev->name);
+		rc = -1;
+		goto err_unbind;
+	}
+
+
+	/* Mark HBA to indicate that the FW INIT is done */
+	set_bit(BNX2FC_FW_INIT_DONE, &hba->init_done);
+	return 0;
+
+err_unbind:
+	bnx2fc_unbind_adapter_devices(hba);
+err_out:
+	return rc;
+}
+
+static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
+{
+	if (test_and_clear_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+		if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) {
+			init_timer(&hba->destroy_timer);
+			hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT +
+								jiffies;
+			hba->destroy_timer.function = bnx2fc_destroy_timer;
+			hba->destroy_timer.data = (unsigned long)hba;
+			add_timer(&hba->destroy_timer);
+			wait_event_interruptible(hba->destroy_wait,
+						 (hba->flags &
+						  BNX2FC_FLAG_DESTROY_CMPL));
+			/* This should never happen */
+			if (signal_pending(current))
+				flush_signals(current);
+
+			del_timer_sync(&hba->destroy_timer);
+		}
+		bnx2fc_unbind_adapter_devices(hba);
+	}
+}
+
+/**
+ * bnx2fc_ulp_stop - cnic callback to shutdown adapter instance
+ *
+ * @handle:	transport handle pointing to adapter structure
+ *
+ * Driver checks whether the adapter is already in shutdown mode; if not,
+ *	it starts the shutdown process.
+ */
+static void bnx2fc_ulp_stop(void *handle)
+{
+	struct bnx2fc_hba *hba = (struct bnx2fc_hba *)handle;
+
+	printk(KERN_ERR "ULP_STOP\n");
+
+	mutex_lock(&bnx2fc_dev_lock);
+	bnx2fc_stop(hba);
+	bnx2fc_fw_destroy(hba);
+	mutex_unlock(&bnx2fc_dev_lock);
+}
+
+static void bnx2fc_start_disc(struct bnx2fc_hba *hba)
+{
+	struct fc_lport *lport;
+	int wait_cnt = 0;
+
+	BNX2FC_MISC_DBG("Entered %s\n", __func__);
+	/* Kick off FIP/FLOGI */
+	if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+		printk(KERN_ERR PFX "Init not done yet\n");
+		return;
+	}
+
+	lport = hba->ctlr.lp;
+	BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
+
+	if (!bnx2fc_link_ok(lport)) {
+		BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
+		fcoe_ctlr_link_up(&hba->ctlr);
+		fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
+		set_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+	}
+
+	/* wait for the FCF to be selected before issuing FLOGI */
+	while (!hba->ctlr.sel_fcf) {
+		msleep(250);
+		/* give up after 3 secs */
+		if (++wait_cnt > 12)
+			break;
+	}
+	fc_lport_init(lport);
+	fc_fabric_login(lport);
+}
+
+
+/**
+ * bnx2fc_ulp_init - Initialize an adapter instance
+ *
+ * @dev:	cnic device handle
+ * Called from cnic_register_driver() context to initialize all
+ *	enumerated cnic devices. This routine allocates adapter structure
+ *	and other device specific resources.
+ */
+static void bnx2fc_ulp_init(struct cnic_dev *dev)
+{
+	struct bnx2fc_hba *hba;
+	int rc = 0;
+
+	BNX2FC_MISC_DBG("Entered %s\n", __func__);
+	/* bnx2fc works only when bnx2x is loaded */
+	if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+		printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s,"
+				    " flags: %lx\n",
+			dev->netdev->name, dev->flags);
+		return;
+	}
+
+	/* Configure FCoE interface */
+	hba = bnx2fc_interface_create(dev);
+	if (!hba) {
+		printk(KERN_ERR PFX "hba initialization failed\n");
+		return;
+	}
+
+	/* Add HBA to the adapter list */
+	mutex_lock(&bnx2fc_dev_lock);
+	list_add_tail(&hba->link, &adapter_list);
+	adapter_count++;
+	mutex_unlock(&bnx2fc_dev_lock);
+
+	clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
+	rc = dev->register_device(dev, CNIC_ULP_FCOE,
+						(void *) hba);
+	if (rc)
+		printk(KERN_ALERT PFX "register_device failed, rc = %d\n", rc);
+	else
+		set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
+}
+
+
+static int bnx2fc_disable(struct net_device *netdev)
+{
+	struct bnx2fc_hba *hba;
+	struct net_device *phys_dev;
+	struct ethtool_drvinfo drvinfo;
+	int rc = 0;
+
+	if (!rtnl_trylock()) {
+		printk(KERN_ERR PFX "retrying for rtnl_lock\n");
+		return -EIO;
+	}
+
+	mutex_lock(&bnx2fc_dev_lock);
+
+	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+		rc = -ENODEV;
+		goto nodev;
+	}
+
+	/* obtain physical netdev */
+	if (netdev->priv_flags & IFF_802_1Q_VLAN)
+		phys_dev = vlan_dev_real_dev(netdev);
+	else {
+		printk(KERN_ERR PFX "Not a vlan device\n");
+		rc = -ENODEV;
+		goto nodev;
+	}
+
+	/* verify if the physical device is a netxtreme2 device */
+	if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
+		memset(&drvinfo, 0, sizeof(drvinfo));
+		phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
+		if (strcmp(drvinfo.driver, "bnx2x")) {
+			printk(KERN_ERR PFX "Not a netxtreme2 device\n");
+			rc = -ENODEV;
+			goto nodev;
+		}
+	} else {
+		printk(KERN_ERR PFX "unable to obtain drv_info\n");
+		rc = -ENODEV;
+		goto nodev;
+	}
+
+	printk(KERN_ERR PFX "phys_dev is netxtreme2 device\n");
+
+	/* obtain hba and initialize rest of the structure */
+	hba = bnx2fc_hba_lookup(phys_dev);
+	if (!hba || !hba->ctlr.lp) {
+		rc = -ENODEV;
+		printk(KERN_ERR PFX "bnx2fc_disable: hba or lport not found\n");
+	} else {
+		fcoe_ctlr_link_down(&hba->ctlr);
+		fcoe_clean_pending_queue(hba->ctlr.lp);
+	}
+
+nodev:
+	mutex_unlock(&bnx2fc_dev_lock);
+	rtnl_unlock();
+	return rc;
+}
+
+
+static int bnx2fc_enable(struct net_device *netdev)
+{
+	struct bnx2fc_hba *hba;
+	struct net_device *phys_dev;
+	struct ethtool_drvinfo drvinfo;
+	int rc = 0;
+
+	if (!rtnl_trylock()) {
+		printk(KERN_ERR PFX "retrying for rtnl_lock\n");
+		return -EIO;
+	}
+
+	BNX2FC_MISC_DBG("Entered %s\n", __func__);
+	mutex_lock(&bnx2fc_dev_lock);
+
+	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+		rc = -ENODEV;
+		goto nodev;
+	}
+
+	/* obtain physical netdev */
+	if (netdev->priv_flags & IFF_802_1Q_VLAN)
+		phys_dev = vlan_dev_real_dev(netdev);
+	else {
+		printk(KERN_ERR PFX "Not a vlan device\n");
+		rc = -ENODEV;
+		goto nodev;
+	}
+	/* verify if the physical device is a netxtreme2 device */
+	if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
+		memset(&drvinfo, 0, sizeof(drvinfo));
+		phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
+		if (strcmp(drvinfo.driver, "bnx2x")) {
+			printk(KERN_ERR PFX "Not a netxtreme2 device\n");
+			rc = -ENODEV;
+			goto nodev;
+		}
+	} else {
+		printk(KERN_ERR PFX "unable to obtain drv_info\n");
+		rc = -ENODEV;
+		goto nodev;
+	}
+
+	/* obtain hba and initialize rest of the structure */
+	hba = bnx2fc_hba_lookup(phys_dev);
+	if (!hba || !hba->ctlr.lp) {
+		rc = -ENODEV;
+		printk(KERN_ERR PFX "bnx2fc_enable: hba or lport not found\n");
+	} else if (!bnx2fc_link_ok(hba->ctlr.lp))
+		fcoe_ctlr_link_up(&hba->ctlr);
+
+nodev:
+	mutex_unlock(&bnx2fc_dev_lock);
+	rtnl_unlock();
+	return rc;
+}
+
+/**
+ * bnx2fc_create - Create bnx2fc FCoE interface
+ *
+ * @netdev:   The net device to create the FCoE interface on
+ * @fip_mode: The FIP mode required for the interface
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
+{
+	struct bnx2fc_hba *hba;
+	struct net_device *phys_dev;
+	struct fc_lport *lport;
+	struct ethtool_drvinfo drvinfo;
+	int rc = 0;
+	int vlan_id;
+
+	BNX2FC_MISC_DBG("Entered bnx2fc_create\n");
+	if (fip_mode != FIP_MODE_FABRIC) {
+		printk(KERN_ERR "fip mode not FABRIC\n");
+		return -EIO;
+	}
+
+	if (!rtnl_trylock()) {
+		printk(KERN_ERR "trying for rtnl_lock\n");
+		return -EIO;
+	}
+	mutex_lock(&bnx2fc_dev_lock);
+
+#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE
+	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+		rc = -ENODEV;
+		goto mod_err;
+	}
+#endif
+
+	if (!try_module_get(THIS_MODULE)) {
+		rc = -EINVAL;
+		goto mod_err;
+	}
+
+	/* obtain physical netdev */
+	if (netdev->priv_flags & IFF_802_1Q_VLAN) {
+		phys_dev = vlan_dev_real_dev(netdev);
+		vlan_id = vlan_dev_vlan_id(netdev);
+	} else {
+		printk(KERN_ERR PFX "Not a vlan device\n");
+		rc = -EINVAL;
+		goto netdev_err;
+	}
+	/* verify if the physical device is a netxtreme2 device */
+	if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
+		memset(&drvinfo, 0, sizeof(drvinfo));
+		phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
+		if (strcmp(drvinfo.driver, "bnx2x")) {
+			printk(KERN_ERR PFX "Not a netxtreme2 device\n");
+			rc = -EINVAL;
+			goto netdev_err;
+		}
+	} else {
+		printk(KERN_ERR PFX "unable to obtain drv_info\n");
+		rc = -EINVAL;
+		goto netdev_err;
+	}
+
+	/* obtain hba and initialize rest of the structure */
+	hba = bnx2fc_hba_lookup(phys_dev);
+	if (!hba) {
+		rc = -ENODEV;
+		printk(KERN_ERR PFX "bnx2fc_create: hba not found\n");
+		goto netdev_err;
+	}
+
+	if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+		rc = bnx2fc_fw_init(hba);
+		if (rc)
+			goto netdev_err;
+	}
+
+	if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+		rc = -EEXIST;
+		goto netdev_err;
+	}
+
+	/* update netdev with vlan netdev */
+	hba->netdev = netdev;
+	hba->vlan_id = vlan_id;
+	hba->vlan_enabled = 1;
+
+	rc = bnx2fc_interface_setup(hba, fip_mode);
+	if (rc) {
+		printk(KERN_ERR PFX "bnx2fc_interface_setup failed\n");
+		goto ifput_err;
+	}
+
+	hba->timer_work_queue =
+			create_singlethread_workqueue("bnx2fc_timer_wq");
+	if (!hba->timer_work_queue) {
+		printk(KERN_ERR PFX "ulp_init could not create timer_wq\n");
+		rc = -EINVAL;
+		goto ifput_err;
+	}
+
+	lport = bnx2fc_if_create(hba, &hba->pcidev->dev, 0);
+	if (!lport) {
+		printk(KERN_ERR PFX "Failed to create interface (%s)\n",
+			netdev->name);
+		bnx2fc_netdev_cleanup(hba);
+		rc = -EINVAL;
+		goto if_create_err;
+	}
+
+	lport->boot_time = jiffies;
+
+	/* Make this master N_port */
+	hba->ctlr.lp = lport;
+
+	set_bit(BNX2FC_CREATE_DONE, &hba->init_done);
+	printk(KERN_ERR PFX "create: START DISC\n");
+	bnx2fc_start_disc(hba);
+	/*
+	 * Release from kref_init in bnx2fc_interface_setup, on success
+	 * lport should be holding a reference taken in bnx2fc_if_create
+	 */
+	bnx2fc_interface_put(hba);
+	/* put netdev that was held while calling dev_get_by_name */
+	mutex_unlock(&bnx2fc_dev_lock);
+	rtnl_unlock();
+	return 0;
+
+if_create_err:
+	destroy_workqueue(hba->timer_work_queue);
+ifput_err:
+	bnx2fc_interface_put(hba);
+netdev_err:
+	module_put(THIS_MODULE);
+mod_err:
+	mutex_unlock(&bnx2fc_dev_lock);
+	rtnl_unlock();
+	return rc;
+}
+
+/**
+ * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc adapter instance
+ *
+ * @cnic:	Pointer to cnic device instance
+ *
+ **/
+static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic)
+{
+	struct list_head *list;
+	struct list_head *temp;
+	struct bnx2fc_hba *hba;
+
+	/* Called with bnx2fc_dev_lock held */
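+	/* the cast below relies on 'link' being the first member of
+	 * struct bnx2fc_hba */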
+	list_for_each_safe(list, temp, &adapter_list) {
+		hba = (struct bnx2fc_hba *)list;
+		if (hba->cnic == cnic)
+			return hba;
+	}
+	return NULL;
+}
+
+static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev)
+{
+	struct list_head *list;
+	struct list_head *temp;
+	struct bnx2fc_hba *hba;
+
+	/* Called with bnx2fc_dev_lock held */
+	list_for_each_safe(list, temp, &adapter_list) {
+		hba = (struct bnx2fc_hba *)list;
+		if (hba->phys_dev == phys_dev)
+			return hba;
+	}
+	printk(KERN_ERR PFX "hba_lookup: hba NULL\n");
+	return NULL;
+}
+
+/**
+ * bnx2fc_ulp_exit - shuts down adapter instance and frees all resources
+ *
+ * @dev:	cnic device handle
+ */
+static void bnx2fc_ulp_exit(struct cnic_dev *dev)
+{
+	struct bnx2fc_hba *hba;
+
+	BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n");
+
+	if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+		printk(KERN_ERR PFX "bnx2fc port check: %s, flags: %lx\n",
+			dev->netdev->name, dev->flags);
+		return;
+	}
+
+	mutex_lock(&bnx2fc_dev_lock);
+	hba = bnx2fc_find_hba_for_cnic(dev);
+	if (!hba) {
+		printk(KERN_ERR PFX "bnx2fc_ulp_exit: hba not found, dev 0%p\n",
+		       dev);
+		mutex_unlock(&bnx2fc_dev_lock);
+		return;
+	}
+
+	list_del_init(&hba->link);
+	adapter_count--;
+
+	if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+		/* destroy was not called; tear the interface down now */
+		bnx2fc_netdev_cleanup(hba);
+		bnx2fc_if_destroy(hba->ctlr.lp);
+	}
+	mutex_unlock(&bnx2fc_dev_lock);
+
+	bnx2fc_ulp_stop(hba);
+	/* unregister cnic device */
+	if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
+		hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
+	bnx2fc_interface_destroy(hba);
+}
+
+/**
+ * bnx2fc_fcoe_reset - Resets the FCoE interface's lport
+ *
+ * @shost: shost the reset is from
+ *
+ * Returns: always 0
+ */
+static int bnx2fc_fcoe_reset(struct Scsi_Host *shost)
+{
+	struct fc_lport *lport = shost_priv(shost);
+	fc_lport_reset(lport);
+	return 0;
+}
+
+
+static bool bnx2fc_match(struct net_device *netdev)
+{
+	mutex_lock(&bnx2fc_dev_lock);
+	if (netdev->priv_flags & IFF_802_1Q_VLAN) {
+		struct net_device *phys_dev = vlan_dev_real_dev(netdev);
+
+		if (bnx2fc_hba_lookup(phys_dev)) {
+			mutex_unlock(&bnx2fc_dev_lock);
+			return true;
+		}
+	}
+	mutex_unlock(&bnx2fc_dev_lock);
+	return false;
+}
+
+
+static struct fcoe_transport bnx2fc_transport = {
+	.name = {"bnx2fc"},
+	.attached = false,
+	.list = LIST_HEAD_INIT(bnx2fc_transport.list),
+	.match = bnx2fc_match,
+	.create = bnx2fc_create,
+	.destroy = bnx2fc_destroy,
+	.enable = bnx2fc_enable,
+	.disable = bnx2fc_disable,
+};
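+/*
+ * Illustrative usage (an assumption based on the libfcoe fcoe_transport
+ * interface of this period, not something this patch documents):
+ *
+ *   echo eth0.100 > /sys/module/libfcoe/parameters/create
+ *
+ * libfcoe walks the registered transports, calls match() on each, and
+ * invokes this transport's create() callback on the first match.
+ */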
+
+/**
+ * bnx2fc_percpu_thread_create - Create a receive thread for an
+ *				 online CPU
+ *
+ * @cpu: cpu index for the online cpu
+ */
+static void bnx2fc_percpu_thread_create(unsigned int cpu)
+{
+	struct bnx2fc_percpu_s *p;
+	struct task_struct *thread;
+
+	p = &per_cpu(bnx2fc_percpu, cpu);
+
+	thread = kthread_create(bnx2fc_percpu_io_thread,
+				(void *)p,
+				"bnx2fc_thread/%d", cpu);
+	/* bind the newly created thread to the cpu */
+	if (likely(!IS_ERR(thread))) {
+		kthread_bind(thread, cpu);
+		p->iothread = thread;
+		wake_up_process(thread);
+	}
+}
+
+static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
+{
+	struct bnx2fc_percpu_s *p;
+	struct task_struct *thread;
+	struct bnx2fc_work *work, *tmp;
+
+	BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu);
+
+	/* Prevent any new work from being queued for this CPU */
+	p = &per_cpu(bnx2fc_percpu, cpu);
+	spin_lock_bh(&p->fp_work_lock);
+	thread = p->iothread;
+	p->iothread = NULL;
+
+
+	/* Complete and free all work still queued for this CPU */
+	list_for_each_entry_safe(work, tmp, &p->work_list, list) {
+		list_del_init(&work->list);
+		bnx2fc_process_cq_compl(work->tgt, work->wqe);
+		kfree(work);
+	}
+
+	spin_unlock_bh(&p->fp_work_lock);
+
+	if (thread)
+		kthread_stop(thread);
+}
+
+/**
+ * bnx2fc_cpu_callback - Handler for CPU hotplug events
+ *
+ * @nfb:    The callback data block
+ * @action: The event triggering the callback
+ * @hcpu:   The index of the CPU that the event is for
+ *
+ * This creates or destroys per-CPU data for fcoe
+ *
+ * Returns NOTIFY_OK always.
+ */
+static int bnx2fc_cpu_callback(struct notifier_block *nfb,
+			     unsigned long action, void *hcpu)
+{
+	unsigned cpu = (unsigned long)hcpu;
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		printk(PFX "CPU %x online: Create Rx thread\n", cpu);
+		bnx2fc_percpu_thread_create(cpu);
+		break;
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
+		bnx2fc_percpu_thread_destroy(cpu);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+/**
+ * bnx2fc_mod_init - module init entry point
+ *
+ * Initialize driver wide global data structures, and register
+ * with cnic module
+ **/
+static int __init bnx2fc_mod_init(void)
+{
+	struct fcoe_percpu_s *bg;
+	struct task_struct *l2_thread;
+	int rc = 0;
+	unsigned int cpu = 0;
+	struct bnx2fc_percpu_s *p;
+
+	printk(KERN_INFO PFX "%s", version);
+
+	/* register as a fcoe transport */
+	rc = fcoe_transport_attach(&bnx2fc_transport);
+	if (rc) {
+		printk(KERN_ERR "failed to register an fcoe transport, check "
+			"if libfcoe is loaded\n");
+		goto out;
+	}
+
+	INIT_LIST_HEAD(&adapter_list);
+	mutex_init(&bnx2fc_dev_lock);
+	adapter_count = 0;
+
+	/* Attach FC transport template */
+	rc = bnx2fc_attach_transport();
+	if (rc)
+		goto detach_ft;
+
+	bnx2fc_wq = alloc_workqueue("bnx2fc", 0, 0);
+	if (!bnx2fc_wq) {
+		rc = -ENOMEM;
+		goto release_bt;
+	}
+
+	bg = &bnx2fc_global;
+	skb_queue_head_init(&bg->fcoe_rx_list);
+	l2_thread = kthread_create(bnx2fc_l2_rcv_thread,
+				   (void *)bg,
+				   "bnx2fc_l2_thread");
+	if (IS_ERR(l2_thread)) {
+		rc = PTR_ERR(l2_thread);
+		goto free_wq;
+	}
+	wake_up_process(l2_thread);
+	spin_lock_bh(&bg->fcoe_rx_list.lock);
+	bg->thread = l2_thread;
+	spin_unlock_bh(&bg->fcoe_rx_list.lock);
+
+	for_each_possible_cpu(cpu) {
+		p = &per_cpu(bnx2fc_percpu, cpu);
+		INIT_LIST_HEAD(&p->work_list);
+		spin_lock_init(&p->fp_work_lock);
+	}
+
+	for_each_online_cpu(cpu) {
+		bnx2fc_percpu_thread_create(cpu);
+	}
+
+	/* Initialize per CPU interrupt thread */
+	register_hotcpu_notifier(&bnx2fc_cpu_notifier);
+
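+	/* cnic is expected to invoke bnx2fc_ulp_init() for each NetXtreme II
+	 * device it has already enumerated once the driver is registered */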
+	cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
+
+	return 0;
+
+free_wq:
+	destroy_workqueue(bnx2fc_wq);
+release_bt:
+	bnx2fc_release_transport();
+detach_ft:
+	fcoe_transport_detach(&bnx2fc_transport);
+out:
+	return rc;
+}
+
+static void __exit bnx2fc_mod_exit(void)
+{
+	LIST_HEAD(to_be_deleted);
+	struct bnx2fc_hba *hba, *next;
+	struct fcoe_percpu_s *bg;
+	struct task_struct *l2_thread;
+	struct sk_buff *skb;
+	unsigned int cpu = 0;
+
+	/*
+	 * NOTE: Since cnic calls the register_driver routine under
+	 * rtnl_lock, rtnl_lock has higher precedence than bnx2fc_dev_lock.
+	 * unregister_device() cannot be called with bnx2fc_dev_lock
+	 * held.
+	 */
+	mutex_lock(&bnx2fc_dev_lock);
+	list_splice(&adapter_list, &to_be_deleted);
+	INIT_LIST_HEAD(&adapter_list);
+	adapter_count = 0;
+	mutex_unlock(&bnx2fc_dev_lock);
+
+	/* Unregister with cnic */
+	list_for_each_entry_safe(hba, next, &to_be_deleted, link) {
+		list_del_init(&hba->link);
+		printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p, kref = %d\n",
+			hba, atomic_read(&hba->kref.refcount));
+		bnx2fc_ulp_stop(hba);
+		/* unregister cnic device */
+		if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED,
+				       &hba->reg_with_cnic))
+			hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
+		bnx2fc_interface_destroy(hba);
+	}
+	cnic_unregister_driver(CNIC_ULP_FCOE);
+
+	/* Destroy global thread */
+	bg = &bnx2fc_global;
+	spin_lock_bh(&bg->fcoe_rx_list.lock);
+	l2_thread = bg->thread;
+	bg->thread = NULL;
+	while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL)
+		kfree_skb(skb);
+
+	spin_unlock_bh(&bg->fcoe_rx_list.lock);
+
+	if (l2_thread)
+		kthread_stop(l2_thread);
+
+	unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
+
+	/* Destroy per cpu threads */
+	for_each_online_cpu(cpu) {
+		bnx2fc_percpu_thread_destroy(cpu);
+	}
+
+	destroy_workqueue(bnx2fc_wq);
+	/*
+	 * detach from scsi transport
+	 * must happen after all destroys are done
+	 */
+	bnx2fc_release_transport();
+
+	/* detach from fcoe transport */
+	fcoe_transport_detach(&bnx2fc_transport);
+}
+
+module_init(bnx2fc_mod_init);
+module_exit(bnx2fc_mod_exit);
+
+static struct fc_function_template bnx2fc_transport_function = {
+	.show_host_node_name = 1,
+	.show_host_port_name = 1,
+	.show_host_supported_classes = 1,
+	.show_host_supported_fc4s = 1,
+	.show_host_active_fc4s = 1,
+	.show_host_maxframe_size = 1,
+
+	.show_host_port_id = 1,
+	.show_host_supported_speeds = 1,
+	.get_host_speed = fc_get_host_speed,
+	.show_host_speed = 1,
+	.show_host_port_type = 1,
+	.get_host_port_state = fc_get_host_port_state,
+	.show_host_port_state = 1,
+	.show_host_symbolic_name = 1,
+
+	.dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
+				sizeof(struct bnx2fc_rport)),
+	.show_rport_maxframe_size = 1,
+	.show_rport_supported_classes = 1,
+
+	.show_host_fabric_name = 1,
+	.show_starget_node_name = 1,
+	.show_starget_port_name = 1,
+	.show_starget_port_id = 1,
+	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+	.show_rport_dev_loss_tmo = 1,
+	.get_fc_host_stats = bnx2fc_get_host_stats,
+
+	.issue_fc_host_lip = bnx2fc_fcoe_reset,
+
+	.terminate_rport_io = fc_rport_terminate_io,
+
+	.vport_create = bnx2fc_vport_create,
+	.vport_delete = bnx2fc_vport_destroy,
+	.vport_disable = bnx2fc_vport_disable,
+};
+
+static struct fc_function_template bnx2fc_vport_xport_function = {
+	.show_host_node_name = 1,
+	.show_host_port_name = 1,
+	.show_host_supported_classes = 1,
+	.show_host_supported_fc4s = 1,
+	.show_host_active_fc4s = 1,
+	.show_host_maxframe_size = 1,
+
+	.show_host_port_id = 1,
+	.show_host_supported_speeds = 1,
+	.get_host_speed = fc_get_host_speed,
+	.show_host_speed = 1,
+	.show_host_port_type = 1,
+	.get_host_port_state = fc_get_host_port_state,
+	.show_host_port_state = 1,
+	.show_host_symbolic_name = 1,
+
+	.dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
+				sizeof(struct bnx2fc_rport)),
+	.show_rport_maxframe_size = 1,
+	.show_rport_supported_classes = 1,
+
+	.show_host_fabric_name = 1,
+	.show_starget_node_name = 1,
+	.show_starget_port_name = 1,
+	.show_starget_port_id = 1,
+	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+	.show_rport_dev_loss_tmo = 1,
+	.get_fc_host_stats = fc_get_host_stats,
+	.issue_fc_host_lip = bnx2fc_fcoe_reset,
+	.terminate_rport_io = fc_rport_terminate_io,
+};
+
+/**
+ * scsi_host_template structure used while registering with SCSI-ml
+ */
+static struct scsi_host_template bnx2fc_shost_template = {
+	.module			= THIS_MODULE,
+	.name			= "Broadcom Offload FCoE Initiator",
+	.queuecommand		= bnx2fc_queuecommand,
+	.eh_abort_handler	= bnx2fc_eh_abort,	  /* abts */
+	.eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */
+	.eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */
+	.eh_host_reset_handler	= fc_eh_host_reset,
+	.slave_alloc		= fc_slave_alloc,
+	.change_queue_depth	= fc_change_queue_depth,
+	.change_queue_type	= fc_change_queue_type,
+	.this_id		= -1,
+	.cmd_per_lun		= 3,
+	.can_queue		= (BNX2FC_MAX_OUTSTANDING_CMNDS/2),
+	.use_clustering		= ENABLE_CLUSTERING,
+	.sg_tablesize		= BNX2FC_MAX_BDS_PER_CMD,
+	.max_sectors		= 512,
+};
+
+static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
+	.frame_send		= bnx2fc_xmit,
+	.elsct_send		= bnx2fc_elsct_send,
+	.fcp_abort_io		= bnx2fc_abort_io,
+	.fcp_cleanup		= bnx2fc_cleanup,
+	.rport_event_callback	= bnx2fc_rport_event_handler,
+};
+
+/**
+ * bnx2fc_cnic_cb - global template of bnx2fc - cnic driver interface
+ *			structure carrying callback function pointers
+ */
+static struct cnic_ulp_ops bnx2fc_cnic_cb = {
+	.owner			= THIS_MODULE,
+	.cnic_init		= bnx2fc_ulp_init,
+	.cnic_exit		= bnx2fc_ulp_exit,
+	.cnic_start		= bnx2fc_ulp_start,
+	.cnic_stop		= bnx2fc_ulp_stop,
+	.indicate_kcqes		= bnx2fc_indicate_kcqe,
+	.indicate_netevent	= bnx2fc_indicate_netevent,
+};

drivers/scsi/bnx2fc/bnx2fc_hwi.c

@@ -0,0 +1,1868 @@
+/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * This file contains the low-level functions that interact
+ * with the 57712 FCoE firmware.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
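+/* per-CPU fast-path work structures, defined elsewhere in the driver */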
+DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
+
+static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
+					struct fcoe_kcqe *new_cqe_kcqe);
+static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
+					struct fcoe_kcqe *ofld_kcqe);
+static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
+						struct fcoe_kcqe *ofld_kcqe);
+static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
+static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
+					struct fcoe_kcqe *conn_destroy);
+
+int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
+{
+	struct fcoe_kwqe_stat stat_req;
+	struct kwqe *kwqe_arr[2];
+	int num_kwqes = 1;
+	int rc = 0;
+
+	memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
+	stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
+	stat_req.hdr.flags =
+		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
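+	/* the 64-bit DMA address is carried as two 32-bit halves in the KWQE */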
+	stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
+	stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);
+
+	kwqe_arr[0] = (struct kwqe *) &stat_req;
+
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+	return rc;
+}
+
+/**
+ * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
+ *
+ * @hba:	adapter structure pointer
+ *
+ * Send down the FCoE firmware init KWQEs that initiate the initial handshake
+ *	with the f/w.
+ *
+ */
+int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
+{
+	struct fcoe_kwqe_init1 fcoe_init1;
+	struct fcoe_kwqe_init2 fcoe_init2;
+	struct fcoe_kwqe_init3 fcoe_init3;
+	struct kwqe *kwqe_arr[3];
+	int num_kwqes = 3;
+	int rc = 0;
+
+	if (!hba->cnic) {
+		printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n");
+		return -ENODEV;
+	}
+
+	/* fill init1 KWQE */
+	memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
+	fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
+	fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
+	fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
+	fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
+	fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
+	fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
+	fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
+	fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
+	fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
+	fcoe_init1.task_list_pbl_addr_hi =
+				(u32) ((u64) hba->task_ctx_bd_dma >> 32);
+	fcoe_init1.mtu = hba->netdev->mtu;
+
+	fcoe_init1.flags = (PAGE_SHIFT <<
+				FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);
+
+	fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;
+
+	/* fill init2 KWQE */
+	memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
+	fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
+	fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
+	fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
+					   ((u64) hba->hash_tbl_pbl_dma >> 32);
+
+	fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
+	fcoe_init2.t2_hash_tbl_addr_hi = (u32)
+					  ((u64) hba->t2_hash_tbl_dma >> 32);
+
+	fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
+	fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
+					((u64) hba->t2_hash_tbl_ptr_dma >> 32);
+
+	fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;
+
+	/* fill init3 KWQE */
+	memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
+	fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
+	fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+	fcoe_init3.error_bit_map_lo = 0xffffffff;
+	fcoe_init3.error_bit_map_hi = 0xffffffff;
+
+
+	kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
+	kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
+	kwqe_arr[2] = (struct kwqe *) &fcoe_init3;
+
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+	return rc;
+}
+
+int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
+{
+	struct fcoe_kwqe_destroy fcoe_destroy;
+	struct kwqe *kwqe_arr[2];
+	int num_kwqes = 1;
+	int rc = -1;
+
+	/* fill destroy KWQE */
+	memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
+	fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
+	fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+	kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;
+
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+	return rc;
+}
+
+/**
+ * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
+ *
+ * @port:		port structure pointer
+ * @tgt:		bnx2fc_rport structure pointer
+ */
+int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
+					struct bnx2fc_rport *tgt)
+{
+	struct fc_lport *lport = port->lport;
+	struct bnx2fc_hba *hba = port->priv;
+	struct kwqe *kwqe_arr[4];
+	struct fcoe_kwqe_conn_offload1 ofld_req1;
+	struct fcoe_kwqe_conn_offload2 ofld_req2;
+	struct fcoe_kwqe_conn_offload3 ofld_req3;
+	struct fcoe_kwqe_conn_offload4 ofld_req4;
+	struct fc_rport_priv *rdata = tgt->rdata;
+	struct fc_rport *rport = tgt->rport;
+	int num_kwqes = 4;
+	u32 port_id;
+	int rc = 0;
+	u16 conn_id;
+
+	/* Initialize offload request 1 structure */
+	memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));
+
+	ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
+	ofld_req1.hdr.flags =
+		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+
+	conn_id = (u16)tgt->fcoe_conn_id;
+	ofld_req1.fcoe_conn_id = conn_id;
+
+
+	ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
+	ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);
+
+	ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
+	ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);
+
+	ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
+	ofld_req1.rq_first_pbe_addr_hi =
+				(u32)((u64) tgt->rq_dma >> 32);
+
+	ofld_req1.rq_prod = 0x8000;
+
+	/* Initialize offload request 2 structure */
+	memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));
+
+	ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
+	ofld_req2.hdr.flags =
+		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;
+
+	ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
+	ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);
+
+	ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
+	ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);
+
+	ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
+	ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);
+
+	/* Initialize offload request 3 structure */
+	memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));
+
+	ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
+	ofld_req3.hdr.flags =
+		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	ofld_req3.vlan_tag = hba->vlan_id <<
+				FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
+	ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
+
+	port_id = fc_host_port_id(lport->host);
+	if (port_id == 0) {
+		BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Store s_id of the initiator for further reference. This will
+	 * be used during disable/destroy and linkdown processing, since
+	 * when the lport is reset, its port_id is also reset to 0.
+	 */
+	tgt->sid = port_id;
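+	/* FC_IDs are 24 bits; the KWQE carries them as three separate
+	 * bytes, least significant byte first */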
+	ofld_req3.s_id[0] = (port_id & 0x000000FF);
+	ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
+	ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;
+
+	port_id = rport->port_id;
+	ofld_req3.d_id[0] = (port_id & 0x000000FF);
+	ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
+	ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;
+
+	ofld_req3.tx_total_conc_seqs = rdata->max_seq;
+
+	ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
+	ofld_req3.rx_max_fc_pay_len  = lport->mfs;
+
+	ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
+	ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
+	ofld_req3.rx_open_seqs_exch_c3 = 1;
+
+	ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
+	ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);
+
+	/* set mul_n_port_ids supported flag to 0, until it is supported */
+	ofld_req3.flags = 0;
+	/*
+	ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
+			    FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
+	*/
+	/* Info from PLOGI response */
+	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
+			     FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);
+
+	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
+			     FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
+
+	/* vlan flag */
+	ofld_req3.flags |= (hba->vlan_enabled <<
+			    FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
+
+	/* C2_VALID and ACK flags are not set as they are not supported */
+
+
+	/* Initialize offload request 4 structure */
+	memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
+	ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
+	ofld_req4.hdr.flags =
+		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
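+	/* e_d_tov is in milliseconds; the divisor suggests the firmware
+	 * timer ticks in 20 ms units (an assumption, not documented here) */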
+	ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
+
+
+	ofld_req4.src_mac_addr_lo32[0] =  port->data_src_addr[5];
+							/* local mac */
+	ofld_req4.src_mac_addr_lo32[1] =  port->data_src_addr[4];
+	ofld_req4.src_mac_addr_lo32[2] =  port->data_src_addr[3];
+	ofld_req4.src_mac_addr_lo32[3] =  port->data_src_addr[2];
+	ofld_req4.src_mac_addr_hi16[0] =  port->data_src_addr[1];
+	ofld_req4.src_mac_addr_hi16[1] =  port->data_src_addr[0];
+	ofld_req4.dst_mac_addr_lo32[0] =  hba->ctlr.dest_addr[5];/* fcf mac */
+	ofld_req4.dst_mac_addr_lo32[1] =  hba->ctlr.dest_addr[4];
+	ofld_req4.dst_mac_addr_lo32[2] =  hba->ctlr.dest_addr[3];
+	ofld_req4.dst_mac_addr_lo32[3] =  hba->ctlr.dest_addr[2];
+	ofld_req4.dst_mac_addr_hi16[0] =  hba->ctlr.dest_addr[1];
+	ofld_req4.dst_mac_addr_hi16[1] =  hba->ctlr.dest_addr[0];
+
+	ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
+	ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
+
+	ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
+	ofld_req4.confq_pbl_base_addr_hi =
+					(u32)((u64) tgt->confq_pbl_dma >> 32);
+
+	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
+	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
+	kwqe_arr[2] = (struct kwqe *) &ofld_req3;
+	kwqe_arr[3] = (struct kwqe *) &ofld_req4;
+
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+	return rc;
+}
+
+/**
+ * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
+ *
+ * @port:		port structure pointer
+ * @tgt:		bnx2fc_rport structure pointer
+ */
+static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
+					struct bnx2fc_rport *tgt)
+{
+	struct kwqe *kwqe_arr[2];
+	struct bnx2fc_hba *hba = port->priv;
+	struct fcoe_kwqe_conn_enable_disable enbl_req;
+	struct fc_lport *lport = port->lport;
+	struct fc_rport *rport = tgt->rport;
+	int num_kwqes = 1;
+	int rc = 0;
+	u32 port_id;
+
+	memset(&enbl_req, 0x00,
+	       sizeof(struct fcoe_kwqe_conn_enable_disable));
+	enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
+	enbl_req.hdr.flags =
+		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	enbl_req.src_mac_addr_lo32[0] =  port->data_src_addr[5];
+							/* local mac */
+	enbl_req.src_mac_addr_lo32[1] =  port->data_src_addr[4];
+	enbl_req.src_mac_addr_lo32[2] =  port->data_src_addr[3];
+	enbl_req.src_mac_addr_lo32[3] =  port->data_src_addr[2];
+	enbl_req.src_mac_addr_hi16[0] =  port->data_src_addr[1];
+	enbl_req.src_mac_addr_hi16[1] =  port->data_src_addr[0];
+
+	enbl_req.dst_mac_addr_lo32[0] =  hba->ctlr.dest_addr[5];/* fcf mac */
+	enbl_req.dst_mac_addr_lo32[1] =  hba->ctlr.dest_addr[4];
+	enbl_req.dst_mac_addr_lo32[2] =  hba->ctlr.dest_addr[3];
+	enbl_req.dst_mac_addr_lo32[3] =  hba->ctlr.dest_addr[2];
+	enbl_req.dst_mac_addr_hi16[0] =  hba->ctlr.dest_addr[1];
+	enbl_req.dst_mac_addr_hi16[1] =  hba->ctlr.dest_addr[0];
+
+	port_id = fc_host_port_id(lport->host);
+	if (port_id != tgt->sid) {
+		printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x,"
+				"sid = 0x%x\n", port_id, tgt->sid);
+		port_id = tgt->sid;
+	}
+	enbl_req.s_id[0] = (port_id & 0x000000FF);
+	enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
+	enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
+
+	port_id = rport->port_id;
+	enbl_req.d_id[0] = (port_id & 0x000000FF);
+	enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
+	enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
+	enbl_req.vlan_tag = hba->vlan_id <<
+				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
+	enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
+	enbl_req.vlan_flag = hba->vlan_enabled;
+	enbl_req.context_id = tgt->context_id;
+	enbl_req.conn_id = tgt->fcoe_conn_id;
+
+	kwqe_arr[0] = (struct kwqe *) &enbl_req;
+
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+	return rc;
+}
+
+/**
+ * bnx2fc_send_session_disable_req - initiates FCoE Session disable
+ *
+ * @port:		port structure pointer
+ * @tgt:		bnx2fc_rport structure pointer
+ */
+int bnx2fc_send_session_disable_req(struct fcoe_port *port,
+				    struct bnx2fc_rport *tgt)
+{
+	struct bnx2fc_hba *hba = port->priv;
+	struct fcoe_kwqe_conn_enable_disable disable_req;
+	struct kwqe *kwqe_arr[2];
+	struct fc_rport *rport = tgt->rport;
+	int num_kwqes = 1;
+	int rc = 0;
+	u32 port_id;
+
+	memset(&disable_req, 0x00,
+	       sizeof(struct fcoe_kwqe_conn_enable_disable));
+	disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
+	disable_req.hdr.flags =
+		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	disable_req.src_mac_addr_lo32[0] =  port->data_src_addr[5];
+	disable_req.src_mac_addr_lo32[1] =  port->data_src_addr[4];
+	disable_req.src_mac_addr_lo32[2] =  port->data_src_addr[3];
+	disable_req.src_mac_addr_lo32[3] =  port->data_src_addr[2];
+	disable_req.src_mac_addr_hi16[0] =  port->data_src_addr[1];
+	disable_req.src_mac_addr_hi16[1] =  port->data_src_addr[0];
+
+	disable_req.dst_mac_addr_lo32[0] =  hba->ctlr.dest_addr[5];/* fcf mac */
+	disable_req.dst_mac_addr_lo32[1] =  hba->ctlr.dest_addr[4];
+	disable_req.dst_mac_addr_lo32[2] =  hba->ctlr.dest_addr[3];
+	disable_req.dst_mac_addr_lo32[3] =  hba->ctlr.dest_addr[2];
+	disable_req.dst_mac_addr_hi16[0] =  hba->ctlr.dest_addr[1];
+	disable_req.dst_mac_addr_hi16[1] =  hba->ctlr.dest_addr[0];
+
+	port_id = tgt->sid;
+	disable_req.s_id[0] = (port_id & 0x000000FF);
+	disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
+	disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
+
+
+	port_id = rport->port_id;
+	disable_req.d_id[0] = (port_id & 0x000000FF);
+	disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
+	disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
+	disable_req.context_id = tgt->context_id;
+	disable_req.conn_id = tgt->fcoe_conn_id;
+	disable_req.vlan_tag = hba->vlan_id <<
+				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
+	disable_req.vlan_tag |=
+			3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
+	disable_req.vlan_flag = hba->vlan_enabled;
+
+	kwqe_arr[0] = (struct kwqe *) &disable_req;
+
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+	return rc;
+}
+
+/**
+ * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
+ *
+ * @hba:		adapter structure pointer
+ * @tgt:		bnx2fc_rport structure pointer
+ */
+int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
+					struct bnx2fc_rport *tgt)
+{
+	struct fcoe_kwqe_conn_destroy destroy_req;
+	struct kwqe *kwqe_arr[2];
+	int num_kwqes = 1;
+	int rc = 0;
+
+	memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
+	destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
+	destroy_req.hdr.flags =
+		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	destroy_req.context_id = tgt->context_id;
+	destroy_req.conn_id = tgt->fcoe_conn_id;
+
+	kwqe_arr[0] = (struct kwqe *) &destroy_req;
+
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+	return rc;
+}
+
+static void bnx2fc_unsol_els_work(struct work_struct *work)
+{
+	struct bnx2fc_unsol_els *unsol_els;
+	struct fc_lport *lport;
+	struct fc_frame *fp;
+
+	unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
+	lport = unsol_els->lport;
+	fp = unsol_els->fp;
+	fc_exch_recv(lport, fp);
+	kfree(unsol_els);
+}
+
+void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
+				   unsigned char *buf,
+				   u32 frame_len, u16 l2_oxid)
+{
+	struct fcoe_port *port = tgt->port;
+	struct fc_lport *lport = port->lport;
+	struct bnx2fc_unsol_els *unsol_els;
+	struct fc_frame_header *fh;
+	struct fc_frame *fp;
+	struct sk_buff *skb;
+	u32 payload_len;
+	u32 crc;
+	u8 op;
+
+
+	if (!unsol_els) {
+		BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
+		return;
+	}
+
+	BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
+		l2_oxid, frame_len);
+
+	payload_len = frame_len - sizeof(struct fc_frame_header);
+
+	fp = fc_frame_alloc(lport, payload_len);
+	if (!fp) {
+		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
+		kfree(unsol_els);
+		return;
+	}
+
+	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
+	/* Copy FC Frame header and payload into the frame */
+	memcpy(fh, buf, frame_len);
+
+	if (l2_oxid != FC_XID_UNKNOWN)
+		fh->fh_ox_id = htons(l2_oxid);
+
+	skb = fp_skb(fp);
+
+	if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
+	    (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {
+
+		if (fh->fh_type == FC_TYPE_ELS) {
+			op = fc_frame_payload_op(fp);
+			if ((op == ELS_TEST) ||	(op == ELS_ESTC) ||
+			    (op == ELS_FAN) || (op == ELS_CSU)) {
+				/*
+				 * No need to reply for these
+				 * ELS requests
+				 */
+				printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
+				kfree_skb(skb);
+				return;
+			}
+		}
+		crc = fcoe_fc_crc(fp);
+		fc_frame_init(fp);
+		fr_dev(fp) = lport;
+		fr_sof(fp) = FC_SOF_I3;
+		fr_eof(fp) = FC_EOF_T;
+		fr_crc(fp) = cpu_to_le32(~crc);
+		unsol_els->lport = lport;
+		unsol_els->fp = fp;
+		INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
+		queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
+	} else {
+		BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
+		kfree_skb(skb);
+		kfree(unsol_els);
+	}
+}
+
+static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
+{
+	u8 num_rq;
+	struct fcoe_err_report_entry *err_entry;
+	unsigned char *rq_data;
+	unsigned char *buf = NULL, *buf1;
+	int i;
+	u16 xid;
+	u32 frame_len, len;
+	struct bnx2fc_cmd *io_req = NULL;
+	struct fcoe_task_ctx_entry *task, *task_page;
+	struct bnx2fc_hba *hba = tgt->port->priv;
+	int task_idx, index;
+	int rc = 0;
+
+	BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
+	switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
+	case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
+		frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
+			     FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;
+
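+		/* Round up: number of RQ buffers needed to hold the whole frame */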
+		num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;
+
+		rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
+		if (rq_data) {
+			buf = rq_data;
+		} else {
+			buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
+					      GFP_ATOMIC);
+
+			if (!buf1) {
+				BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
+				break;
+			}
+
+			for (i = 0; i < num_rq; i++) {
+				rq_data = (unsigned char *)
+					   bnx2fc_get_next_rqe(tgt, 1);
+				len = BNX2FC_RQ_BUF_SZ;
+				memcpy(buf1, rq_data, len);
+				buf1 += len;
+			}
+		}
+		bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
+					      FC_XID_UNKNOWN);
+
+		if (buf != rq_data)
+			kfree(buf);
+		bnx2fc_return_rqe(tgt, num_rq);
+		break;
+
+	case FCOE_ERROR_DETECTION_CQE_TYPE:
+		/*
+		 * In case of an error-reporting CQE, a single RQ entry
+		 * is consumed.
+		 */
+		spin_lock_bh(&tgt->tgt_lock);
+		num_rq = 1;
+		err_entry = (struct fcoe_err_report_entry *)
+			     bnx2fc_get_next_rqe(tgt, 1);
+		xid = err_entry->fc_hdr.ox_id;
+		BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
+		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
+			err_entry->err_warn_bitmap_hi,
+			err_entry->err_warn_bitmap_lo);
+		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
+			err_entry->tx_buf_off, err_entry->rx_buf_off);
+
+		bnx2fc_return_rqe(tgt, 1);
+
+		if (xid > BNX2FC_MAX_XID) {
+			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
+				   xid);
+			spin_unlock_bh(&tgt->tgt_lock);
+			break;
+		}
+
+		task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+		index = xid % BNX2FC_TASKS_PER_PAGE;
+		task_page = (struct fcoe_task_ctx_entry *)
+						hba->task_ctx[task_idx];
+		task = &(task_page[index]);
+
+		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
+		if (!io_req) {
+			spin_unlock_bh(&tgt->tgt_lock);
+			break;
+		}
+
+		if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
+			printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
+			spin_unlock_bh(&tgt->tgt_lock);
+			break;
+		}
+
+		if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
+				       &io_req->req_flags)) {
+			BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
+					    "progress.. ignore unsol err\n");
+			spin_unlock_bh(&tgt->tgt_lock);
+			break;
+		}
+
+		/*
+		 * If ABTS is already in progress, and FW error is
+		 * received after that, do not cancel the timeout_work
+		 * and let the error recovery continue by explicitly
+		 * logging out the target, when the ABTS eventually
+		 * times out.
+		 */
+		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+				      &io_req->req_flags)) {
+			/*
+			 * Cancel the timeout_work, as we received IO
+			 * completion with FW error.
+			 */
+			if (cancel_delayed_work(&io_req->timeout_work))
+				kref_put(&io_req->refcount,
+					 bnx2fc_cmd_release); /* timer hold */
+
+			rc = bnx2fc_initiate_abts(io_req);
+			if (rc != SUCCESS) {
+				BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
+					"failed. issue cleanup\n");
+				rc = bnx2fc_initiate_cleanup(io_req);
+				BUG_ON(rc);
+			}
+		} else
+			printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
+					    "in ABTS processing\n", xid);
+		spin_unlock_bh(&tgt->tgt_lock);
+		break;
+
+	case FCOE_WARNING_DETECTION_CQE_TYPE:
+		/*
+		 * In case of a warning-reporting CQE, a single RQ entry
+		 * is consumed.
+		 */
+		num_rq = 1;
+		err_entry = (struct fcoe_err_report_entry *)
+			     bnx2fc_get_next_rqe(tgt, 1);
+		xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
+		BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
+		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
+			err_entry->err_warn_bitmap_hi,
+			err_entry->err_warn_bitmap_lo);
+		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
+			err_entry->tx_buf_off, err_entry->rx_buf_off);
+
+		bnx2fc_return_rqe(tgt, 1);
+		break;
+
+	default:
+		printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
+		break;
+	}
+}
+
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
+{
+	struct fcoe_task_ctx_entry *task;
+	struct fcoe_task_ctx_entry *task_page;
+	struct fcoe_port *port = tgt->port;
+	struct bnx2fc_hba *hba = port->priv;
+	struct bnx2fc_cmd *io_req;
+	int task_idx, index;
+	u16 xid;
+	u8  cmd_type;
+	u8 rx_state = 0;
+	u8 num_rq;
+
+	spin_lock_bh(&tgt->tgt_lock);
+	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
+	if (xid >= BNX2FC_MAX_TASKS) {
+		printk(KERN_ALERT PFX "ERROR:xid out of range\n");
+		spin_unlock_bh(&tgt->tgt_lock);
+		return;
+	}
+	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+	index = xid % BNX2FC_TASKS_PER_PAGE;
+	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
+	task = &(task_page[index]);
+
+	num_rq = ((task->rx_wr_tx_rd.rx_flags &
+		   FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >>
+		   FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT);
+
+	io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
+
+	if (io_req == NULL) {
+		printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
+		spin_unlock_bh(&tgt->tgt_lock);
+		return;
+	}
+
+	/* Timestamp IO completion time */
+	cmd_type = io_req->cmd_type;
+
+	/* optimized completion path */
+	if (cmd_type == BNX2FC_SCSI_CMD) {
+		rx_state = ((task->rx_wr_tx_rd.rx_flags &
+			    FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >>
+			    FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT);
+
+		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
+			bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
+			spin_unlock_bh(&tgt->tgt_lock);
+			return;
+		}
+	}
+
+	/* Process other IO completion types */
+	switch (cmd_type) {
+	case BNX2FC_SCSI_CMD:
+		if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
+			bnx2fc_process_abts_compl(io_req, task, num_rq);
+		else if (rx_state ==
+			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
+			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
+		else
+			printk(KERN_ERR PFX "Invalid rx state - %d\n",
+				rx_state);
+		break;
+
+	case BNX2FC_TASK_MGMT_CMD:
+		BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
+		bnx2fc_process_tm_compl(io_req, task, num_rq);
+		break;
+
+	case BNX2FC_ABTS:
+		/*
+		 * ABTS request received by firmware. ABTS response
+		 * will be delivered to the task belonging to the IO
+		 * that was aborted
+		 */
+		BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
+		kref_put(&io_req->refcount, bnx2fc_cmd_release);
+		break;
+
+	case BNX2FC_ELS:
+		BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n");
+		bnx2fc_process_els_compl(io_req, task, num_rq);
+		break;
+
+	case BNX2FC_CLEANUP:
+		BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
+		kref_put(&io_req->refcount, bnx2fc_cmd_release);
+		break;
+
+	default:
+		printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
+		break;
+	}
+	spin_unlock_bh(&tgt->tgt_lock);
+}
+
+struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
+{
+	struct bnx2fc_work *work;
+	work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
+	if (!work)
+		return NULL;
+
+	INIT_LIST_HEAD(&work->list);
+	work->tgt = tgt;
+	work->wqe = wqe;
+	return work;
+}
+
+int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
+{
+	struct fcoe_cqe *cq;
+	u32 cq_cons;
+	struct fcoe_cqe *cqe;
+	u16 wqe;
+	bool more_cqes_found = false;
+
+	/*
+	 * cq_lock is a low contention lock used to protect
+	 * the CQ data structure from being freed up during
+	 * the upload operation
+	 */
+	spin_lock_bh(&tgt->cq_lock);
+
+	if (!tgt->cq) {
+		printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
+		spin_unlock_bh(&tgt->cq_lock);
+		return 0;
+	}
+	cq = tgt->cq;
+	cq_cons = tgt->cq_cons_idx;
+	cqe = &cq[cq_cons];
+
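+	/*
+	 * First pass drains the CQ and re-arms it; the second pass picks up
+	 * entries that raced in before the re-arm, then the loop exits.
+	 */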
+	do {
+		more_cqes_found ^= true;
+
+		while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
+		       (tgt->cq_curr_toggle_bit <<
+		       FCOE_CQE_TOGGLE_BIT_SHIFT)) {
+
+			/* new entry on the cq */
+			if (wqe & FCOE_CQE_CQE_TYPE) {
+				/* Unsolicited event notification */
+				bnx2fc_process_unsol_compl(tgt, wqe);
+			} else {
+				struct bnx2fc_work *work = NULL;
+				struct bnx2fc_percpu_s *fps = NULL;
+				unsigned int cpu = wqe % num_possible_cpus();
+
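+				/* Spread CQE processing across the per-CPU I/O threads */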
+				fps = &per_cpu(bnx2fc_percpu, cpu);
+				spin_lock_bh(&fps->fp_work_lock);
+				if (unlikely(!fps->iothread))
+					goto unlock;
+
+				work = bnx2fc_alloc_work(tgt, wqe);
+				if (work)
+					list_add_tail(&work->list,
+							&fps->work_list);
+unlock:
+				spin_unlock_bh(&fps->fp_work_lock);
+
+				/* Pending work request completion */
+				if (fps->iothread && work)
+					wake_up_process(fps->iothread);
+				else
+					bnx2fc_process_cq_compl(tgt, wqe);
+			}
+			cqe++;
+			tgt->cq_cons_idx++;
+
+			if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
+				tgt->cq_cons_idx = 0;
+				cqe = cq;
+				tgt->cq_curr_toggle_bit =
+					1 - tgt->cq_curr_toggle_bit;
+			}
+		}
+		/* Re-arm CQ */
+		if (more_cqes_found) {
+			tgt->conn_db->cq_arm.lo = -1;
+			wmb();
+		}
+	} while (more_cqes_found);
+
+	/*
+	 * Commit tgt->cq_cons_idx change to the memory
+	 * spin_lock implies full memory barrier, no need to smp_wmb
+	 */
+
+	spin_unlock_bh(&tgt->cq_lock);
+	return 0;
+}
+
+/**
+ * bnx2fc_fastpath_notification - process global event queue (KCQ)
+ *
+ * @hba:		adapter structure pointer
+ * @new_cqe_kcqe:	pointer to newly DMA'd KCQ entry
+ *
+ * Fast path event notification handler
+ */
+static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
+					struct fcoe_kcqe *new_cqe_kcqe)
+{
+	u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
+	struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
+
+	if (!tgt) {
+		printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id);
+		return;
+	}
+
+	bnx2fc_process_new_cqes(tgt);
+}
+
+/**
+ * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
+ *
+ * @hba:	adapter structure pointer
+ * @ofld_kcqe:	connection offload kcqe pointer
+ *
+ * handle session offload completion, enable the session if offload is
+ * successful.
+ */
+static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
+					struct fcoe_kcqe *ofld_kcqe)
+{
+	struct bnx2fc_rport		*tgt;
+	struct fcoe_port		*port;
+	u32				conn_id;
+	u32				context_id;
+	int				rc;
+
+	conn_id = ofld_kcqe->fcoe_conn_id;
+	context_id = ofld_kcqe->fcoe_conn_context_id;
+	tgt = hba->tgt_ofld_list[conn_id];
+	if (!tgt) {
+		printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
+		return;
+	}
+	BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
+		ofld_kcqe->fcoe_conn_context_id);
+	port = tgt->port;
+	if (hba != tgt->port->priv) {
+		printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n");
+		goto ofld_cmpl_err;
+	}
+	/*
+	 * cnic has allocated a context_id for this session; use this
+	 * while enabling the session.
+	 */
+	tgt->context_id = context_id;
+	if (ofld_kcqe->completion_status) {
+		if (ofld_kcqe->completion_status ==
+				FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
+			printk(KERN_ERR PFX "unable to allocate FCoE context "
+				"resources\n");
+			set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
+		}
+		goto ofld_cmpl_err;
+	} else {
+
+		/* now enable the session */
+		rc = bnx2fc_send_session_enable_req(port, tgt);
+		if (rc) {
+			printk(KERN_ALERT PFX "enable session failed\n");
+			goto ofld_cmpl_err;
+		}
+	}
+	return;
+ofld_cmpl_err:
+	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+	wake_up_interruptible(&tgt->ofld_wait);
+}
+
+/**
+ * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
+ *
+ * @hba:	adapter structure pointer
+ * @ofld_kcqe:	connection offload kcqe pointer
+ *
+ * handle session enable completion, mark the rport as ready
+ */
+
+static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
+						struct fcoe_kcqe *ofld_kcqe)
+{
+	struct bnx2fc_rport		*tgt;
+	u32				conn_id;
+	u32				context_id;
+
+	context_id = ofld_kcqe->fcoe_conn_context_id;
+	conn_id = ofld_kcqe->fcoe_conn_id;
+	tgt = hba->tgt_ofld_list[conn_id];
+	if (!tgt) {
+		printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n");
+		return;
+	}
+
+	BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
+		ofld_kcqe->fcoe_conn_context_id);
+
+	/*
+	 * context_id should be the same for this target during offload
+	 * and enable
+	 */
+	if (tgt->context_id != context_id) {
+		printk(KERN_ALERT PFX "context id mis-match\n");
+		return;
+	}
+	if (hba != tgt->port->priv) {
+		printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
+		goto enbl_cmpl_err;
+	}
+	if (ofld_kcqe->completion_status) {
+		goto enbl_cmpl_err;
+	} else {
+		/* enable successful - rport ready for issuing IOs */
+		set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+		set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+		wake_up_interruptible(&tgt->ofld_wait);
+	}
+	return;
+
+enbl_cmpl_err:
+	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+	wake_up_interruptible(&tgt->ofld_wait);
+}
+
+static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
+					struct fcoe_kcqe *disable_kcqe)
+{
+
+	struct bnx2fc_rport		*tgt;
+	u32				conn_id;
+
+	conn_id = disable_kcqe->fcoe_conn_id;
+	tgt = hba->tgt_ofld_list[conn_id];
+	if (!tgt) {
+		printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n");
+		return;
+	}
+
+	BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);
+
+	if (disable_kcqe->completion_status) {
+		printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n",
+			disable_kcqe->completion_status);
+		return;
+	} else {
+		/* disable successful */
+		BNX2FC_TGT_DBG(tgt, "disable successful\n");
+		clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+		set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
+		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+		wake_up_interruptible(&tgt->upld_wait);
+	}
+}
+
+static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
+					struct fcoe_kcqe *destroy_kcqe)
+{
+	struct bnx2fc_rport		*tgt;
+	u32				conn_id;
+
+	conn_id = destroy_kcqe->fcoe_conn_id;
+	tgt = hba->tgt_ofld_list[conn_id];
+	if (!tgt) {
+		printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n");
+		return;
+	}
+
+	BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
+
+	if (destroy_kcqe->completion_status) {
+		printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n",
+			destroy_kcqe->completion_status);
+		return;
+	} else {
+		/* destroy successful */
+		BNX2FC_TGT_DBG(tgt, "upload successful\n");
+		clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
+		set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
+		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+		wake_up_interruptible(&tgt->upld_wait);
+	}
+}
+
+static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
+{
+	switch (err_code) {
+	case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
+		printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
+		break;
+
+	case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
+		printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
+		break;
+
+	case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
+		printk(KERN_ERR PFX "init_failure due to NIC error\n");
+		break;
+
+	default:
+		printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
+	}
+}
+
+/**
+ * bnx2fc_indicate_kcqe - process KCQE
+ *
+ * @context:	adapter structure pointer
+ * @kcq:	array of kcqe pointers
+ * @num_cqe:	number of completion queue elements
+ *
+ * Generic KCQ event handler
+ */
+void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
+					u32 num_cqe)
+{
+	struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
+	int i = 0;
+	struct fcoe_kcqe *kcqe = NULL;
+
+	while (i < num_cqe) {
+		kcqe = (struct fcoe_kcqe *) kcq[i++];
+
+		switch (kcqe->op_code) {
+		case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
+			bnx2fc_fastpath_notification(hba, kcqe);
+			break;
+
+		case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
+			bnx2fc_process_ofld_cmpl(hba, kcqe);
+			break;
+
+		case FCOE_KCQE_OPCODE_ENABLE_CONN:
+			bnx2fc_process_enable_conn_cmpl(hba, kcqe);
+			break;
+
+		case FCOE_KCQE_OPCODE_INIT_FUNC:
+			if (kcqe->completion_status !=
+					FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
+				bnx2fc_init_failure(hba,
+						kcqe->completion_status);
+			} else {
+				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+				bnx2fc_get_link_state(hba);
+				printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
+					(u8)hba->pcidev->bus->number);
+			}
+			break;
+
+		case FCOE_KCQE_OPCODE_DESTROY_FUNC:
+			if (kcqe->completion_status !=
+					FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
+
+				printk(KERN_ERR PFX "DESTROY failed\n");
+			} else {
+				printk(KERN_ERR PFX "DESTROY success\n");
+			}
+			hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
+			wake_up_interruptible(&hba->destroy_wait);
+			break;
+
+		case FCOE_KCQE_OPCODE_DISABLE_CONN:
+			bnx2fc_process_conn_disable_cmpl(hba, kcqe);
+			break;
+
+		case FCOE_KCQE_OPCODE_DESTROY_CONN:
+			bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
+			break;
+
+		case FCOE_KCQE_OPCODE_STAT_FUNC:
+			if (kcqe->completion_status !=
+			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
+				printk(KERN_ERR PFX "STAT failed\n");
+			complete(&hba->stat_req_done);
+			break;
+
+		case FCOE_KCQE_OPCODE_FCOE_ERROR:
+			/* fall thru */
+		default:
+			printk(KERN_ALERT PFX "unknown opcode 0x%x\n",
+								kcqe->op_code);
+		}
+	}
+}
+
+void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
+{
+	struct fcoe_sqe *sqe;
+
+	sqe = &tgt->sq[tgt->sq_prod_idx];
+
+	/* Fill SQ WQE */
+	sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
+	sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
+
+	/* Advance SQ Prod Idx */
+	if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
+		tgt->sq_prod_idx = 0;
+		tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
+	}
+}
+
+void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
+{
+	struct b577xx_doorbell_set_prod ev_doorbell;
+	u32 msg;
+
+	wmb();
+
+	memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod));
+	ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE;
+
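+	/* Bits 0-14: SQ producer index; bit 15: current toggle bit */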
+	ev_doorbell.prod = tgt->sq_prod_idx |
+				(tgt->sq_curr_toggle_bit << 15);
+	ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE <<
+					B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
+	msg = *((u32 *)&ev_doorbell);
+	writel(cpu_to_le32(msg), tgt->ctx_base);
+
+	mmiowb();
+
+}
+
+int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
+{
+	u32 context_id = tgt->context_id;
+	struct fcoe_port *port = tgt->port;
+	u32 reg_off;
+	resource_size_t reg_base;
+	struct bnx2fc_hba *hba = port->priv;
+
+	reg_base = pci_resource_start(hba->pcidev,
+					BNX2X_DOORBELL_PCI_BAR);
+	reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
+			(context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
+	tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+	if (!tgt->ctx_base)
+		return -ENOMEM;
+	return 0;
+}
+
+char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
+{
+	char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
+
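+	/* A multi-buffer request must be contiguous; fail if it would wrap the ring */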
+	if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
+		return NULL;
+
+	tgt->rq_cons_idx += num_items;
+
+	if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
+		tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;
+
+	return buf;
+}
+
+void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
+{
+	/* return the rq buffer */
+	u32 next_prod_idx = tgt->rq_prod_idx + num_items;
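+	/*
+	 * The low 15 bits index the ring; on wrap they return to 0
+	 * while bit 15 toggles as a sequence bit.
+	 */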
+	if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
+		/* Wrap around RQ */
+		next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
+	}
+	tgt->rq_prod_idx = next_prod_idx;
+	tgt->conn_db->rq_prod = tgt->rq_prod_idx;
+}
+
+void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
+			      struct fcoe_task_ctx_entry *task,
+			      u16 orig_xid)
+{
+	u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
+	struct bnx2fc_rport *tgt = io_req->tgt;
+	u32 context_id = tgt->context_id;
+
+	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+	/* Tx Write Rx Read */
+	task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
+				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
+	task->tx_wr_rx_rd.init_flags = task_type <<
+				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
+	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
+	/* Common */
+	task->cmn.common_flags = context_id <<
+				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
+	task->cmn.general.cleanup_info.task_id = orig_xid;
+}
+
+void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
+				struct fcoe_task_ctx_entry *task)
+{
+	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
+	struct bnx2fc_rport *tgt = io_req->tgt;
+	struct fc_frame_header *fc_hdr;
+	u8 task_type = 0;
+	u64 *hdr;
+	u64 temp_hdr[3];
+	u32 context_id;
+
+	/* Obtain task_type */
+	if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
+	    (io_req->cmd_type == BNX2FC_ELS)) {
+		task_type = FCOE_TASK_TYPE_MIDPATH;
+	} else if (io_req->cmd_type == BNX2FC_ABTS) {
+		task_type = FCOE_TASK_TYPE_ABTS;
+	}
+
+	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+	/* Setup the task from io_req for easy reference */
+	io_req->task = task;
+
+	BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
+		io_req->cmd_type, task_type);
+
+	/* Tx only */
+	if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
+	    (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
+		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+				(u32)mp_req->mp_req_bd_dma;
+		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+				(u32)((u64)mp_req->mp_req_bd_dma >> 32);
+		task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
+		BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n",
+			      (unsigned long long)mp_req->mp_req_bd_dma);
+	}
+
+	/* Tx Write Rx Read */
+	task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT <<
+				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
+	task->tx_wr_rx_rd.init_flags = task_type <<
+				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
+	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
+				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
+	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
+
+	/* Common */
+	task->cmn.data_2_trns = io_req->data_xfer_len;
+	context_id = tgt->context_id;
+	task->cmn.common_flags = context_id <<
+				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
+	task->cmn.common_flags |= 1 <<
+				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
+	task->cmn.common_flags |= 1 <<
+			FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
+
+	/* Rx Write Tx Read */
+	fc_hdr = &(mp_req->req_fc_hdr);
+	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
+		fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
+		fc_hdr->fh_rx_id = htons(0xffff);
+		task->rx_wr_tx_rd.rx_id = 0xffff;
+	} else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
+		fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
+	}
+
+	/* Fill FC Header into middle path buffer */
+	hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+	memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
+	hdr[0] = cpu_to_be64(temp_hdr[0]);
+	hdr[1] = cpu_to_be64(temp_hdr[1]);
+	hdr[2] = cpu_to_be64(temp_hdr[2]);
+
+	/* Rx Only */
+	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
+
+		task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+				(u32)mp_req->mp_resp_bd_dma;
+		task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+				(u32)((u64)mp_req->mp_resp_bd_dma >> 32);
+		task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
+	}
+}
+
+void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
+			     struct fcoe_task_ctx_entry *task)
+{
+	u8 task_type;
+	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+	struct io_bdt *bd_tbl = io_req->bd_tbl;
+	struct bnx2fc_rport *tgt = io_req->tgt;
+	u64 *fcp_cmnd;
+	u64 tmp_fcp_cmnd[4];
+	u32 context_id;
+	int cnt, i;
+	int bd_count;
+
+	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+	/* Setup the task from io_req for easy reference */
+	io_req->task = task;
+
+	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
+		task_type = FCOE_TASK_TYPE_WRITE;
+	else
+		task_type = FCOE_TASK_TYPE_READ;
+
+	/* Tx only */
+	if (task_type == FCOE_TASK_TYPE_WRITE) {
+		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+				(u32)bd_tbl->bd_tbl_dma;
+		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+				(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
+		task->tx_wr_only.sgl_ctx.mul_sges.sgl_size =
+				bd_tbl->bd_valid;
+	}
+
+	/*Tx Write Rx Read */
+	/* Init state to NORMAL */
+	task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
+				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
+	task->tx_wr_rx_rd.init_flags = task_type <<
+				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
+	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
+				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
+	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
+
+	/* Common */
+	task->cmn.data_2_trns = io_req->data_xfer_len;
+	context_id = tgt->context_id;
+	task->cmn.common_flags = context_id <<
+				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
+	task->cmn.common_flags |= 1 <<
+				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
+	task->cmn.common_flags |= 1 <<
+			FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
+
+	/* Set initiative ownership */
+	task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT;
+
+	/* Set initial seq counter */
+	task->cmn.tx_low_seq_cnt = 1;
+
+	/* Set state to "waiting for the first packet" */
+	task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME;
+
+	/* Fill FCP_CMND IU */
+	fcp_cmnd = (u64 *)
+		    task->cmn.general.cmd_info.fcp_cmd_payload.opaque;
+	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
+
+	/* swap fcp_cmnd */
+	cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
+
+	for (i = 0; i < cnt; i++) {
+		*fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
+		fcp_cmnd++;
+	}
+
+	/* Rx Write Tx Read */
+	task->rx_wr_tx_rd.rx_id = 0xffff;
+
+	/* Rx Only */
+	if (task_type == FCOE_TASK_TYPE_READ) {
+
+		bd_count = bd_tbl->bd_valid;
+		if (bd_count == 1) {
+
+			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
+
+			task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo =
+					fcoe_bd_tbl->buf_addr_lo;
+			task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi =
+					fcoe_bd_tbl->buf_addr_hi;
+			task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem =
+					fcoe_bd_tbl->buf_len;
+			task->tx_wr_rx_rd.init_flags |= 1 <<
+				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT;
+		} else {
+
+			task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+					(u32)bd_tbl->bd_tbl_dma;
+			task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+					(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
+			task->rx_wr_only.sgl_ctx.mul_sges.sgl_size =
+					bd_tbl->bd_valid;
+		}
+	}
+}
+
+/**
+ * bnx2fc_setup_task_ctx - allocate and map task context
+ *
+ * @hba:	pointer to adapter structure
+ *
+ * allocate memory for task context, and associated BD table to be used
+ * by firmware
+ *
+ */
+int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
+{
+	int rc = 0;
+	struct regpair *task_ctx_bdt;
+	dma_addr_t addr;
+	int i;
+
+	/*
+	 * Allocate task context bd table. A page size of bd table
+	 * can map 256 buffers. Each buffer contains 32 task context
+	 * entries. Hence the limit with one page is 8192 task context
+	 * entries.
+	 */
+	hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+						  PAGE_SIZE,
+						  &hba->task_ctx_bd_dma,
+						  GFP_KERNEL);
+	if (!hba->task_ctx_bd_tbl) {
+		printk(KERN_ERR PFX "unable to allocate task context BDT\n");
+		rc = -1;
+		goto out;
+	}
+	memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
+
+	/*
+	 * Allocate task_ctx which is an array of pointers pointing to
+	 * a page containing 32 task contexts
+	 */
+	hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
+				 GFP_KERNEL);
+	if (!hba->task_ctx) {
+		printk(KERN_ERR PFX "unable to allocate task context array\n");
+		rc = -1;
+		goto out1;
+	}
+
+	/*
+	 * Allocate task_ctx_dma which is an array of dma addresses
+	 */
+	hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
+					sizeof(dma_addr_t)), GFP_KERNEL);
+	if (!hba->task_ctx_dma) {
+		printk(KERN_ERR PFX "unable to alloc context mapping array\n");
+		rc = -1;
+		goto out2;
+	}
+
+	task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
+	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+
+		hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
+						      PAGE_SIZE,
+						      &hba->task_ctx_dma[i],
+						      GFP_KERNEL);
+		if (!hba->task_ctx[i]) {
+			printk(KERN_ERR PFX "unable to alloc task context\n");
+			rc = -1;
+			goto out3;
+		}
+		memset(hba->task_ctx[i], 0, PAGE_SIZE);
+		addr = (u64)hba->task_ctx_dma[i];
+		task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
+		task_ctx_bdt->lo = cpu_to_le32((u32)addr);
+		task_ctx_bdt++;
+	}
+	return 0;
+
+out3:
+	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+		if (hba->task_ctx[i]) {
+
+			dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				hba->task_ctx[i], hba->task_ctx_dma[i]);
+			hba->task_ctx[i] = NULL;
+		}
+	}
+
+	kfree(hba->task_ctx_dma);
+	hba->task_ctx_dma = NULL;
+out2:
+	kfree(hba->task_ctx);
+	hba->task_ctx = NULL;
+out1:
+	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+			hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
+	hba->task_ctx_bd_tbl = NULL;
+out:
+	return rc;
+}
+
+void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
+{
+	int i;
+
+	if (hba->task_ctx_bd_tbl) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				    hba->task_ctx_bd_tbl,
+				    hba->task_ctx_bd_dma);
+		hba->task_ctx_bd_tbl = NULL;
+	}
+
+	if (hba->task_ctx) {
+		for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+			if (hba->task_ctx[i]) {
+				dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+						    hba->task_ctx[i],
+						    hba->task_ctx_dma[i]);
+				hba->task_ctx[i] = NULL;
+			}
+		}
+		kfree(hba->task_ctx);
+		hba->task_ctx = NULL;
+	}
+
+	kfree(hba->task_ctx_dma);
+	hba->task_ctx_dma = NULL;
+}
+
+static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
+{
+	int i;
+	int segment_count;
+	int hash_table_size;
+	u32 *pbl;
+
+	segment_count = hba->hash_tbl_segment_count;
+	hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
+		sizeof(struct fcoe_hash_table_entry);
+
+	pbl = hba->hash_tbl_pbl;
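+	/* Each PBL entry holds a segment DMA address as two LE32 words (lo, hi) */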
+	for (i = 0; i < segment_count; ++i) {
+		dma_addr_t dma_address;
+
+		dma_address = le32_to_cpu(*pbl);
+		++pbl;
+		dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
+		++pbl;
+		dma_free_coherent(&hba->pcidev->dev,
+				  BNX2FC_HASH_TBL_CHUNK_SIZE,
+				  hba->hash_tbl_segments[i],
+				  dma_address);
+
+	}
+
+	if (hba->hash_tbl_pbl) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				    hba->hash_tbl_pbl,
+				    hba->hash_tbl_pbl_dma);
+		hba->hash_tbl_pbl = NULL;
+	}
+}
+
+static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
+{
+	int i;
+	int hash_table_size;
+	int segment_count;
+	int segment_array_size;
+	int dma_segment_array_size;
+	dma_addr_t *dma_segment_array;
+	u32 *pbl;
+
+	hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
+		sizeof(struct fcoe_hash_table_entry);
+
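+	/* Round up to whole BNX2FC_HASH_TBL_CHUNK_SIZE segments */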
+	segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
+	segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
+	hba->hash_tbl_segment_count = segment_count;
+
+	segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
+	hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
+	if (!hba->hash_tbl_segments) {
+		printk(KERN_ERR PFX "hash table pointers alloc failed\n");
+		return -ENOMEM;
+	}
+	dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
+	dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
+	if (!dma_segment_array) {
+		printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < segment_count; ++i) {
+		hba->hash_tbl_segments[i] =
+			dma_alloc_coherent(&hba->pcidev->dev,
+					   BNX2FC_HASH_TBL_CHUNK_SIZE,
+					   &dma_segment_array[i],
+					   GFP_KERNEL);
+		if (!hba->hash_tbl_segments[i]) {
+			printk(KERN_ERR PFX "hash segment alloc failed\n");
+			while (--i >= 0) {
+				dma_free_coherent(&hba->pcidev->dev,
+						    BNX2FC_HASH_TBL_CHUNK_SIZE,
+						    hba->hash_tbl_segments[i],
+						    dma_segment_array[i]);
+				hba->hash_tbl_segments[i] = NULL;
+			}
+			kfree(dma_segment_array);
+			return -ENOMEM;
+		}
+		memset(hba->hash_tbl_segments[i], 0,
+		       BNX2FC_HASH_TBL_CHUNK_SIZE);
+	}
+
+	hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
+					       PAGE_SIZE,
+					       &hba->hash_tbl_pbl_dma,
+					       GFP_KERNEL);
+	if (!hba->hash_tbl_pbl) {
+		printk(KERN_ERR PFX "hash table pbl alloc failed\n");
+		kfree(dma_segment_array);
+		return -ENOMEM;
+	}
+	memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
+
+	pbl = hba->hash_tbl_pbl;
+	for (i = 0; i < segment_count; ++i) {
+		u64 paddr = dma_segment_array[i];
+		*pbl = cpu_to_le32((u32) paddr);
+		++pbl;
+		*pbl = cpu_to_le32((u32) (paddr >> 32));
+		++pbl;
+	}
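+	/*
+	 * Walk the PBL back to count the populated entries; the count is
+	 * a debug aid and is otherwise unused.
+	 */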
+	pbl = hba->hash_tbl_pbl;
+	i = 0;
+	while (*pbl && *(pbl + 1)) {
+		u32 lo;
+		u32 hi;
+		lo = *pbl;
+		++pbl;
+		hi = *pbl;
+		++pbl;
+		++i;
+	}
+	kfree(dma_segment_array);
+	return 0;
+}
+
+/**
+ * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
+ *
+ * @hba:	Pointer to adapter structure
+ *
+ */
+int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
+{
+	u64 addr;
+	u32 mem_size;
+	int i;
+
+	if (bnx2fc_allocate_hash_table(hba))
+		return -ENOMEM;
+
+	mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
+	hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+						  &hba->t2_hash_tbl_ptr_dma,
+						  GFP_KERNEL);
+	if (!hba->t2_hash_tbl_ptr) {
+		printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
+		bnx2fc_free_fw_resc(hba);
+		return -ENOMEM;
+	}
+	memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
+
+	mem_size = BNX2FC_NUM_MAX_SESS *
+				sizeof(struct fcoe_t2_hash_table_entry);
+	hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+					      &hba->t2_hash_tbl_dma,
+					      GFP_KERNEL);
+	if (!hba->t2_hash_tbl) {
+		printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
+		bnx2fc_free_fw_resc(hba);
+		return -ENOMEM;
+	}
+	memset(hba->t2_hash_tbl, 0x00, mem_size);
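+	/* Chain each T2 hash entry to the DMA address of the entry after it */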
+	for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
+		addr = (unsigned long) hba->t2_hash_tbl_dma +
+			 ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
+		hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
+		hba->t2_hash_tbl[i].next.hi = addr >> 32;
+	}
+
+	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
+					       PAGE_SIZE, &hba->dummy_buf_dma,
+					       GFP_KERNEL);
+	if (!hba->dummy_buffer) {
+		printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
+		bnx2fc_free_fw_resc(hba);
+		return -ENOMEM;
+	}
+
+	hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
+					       PAGE_SIZE,
+					       &hba->stats_buf_dma,
+					       GFP_KERNEL);
+	if (!hba->stats_buffer) {
+		printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
+		bnx2fc_free_fw_resc(hba);
+		return -ENOMEM;
+	}
+	memset(hba->stats_buffer, 0x00, PAGE_SIZE);
+
+	return 0;
+}
+
+void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
+{
+	u32 mem_size;
+
+	if (hba->stats_buffer) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  hba->stats_buffer, hba->stats_buf_dma);
+		hba->stats_buffer = NULL;
+	}
+
+	if (hba->dummy_buffer) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  hba->dummy_buffer, hba->dummy_buf_dma);
+		hba->dummy_buffer = NULL;
+	}
+
+	if (hba->t2_hash_tbl_ptr) {
+		mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
+		dma_free_coherent(&hba->pcidev->dev, mem_size,
+				    hba->t2_hash_tbl_ptr,
+				    hba->t2_hash_tbl_ptr_dma);
+		hba->t2_hash_tbl_ptr = NULL;
+	}
+
+	if (hba->t2_hash_tbl) {
+		mem_size = BNX2FC_NUM_MAX_SESS *
+			    sizeof(struct fcoe_t2_hash_table_entry);
+		dma_free_coherent(&hba->pcidev->dev, mem_size,
+				    hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
+		hba->t2_hash_tbl = NULL;
+	}
+	bnx2fc_free_hash_table(hba);
+}

+ 1833 - 0
drivers/scsi/bnx2fc/bnx2fc_io.c

@@ -0,0 +1,1833 @@
+/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * IO manager and SCSI IO processing.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
+			   int bd_index);
+static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
+static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
+static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
+			       struct bnx2fc_cmd *io_req);
+static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
+static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
+static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
+				 struct fcoe_fcp_rsp_payload *fcp_rsp,
+				 u8 num_rq);
+
+void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
+			  unsigned int timer_msec)
+{
+	struct bnx2fc_hba *hba = io_req->port->priv;
+
+	if (queue_delayed_work(hba->timer_work_queue, &io_req->timeout_work,
+				  msecs_to_jiffies(timer_msec)))
+		kref_get(&io_req->refcount);
+}
+
+static void bnx2fc_cmd_timeout(struct work_struct *work)
+{
+	struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
+						 timeout_work.work);
+	struct fc_lport *lport;
+	struct fc_rport_priv *rdata;
+	u8 cmd_type = io_req->cmd_type;
+	struct bnx2fc_rport *tgt = io_req->tgt;
+	int logo_issued;
+	int rc;
+
+	BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d,"
+		      "req_flags = %lx\n", cmd_type, io_req->req_flags);
+
+	spin_lock_bh(&tgt->tgt_lock);
+	if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
+		clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
+		/*
+		 * Ideally we should hold the io_req until the RRQ completes,
+		 * and release the io_req from the timeout hold.
+		 */
+		spin_unlock_bh(&tgt->tgt_lock);
+		bnx2fc_send_rrq(io_req);
+		return;
+	}
+	if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
+		BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
+		goto done;
+	}
+
+	switch (cmd_type) {
+	case BNX2FC_SCSI_CMD:
+		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+							&io_req->req_flags)) {
+			/* Handle eh_abort timeout */
+			BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
+			complete(&io_req->tm_done);
+		} else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
+				    &io_req->req_flags)) {
+			/* Handle internally generated ABTS timeout */
+			BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
+					io_req->refcount.refcount.counter);
+			if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+					       &io_req->req_flags))) {
+
+				lport = io_req->port->lport;
+				rdata = io_req->tgt->rdata;
+				logo_issued = test_and_set_bit(
+						BNX2FC_FLAG_EXPL_LOGO,
+						&tgt->flags);
+				kref_put(&io_req->refcount, bnx2fc_cmd_release);
+				spin_unlock_bh(&tgt->tgt_lock);
+
+				/* Explicitly logo the target */
+				if (!logo_issued) {
+					BNX2FC_IO_DBG(io_req, "Explicit "
+						   "logo - tgt flags = 0x%lx\n",
+						   tgt->flags);
+
+					mutex_lock(&lport->disc.disc_mutex);
+					lport->tt.rport_logoff(rdata);
+					mutex_unlock(&lport->disc.disc_mutex);
+				}
+				return;
+			}
+		} else {
+			/* Handle IO timeout */
+			BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
+			if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
+					     &io_req->req_flags)) {
+				BNX2FC_IO_DBG(io_req, "IO completed before "
+							   " timer expiry\n");
+				goto done;
+			}
+
+			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+					      &io_req->req_flags)) {
+				rc = bnx2fc_initiate_abts(io_req);
+				if (rc == SUCCESS)
+					goto done;
+				/*
+				 * Explicitly logo the target if
+				 * abts initiation fails
+				 */
+				lport = io_req->port->lport;
+				rdata = io_req->tgt->rdata;
+				logo_issued = test_and_set_bit(
+						BNX2FC_FLAG_EXPL_LOGO,
+						&tgt->flags);
+				kref_put(&io_req->refcount, bnx2fc_cmd_release);
+				spin_unlock_bh(&tgt->tgt_lock);
+
+				if (!logo_issued) {
+					BNX2FC_IO_DBG(io_req, "Explicit "
+						   "logo - tgt flags = 0x%lx\n",
+						   tgt->flags);
+
+					mutex_lock(&lport->disc.disc_mutex);
+					lport->tt.rport_logoff(rdata);
+					mutex_unlock(&lport->disc.disc_mutex);
+				}
+				return;
+			} else {
+				BNX2FC_IO_DBG(io_req, "IO already in "
+						      "ABTS processing\n");
+			}
+		}
+		break;
+	case BNX2FC_ELS:
+
+		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
+			BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");
+
+			if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+					      &io_req->req_flags)) {
+				lport = io_req->port->lport;
+				rdata = io_req->tgt->rdata;
+				logo_issued = test_and_set_bit(
+						BNX2FC_FLAG_EXPL_LOGO,
+						&tgt->flags);
+				kref_put(&io_req->refcount, bnx2fc_cmd_release);
+				spin_unlock_bh(&tgt->tgt_lock);
+
+				/* Explicitly logo the target */
+				if (!logo_issued) {
+					BNX2FC_IO_DBG(io_req, "Explicitly logo"
+						   "(els)\n");
+					mutex_lock(&lport->disc.disc_mutex);
+					lport->tt.rport_logoff(rdata);
+					mutex_unlock(&lport->disc.disc_mutex);
+				}
+				return;
+			}
+		} else {
+			/*
+			 * Handle ELS timeout.
+			 * tgt_lock is used to sync compl path and timeout
+			 * path. If els compl path is processing this IO, we
+			 * have nothing to do here, just release the timer hold
+			 */
+			BNX2FC_IO_DBG(io_req, "ELS timed out\n");
+			if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
+					       &io_req->req_flags))
+				goto done;
+
+			/* Indicate the cb_func that this ELS is timed out */
+			set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);
+
+			if ((io_req->cb_func) && (io_req->cb_arg)) {
+				io_req->cb_func(io_req->cb_arg);
+				io_req->cb_arg = NULL;
+			}
+		}
+		break;
+	default:
+		printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
+			cmd_type);
+		break;
+	}
+
+done:
+	/* release the cmd that was held when timer was set */
+	kref_put(&io_req->refcount, bnx2fc_cmd_release);
+	spin_unlock_bh(&tgt->tgt_lock);
+}
+
+static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
+{
+	/* Called with host lock held */
+	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+
+	/*
+	 * active_cmd_queue may have other command types as well,
+	 * and during flush operation,  we want to error back only
+	 * scsi commands.
+	 */
+	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
+		return;
+
+	BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
+	bnx2fc_unmap_sg_list(io_req);
+	io_req->sc_cmd = NULL;
+	if (!sc_cmd) {
+		printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
+				    "IO(0x%x) already cleaned up\n",
+		       io_req->xid);
+		return;
+	}
+	sc_cmd->result = err_code << 16;
+
+	BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
+		sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
+		sc_cmd->allowed);
+	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
+	sc_cmd->SCp.ptr = NULL;
+	sc_cmd->scsi_done(sc_cmd);
+}
+
+struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
+						u16 min_xid, u16 max_xid)
+{
+	struct bnx2fc_cmd_mgr *cmgr;
+	struct io_bdt *bdt_info;
+	struct bnx2fc_cmd *io_req;
+	size_t len;
+	u32 mem_size;
+	u16 xid;
+	int i;
+	int num_ios;
+	size_t bd_tbl_sz;
+
+	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
+		printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \
+					and max_xid 0x%x\n", min_xid, max_xid);
+		return NULL;
+	}
+	BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);
+
+	num_ios = max_xid - min_xid + 1;
+	len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
+	len += sizeof(struct bnx2fc_cmd_mgr);
+
+	cmgr = kzalloc(len, GFP_KERNEL);
+	if (!cmgr) {
+		printk(KERN_ERR PFX "failed to alloc cmgr\n");
+		return NULL;
+	}
+
+	cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
+				  num_possible_cpus(), GFP_KERNEL);
+	if (!cmgr->free_list) {
+		printk(KERN_ERR PFX "failed to alloc free_list\n");
+		goto mem_err;
+	}
+
+	cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
+				       num_possible_cpus(), GFP_KERNEL);
+	if (!cmgr->free_list_lock) {
+		printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
+		goto mem_err;
+	}
+
+	cmgr->hba = hba;
+	cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
+
+	for (i = 0; i < num_possible_cpus(); i++)  {
+		INIT_LIST_HEAD(&cmgr->free_list[i]);
+		spin_lock_init(&cmgr->free_list_lock[i]);
+	}
+
+	/* Pre-allocated pool of bnx2fc_cmds */
+	xid = BNX2FC_MIN_XID;
+	for (i = 0; i < num_ios; i++) {
+		io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);
+
+		if (!io_req) {
+			printk(KERN_ERR PFX "failed to alloc io_req\n");
+			goto mem_err;
+		}
+
+		INIT_LIST_HEAD(&io_req->link);
+		INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);
+
+		io_req->xid = xid++;
+		if (io_req->xid >= BNX2FC_MAX_OUTSTANDING_CMNDS)
+			printk(KERN_ERR PFX "ERROR allocating xids - 0x%x\n",
+				io_req->xid);
+		list_add_tail(&io_req->link,
+			&cmgr->free_list[io_req->xid % num_possible_cpus()]);
+		io_req++;
+	}
+
+	/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
+	mem_size = num_ios * sizeof(struct io_bdt *);
+	cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
+	if (!cmgr->io_bdt_pool) {
+		printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
+		goto mem_err;
+	}
+
+	mem_size = sizeof(struct io_bdt);
+	for (i = 0; i < num_ios; i++) {
+		cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
+		if (!cmgr->io_bdt_pool[i]) {
+			printk(KERN_ERR PFX "failed to alloc "
+				"io_bdt_pool[%d]\n", i);
+			goto mem_err;
+		}
+	}
+
+	/* Allocate and map fcoe_bdt_ctx structures */
+	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
+	for (i = 0; i < num_ios; i++) {
+		bdt_info = cmgr->io_bdt_pool[i];
+		bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+						      bd_tbl_sz,
+						      &bdt_info->bd_tbl_dma,
+						      GFP_KERNEL);
+		if (!bdt_info->bd_tbl) {
+			printk(KERN_ERR PFX "failed to alloc "
+				"bdt_tbl[%d]\n", i);
+			goto mem_err;
+		}
+	}
+
+	return cmgr;
+
+mem_err:
+	bnx2fc_cmd_mgr_free(cmgr);
+	return NULL;
+}
+
+void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
+{
+	struct io_bdt *bdt_info;
+	struct bnx2fc_hba *hba = cmgr->hba;
+	size_t bd_tbl_sz;
+	u16 min_xid = BNX2FC_MIN_XID;
+	u16 max_xid = BNX2FC_MAX_XID;
+	int num_ios;
+	int i;
+
+	num_ios = max_xid - min_xid + 1;
+
+	/* Free fcoe_bdt_ctx structures */
+	if (!cmgr->io_bdt_pool)
+		goto free_cmd_pool;
+
+	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
+	for (i = 0; i < num_ios; i++) {
+		bdt_info = cmgr->io_bdt_pool[i];
+		if (bdt_info->bd_tbl) {
+			dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
+					    bdt_info->bd_tbl,
+					    bdt_info->bd_tbl_dma);
+			bdt_info->bd_tbl = NULL;
+		}
+	}
+
+	/* Destroy io_bdt pool */
+	for (i = 0; i < num_ios; i++) {
+		kfree(cmgr->io_bdt_pool[i]);
+		cmgr->io_bdt_pool[i] = NULL;
+	}
+
+	kfree(cmgr->io_bdt_pool);
+	cmgr->io_bdt_pool = NULL;
+
+free_cmd_pool:
+	kfree(cmgr->free_list_lock);
+
+	/* Destroy cmd pool */
+	if (!cmgr->free_list)
+		goto free_cmgr;
+
+	for (i = 0; i < num_possible_cpus(); i++)  {
+		struct list_head *list;
+		struct list_head *tmp;
+
+		list_for_each_safe(list, tmp, &cmgr->free_list[i]) {
+			struct bnx2fc_cmd *io_req = (struct bnx2fc_cmd *)list;
+			list_del(&io_req->link);
+			kfree(io_req);
+		}
+	}
+	kfree(cmgr->free_list);
+free_cmgr:
+	/* Free command manager itself */
+	kfree(cmgr);
+}
+
+struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
+{
+	struct fcoe_port *port = tgt->port;
+	struct bnx2fc_hba *hba = port->priv;
+	struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
+	struct bnx2fc_cmd *io_req;
+	struct list_head *listp;
+	struct io_bdt *bd_tbl;
+	u32 max_sqes;
+	u16 xid;
+
+	max_sqes = tgt->max_sqes;
+	switch (type) {
+	case BNX2FC_TASK_MGMT_CMD:
+		max_sqes = BNX2FC_TM_MAX_SQES;
+		break;
+	case BNX2FC_ELS:
+		max_sqes = BNX2FC_ELS_MAX_SQES;
+		break;
+	default:
+		break;
+	}
+
+	/*
+	 * NOTE: Free list insertions and deletions are protected with
+	 * cmgr lock
+	 */
+	spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+	if ((list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) ||
+	    (tgt->num_active_ios.counter  >= max_sqes)) {
+		BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
+			"ios(%d):sqes(%d)\n",
+			tgt->num_active_ios.counter, tgt->max_sqes);
+		if (list_empty(&(cmd_mgr->free_list[smp_processor_id()])))
+			printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
+		spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+		return NULL;
+	}
+
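+	/* The cast below relies on 'link' being the first member of struct bnx2fc_cmd */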
+	listp = (struct list_head *)
+			cmd_mgr->free_list[smp_processor_id()].next;
+	list_del_init(listp);
+	io_req = (struct bnx2fc_cmd *) listp;
+	xid = io_req->xid;
+	cmd_mgr->cmds[xid] = io_req;
+	atomic_inc(&tgt->num_active_ios);
+	spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+
+	INIT_LIST_HEAD(&io_req->link);
+
+	io_req->port = port;
+	io_req->cmd_mgr = cmd_mgr;
+	io_req->req_flags = 0;
+	io_req->cmd_type = type;
+
+	/* Bind io_bdt for this io_req */
+	/* Have a static link between io_req and io_bdt_pool */
+	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
+	bd_tbl->io_req = io_req;
+
+	/* Hold the io_req  against deletion */
+	kref_init(&io_req->refcount);
+	return io_req;
+}
+static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
+{
+	struct fcoe_port *port = tgt->port;
+	struct bnx2fc_hba *hba = port->priv;
+	struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
+	struct bnx2fc_cmd *io_req;
+	struct list_head *listp;
+	struct io_bdt *bd_tbl;
+	u32 max_sqes;
+	u16 xid;
+
+	max_sqes = BNX2FC_SCSI_MAX_SQES;
+	/*
+	 * NOTE: Free list insertions and deletions are protected with
+	 * cmgr lock
+	 */
+	spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+	if ((list_empty(&cmd_mgr->free_list[smp_processor_id()])) ||
+	    (tgt->num_active_ios.counter  >= max_sqes)) {
+		spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+		return NULL;
+	}
+
+	listp = (struct list_head *)
+		cmd_mgr->free_list[smp_processor_id()].next;
+	list_del_init(listp);
+	io_req = (struct bnx2fc_cmd *) listp;
+	xid = io_req->xid;
+	cmd_mgr->cmds[xid] = io_req;
+	atomic_inc(&tgt->num_active_ios);
+	spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+
+	INIT_LIST_HEAD(&io_req->link);
+
+	io_req->port = port;
+	io_req->cmd_mgr = cmd_mgr;
+	io_req->req_flags = 0;
+
+	/* Bind io_bdt for this io_req */
+	/* Have a static link between io_req and io_bdt_pool */
+	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
+	bd_tbl->io_req = io_req;
+
+	/* Hold the io_req  against deletion */
+	kref_init(&io_req->refcount);
+	return io_req;
+}
+
+void bnx2fc_cmd_release(struct kref *ref)
+{
+	struct bnx2fc_cmd *io_req = container_of(ref,
+						struct bnx2fc_cmd, refcount);
+	struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
+
+	spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
+		bnx2fc_free_mp_resc(io_req);
+	cmd_mgr->cmds[io_req->xid] = NULL;
+	/* Delete IO from retire queue */
+	list_del_init(&io_req->link);
+	/* Add it to the free list */
+	list_add(&io_req->link,
+			&cmd_mgr->free_list[smp_processor_id()]);
+	atomic_dec(&io_req->tgt->num_active_ios);
+	spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+}
+
+static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
+{
+	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
+	struct bnx2fc_hba *hba = io_req->port->priv;
+	size_t sz = sizeof(struct fcoe_bd_ctx);
+
+	/* clear tm flags */
+	mp_req->tm_flags = 0;
+	if (mp_req->mp_req_bd) {
+		dma_free_coherent(&hba->pcidev->dev, sz,
+				     mp_req->mp_req_bd,
+				     mp_req->mp_req_bd_dma);
+		mp_req->mp_req_bd = NULL;
+	}
+	if (mp_req->mp_resp_bd) {
+		dma_free_coherent(&hba->pcidev->dev, sz,
+				     mp_req->mp_resp_bd,
+				     mp_req->mp_resp_bd_dma);
+		mp_req->mp_resp_bd = NULL;
+	}
+	if (mp_req->req_buf) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				     mp_req->req_buf,
+				     mp_req->req_buf_dma);
+		mp_req->req_buf = NULL;
+	}
+	if (mp_req->resp_buf) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				     mp_req->resp_buf,
+				     mp_req->resp_buf_dma);
+		mp_req->resp_buf = NULL;
+	}
+}
+
+int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
+{
+	struct bnx2fc_mp_req *mp_req;
+	struct fcoe_bd_ctx *mp_req_bd;
+	struct fcoe_bd_ctx *mp_resp_bd;
+	struct bnx2fc_hba *hba = io_req->port->priv;
+	dma_addr_t addr;
+	size_t sz;
+
+	mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
+	memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));
+
+	mp_req->req_len = sizeof(struct fcp_cmnd);
+	io_req->data_xfer_len = mp_req->req_len;
+	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+					     &mp_req->req_buf_dma,
+					     GFP_ATOMIC);
+	if (!mp_req->req_buf) {
+		printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
+		bnx2fc_free_mp_resc(io_req);
+		return FAILED;
+	}
+
+	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+					      &mp_req->resp_buf_dma,
+					      GFP_ATOMIC);
+	if (!mp_req->resp_buf) {
+		printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
+		bnx2fc_free_mp_resc(io_req);
+		return FAILED;
+	}
+	memset(mp_req->req_buf, 0, PAGE_SIZE);
+	memset(mp_req->resp_buf, 0, PAGE_SIZE);
+
+	/* Allocate and map mp_req_bd and mp_resp_bd */
+	sz = sizeof(struct fcoe_bd_ctx);
+	mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
+						 &mp_req->mp_req_bd_dma,
+						 GFP_ATOMIC);
+	if (!mp_req->mp_req_bd) {
+		printk(KERN_ERR PFX "unable to alloc MP req bd\n");
+		bnx2fc_free_mp_resc(io_req);
+		return FAILED;
+	}
+	mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
+						 &mp_req->mp_resp_bd_dma,
+						 GFP_ATOMIC);
+	if (!mp_req->mp_resp_bd) {
+		printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
+		bnx2fc_free_mp_resc(io_req);
+		return FAILED;
+	}
+	/* Fill bd table */
+	addr = mp_req->req_buf_dma;
+	mp_req_bd = mp_req->mp_req_bd;
+	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
+	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
+	mp_req_bd->buf_len = PAGE_SIZE;
+	mp_req_bd->flags = 0;
+
+	/*
+	 * MP buffer is either a task mgmt command or an ELS.
+	 * So the assumption is that it consumes a single bd
+	 * entry in the bd table
+	 */
+	mp_resp_bd = mp_req->mp_resp_bd;
+	addr = mp_req->resp_buf_dma;
+	mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
+	mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
+	mp_resp_bd->buf_len = PAGE_SIZE;
+	mp_resp_bd->flags = 0;
+
+	return SUCCESS;
+}
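The buffer-descriptor stores at the end repeat one idiom: a 64-bit DMA address split into low and high 32-bit words. A small helper, sketched here under the same field layout (bnx2fc_fill_bd is a hypothetical name, not part of the patch), would make the convention explicit:

	static inline void bnx2fc_fill_bd(struct fcoe_bd_ctx *bd,
					  dma_addr_t addr, u16 len)
	{
		bd->buf_addr_lo = (u32)addr & 0xffffffff;  /* low word  */
		bd->buf_addr_hi = (u32)((u64)addr >> 32);  /* high word */
		bd->buf_len = len;
		bd->flags = 0;
	}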
+
+static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
+{
+	struct fc_lport *lport;
+	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fcoe_port *port;
+	struct bnx2fc_hba *hba;
+	struct bnx2fc_rport *tgt;
+	struct bnx2fc_cmd *io_req;
+	struct bnx2fc_mp_req *tm_req;
+	struct fcoe_task_ctx_entry *task;
+	struct fcoe_task_ctx_entry *task_page;
+	struct Scsi_Host *host = sc_cmd->device->host;
+	struct fc_frame_header *fc_hdr;
+	struct fcp_cmnd *fcp_cmnd;
+	int task_idx, index;
+	int rc = SUCCESS;
+	u16 xid;
+	u32 sid, did;
+	unsigned long start = jiffies;
+
+	lport = shost_priv(host);
+	port = lport_priv(lport);
+	hba = port->priv;
+
+	if (rport == NULL) {
+		printk(KERN_ALERT PFX "device_reset: rport is NULL\n");
+		rc = FAILED;
+		goto tmf_err;
+	}
+
+	rc = fc_block_scsi_eh(sc_cmd);
+	if (rc)
+		return rc;
+
+	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+		printk(KERN_ERR PFX "device_reset: link is not ready\n");
+		rc = FAILED;
+		goto tmf_err;
+	}
+	/* rport and tgt are allocated together, so tgt should be non-NULL */
+	tgt = (struct bnx2fc_rport *)&rp[1];
+
+	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
+		printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
+		rc = FAILED;
+		goto tmf_err;
+	}
+retry_tmf:
+	io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
+	if (!io_req) {
+		if (time_after(jiffies, start + HZ)) {
+			printk(KERN_ERR PFX "tmf: Failed TMF");
+			rc = FAILED;
+			goto tmf_err;
+		}
+		msleep(20);
+		goto retry_tmf;
+	}
+	/* Initialize rest of io_req fields */
+	io_req->sc_cmd = sc_cmd;
+	io_req->port = port;
+	io_req->tgt = tgt;
+
+	tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
+
+	rc = bnx2fc_init_mp_req(io_req);
+	if (rc == FAILED) {
+		printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
+		kref_put(&io_req->refcount, bnx2fc_cmd_release);
+		goto tmf_err;
+	}
+
+	/* Set TM flags */
+	io_req->io_req_flags = 0;
+	tm_req->tm_flags = tm_flags;
+
+	/* Fill FCP_CMND */
+	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
+	fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
+	memset(fcp_cmnd->fc_cdb, 0,  sc_cmd->cmd_len);
+	fcp_cmnd->fc_dl = 0;
+
+	/* Fill FC header */
+	fc_hdr = &(tm_req->req_fc_hdr);
+	sid = tgt->sid;
+	did = rport->port_id;
+	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
+			   FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+			   FC_FC_SEQ_INIT, 0);
+	/* Obtain exchange id */
+	xid = io_req->xid;
+
+	BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
+	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+	index = xid % BNX2FC_TASKS_PER_PAGE;
+
+	/* Initialize task context for this IO request */
+	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+	task = &(task_page[index]);
+	bnx2fc_init_mp_task(io_req, task);
+
+	sc_cmd->SCp.ptr = (char *)io_req;
+
+	/* Obtain free SQ entry */
+	spin_lock_bh(&tgt->tgt_lock);
+	bnx2fc_add_2_sq(tgt, xid);
+
+	/* Enqueue the io_req to active_tm_queue */
+	io_req->on_tmf_queue = 1;
+	list_add_tail(&io_req->link, &tgt->active_tm_queue);
+
+	init_completion(&io_req->tm_done);
+	io_req->wait_for_comp = 1;
+
+	/* Ring doorbell */
+	bnx2fc_ring_doorbell(tgt);
+	spin_unlock_bh(&tgt->tgt_lock);
+
+	rc = wait_for_completion_timeout(&io_req->tm_done,
+					 BNX2FC_TM_TIMEOUT * HZ);
+	spin_lock_bh(&tgt->tgt_lock);
+
+	io_req->wait_for_comp = 0;
+	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags)))
+		set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
+
+	spin_unlock_bh(&tgt->tgt_lock);
+
+	if (!rc) {
+		printk(KERN_ERR PFX "task mgmt command failed...\n");
+		rc = FAILED;
+	} else {
+		printk(KERN_ERR PFX "task mgmt command success...\n");
+		rc = SUCCESS;
+	}
+tmf_err:
+	return rc;
+}
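The rc mapping at the end hinges on the wait_for_completion_timeout() contract: it returns 0 if the timeout elapsed first, and the number of remaining jiffies otherwise. Reduced to a sketch:

	unsigned long left;

	left = wait_for_completion_timeout(&io_req->tm_done,
					   BNX2FC_TM_TIMEOUT * HZ);
	if (!left)
		rc = FAILED;	/* timed out before complete() ran */
	else
		rc = SUCCESS;	/* completion arrived with time to spare */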
+
+int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
+{
+	struct fc_lport *lport;
+	struct bnx2fc_rport *tgt = io_req->tgt;
+	struct fc_rport *rport = tgt->rport;
+	struct fc_rport_priv *rdata = tgt->rdata;
+	struct bnx2fc_hba *hba;
+	struct fcoe_port *port;
+	struct bnx2fc_cmd *abts_io_req;
+	struct fcoe_task_ctx_entry *task;
+	struct fcoe_task_ctx_entry *task_page;
+	struct fc_frame_header *fc_hdr;
+	struct bnx2fc_mp_req *abts_req;
+	int task_idx, index;
+	u32 sid, did;
+	u16 xid;
+	int rc = SUCCESS;
+	u32 r_a_tov = rdata->r_a_tov;
+
+	/* called with tgt_lock held */
+	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");
+
+	port = io_req->port;
+	hba = port->priv;
+	lport = port->lport;
+
+	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+		printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
+		rc = FAILED;
+		goto abts_err;
+	}
+
+	if (rport == NULL) {
+		printk(KERN_ALERT PFX "initiate_abts: rport is NULL\n");
+		rc = FAILED;
+		goto abts_err;
+	}
+
+	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+		printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
+		rc = FAILED;
+		goto abts_err;
+	}
+
+	abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
+	if (!abts_io_req) {
+		printk(KERN_ERR PFX "abts: couldnt allocate cmd\n");
+		rc = FAILED;
+		goto abts_err;
+	}
+
+	/* Initialize rest of io_req fields */
+	abts_io_req->sc_cmd = NULL;
+	abts_io_req->port = port;
+	abts_io_req->tgt = tgt;
+	abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */
+
+	abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
+	memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));
+
+	/* Fill FC header */
+	fc_hdr = &(abts_req->req_fc_hdr);
+
+	/* Obtain oxid and rxid for the original exchange to be aborted */
+	fc_hdr->fh_ox_id = htons(io_req->xid);
+	fc_hdr->fh_rx_id = htons(io_req->task->rx_wr_tx_rd.rx_id);
+
+	sid = tgt->sid;
+	did = rport->port_id;
+
+	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
+			   FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+			   FC_FC_SEQ_INIT, 0);
+
+	xid = abts_io_req->xid;
+	BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
+	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+	index = xid % BNX2FC_TASKS_PER_PAGE;
+
+	/* Initialize task context for this IO request */
+	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+	task = &(task_page[index]);
+	bnx2fc_init_mp_task(abts_io_req, task);
+
+	/*
+	 * ABTS task is a temporary task that will be cleaned up
+	 * irrespective of ABTS response. We need to start the timer
+	 * for the original exchange, as the CQE is posted for the original
+	 * IO request.
+	 *
+	 * Timer for ABTS is started only when it is originated by a
+	 * TM request. For the ABTS issued as part of ULP timeout,
+	 * scsi-ml maintains the timers.
+	 */
+
+	/* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
+	bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);
+
+	/* Obtain free SQ entry */
+	bnx2fc_add_2_sq(tgt, xid);
+
+	/* Ring doorbell */
+	bnx2fc_ring_doorbell(tgt);
+
+abts_err:
+	return rc;
+}
+
+int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
+{
+	struct fc_lport *lport;
+	struct bnx2fc_rport *tgt = io_req->tgt;
+	struct bnx2fc_hba *hba;
+	struct fcoe_port *port;
+	struct bnx2fc_cmd *cleanup_io_req;
+	struct fcoe_task_ctx_entry *task;
+	struct fcoe_task_ctx_entry *task_page;
+	int task_idx, index;
+	u16 xid, orig_xid;
+	int rc = 0;
+
+	/* ASSUMPTION: called with tgt_lock held */
+	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");
+
+	port = io_req->port;
+	hba = port->priv;
+	lport = port->lport;
+
+	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
+	if (!cleanup_io_req) {
+		printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
+		rc = -1;
+		goto cleanup_err;
+	}
+
+	/* Initialize rest of io_req fields */
+	cleanup_io_req->sc_cmd = NULL;
+	cleanup_io_req->port = port;
+	cleanup_io_req->tgt = tgt;
+	cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */
+
+	xid = cleanup_io_req->xid;
+
+	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+	index = xid % BNX2FC_TASKS_PER_PAGE;
+
+	/* Initialize task context for this IO request */
+	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+	task = &(task_page[index]);
+	orig_xid = io_req->xid;
+
+	BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);
+
+	bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);
+
+	/* Obtain free SQ entry */
+	bnx2fc_add_2_sq(tgt, xid);
+
+	/* Ring doorbell */
+	bnx2fc_ring_doorbell(tgt);
+
+cleanup_err:
+	return rc;
+}
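TMF, ABTS and cleanup all locate their task context the same way: the xid selects one page of task entries and a slot within it. For example, assuming BNX2FC_TASKS_PER_PAGE were 256 (a hypothetical value), xid 0x305 would resolve to page 3, slot 5:

	task_idx = xid / BNX2FC_TASKS_PER_PAGE;	/* which task_ctx page  */
	index    = xid % BNX2FC_TASKS_PER_PAGE;	/* slot within the page */
	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
	task = &task_page[index];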
+
+/**
+ * bnx2fc_eh_target_reset - Reset a target
+ *
+ * @sc_cmd:	SCSI command
+ *
+ * Called via the SCSI host template to send a task mgmt command
+ *	to the target and wait for the response
+ */
+int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
+{
+	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
+}
+
+/**
+ * bnx2fc_eh_device_reset - Reset a single LUN
+ *
+ * @sc_cmd:	SCSI command
+ *
+ * Called via the SCSI host template to send a task mgmt command
+ *	to the target and wait for the response
+ */
+int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
+{
+	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
+}
+
+/**
+ * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
+ *			SCSI command
+ *
+ * @sc_cmd:	SCSI_ML command pointer
+ *
+ * SCSI abort request handler
+ */
+int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
+{
+	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct bnx2fc_cmd *io_req;
+	struct fc_lport *lport;
+	struct bnx2fc_rport *tgt;
+	int rc = FAILED;
+
+	rc = fc_block_scsi_eh(sc_cmd);
+	if (rc)
+		return rc;
+
+	lport = shost_priv(sc_cmd->device->host);
+	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
+		printk(KERN_ALERT PFX "eh_abort: link not ready\n");
+		return rc;
+	}
+
+	tgt = (struct bnx2fc_rport *)&rp[1];
+
+	BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");
+
+	spin_lock_bh(&tgt->tgt_lock);
+	io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
+	if (!io_req) {
+		/* Command might have just completed */
+		printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
+		spin_unlock_bh(&tgt->tgt_lock);
+		return SUCCESS;
+	}
+	BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
+		      io_req->refcount.refcount.counter);
+
+	/* Hold IO request across abort processing */
+	kref_get(&io_req->refcount);
+
+	BUG_ON(tgt != io_req->tgt);
+
+	/* Remove the io_req from the active_q. */
+	/*
+	 * Task Mgmt functions (LUN RESET & TGT RESET) will not
+	 * issue an ABTS on this particular IO req, as the
+	 * io_req is no longer in the active_q.
+	 */
+	if (tgt->flush_in_prog) {
+		printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+			"flush in progress\n", io_req->xid);
+		kref_put(&io_req->refcount, bnx2fc_cmd_release);
+		spin_unlock_bh(&tgt->tgt_lock);
+		return SUCCESS;
+	}
+
+	if (io_req->on_active_queue == 0) {
+		printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+				"not on active_q\n", io_req->xid);
+		/*
+		 * This condition can happen only due to a FW bug,
+		 * where we do not receive a cleanup response from
+		 * the FW. Handle this case gracefully by erroring
+		 * the IO request back to SCSI-ml.
+		 */
+		bnx2fc_scsi_done(io_req, DID_ABORT);
+
+		kref_put(&io_req->refcount, bnx2fc_cmd_release);
+		spin_unlock_bh(&tgt->tgt_lock);
+		return SUCCESS;
+	}
+
+	/*
+	 * Only eh_abort processing will remove the IO from
+	 * the active_cmd_q before processing the request. This is
+	 * done to avoid race conditions between IOs aborted
+	 * as part of task management completion and eh_abort
+	 * processing.
+	 */
+	list_del_init(&io_req->link);
+	io_req->on_active_queue = 0;
+	/* Move IO req to retire queue */
+	list_add_tail(&io_req->link, &tgt->io_retire_queue);
+
+	init_completion(&io_req->tm_done);
+	io_req->wait_for_comp = 1;
+
+	if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
+		/* Cancel the current timer running on this io_req */
+		if (cancel_delayed_work(&io_req->timeout_work))
+			kref_put(&io_req->refcount,
+				 bnx2fc_cmd_release); /* drop timer hold */
+		set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
+		rc = bnx2fc_initiate_abts(io_req);
+	} else {
+		printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+				"already in abts processing\n", io_req->xid);
+		kref_put(&io_req->refcount, bnx2fc_cmd_release);
+		spin_unlock_bh(&tgt->tgt_lock);
+		return SUCCESS;
+	}
+	if (rc == FAILED) {
+		kref_put(&io_req->refcount, bnx2fc_cmd_release);
+		spin_unlock_bh(&tgt->tgt_lock);
+		return rc;
+	}
+	spin_unlock_bh(&tgt->tgt_lock);
+
+	wait_for_completion(&io_req->tm_done);
+
+	spin_lock_bh(&tgt->tgt_lock);
+	io_req->wait_for_comp = 0;
+	if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+				    &io_req->req_flags))) {
+		/* Let the scsi-ml try to recover this command */
+		printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
+		       io_req->xid);
+		rc = FAILED;
+	} else {
+		/*
+		 * We come here even when there was a race condition
+		 * between timeout and abts completion, and abts
+		 * completion happens just in time.
+		 */
+		BNX2FC_IO_DBG(io_req, "abort succeeded\n");
+		rc = SUCCESS;
+		bnx2fc_scsi_done(io_req, DID_ABORT);
+		kref_put(&io_req->refcount, bnx2fc_cmd_release);
+	}
+
+	/* release the reference taken in eh_abort */
+	kref_put(&io_req->refcount, bnx2fc_cmd_release);
+	spin_unlock_bh(&tgt->tgt_lock);
+	return rc;
+}
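The race between eh_abort and a TM-driven abort is settled entirely by the atomic bit op: test_and_set_bit() guarantees that exactly one caller observes the flag clear. The decision point, in isolation:

	if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
		/* this path won the race: issue the ABTS */
	} else {
		/* abort processing already under way elsewhere */
	}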
+
+void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
+				  struct fcoe_task_ctx_entry *task,
+				  u8 num_rq)
+{
+	BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
+			      "refcnt = %d, cmd_type = %d\n",
+		   io_req->refcount.refcount.counter, io_req->cmd_type);
+	bnx2fc_scsi_done(io_req, DID_ERROR);
+	kref_put(&io_req->refcount, bnx2fc_cmd_release);
+}
+
+void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
+			       struct fcoe_task_ctx_entry *task,
+			       u8 num_rq)
+{
+	u32 r_ctl;
+	u32 r_a_tov = FC_DEF_R_A_TOV;
+	u8 issue_rrq = 0;
+	struct bnx2fc_rport *tgt = io_req->tgt;
+
+	BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x"
+			      "refcnt = %d, cmd_type = %d\n",
+		   io_req->xid,
+		   io_req->refcount.refcount.counter, io_req->cmd_type);
+
+	if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+				       &io_req->req_flags)) {
+		BNX2FC_IO_DBG(io_req, "Timer context finished processing"
+				" this io\n");
+		return;
+	}
+
+	/* Do not issue RRQ as this IO is already cleaned up */
+	if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
+				&io_req->req_flags))
+		goto io_compl;
+
+	/*
+	 * For ABTS issued due to SCSI eh_abort_handler, timeout
+	 * values are maintained by scsi-ml itself. Cancel timeout
+	 * in case ABTS issued as part of task management function
+	 * or due to FW error.
+	 */
+	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
+		if (cancel_delayed_work(&io_req->timeout_work))
+			kref_put(&io_req->refcount,
+				 bnx2fc_cmd_release); /* drop timer hold */
+
+	r_ctl = task->cmn.general.rsp_info.abts_rsp.r_ctl;
+
+	switch (r_ctl) {
+	case FC_RCTL_BA_ACC:
+		/*
+		 * Don't release this cmd yet. It will be released
+		 * after we get the RRQ response.
+		 */
+		BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
+		issue_rrq = 1;
+		break;
+
+	case FC_RCTL_BA_RJT:
+		BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
+		break;
+	default:
+		printk(KERN_ERR PFX "Unknown ABTS response\n");
+		break;
+	}
+
+	if (issue_rrq) {
+		BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
+		set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
+	}
+	set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
+	bnx2fc_cmd_timer_set(io_req, r_a_tov);
+
+io_compl:
+	if (io_req->wait_for_comp) {
+		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+				       &io_req->req_flags))
+			complete(&io_req->tm_done);
+	} else {
+		/*
+		 * We end up here when ABTS is issued in an
+		 * asynchronous context, i.e. as part of task
+		 * management completion, when an FW error is
+		 * received, or when the ABTS is issued because
+		 * the IO timed out.
+		 */
+
+		if (io_req->on_active_queue) {
+			list_del_init(&io_req->link);
+			io_req->on_active_queue = 0;
+			/* Move IO req to retire queue */
+			list_add_tail(&io_req->link, &tgt->io_retire_queue);
+		}
+		bnx2fc_scsi_done(io_req, DID_ERROR);
+		kref_put(&io_req->refcount, bnx2fc_cmd_release);
+	}
+}
+
+static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
+{
+	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+	struct bnx2fc_rport *tgt = io_req->tgt;
+	struct list_head *list;
+	struct list_head *tmp;
+	struct bnx2fc_cmd *cmd;
+	int tm_lun = sc_cmd->device->lun;
+	int rc = 0;
+	int lun;
+
+	/* called with tgt_lock held */
+	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
+	/*
+	 * Walk through the active_ios queue and abort each IO
+	 * that matches the LUN that was reset
+	 */
+	list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+		BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
+		cmd = (struct bnx2fc_cmd *)list;
+		lun = cmd->sc_cmd->device->lun;
+		if (lun == tm_lun) {
+			/* Initiate ABTS on this cmd */
+			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+					      &cmd->req_flags)) {
+				/* cancel the IO timeout */
+				if (cancel_delayed_work(&io_req->timeout_work))
+					kref_put(&io_req->refcount,
+						 bnx2fc_cmd_release);
+							/* timer hold */
+				rc = bnx2fc_initiate_abts(cmd);
+				/* ABTS shouldn't fail in this context */
+				WARN_ON(rc != SUCCESS);
+			} else
+				printk(KERN_ERR PFX "lun_rst: abts already in"
+					" progress for this IO 0x%x\n",
+					cmd->xid);
+		}
+	}
+}
+
+static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
+{
+	struct bnx2fc_rport *tgt = io_req->tgt;
+	struct list_head *list;
+	struct list_head *tmp;
+	struct bnx2fc_cmd *cmd;
+	int rc = 0;
+
+	/* called with tgt_lock held */
+	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
+	/*
+	 * Walk through the active_ios queue and abort every IO,
+	 * since the entire target was reset
+	 */
+	list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+		BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
+		cmd = (struct bnx2fc_cmd *)list;
+		/* Initiate ABTS */
+		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+							&cmd->req_flags)) {
+			/* cancel the IO timeout */
+			if (cancel_delayed_work(&io_req->timeout_work))
+				kref_put(&io_req->refcount,
+					 bnx2fc_cmd_release); /* timer hold */
+			rc = bnx2fc_initiate_abts(cmd);
+			/* ABTS shouldn't fail in this context */
+			WARN_ON(rc != SUCCESS);
+
+		} else
+			printk(KERN_ERR PFX "tgt_rst: abts already in progress"
+				" for this IO 0x%x\n", cmd->xid);
+	}
+}
+
+void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
+			     struct fcoe_task_ctx_entry *task, u8 num_rq)
+{
+	struct bnx2fc_mp_req *tm_req;
+	struct fc_frame_header *fc_hdr;
+	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+	u64 *hdr;
+	u64 *temp_hdr;
+	void *rsp_buf;
+
+	/* Called with tgt_lock held */
+	BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");
+
+	if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
+		set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
+	else {
+		/* TM has already timed out and we got
+		 * delayed completion. Ignore completion
+		 * processing.
+		 */
+		return;
+	}
+
+	tm_req = &(io_req->mp_req);
+	fc_hdr = &(tm_req->resp_fc_hdr);
+	hdr = (u64 *)fc_hdr;
+	temp_hdr = (u64 *)
+		&task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+	hdr[0] = cpu_to_be64(temp_hdr[0]);
+	hdr[1] = cpu_to_be64(temp_hdr[1]);
+	hdr[2] = cpu_to_be64(temp_hdr[2]);
+
+	tm_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;
+
+	rsp_buf = tm_req->resp_buf;
+
+	if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
+		bnx2fc_parse_fcp_rsp(io_req,
+				     (struct fcoe_fcp_rsp_payload *)
+				     rsp_buf, num_rq);
+		if (io_req->fcp_rsp_code == 0) {
+			/* TM successful */
+			if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
+				bnx2fc_lun_reset_cmpl(io_req);
+			else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
+				bnx2fc_tgt_reset_cmpl(io_req);
+		}
+	} else {
+		printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
+			fc_hdr->fh_r_ctl);
+	}
+	if (!sc_cmd->SCp.ptr) {
+		printk(KERN_ALERT PFX "tm_compl: SCp.ptr is NULL\n");
+		return;
+	}
+	switch (io_req->fcp_status) {
+	case FC_GOOD:
+		if (io_req->cdb_status == 0) {
+			/* Good IO completion */
+			sc_cmd->result = DID_OK << 16;
+		} else {
+			/* Transport status is good, SCSI status not good */
+			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+		}
+		if (io_req->fcp_resid)
+			scsi_set_resid(sc_cmd, io_req->fcp_resid);
+		break;
+
+	default:
+		BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
+			   io_req->fcp_status);
+		break;
+	}
+
+	sc_cmd = io_req->sc_cmd;
+	io_req->sc_cmd = NULL;
+
+	/* check if the io_req exists in tgt's tmf_q */
+	if (io_req->on_tmf_queue) {
+
+		list_del_init(&io_req->link);
+		io_req->on_tmf_queue = 0;
+	} else {
+
+		printk(KERN_ALERT PFX "Command not on active_cmd_queue!\n");
+		return;
+	}
+
+	sc_cmd->SCp.ptr = NULL;
+	sc_cmd->scsi_done(sc_cmd);
+
+	kref_put(&io_req->refcount, bnx2fc_cmd_release);
+	if (io_req->wait_for_comp) {
+		BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
+		complete(&io_req->tm_done);
+	}
+}
+
+static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
+			   int bd_index)
+{
+	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
+	int frag_size, sg_frags;
+
+	sg_frags = 0;
+	while (sg_len) {
+		if (sg_len >= BNX2FC_BD_SPLIT_SZ)
+			frag_size = BNX2FC_BD_SPLIT_SZ;
+		else
+			frag_size = sg_len;
+		bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
+		bd[bd_index + sg_frags].buf_addr_hi  = addr >> 32;
+		bd[bd_index + sg_frags].buf_len = (u16)frag_size;
+		bd[bd_index + sg_frags].flags = 0;
+
+		addr += (u64) frag_size;
+		sg_frags++;
+		sg_len -= frag_size;
+	}
+	return sg_frags;
+
+}
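A worked example of the splitting loop, assuming a split size of 32 KiB (the actual BNX2FC_BD_SPLIT_SZ value may differ): a 100 KiB scatter element yields four BDs of 32 KiB, 32 KiB, 32 KiB and 4 KiB. In other words, for one element:

	/* DIV_ROUND_UP() is from <linux/kernel.h> */
	sg_frags = DIV_ROUND_UP(sg_len, BNX2FC_BD_SPLIT_SZ);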
+
+static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
+{
+	struct scsi_cmnd *sc = io_req->sc_cmd;
+	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
+	struct scatterlist *sg;
+	int byte_count = 0;
+	int sg_count = 0;
+	int bd_count = 0;
+	int sg_frags;
+	unsigned int sg_len;
+	u64 addr;
+	int i;
+
+	sg_count = scsi_dma_map(sc);
+	scsi_for_each_sg(sc, sg, sg_count, i) {
+		sg_len = sg_dma_len(sg);
+		addr = sg_dma_address(sg);
+		if (sg_len > BNX2FC_MAX_BD_LEN) {
+			sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
+						   bd_count);
+		} else {
+
+			sg_frags = 1;
+			bd[bd_count].buf_addr_lo = addr & 0xffffffff;
+			bd[bd_count].buf_addr_hi  = addr >> 32;
+			bd[bd_count].buf_len = (u16)sg_len;
+			bd[bd_count].flags = 0;
+		}
+		bd_count += sg_frags;
+		byte_count += sg_len;
+	}
+	if (byte_count != scsi_bufflen(sc))
+		printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
+			"task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
+			io_req->xid);
+	return bd_count;
+}
+
+static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
+{
+	struct scsi_cmnd *sc = io_req->sc_cmd;
+	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
+	int bd_count;
+
+	if (scsi_sg_count(sc)) {
+		bd_count = bnx2fc_map_sg(io_req);
+	} else {
+		bd_count = 0;
+		bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
+		bd[0].buf_len = bd[0].flags = 0;
+	}
+	io_req->bd_tbl->bd_valid = bd_count;
+}
+
+static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
+{
+	struct scsi_cmnd *sc = io_req->sc_cmd;
+
+	if (io_req->bd_tbl->bd_valid && sc) {
+		scsi_dma_unmap(sc);
+		io_req->bd_tbl->bd_valid = 0;
+	}
+}
+
+void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
+				  struct fcp_cmnd *fcp_cmnd)
+{
+	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+	char tag[2];
+
+	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+
+	int_to_scsilun(sc_cmd->device->lun,
+			(struct scsi_lun *) fcp_cmnd->fc_lun);
+
+	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
+	memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
+
+	fcp_cmnd->fc_cmdref = 0;
+	fcp_cmnd->fc_pri_ta = 0;
+	fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
+	fcp_cmnd->fc_flags = io_req->io_req_flags;
+
+	if (scsi_populate_tag_msg(sc_cmd, tag)) {
+		switch (tag[0]) {
+		case HEAD_OF_QUEUE_TAG:
+			fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ;
+			break;
+		case ORDERED_QUEUE_TAG:
+			fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED;
+			break;
+		default:
+			fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
+			break;
+		}
+	} else {
+		fcp_cmnd->fc_pri_ta = 0;
+	}
+}
+
+static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
+				 struct fcoe_fcp_rsp_payload *fcp_rsp,
+				 u8 num_rq)
+{
+	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+	struct bnx2fc_rport *tgt = io_req->tgt;
+	u8 rsp_flags = fcp_rsp->fcp_flags.flags;
+	u32 rq_buff_len = 0;
+	int i;
+	unsigned char *rq_data;
+	unsigned char *dummy;
+	int fcp_sns_len = 0;
+	int fcp_rsp_len = 0;
+
+	io_req->fcp_status = FC_GOOD;
+	io_req->fcp_resid = fcp_rsp->fcp_resid;
+
+	io_req->scsi_comp_flags = rsp_flags;
+	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
+				fcp_rsp->scsi_status_code;
+
+	/* Fetch fcp_rsp_info and fcp_sns_info if available */
+	if (num_rq) {
+
+		/*
+		 * We do not anticipate num_rq > 1, as the Linux-defined
+		 * SCSI_SENSE_BUFFERSIZE is 96 bytes plus 8 bytes of
+		 * FCP_RSP_INFO; a single 256-byte rq buffer is good
+		 * enough to hold this.
+		 */
+
+		if (rsp_flags &
+		    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
+			fcp_rsp_len = rq_buff_len
+					= fcp_rsp->fcp_rsp_len;
+		}
+
+		if (rsp_flags &
+		    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
+			fcp_sns_len = fcp_rsp->fcp_sns_len;
+			rq_buff_len += fcp_rsp->fcp_sns_len;
+		}
+
+		io_req->fcp_rsp_len = fcp_rsp_len;
+		io_req->fcp_sns_len = fcp_sns_len;
+
+		if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
+			/* Invalid sense length. */
+			printk(KERN_ALERT PFX "invalid sns length %d\n",
+				rq_buff_len);
+			/* reset rq_buff_len */
+			rq_buff_len =  num_rq * BNX2FC_RQ_BUF_SZ;
+		}
+
+		rq_data = bnx2fc_get_next_rqe(tgt, 1);
+
+		if (num_rq > 1) {
+			/* We do not need extra sense data */
+			for (i = 1; i < num_rq; i++)
+				dummy = bnx2fc_get_next_rqe(tgt, 1);
+		}
+
+		/* fetch fcp_rsp_code */
+		if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
+			/* Only for task management function */
+			io_req->fcp_rsp_code = rq_data[3];
+			printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
+				io_req->fcp_rsp_code);
+		}
+
+		/* fetch sense data */
+		rq_data += fcp_rsp_len;
+
+		if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
+			printk(KERN_ERR PFX "Truncating sense buffer\n");
+			fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
+		}
+
+		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+		if (fcp_sns_len)
+			memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
+
+		/* return RQ entries */
+		for (i = 0; i < num_rq; i++)
+			bnx2fc_return_rqe(tgt, 1);
+	}
+}
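The RQ buffer decoded above follows the FCP_RSP layout: the response-info bytes come first, immediately followed by the sense bytes, and the response code sits at byte 3 of FCP_RSP_INFO, which is why rq_data[3] is read. The offsets, sketched:

	/* rq_data layout: [ FCP_RSP_INFO | FCP_SNS_INFO ]
	 *                   fcp_rsp_len    fcp_sns_len  bytes
	 */
	u8 rsp_code = rq_data[3];		/* RSP_CODE field     */
	u8 *sense = rq_data + fcp_rsp_len;	/* sense bytes follow */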
+
+/**
+ * bnx2fc_queuecommand - Queuecommand function of the scsi template
+ *
+ * @host:	The Scsi_Host the command was issued to
+ * @sc_cmd:	struct scsi_cmnd to be executed
+ *
+ * This is the IO strategy routine, called by SCSI-ML
+ **/
+int bnx2fc_queuecommand(struct Scsi_Host *host,
+			struct scsi_cmnd *sc_cmd)
+{
+	struct fc_lport *lport = shost_priv(host);
+	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct bnx2fc_rport *tgt;
+	struct bnx2fc_cmd *io_req;
+	int rc = 0;
+	int rval;
+
+	rval = fc_remote_port_chkready(rport);
+	if (rval) {
+		sc_cmd->result = rval;
+		sc_cmd->scsi_done(sc_cmd);
+		return 0;
+	}
+
+	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
+		rc = SCSI_MLQUEUE_HOST_BUSY;
+		goto exit_qcmd;
+	}
+
+	/* rport and tgt are allocated together, so tgt should be non-NULL */
+	tgt = (struct bnx2fc_rport *)&rp[1];
+
+	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+		/*
+		 * Session is not offloaded yet. Let SCSI-ml retry
+		 * the command.
+		 */
+		rc = SCSI_MLQUEUE_TARGET_BUSY;
+		goto exit_qcmd;
+	}
+
+	io_req = bnx2fc_cmd_alloc(tgt);
+	if (!io_req) {
+		rc = SCSI_MLQUEUE_HOST_BUSY;
+		goto exit_qcmd;
+	}
+	io_req->sc_cmd = sc_cmd;
+
+	if (bnx2fc_post_io_req(tgt, io_req)) {
+		printk(KERN_ERR PFX "Unable to post io_req\n");
+		rc = SCSI_MLQUEUE_HOST_BUSY;
+		goto exit_qcmd;
+	}
+exit_qcmd:
+	return rc;
+}
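The return values follow the standard queuecommand contract: 0 means the command was accepted, which is why the chkready failure completes the command through scsi_done() and still returns 0, while the SCSI_MLQUEUE_*_BUSY codes ask SCSI-ml to requeue and retry later. The early-completion pattern, in isolation (the result shown is one value fc_remote_port_chkready() can return):

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;		/* e.g. DID_IMM_RETRY << 16 */
		sc_cmd->scsi_done(sc_cmd);	/* complete it right away   */
		return 0;			/* ...and report "queued"   */
	}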
+
+void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
+				   struct fcoe_task_ctx_entry *task,
+				   u8 num_rq)
+{
+	struct fcoe_fcp_rsp_payload *fcp_rsp;
+	struct bnx2fc_rport *tgt = io_req->tgt;
+	struct scsi_cmnd *sc_cmd;
+	struct Scsi_Host *host;
+
+	/* scsi_cmd_cmpl is called with tgt lock held */
+
+	if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
+		/* we will not receive ABTS response for this IO */
+		BNX2FC_IO_DBG(io_req, "Timer context finished processing "
+			   "this scsi cmd\n");
+	}
+
+	/* Cancel the timeout_work, as we received IO completion */
+	if (cancel_delayed_work(&io_req->timeout_work))
+		kref_put(&io_req->refcount,
+			 bnx2fc_cmd_release); /* drop timer hold */
+
+	sc_cmd = io_req->sc_cmd;
+	if (sc_cmd == NULL) {
+		printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
+		return;
+	}
+
+	/* Fetch fcp_rsp from task context and perform cmd completion */
+	fcp_rsp = (struct fcoe_fcp_rsp_payload *)
+		   &(task->cmn.general.rsp_info.fcp_rsp.payload);
+
+	/* parse fcp_rsp and obtain sense data from RQ if available */
+	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
+
+	host = sc_cmd->device->host;
+	if (!sc_cmd->SCp.ptr) {
+		printk(KERN_ERR PFX "SCp.ptr is NULL\n");
+		return;
+	}
+	io_req->sc_cmd = NULL;
+
+	if (io_req->on_active_queue) {
+		list_del_init(&io_req->link);
+		io_req->on_active_queue = 0;
+		/* Move IO req to retire queue */
+		list_add_tail(&io_req->link, &tgt->io_retire_queue);
+	} else {
+		/* This should not happen, but could have been pulled
+		 * by bnx2fc_flush_active_ios(), or during a race
+		 * between command abort and (late) completion.
+		 */
+		BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
+		if (io_req->wait_for_comp)
+			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+					       &io_req->req_flags))
+				complete(&io_req->tm_done);
+	}
+
+	bnx2fc_unmap_sg_list(io_req);
+
+	switch (io_req->fcp_status) {
+	case FC_GOOD:
+		if (io_req->cdb_status == 0) {
+			/* Good IO completion */
+			sc_cmd->result = DID_OK << 16;
+		} else {
+			/* Transport status is good, SCSI status not good */
+			BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
+				 " fcp_resid = 0x%x\n",
+				io_req->cdb_status, io_req->fcp_resid);
+			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+		}
+		if (io_req->fcp_resid)
+			scsi_set_resid(sc_cmd, io_req->fcp_resid);
+		break;
+	default:
+		printk(KERN_ALERT PFX "scsi_cmd_compl: fcp_status = %d\n",
+			io_req->fcp_status);
+		break;
+	}
+	sc_cmd->SCp.ptr = NULL;
+	sc_cmd->scsi_done(sc_cmd);
+	kref_put(&io_req->refcount, bnx2fc_cmd_release);
+}
+
+static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
+			       struct bnx2fc_cmd *io_req)
+{
+	struct fcoe_task_ctx_entry *task;
+	struct fcoe_task_ctx_entry *task_page;
+	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+	struct fcoe_port *port = tgt->port;
+	struct bnx2fc_hba *hba = port->priv;
+	struct fc_lport *lport = port->lport;
+	struct fcoe_dev_stats *stats;
+	int task_idx, index;
+	u16 xid;
+
+	/* Initialize rest of io_req fields */
+	io_req->cmd_type = BNX2FC_SCSI_CMD;
+	io_req->port = port;
+	io_req->tgt = tgt;
+	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
+	sc_cmd->SCp.ptr = (char *)io_req;
+
+	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		io_req->io_req_flags = BNX2FC_READ;
+		stats->InputRequests++;
+		stats->InputBytes += io_req->data_xfer_len;
+	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+		io_req->io_req_flags = BNX2FC_WRITE;
+		stats->OutputRequests++;
+		stats->OutputBytes += io_req->data_xfer_len;
+	} else {
+		io_req->io_req_flags = 0;
+		stats->ControlRequests++;
+	}
+	put_cpu();
+
+	xid = io_req->xid;
+
+	/* Build buffer descriptor list for firmware from sg list */
+	bnx2fc_build_bd_list_from_sg(io_req);
+
+	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+	index = xid % BNX2FC_TASKS_PER_PAGE;
+
+	/* Initialize task context for this IO request */
+	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+	task = &(task_page[index]);
+	bnx2fc_init_task(io_req, task);
+
+	spin_lock_bh(&tgt->tgt_lock);
+
+	if (tgt->flush_in_prog) {
+		printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
+		kref_put(&io_req->refcount, bnx2fc_cmd_release);
+		spin_unlock_bh(&tgt->tgt_lock);
+		return -EAGAIN;
+	}
+
+	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+		printk(KERN_ERR PFX "Session not ready...post_io\n");
+		kref_put(&io_req->refcount, bnx2fc_cmd_release);
+		spin_unlock_bh(&tgt->tgt_lock);
+		return -EAGAIN;
+	}
+
+	/* Time IO req */
+	bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
+	/* Obtain free SQ entry */
+	bnx2fc_add_2_sq(tgt, xid);
+
+	/* Enqueue the io_req to active_cmd_queue */
+
+	io_req->on_active_queue = 1;
+	/* move io_req from pending_queue to active_queue */
+	list_add_tail(&io_req->link, &tgt->active_cmd_queue);
+
+	/* Ring doorbell */
+	bnx2fc_ring_doorbell(tgt);
+	spin_unlock_bh(&tgt->tgt_lock);
+	return 0;
+}
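The get_cpu()/put_cpu() pair brackets the per-CPU statistics update so the task cannot migrate between fetching the per-CPU pointer and the increments; preemption stays disabled across the window. The pattern, reduced:

	stats = per_cpu_ptr(lport->dev_stats, get_cpu()); /* preempt off */
	stats->InputRequests++;				  /* safe update */
	put_cpu();					  /* preempt on  */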

+ 844 - 0
drivers/scsi/bnx2fc/bnx2fc_tgt.c

@@ -0,0 +1,844 @@
+/* bnx2fc_tgt.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * Handles operations such as session offload/upload etc, and manages
+ * session resources such as connection id and qp resources.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+static void bnx2fc_upld_timer(unsigned long data);
+static void bnx2fc_ofld_timer(unsigned long data);
+static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
+			   struct fcoe_port *port,
+			   struct fc_rport_priv *rdata);
+static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
+				struct bnx2fc_rport *tgt);
+static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
+			      struct bnx2fc_rport *tgt);
+static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
+			      struct bnx2fc_rport *tgt);
+static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id);
+
+static void bnx2fc_upld_timer(unsigned long data)
+{
+
+	struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+
+	BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
+	/* fake upload completion */
+	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+	set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+	wake_up_interruptible(&tgt->upld_wait);
+}
+
+static void bnx2fc_ofld_timer(unsigned long data)
+{
+
+	struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+
+	BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
+	/* NOTE: This function should never be called, as
+	 * offload should never time out.
+	 */
+	/*
+	 * If the timer has expired, this session is dead.
+	 * Clear the offloaded flag and log out of this device.
+	 * Since the OFFLOADED flag is cleared, this case
+	 * will be considered an offload error, the
+	 * port will be logged off, and the conn_id and session
+	 * resources are freed up in bnx2fc_offload_session.
+	 */
+	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+	wake_up_interruptible(&tgt->ofld_wait);
+}
+
+static void bnx2fc_offload_session(struct fcoe_port *port,
+					struct bnx2fc_rport *tgt,
+					struct fc_rport_priv *rdata)
+{
+	struct fc_lport *lport = rdata->local_port;
+	struct fc_rport *rport = rdata->rport;
+	struct bnx2fc_hba *hba = port->priv;
+	int rval;
+	int i = 0;
+
+	/* Initialize bnx2fc_rport */
+	/* NOTE: tgt is already bzero'd */
+	rval = bnx2fc_init_tgt(tgt, port, rdata);
+	if (rval) {
+		printk(KERN_ERR PFX "Failed to allocate conn id for "
+			"port_id (%6x)\n", rport->port_id);
+		goto ofld_err;
+	}
+
+	/* Allocate session resources */
+	rval = bnx2fc_alloc_session_resc(hba, tgt);
+	if (rval) {
+		printk(KERN_ERR PFX "Failed to allocate resources\n");
+		goto ofld_err;
+	}
+
+	/*
+	 * Initialize FCoE session offload process.
+	 * Upon completion of offload process add
+	 * rport to list of rports
+	 */
+retry_ofld:
+	clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+	rval = bnx2fc_send_session_ofld_req(port, tgt);
+	if (rval) {
+		printk(KERN_ERR PFX "ofld_req failed\n");
+		goto ofld_err;
+	}
+
+	/*
+	 * Wait until the session is offloaded and enabled. 3 seconds
+	 * should be ample time for this process to complete.
+	 */
+	setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
+	mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+	wait_event_interruptible(tgt->ofld_wait,
+				 (test_bit(
+				  BNX2FC_FLAG_OFLD_REQ_CMPL,
+				  &tgt->flags)));
+	if (signal_pending(current))
+		flush_signals(current);
+
+	del_timer_sync(&tgt->ofld_timer);
+
+	if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
+		if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE,
+				       &tgt->flags)) {
+			BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, "
+				"retry ofld..%d\n", i++);
+			msleep_interruptible(1000);
+			if (i > 3) {
+				i = 0;
+				goto ofld_err;
+			}
+			goto retry_ofld;
+		}
+		goto ofld_err;
+	}
+	if (bnx2fc_map_doorbell(tgt)) {
+		printk(KERN_ERR PFX "map doorbell failed - no mem\n");
+		/* upload will take care of cleaning up sess resc */
+		lport->tt.rport_logoff(rdata);
+	}
+	return;
+
+ofld_err:
+	/* couldn't offload the session. log off from this rport */
+	BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
+	lport->tt.rport_logoff(rdata);
+	/* Free session resources */
+	bnx2fc_free_session_resc(hba, tgt);
+	if (tgt->fcoe_conn_id != -1)
+		bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
+}
+
+void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
+{
+	struct bnx2fc_cmd *io_req;
+	struct list_head *list;
+	struct list_head *tmp;
+	int rc;
+	int i = 0;
+	BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n",
+		       tgt->num_active_ios.counter);
+
+	spin_lock_bh(&tgt->tgt_lock);
+	tgt->flush_in_prog = 1;
+
+	list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+		i++;
+		io_req = (struct bnx2fc_cmd *)list;
+		list_del_init(&io_req->link);
+		io_req->on_active_queue = 0;
+		BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n");
+
+		if (cancel_delayed_work(&io_req->timeout_work)) {
+			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+						&io_req->req_flags)) {
+				/* Handle eh_abort timeout */
+				BNX2FC_IO_DBG(io_req, "eh_abort for IO "
+					      "cleaned up\n");
+				complete(&io_req->tm_done);
+			}
+			kref_put(&io_req->refcount,
+				 bnx2fc_cmd_release); /* drop timer hold */
+		}
+
+		set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags);
+		set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
+		rc = bnx2fc_initiate_cleanup(io_req);
+		BUG_ON(rc);
+	}
+
+	list_for_each_safe(list, tmp, &tgt->els_queue) {
+		i++;
+		io_req = (struct bnx2fc_cmd *)list;
+		list_del_init(&io_req->link);
+		io_req->on_active_queue = 0;
+
+		BNX2FC_IO_DBG(io_req, "els_queue cleanup\n");
+
+		if (cancel_delayed_work(&io_req->timeout_work))
+			kref_put(&io_req->refcount,
+				 bnx2fc_cmd_release); /* drop timer hold */
+
+		if ((io_req->cb_func) && (io_req->cb_arg)) {
+			io_req->cb_func(io_req->cb_arg);
+			io_req->cb_arg = NULL;
+		}
+
+		rc = bnx2fc_initiate_cleanup(io_req);
+		BUG_ON(rc);
+	}
+
+	list_for_each_safe(list, tmp, &tgt->io_retire_queue) {
+		i++;
+		io_req = (struct bnx2fc_cmd *)list;
+		list_del_init(&io_req->link);
+
+		BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
+
+		if (cancel_delayed_work(&io_req->timeout_work))
+			kref_put(&io_req->refcount, bnx2fc_cmd_release);
+
+		clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
+	}
+
+	BNX2FC_TGT_DBG(tgt, "IOs flushed = %d\n", i);
+	i = 0;
+	spin_unlock_bh(&tgt->tgt_lock);
+	/* wait for active_ios to go to 0 */
+	while ((tgt->num_active_ios.counter != 0) && (i++ < BNX2FC_WAIT_CNT))
+		msleep(25);
+	if (tgt->num_active_ios.counter != 0)
+		printk(KERN_ERR PFX "CLEANUP on port 0x%x:"
+				    " active_ios = %d\n",
+			tgt->rdata->ids.port_id, tgt->num_active_ios.counter);
+	spin_lock_bh(&tgt->tgt_lock);
+	tgt->flush_in_prog = 0;
+	spin_unlock_bh(&tgt->tgt_lock);
+}
+
+static void bnx2fc_upload_session(struct fcoe_port *port,
+					struct bnx2fc_rport *tgt)
+{
+	struct bnx2fc_hba *hba = port->priv;
+
+	BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
+		tgt->num_active_ios.counter);
+
+	/*
+	 * Called with hba->hba_mutex held.
+	 * This is a blocking call
+	 */
+	clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+	bnx2fc_send_session_disable_req(port, tgt);
+
+	/*
+	 * Wait for the upload to complete. 3 seconds
+	 * should be sufficient time for this process to complete.
+	 */
+	setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
+	mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+	BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
+	wait_event_interruptible(tgt->upld_wait,
+				 (test_bit(
+				  BNX2FC_FLAG_UPLD_REQ_COMPL,
+				  &tgt->flags)));
+
+	if (signal_pending(current))
+		flush_signals(current);
+
+	del_timer_sync(&tgt->upld_timer);
+
+	/*
+	 * Traverse through the active_q and tmf_q and clean up
+	 * the IOs in these lists
+	 */
+	BNX2FC_TGT_DBG(tgt, "flush/upload - disable wait flags = 0x%lx\n",
+		       tgt->flags);
+	bnx2fc_flush_active_ios(tgt);
+
+	/* Issue destroy KWQE */
+	if (test_bit(BNX2FC_FLAG_DISABLED, &tgt->flags)) {
+		BNX2FC_TGT_DBG(tgt, "send destroy req\n");
+		clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+		bnx2fc_send_session_destroy_req(hba, tgt);
+
+		/* wait for destroy to complete */
+		setup_timer(&tgt->upld_timer,
+			    bnx2fc_upld_timer, (unsigned long)tgt);
+		mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+		wait_event_interruptible(tgt->upld_wait,
+					 (test_bit(
+					  BNX2FC_FLAG_UPLD_REQ_COMPL,
+					  &tgt->flags)));
+
+		if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
+			printk(KERN_ERR PFX "ERROR!! destroy timed out\n");
+
+		BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
+			tgt->flags);
+		if (signal_pending(current))
+			flush_signals(current);
+
+		del_timer_sync(&tgt->upld_timer);
+
+	} else
+		printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy"
+				" not sent to FW\n");
+
+	/* Free session resources */
+	spin_lock_bh(&tgt->cq_lock);
+	bnx2fc_free_session_resc(hba, tgt);
+	bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
+	spin_unlock_bh(&tgt->cq_lock);
+}
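Offload and upload share the same bounded-wait skeleton: a one-shot timer whose handler sets the completion flag and wakes the waiter, so wait_event_interruptible() cannot block forever even if the firmware never answers. The skeleton:

	setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
	mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
	wait_event_interruptible(tgt->upld_wait,
				 test_bit(BNX2FC_FLAG_UPLD_REQ_COMPL,
					  &tgt->flags));
	del_timer_sync(&tgt->upld_timer);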
+
+static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
+			   struct fcoe_port *port,
+			   struct fc_rport_priv *rdata)
+{
+
+	struct fc_rport *rport = rdata->rport;
+	struct bnx2fc_hba *hba = port->priv;
+
+	tgt->rport = rport;
+	tgt->rdata = rdata;
+	tgt->port = port;
+
+	if (hba->num_ofld_sess >= BNX2FC_NUM_MAX_SESS) {
+		BNX2FC_TGT_DBG(tgt, "exceeded max sessions. logoff this tgt\n");
+		tgt->fcoe_conn_id = -1;
+		return -1;
+	}
+
+	tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt);
+	if (tgt->fcoe_conn_id == -1)
+		return -1;
+
+	BNX2FC_TGT_DBG(tgt, "init_tgt - conn_id = 0x%x\n", tgt->fcoe_conn_id);
+
+	tgt->max_sqes = BNX2FC_SQ_WQES_MAX;
+	tgt->max_rqes = BNX2FC_RQ_WQES_MAX;
+	tgt->max_cqes = BNX2FC_CQ_WQES_MAX;
+
+	/* Initialize the toggle bit */
+	tgt->sq_curr_toggle_bit = 1;
+	tgt->cq_curr_toggle_bit = 1;
+	tgt->sq_prod_idx = 0;
+	tgt->cq_cons_idx = 0;
+	tgt->rq_prod_idx = 0x8000;
+	tgt->rq_cons_idx = 0;
+	atomic_set(&tgt->num_active_ios, 0);
+
+	tgt->work_time_slice = 2;
+
+	spin_lock_init(&tgt->tgt_lock);
+	spin_lock_init(&tgt->cq_lock);
+
+	/* Initialize active_cmd_queue list */
+	INIT_LIST_HEAD(&tgt->active_cmd_queue);
+
+	/* Initialize IO retire queue */
+	INIT_LIST_HEAD(&tgt->io_retire_queue);
+
+	INIT_LIST_HEAD(&tgt->els_queue);
+
+	/* Initialize active_tm_queue list */
+	INIT_LIST_HEAD(&tgt->active_tm_queue);
+
+	init_waitqueue_head(&tgt->ofld_wait);
+	init_waitqueue_head(&tgt->upld_wait);
+
+	return 0;
+}
+
+/**
+ * bnx2fc_rport_event_handler - rport event callback from libfc
+ *
+ * Called after successful completion of a libfc-initiated target
+ * login. bnx2fc can then proceed with initiating the session
+ * establishment.
+ */
+void bnx2fc_rport_event_handler(struct fc_lport *lport,
+				struct fc_rport_priv *rdata,
+				enum fc_rport_event event)
+{
+	struct fcoe_port *port = lport_priv(lport);
+	struct bnx2fc_hba *hba = port->priv;
+	struct fc_rport *rport = rdata->rport;
+	struct fc_rport_libfc_priv *rp;
+	struct bnx2fc_rport *tgt;
+	u32 port_id;
+
+	BNX2FC_HBA_DBG(lport, "rport_event_hdlr: event = %d, port_id = 0x%x\n",
+		event, rdata->ids.port_id);
+	switch (event) {
+	case RPORT_EV_READY:
+		if (!rport) {
+			printk(KERN_ALERT PFX "rport is NULL: ERROR!\n");
+			break;
+		}
+
+		rp = rport->dd_data;
+		if (rport->port_id == FC_FID_DIR_SERV) {
+			/*
+			 * The bnx2fc_rport structure doesn't exist for
+			 * the directory server.
+			 * We should not come here, as lport will
+			 * take care of fabric login.
+			 */
+			printk(KERN_ALERT PFX "%x - rport_event_handler ERROR\n",
+				rdata->ids.port_id);
+			break;
+		}
+
+		if (rdata->spp_type != FC_TYPE_FCP) {
+			BNX2FC_HBA_DBG(lport, "not FCP type target."
+				   " not offloading\n");
+			break;
+		}
+		if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
+			BNX2FC_HBA_DBG(lport, "not FCP_TARGET"
+				   " not offloading\n");
+			break;
+		}
+
+		/*
+		 * The offload process is protected with the hba mutex.
+		 * Use the same mutex for the upload process too.
+		 */
+		mutex_lock(&hba->hba_mutex);
+		tgt = (struct bnx2fc_rport *)&rp[1];
+
+		/* This can happen when ADISC finds the same target */
+		if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
+			BNX2FC_TGT_DBG(tgt, "already offloaded\n");
+			mutex_unlock(&hba->hba_mutex);
+			return;
+		}
+
+		/*
+		 * Offload the session. This is a blocking call, and will
+		 * wait until the session is offloaded.
+		 */
+		bnx2fc_offload_session(port, tgt, rdata);
+
+		BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
+			hba->num_ofld_sess);
+
+		if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
+			/*
+			 * Session is offloaded and enabled. Map
+			 * doorbell register for this target
+			 */
+			BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
+			/* This counter is protected with hba mutex */
+			hba->num_ofld_sess++;
+
+			set_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+		} else {
+			/*
+			 * Offload or enable would have failed.
+			 * In offload/enable completion path, the
+			 * rport would have already been removed
+			 */
+			BNX2FC_TGT_DBG(tgt, "Port is being logged off as "
+				   "offloaded flag not set\n");
+		}
+		mutex_unlock(&hba->hba_mutex);
+		break;
+	case RPORT_EV_LOGO:
+	case RPORT_EV_FAILED:
+	case RPORT_EV_STOP:
+		port_id = rdata->ids.port_id;
+		if (port_id == FC_FID_DIR_SERV)
+			break;
+
+		if (!rport) {
+			printk(KERN_ALERT PFX "%x - rport not created Yet!!\n",
+				port_id);
+			break;
+		}
+		rp = rport->dd_data;
+		mutex_lock(&hba->hba_mutex);
+		/*
+		 * Perform session upload. Note that rdata->peers is already
+		 * removed from disc->rports list before we get this event.
+		 */
+		tgt = (struct bnx2fc_rport *)&rp[1];
+
+		if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
+			mutex_unlock(&hba->hba_mutex);
+			break;
+		}
+		clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+
+		bnx2fc_upload_session(port, tgt);
+		hba->num_ofld_sess--;
+		BNX2FC_TGT_DBG(tgt, "UPLOAD num_ofld_sess = %d\n",
+			hba->num_ofld_sess);
+		/*
+		 * Try to wake up the linkdown wait thread. If num_ofld_sess
+		 * is 0, the waiting thread wakes up.
+		 */
+		if ((hba->wait_for_link_down) &&
+		    (hba->num_ofld_sess == 0)) {
+			wake_up_interruptible(&hba->shutdown_wait);
+		}
+		if (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags)) {
+			printk(KERN_ERR PFX "Relogin to the tgt\n");
+			mutex_lock(&lport->disc.disc_mutex);
+			lport->tt.rport_login(rdata);
+			mutex_unlock(&lport->disc.disc_mutex);
+		}
+		mutex_unlock(&hba->hba_mutex);
+
+		break;
+
+	case RPORT_EV_NONE:
+		break;
+	}
+}
+
+/**
+ * bnx2fc_tgt_lookup() - Lookup a bnx2fc_rport by port_id
+ *
+ * @port:  fcoe_port struct to lookup the target port on
+ * @port_id: The remote port ID to look up
+ */
+struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
+					     u32 port_id)
+{
+	struct bnx2fc_hba *hba = port->priv;
+	struct bnx2fc_rport *tgt;
+	struct fc_rport_priv *rdata;
+	int i;
+
+	for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
+		tgt = hba->tgt_ofld_list[i];
+		if ((tgt) && (tgt->port == port)) {
+			rdata = tgt->rdata;
+			if (rdata->ids.port_id == port_id) {
+				if (rdata->rp_state != RPORT_ST_DELETE) {
+					BNX2FC_TGT_DBG(tgt, "rport "
+						"obtained\n");
+					return tgt;
+				} else {
+					printk(KERN_ERR PFX "rport 0x%x "
+						"is in DELETED state\n",
+						rdata->ids.port_id);
+					return NULL;
+				}
+			}
+		}
+	}
+	return NULL;
+}
+
+
+/**
+ * bnx2fc_alloc_conn_id - allocates FCOE Connection id
+ *
+ * @hba:	pointer to adapter structure
+ * @tgt:	pointer to bnx2fc_rport structure
+ */
+static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
+				struct bnx2fc_rport *tgt)
+{
+	u32 conn_id, next;
+
+	/* called with hba mutex held */
+
+	/*
+	 * tgt_ofld_list access is synchronized using
+	 * both hba mutex and hba lock. At least the hba mutex or
+	 * hba lock needs to be held for read access.
+	 */
+
+	spin_lock_bh(&hba->hba_lock);
+	next = hba->next_conn_id;
+	conn_id = hba->next_conn_id++;
+	if (hba->next_conn_id == BNX2FC_NUM_MAX_SESS)
+		hba->next_conn_id = 0;
+
+	while (hba->tgt_ofld_list[conn_id] != NULL) {
+		conn_id++;
+		if (conn_id == BNX2FC_NUM_MAX_SESS)
+			conn_id = 0;
+
+		if (conn_id == next) {
+			/* No free conn_ids are available */
+			spin_unlock_bh(&hba->hba_lock);
+			return -1;
+		}
+	}
+	hba->tgt_ofld_list[conn_id] = tgt;
+	tgt->fcoe_conn_id = conn_id;
+	spin_unlock_bh(&hba->hba_lock);
+	return conn_id;
+}
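The conn_id allocator is a circular scan with a moving hint: start at next_conn_id, wrap at BNX2FC_NUM_MAX_SESS, and give up once the scan returns to its starting point. A self-contained model of the same policy (MAX_IDS, table and alloc_id are hypothetical names; the locking is omitted):

	#define MAX_IDS 4
	static void *table[MAX_IDS];

	static int alloc_id(void *owner, int *hint)
	{
		int start = *hint;
		int id = start;

		do {
			if (!table[id]) {
				table[id] = owner;	/* claim the slot */
				*hint = (id + 1) % MAX_IDS;
				return id;
			}
			id = (id + 1) % MAX_IDS;
		} while (id != start);

		return -1;			/* every slot taken */
	}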
+
+static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id)
+{
+	/* called with hba mutex held */
+	spin_lock_bh(&hba->hba_lock);
+	hba->tgt_ofld_list[conn_id] = NULL;
+	hba->next_conn_id = conn_id;
+	spin_unlock_bh(&hba->hba_lock);
+}
+
+/**
+ * bnx2fc_alloc_session_resc - Allocate qp resources for the session
+ *
+ * @hba:	pointer to adapter structure
+ * @tgt:	pointer to bnx2fc_rport structure
+ */
+static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
+					struct bnx2fc_rport *tgt)
+{
+	dma_addr_t page;
+	int num_pages;
+	u32 *pbl;
+
+	/* Allocate and map SQ */
+	tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
+	tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+	tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+				     &tgt->sq_dma, GFP_KERNEL);
+	if (!tgt->sq) {
+		printk(KERN_ALERT PFX "unable to allocate SQ memory %d\n",
+			tgt->sq_mem_size);
+		goto mem_alloc_failure;
+	}
+	memset(tgt->sq, 0, tgt->sq_mem_size);
+
+	/* Allocate and map CQ */
+	tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
+	tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+	tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+				     &tgt->cq_dma, GFP_KERNEL);
+	if (!tgt->cq) {
+		printk(KERN_ALERT PFX "unable to allocate CQ memory %d\n",
+			tgt->cq_mem_size);
+		goto mem_alloc_failure;
+	}
+	memset(tgt->cq, 0, tgt->cq_mem_size);
+
+	/* Allocate and map RQ and RQ PBL */
+	tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
+	tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+	tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+					&tgt->rq_dma, GFP_KERNEL);
+	if (!tgt->rq) {
+		printk(KERN_ALERT PFX "unable to allocate RQ memory %d\n",
+			tgt->rq_mem_size);
+		goto mem_alloc_failure;
+	}
+	memset(tgt->rq, 0, tgt->rq_mem_size);
+
+	tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *);
+	tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+	tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+					 &tgt->rq_pbl_dma, GFP_KERNEL);
+	if (!tgt->rq_pbl) {
+		printk(KERN_ALERT PFX "unable to allocate RQ PBL %d\n",
+			tgt->rq_pbl_size);
+		goto mem_alloc_failure;
+	}
+
+	memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
+	num_pages = tgt->rq_mem_size / PAGE_SIZE;
+	page = tgt->rq_dma;
+	pbl = (u32 *)tgt->rq_pbl;
+
+	while (num_pages--) {
+		*pbl = (u32)page;
+		pbl++;
+		*pbl = (u32)((u64)page >> 32);
+		pbl++;
+		page += PAGE_SIZE;
+	}
+
+	/* Allocate and map XFERQ */
+	tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
+	tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) &
+			       PAGE_MASK;
+
+	tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
+					&tgt->xferq_dma, GFP_KERNEL);
+	if (!tgt->xferq) {
+		printk(KERN_ALERT PFX "unable to allocate XFERQ %d\n",
+			tgt->xferq_mem_size);
+		goto mem_alloc_failure;
+	}
+	memset(tgt->xferq, 0, tgt->xferq_mem_size);
+
+	/* Allocate and map CONFQ & CONFQ PBL */
+	tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
+	tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) &
+			       PAGE_MASK;
+
+	tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
+					&tgt->confq_dma, GFP_KERNEL);
+	if (!tgt->confq) {
+		printk(KERN_ALERT PFX "unable to allocate CONFQ %d\n",
+			tgt->confq_mem_size);
+		goto mem_alloc_failure;
+	}
+	memset(tgt->confq, 0, tgt->confq_mem_size);
+
+	tgt->confq_pbl_size =
+		(tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *);
+	tgt->confq_pbl_size =
+		(tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+	tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
+					    tgt->confq_pbl_size,
+					    &tgt->confq_pbl_dma, GFP_KERNEL);
+	if (!tgt->confq_pbl) {
+		printk(KERN_ALERT PFX "unable to allocate CONFQ PBL %d\n",
+			tgt->confq_pbl_size);
+		goto mem_alloc_failure;
+	}
+
+	memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
+	num_pages = tgt->confq_mem_size / PAGE_SIZE;
+	page = tgt->confq_dma;
+	pbl = (u32 *)tgt->confq_pbl;
+
+	while (num_pages--) {
+		*pbl = (u32)page;
+		pbl++;
+		*pbl = (u32)((u64)page >> 32);
+		pbl++;
+		page += PAGE_SIZE;
+	}
+
+	/* Allocate and map ConnDB */
+	tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
+
+	tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
+					  tgt->conn_db_mem_size,
+					  &tgt->conn_db_dma, GFP_KERNEL);
+	if (!tgt->conn_db) {
+		printk(KERN_ALERT PFX "unable to allocate conn_db %d\n",
+						tgt->conn_db_mem_size);
+		goto mem_alloc_failure;
+	}
+	memset(tgt->conn_db, 0, tgt->conn_db_mem_size);
+
+
+	/* Allocate and map LCQ */
+	tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
+	tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) &
+			     PAGE_MASK;
+
+	tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+				      &tgt->lcq_dma, GFP_KERNEL);
+
+	if (!tgt->lcq) {
+		printk(KERN_ALERT PFX "unable to allocate lcq %d\n",
+		       tgt->lcq_mem_size);
+		goto mem_alloc_failure;
+	}
+	memset(tgt->lcq, 0, tgt->lcq_mem_size);
+
+	/* Arm CQ */
+	tgt->conn_db->cq_arm.lo = -1;
+	tgt->conn_db->rq_prod = 0x8000;
+
+	return 0;
+
+mem_alloc_failure:
+	bnx2fc_free_session_resc(hba, tgt);
+	bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
+	return -ENOMEM;
+}
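Two idioms recur throughout the allocation path above: rounding a queue size up to a whole number of pages with (size + PAGE_SIZE - 1) & PAGE_MASK, and describing the resulting buffer to the chip as a page buffer list (PBL) of {low 32, high 32} address pairs, one per page. A compilable userspace sketch of both (PAGE_SIZE/PAGE_MASK and the pbl layout are simplified stand-ins for the driver's values):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* Round a byte count up to a whole number of pages. */
    static unsigned long page_align(unsigned long size)
    {
            return (size + PAGE_SIZE - 1) & PAGE_MASK;
    }

    /* Fill a PBL: one {lo32, hi32} pair per page of the DMA region,
     * exactly as the while (num_pages--) loops above do. */
    static void fill_pbl(uint32_t *pbl, uint64_t dma_base, unsigned long mem_size)
    {
            unsigned long num_pages = mem_size / PAGE_SIZE;
            uint64_t page = dma_base;

            while (num_pages--) {
                    *pbl++ = (uint32_t)page;         /* low 32 bits  */
                    *pbl++ = (uint32_t)(page >> 32); /* high 32 bits */
                    page += PAGE_SIZE;
            }
    }

    int main(void)
    {
            uint32_t pbl[8];
            unsigned long sz = page_align(5000);  /* -> 8192, two pages */

            fill_pbl(pbl, 0x123456789000ULL, sz);
            printf("%lu bytes, first entry hi=%#x lo=%#x\n",
                   sz, (unsigned)pbl[1], (unsigned)pbl[0]);
            return 0;
    }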
+
+/**
+ * bnx2fc_free_session_resc - free qp resources for the session
+ *
+ * @hba:	adapter structure pointer
+ * @tgt:	bnx2fc_rport structure pointer
+ *
+ * Free QP resources - SQ/RQ/CQ/XFERQ memory and PBL
+ */
+static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
+						struct bnx2fc_rport *tgt)
+{
+	BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");
+
+	if (tgt->ctx_base) {
+		iounmap(tgt->ctx_base);
+		tgt->ctx_base = NULL;
+	}
+	/* Free LCQ */
+	if (tgt->lcq) {
+		dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+				    tgt->lcq, tgt->lcq_dma);
+		tgt->lcq = NULL;
+	}
+	/* Free connDB */
+	if (tgt->conn_db) {
+		dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size,
+				    tgt->conn_db, tgt->conn_db_dma);
+		tgt->conn_db = NULL;
+	}
+	/* Free confq  and confq pbl */
+	if (tgt->confq_pbl) {
+		dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size,
+				    tgt->confq_pbl, tgt->confq_pbl_dma);
+		tgt->confq_pbl = NULL;
+	}
+	if (tgt->confq) {
+		dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
+				    tgt->confq, tgt->confq_dma);
+		tgt->confq = NULL;
+	}
+	/* Free XFERQ */
+	if (tgt->xferq) {
+		dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
+				    tgt->xferq, tgt->xferq_dma);
+		tgt->xferq = NULL;
+	}
+	/* Free RQ PBL and RQ */
+	if (tgt->rq_pbl) {
+		dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+				    tgt->rq_pbl, tgt->rq_pbl_dma);
+		tgt->rq_pbl = NULL;
+	}
+	if (tgt->rq) {
+		dma_free_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+				    tgt->rq, tgt->rq_dma);
+		tgt->rq = NULL;
+	}
+	/* Free CQ */
+	if (tgt->cq) {
+		dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+				    tgt->cq, tgt->cq_dma);
+		tgt->cq = NULL;
+	}
+	/* Free SQ */
+	if (tgt->sq) {
+		dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+				    tgt->sq, tgt->sq_dma);
+		tgt->sq = NULL;
+	}
+}
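bnx2fc_free_session_resc() is written so it can double as the error-unwind path of the allocator: every free is guarded by a NULL check and the pointer is cleared afterwards, so the mem_alloc_failure label above can call it after a partial allocation and only what was actually set up gets freed. The same idiom in miniature (a hedged userspace sketch; free(NULL) is already a no-op, unlike dma_free_coherent()):

    #include <stdlib.h>

    struct session {
            void *sq, *cq, *rq;     /* queue buffers, any may be NULL */
    };

    /* Safe to call at any point of setup: frees only what exists and
     * leaves the struct reusable, like bnx2fc_free_session_resc(). */
    static void free_session(struct session *s)
    {
            free(s->sq); s->sq = NULL;
            free(s->cq); s->cq = NULL;
            free(s->rq); s->rq = NULL;
    }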

+ 3 - 1
drivers/scsi/bnx2i/bnx2i.h

@@ -360,7 +360,7 @@ struct bnx2i_hba {
 		#define ADAPTER_STATE_LINK_DOWN		2
 		#define ADAPTER_STATE_INIT_FAILED	31
 	unsigned int mtu_supported;
-		#define BNX2I_MAX_MTU_SUPPORTED		1500
+		#define BNX2I_MAX_MTU_SUPPORTED		9000
 
 	struct Scsi_Host *shost;
 
@@ -751,6 +751,8 @@ extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn,
 				  struct iscsi_task *mtask);
 extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
 				  struct iscsi_task *mtask);
+extern int bnx2i_send_iscsi_text(struct bnx2i_conn *conn,
+				 struct iscsi_task *mtask);
 extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
 				    struct bnx2i_cmd *cmnd);
 extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,

+ 122 - 3
drivers/scsi/bnx2i/bnx2i_hwi.c

@@ -444,6 +444,56 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
 	return 0;
 }
 
+/**
+ * bnx2i_send_iscsi_text - post iSCSI text WQE to hardware
+ * @bnx2i_conn:	iscsi connection
+ * @mtask:	driver command structure which is requesting
+ *		a WQE to be sent to the chip for further processing
+ *
+ * prepare and post an iSCSI Text request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_text(struct bnx2i_conn *bnx2i_conn,
+			  struct iscsi_task *mtask)
+{
+	struct bnx2i_cmd *bnx2i_cmd;
+	struct bnx2i_text_request *text_wqe;
+	struct iscsi_text *text_hdr;
+	u32 dword;
+
+	bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
+	text_hdr = (struct iscsi_text *)mtask->hdr;
+	text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe;
+
+	memset(text_wqe, 0, sizeof(struct bnx2i_text_request));
+
+	text_wqe->op_code = text_hdr->opcode;
+	text_wqe->op_attr = text_hdr->flags;
+	text_wqe->data_length = ntoh24(text_hdr->dlength);
+	text_wqe->itt = mtask->itt |
+		(ISCSI_TASK_TYPE_MPATH << ISCSI_TEXT_REQUEST_TYPE_SHIFT);
+	text_wqe->ttt = be32_to_cpu(text_hdr->ttt);
+
+	text_wqe->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
+
+	text_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
+	text_wqe->resp_bd_list_addr_hi =
+			(u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);
+
+	dword = ((1 << ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT) |
+		 (bnx2i_conn->gen_pdu.resp_buf_size <<
+		  ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
+	text_wqe->resp_buffer = dword;
+	text_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
+	text_wqe->bd_list_addr_hi =
+			(u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
+	text_wqe->num_bds = 1;
+	text_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+	return 0;
+}
+
+
 /**
  * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
  * @conn:	iscsi connection
@@ -490,15 +540,18 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
 	bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
 	nopout_hdr = (struct iscsi_nopout *)task->hdr;
 	nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
+
+	memset(nopout_wqe, 0x00, sizeof(struct bnx2i_nop_out_request));
+
 	nopout_wqe->op_code = nopout_hdr->opcode;
 	nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
 	memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);
 
 	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
-		u32 tmp = nopout_hdr->lun[0];
+		u32 tmp = nopout_wqe->lun[0];
 		/* 57710 requires LUN field to be swapped */
-		nopout_hdr->lun[0] = nopout_hdr->lun[1];
-		nopout_hdr->lun[1] = tmp;
+		nopout_wqe->lun[0] = nopout_wqe->lun[1];
+		nopout_wqe->lun[1] = tmp;
 	}
 
 	nopout_wqe->itt = ((u16)task->itt |
@@ -1425,6 +1478,68 @@ done:
 	return 0;
 }
 
+
+/**
+ * bnx2i_process_text_resp - this function handles iscsi text response
+ * @session:	iscsi session pointer
+ * @bnx2i_conn:	iscsi connection pointer
+ * @cqe:	pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI Text Response CQE & complete it to open-iscsi user daemon
+ */
+static int bnx2i_process_text_resp(struct iscsi_session *session,
+				   struct bnx2i_conn *bnx2i_conn,
+				   struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_task *task;
+	struct bnx2i_text_response *text;
+	struct iscsi_text_rsp *resp_hdr;
+	int pld_len;
+	int pad_len;
+
+	text = (struct bnx2i_text_response *) cqe;
+	spin_lock(&session->lock);
+	task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX);
+	if (!task)
+		goto done;
+
+	resp_hdr = (struct iscsi_text_rsp *)&bnx2i_conn->gen_pdu.resp_hdr;
+	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+	resp_hdr->opcode = text->op_code;
+	resp_hdr->flags = text->response_flags;
+	resp_hdr->hlength = 0;
+
+	hton24(resp_hdr->dlength, text->data_length);
+	resp_hdr->itt = task->hdr->itt;
+	resp_hdr->ttt = cpu_to_be32(text->ttt);
+	resp_hdr->statsn = task->hdr->exp_statsn;
+	resp_hdr->exp_cmdsn = cpu_to_be32(text->exp_cmd_sn);
+	resp_hdr->max_cmdsn = cpu_to_be32(text->max_cmd_sn);
+	pld_len = text->data_length;
+	bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf +
+					  pld_len;
+	pad_len = 0;
+	if (pld_len & 0x3)
+		pad_len = 4 - (pld_len % 4);
+
+	if (pad_len) {
+		int i = 0;
+		for (i = 0; i < pad_len; i++) {
+			bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
+			bnx2i_conn->gen_pdu.resp_wr_ptr++;
+		}
+	}
+	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
+			     bnx2i_conn->gen_pdu.resp_buf,
+			     bnx2i_conn->gen_pdu.resp_wr_ptr -
+			     bnx2i_conn->gen_pdu.resp_buf);
+done:
+	spin_unlock(&session->lock);
+	return 0;
+}
+
+
 /**
  * bnx2i_process_tmf_resp - this function handles iscsi TMF response
  * @session:		iscsi session pointer
@@ -1766,6 +1881,10 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
 			bnx2i_process_tmf_resp(session, bnx2i_conn,
 					       qp->cq_cons_qe);
 			break;
+		case ISCSI_OP_TEXT_RSP:
+			bnx2i_process_text_resp(session, bnx2i_conn,
+						qp->cq_cons_qe);
+			break;
 		case ISCSI_OP_LOGOUT_RSP:
 			bnx2i_process_logout_resp(session, bnx2i_conn,
 						  qp->cq_cons_qe);
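bnx2i_process_text_resp() above pads the received payload out to the 4-byte boundary that iSCSI PDU segments are aligned to, zeroing one byte at a time. The pad computation is the standard one; a small self-contained version (the buffer name is illustrative, and the caller must leave room for up to 3 pad bytes):

    #include <stdint.h>
    #include <string.h>

    /* Zero-pad the payload in buf up to the next 4-byte boundary and
     * return the padded length, as the text-response handler above
     * does before completing the PDU to the iSCSI layer. */
    static size_t pad_pdu(uint8_t *buf, size_t len)
    {
            size_t pad = (4 - (len & 3)) & 3;  /* 0..3 pad bytes */

            memset(buf + len, 0, pad);
            return len + pad;
    }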

+ 23 - 6
drivers/scsi/bnx2i/bnx2i_init.c

@@ -18,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
 static u32 adapter_count;
 
 #define DRV_MODULE_NAME		"bnx2i"
-#define DRV_MODULE_VERSION	"2.6.2.2"
-#define DRV_MODULE_RELDATE	"Nov 23, 2010"
+#define DRV_MODULE_VERSION	"2.6.2.3"
+#define DRV_MODULE_RELDATE	"Dec 31, 2010"
 
 static char version[] __devinitdata =
 		"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -29,7 +29,7 @@ static char version[] __devinitdata =
 MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com> and "
 	      "Eddie Wai <eddie.wai@broadcom.com>");
 
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711"
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711/57712"
 		   " iSCSI Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
@@ -88,9 +88,11 @@ void bnx2i_identify_device(struct bnx2i_hba *hba)
 	    (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
 		set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
 		hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
-	} else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
-		   hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
-		   hba->pci_did == PCI_DEVICE_ID_NX2_57711E)
+	} else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710  ||
+		   hba->pci_did == PCI_DEVICE_ID_NX2_57711  ||
+		   hba->pci_did == PCI_DEVICE_ID_NX2_57711E ||
+		   hba->pci_did == PCI_DEVICE_ID_NX2_57712  ||
+		   hba->pci_did == PCI_DEVICE_ID_NX2_57712E)
 		set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
 	else
 		printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n",
@@ -161,6 +163,21 @@ void bnx2i_start(void *handle)
 	struct bnx2i_hba *hba = handle;
 	int i = HZ;
 
+	if (!hba->cnic->max_iscsi_conn) {
+		printk(KERN_ALERT "bnx2i: dev %s does not support "
+			"iSCSI\n", hba->netdev->name);
+
+		if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+			mutex_lock(&bnx2i_dev_lock);
+			list_del_init(&hba->link);
+			adapter_count--;
+			hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+			clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+			mutex_unlock(&bnx2i_dev_lock);
+			bnx2i_free_hba(hba);
+		}
+		return;
+	}
 	bnx2i_send_fw_iscsi_init_msg(hba);
 	while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
 		msleep(BNX2I_INIT_POLL_TIME);
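bnx2i_start() waits for the adapter-up bit with a bounded polling loop: a fixed budget of tries (i = HZ here), a sleep between checks, and no hang if the firmware never answers. The shape of that loop, extracted as a hedged userspace sketch (the ready() callback stands in for test_bit() on the adapter state):

    #include <stdbool.h>
    #include <unistd.h>

    #define POLL_INTERVAL_MS 100

    /* Poll ready() up to max_tries times, sleeping between checks.
     * Returns true if the condition came up before the budget ran out. */
    static bool wait_for_ready(bool (*ready)(void), int max_tries)
    {
            while (!ready() && max_tries--)
                    usleep(POLL_INTERVAL_MS * 1000);
            return ready();
    }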

+ 28 - 25
drivers/scsi/bnx2i/bnx2i_iscsi.c

@@ -1092,6 +1092,9 @@ static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
 	case ISCSI_OP_SCSI_TMFUNC:
 		rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
 		break;
+	case ISCSI_OP_TEXT:
+		rc = bnx2i_send_iscsi_text(bnx2i_conn, task);
+		break;
 	default:
 		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
 				  "send_gen: unsupported op 0x%x\n",
@@ -1455,42 +1458,40 @@ static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
 
 
 /**
- * bnx2i_conn_get_param - return iscsi connection parameter to caller
- * @cls_conn:	pointer to iscsi cls conn
+ * bnx2i_ep_get_param - return iscsi ep parameter to caller
+ * @ep:		pointer to iscsi endpoint
  * @param:	parameter type identifier
  * @buf: 	buffer pointer
  *
- * returns iSCSI connection parameters
+ * returns iSCSI ep parameters
 */
-static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
-				enum iscsi_param param, char *buf)
+static int bnx2i_ep_get_param(struct iscsi_endpoint *ep,
+			      enum iscsi_param param, char *buf)
 {
-	struct iscsi_conn *conn = cls_conn->dd_data;
-	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
-	int len = 0;
+	struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
+	struct bnx2i_hba *hba = bnx2i_ep->hba;
+	int len = -ENOTCONN;
 
-	if (!(bnx2i_conn && bnx2i_conn->ep && bnx2i_conn->ep->hba))
-		goto out;
+	if (!hba)
+		return -ENOTCONN;
 
 	switch (param) {
 	case ISCSI_PARAM_CONN_PORT:
-		mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock);
-		if (bnx2i_conn->ep->cm_sk)
-			len = sprintf(buf, "%hu\n",
-				      bnx2i_conn->ep->cm_sk->dst_port);
-		mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock);
+		mutex_lock(&hba->net_dev_lock);
+		if (bnx2i_ep->cm_sk)
+			len = sprintf(buf, "%hu\n", bnx2i_ep->cm_sk->dst_port);
+		mutex_unlock(&hba->net_dev_lock);
 		break;
 	case ISCSI_PARAM_CONN_ADDRESS:
-		mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock);
-		if (bnx2i_conn->ep->cm_sk)
-			len = sprintf(buf, "%pI4\n",
-				      &bnx2i_conn->ep->cm_sk->dst_ip);
-		mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock);
+		mutex_lock(&hba->net_dev_lock);
+		if (bnx2i_ep->cm_sk)
+			len = sprintf(buf, "%pI4\n", &bnx2i_ep->cm_sk->dst_ip);
+		mutex_unlock(&hba->net_dev_lock);
 		break;
 	default:
-		return iscsi_conn_get_param(cls_conn, param, buf);
+		return -ENOSYS;
 	}
-out:
+
 	return len;
 }
 
@@ -1935,13 +1936,13 @@ static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
 		cnic_dev_10g = 1;
 
 	switch (bnx2i_ep->state) {
-	case EP_STATE_CONNECT_FAILED:
 	case EP_STATE_CLEANUP_FAILED:
 	case EP_STATE_OFLD_FAILED:
 	case EP_STATE_DISCONN_TIMEDOUT:
 		ret = 0;
 		break;
 	case EP_STATE_CONNECT_START:
+	case EP_STATE_CONNECT_FAILED:
 	case EP_STATE_CONNECT_COMPL:
 	case EP_STATE_ULP_UPDATE_START:
 	case EP_STATE_ULP_UPDATE_COMPL:
@@ -2167,7 +2168,8 @@ struct iscsi_transport bnx2i_iscsi_transport = {
 	.name			= "bnx2i",
 	.caps			= CAP_RECOVERY_L0 | CAP_HDRDGST |
 				  CAP_MULTI_R2T | CAP_DATADGST |
-				  CAP_DATA_PATH_OFFLOAD,
+				  CAP_DATA_PATH_OFFLOAD |
+				  CAP_TEXT_NEGO,
 	.param_mask		= ISCSI_MAX_RECV_DLENGTH |
 				  ISCSI_MAX_XMIT_DLENGTH |
 				  ISCSI_HDRDGST_EN |
@@ -2200,7 +2202,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
 	.bind_conn		= bnx2i_conn_bind,
 	.destroy_conn		= bnx2i_conn_destroy,
 	.set_param		= iscsi_set_param,
-	.get_conn_param		= bnx2i_conn_get_param,
+	.get_conn_param		= iscsi_conn_get_param,
 	.get_session_param	= iscsi_session_get_param,
 	.get_host_param		= bnx2i_host_get_param,
 	.start_conn		= bnx2i_conn_start,
@@ -2209,6 +2211,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
 	.xmit_task		= bnx2i_task_xmit,
 	.get_stats		= bnx2i_conn_get_stats,
 	/* TCP connect - disconnect - option-2 interface calls */
+	.get_ep_param		= bnx2i_ep_get_param,
 	.ep_connect		= bnx2i_ep_connect,
 	.ep_poll		= bnx2i_ep_poll,
 	.ep_disconnect		= bnx2i_ep_disconnect,
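The new bnx2i_ep_get_param() formats the peer port with "%hu" and the peer address with the kernel's "%pI4" specifier, under the net_dev_lock. A userspace rendering of the same output would look roughly like this (illustrative only; whether dst_port needs the ntohs() conversion depends on how the connection layer stores it):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Render an IPv4 address and port the way the ep_get_param
     * callbacks above do with "%pI4\n" and "%hu\n". */
    static int format_conn_param(char *buf, size_t buflen,
                                 uint32_t dst_ip_be, uint16_t dst_port_be)
    {
            char addr[INET_ADDRSTRLEN];

            inet_ntop(AF_INET, &dst_ip_be, addr, sizeof(addr));
            return snprintf(buf, buflen, "%s:%hu\n", addr, ntohs(dst_port_be));
    }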

+ 10 - 46
drivers/scsi/cxgbi/cxgb3i/cxgb3i.c

@@ -105,7 +105,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
 	/* owner and name should be set already */
 	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
 				| CAP_DATADGST | CAP_DIGEST_OFFLOAD |
-				CAP_PADDING_OFFLOAD,
+				CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
 	.param_mask	= ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH |
 				ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
 				ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
@@ -137,7 +137,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
 	.destroy_conn	= iscsi_tcp_conn_teardown,
 	.start_conn	= iscsi_conn_start,
 	.stop_conn	= iscsi_conn_stop,
-	.get_conn_param	= cxgbi_get_conn_param,
+	.get_conn_param	= iscsi_conn_get_param,
 	.set_param	= cxgbi_set_conn_param,
 	.get_stats	= cxgbi_get_conn_stats,
 	/* pdu xmit req from user space */
@@ -152,6 +152,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
 	.xmit_pdu	= cxgbi_conn_xmit_pdu,
 	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
 	/* TCP connect/disconnect */
+	.get_ep_param	= cxgbi_get_ep_param,
 	.ep_connect	= cxgbi_ep_connect,
 	.ep_poll	= cxgbi_ep_poll,
 	.ep_disconnect	= cxgbi_ep_disconnect,
@@ -1108,10 +1109,11 @@ static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
 		csk, idx, npods, gl);
 
 	for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
-		struct sk_buff *skb = ddp->gl_skb[idx];
+		struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
+						PPOD_SIZE, 0, GFP_ATOMIC);
 
-		/* hold on to the skb until we clear the ddp mapping */
-		skb_get(skb);
+		if (!skb)
+			return -ENOMEM;
 
 		ulp_mem_io_set_hdr(skb, pm_addr);
 		cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head +
@@ -1136,56 +1138,20 @@ static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
 		cdev, idx, npods, tag);
 
 	for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
-		struct sk_buff *skb = ddp->gl_skb[idx];
+		struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
+						PPOD_SIZE, 0, GFP_ATOMIC);
 
 		if (!skb) {
-			pr_err("tag 0x%x, 0x%x, %d/%u, skb NULL.\n",
+			pr_err("tag 0x%x, 0x%x, %d/%u, skb OOM.\n",
 				tag, idx, i, npods);
 			continue;
 		}
-		ddp->gl_skb[idx] = NULL;
-		memset(skb->head + sizeof(struct ulp_mem_io), 0, PPOD_SIZE);
 		ulp_mem_io_set_hdr(skb, pm_addr);
 		skb->priority = CPL_PRIORITY_CONTROL;
 		cxgb3_ofld_send(cdev->lldev, skb);
 	}
 }
 
-static void ddp_free_gl_skb(struct cxgbi_ddp_info *ddp, int idx, int cnt)
-{
-	int i;
-
-	log_debug(1 << CXGBI_DBG_DDP,
-		"ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt);
-
-	for (i = 0; i < cnt; i++, idx++)
-		if (ddp->gl_skb[idx]) {
-			kfree_skb(ddp->gl_skb[idx]);
-			ddp->gl_skb[idx] = NULL;
-		}
-}
-
-static int ddp_alloc_gl_skb(struct cxgbi_ddp_info *ddp, int idx,
-				   int cnt, gfp_t gfp)
-{
-	int i;
-
-	log_debug(1 << CXGBI_DBG_DDP,
-		"ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt);
-
-	for (i = 0; i < cnt; i++) {
-		struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
-						PPOD_SIZE, 0, gfp);
-		if (skb)
-			ddp->gl_skb[idx + i] = skb;
-		else {
-			ddp_free_gl_skb(ddp, idx, i);
-			return -ENOMEM;
-		}
-	}
-	return 0;
-}
-
 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
 				       unsigned int tid, int pg_idx, bool reply)
 {
@@ -1316,8 +1282,6 @@ static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
 	}
 	tdev->ulp_iscsi = ddp;
 
-	cdev->csk_ddp_free_gl_skb = ddp_free_gl_skb;
-	cdev->csk_ddp_alloc_gl_skb = ddp_alloc_gl_skb;
 	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
 	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
 	cdev->csk_ddp_set = ddp_set_map;

+ 15 - 4
drivers/scsi/cxgbi/cxgb3i/cxgb3i.h

@@ -24,10 +24,21 @@
 
 extern cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
 
-#define cxgb3i_get_private_ipv4addr(ndev) \
-	(((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr)
-#define cxgb3i_set_private_ipv4addr(ndev, addr) \
-	(((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr
+static inline unsigned int cxgb3i_get_private_ipv4addr(struct net_device *ndev)
+{
+	return ((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr;
+}
+
+static inline void cxgb3i_set_private_ipv4addr(struct net_device *ndev,
+						unsigned int addr)
+{
+	struct port_info *pi = (struct port_info *)netdev_priv(ndev);
+
+	pi->iscsic.flags = addr ? 1 : 0;
+	pi->iscsi_ipv4addr = addr;
+	if (addr)
+		memcpy(pi->iscsic.mac_addr, ndev->dev_addr, ETH_ALEN);
+}
 
 struct cpl_iscsi_hdr_norss {
 	union opcode_tid ot;
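The cxgb3i.h hunk above replaces address-accessor macros with static inline functions, which gives the compiler a real prototype to type-check and lets the setter safely grow side effects (the flag and MAC-address updates). The trade-off in miniature (types and values here are illustrative, not the real port_info):

    #include <stdio.h>

    struct port_info { unsigned int iscsi_ipv4addr; };

    /* Macro version: no type checking on the argument. */
    #define get_ipv4addr_macro(pi) ((pi)->iscsi_ipv4addr)

    /* Inline version: pi must really be a struct port_info *, and the
     * body can later grow extra statements without macro pitfalls. */
    static inline unsigned int get_ipv4addr(struct port_info *pi)
    {
            return pi->iscsi_ipv4addr;
    }

    int main(void)
    {
            struct port_info pi = { 0x0100007f };  /* 127.0.0.1, little endian */

            printf("%#x %#x\n", get_ipv4addr_macro(&pi), get_ipv4addr(&pi));
            return 0;
    }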

+ 3 - 4
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c

@@ -106,7 +106,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
 	.name		= DRV_MODULE_NAME,
 	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
 				CAP_DATADGST | CAP_DIGEST_OFFLOAD |
-				CAP_PADDING_OFFLOAD,
+				CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
 	.param_mask	= ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH |
 				ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
 				ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
@@ -138,7 +138,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
 	.destroy_conn	= iscsi_tcp_conn_teardown,
 	.start_conn		= iscsi_conn_start,
 	.stop_conn		= iscsi_conn_stop,
-	.get_conn_param	= cxgbi_get_conn_param,
+	.get_conn_param	= iscsi_conn_get_param,
 	.set_param	= cxgbi_set_conn_param,
 	.get_stats	= cxgbi_get_conn_stats,
 	/* pdu xmit req from user space */
@@ -153,6 +153,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
 	.xmit_pdu	= cxgbi_conn_xmit_pdu,
 	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
 	/* TCP connect/disconnect */
+	.get_ep_param	= cxgbi_get_ep_param,
 	.ep_connect	= cxgbi_ep_connect,
 	.ep_poll	= cxgbi_ep_poll,
 	.ep_disconnect	= cxgbi_ep_disconnect,
@@ -1425,8 +1426,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
 	cxgbi_ddp_page_size_factor(pgsz_factor);
 	cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);
 
-	cdev->csk_ddp_free_gl_skb = NULL;
-	cdev->csk_ddp_alloc_gl_skb = NULL;
 	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
 	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
 	cdev->csk_ddp_set = ddp_set_map;

+ 27 - 39
drivers/scsi/cxgbi/libcxgbi.c

@@ -530,6 +530,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
 	csk->dst = dst;
 	csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
 	csk->daddr.sin_port = daddr->sin_port;
+	csk->daddr.sin_family = daddr->sin_family;
 	csk->saddr.sin_addr.s_addr = rt->rt_src;
 
 	return csk;
@@ -1264,12 +1265,6 @@ static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
 		return idx;
 	}
 
-	if (cdev->csk_ddp_alloc_gl_skb) {
-		err = cdev->csk_ddp_alloc_gl_skb(ddp, idx, npods, gfp);
-		if (err < 0)
-			goto unmark_entries;
-	}
-
 	tag = cxgbi_ddp_tag_base(tformat, sw_tag);
 	tag |= idx << PPOD_IDX_SHIFT;
 
@@ -1280,11 +1275,8 @@ static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
 	hdr.page_offset = htonl(gl->offset);
 
 	err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl);
-	if (err < 0) {
-		if (cdev->csk_ddp_free_gl_skb)
-			cdev->csk_ddp_free_gl_skb(ddp, idx, npods);
+	if (err < 0)
 		goto unmark_entries;
-	}
 
 	ddp->idx_last = idx;
 	log_debug(1 << CXGBI_DBG_DDP,
@@ -1350,8 +1342,6 @@ static void ddp_destroy(struct kref *kref)
 					>> PPOD_PAGES_SHIFT;
 			pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods);
 			kfree(gl);
-			if (cdev->csk_ddp_free_gl_skb)
-				cdev->csk_ddp_free_gl_skb(ddp, i, npods);
 			i += npods;
 		} else
 			i++;
@@ -1394,8 +1384,6 @@ int cxgbi_ddp_init(struct cxgbi_device *cdev,
 		return -ENOMEM;
 	}
 	ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1);
-	ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
-				ppmax * sizeof(struct cxgbi_gather_list *));
 	cdev->ddp = ddp;
 
 	spin_lock_init(&ddp->map_lock);
@@ -1895,13 +1883,16 @@ EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);
 
 static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
 {
-	u8 submode = 0;
+	if (hcrc || dcrc) {
+		u8 submode = 0;
 
-	if (hcrc)
-		submode |= 1;
-	if (dcrc)
-		submode |= 2;
-	cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
+		if (hcrc)
+			submode |= 1;
+		if (dcrc)
+			submode |= 2;
+		cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
+	} else
+		cxgbi_skcb_ulp_mode(skb) = 0;
 }
 
 int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
@@ -2197,32 +2188,34 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
 }
 EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
 
-int cxgbi_get_conn_param(struct iscsi_cls_conn *cls_conn,
-			enum iscsi_param param, char *buf)
+int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
+		       char *buf)
 {
-	struct iscsi_conn *iconn = cls_conn->dd_data;
+	struct cxgbi_endpoint *cep = ep->dd_data;
+	struct cxgbi_sock *csk;
 	int len;
 
 	log_debug(1 << CXGBI_DBG_ISCSI,
-		"cls_conn 0x%p, param %d.\n", cls_conn, param);
+		"cls_conn 0x%p, param %d.\n", ep, param);
 
 	switch (param) {
 	case ISCSI_PARAM_CONN_PORT:
-		spin_lock_bh(&iconn->session->lock);
-		len = sprintf(buf, "%hu\n", iconn->portal_port);
-		spin_unlock_bh(&iconn->session->lock);
-		break;
 	case ISCSI_PARAM_CONN_ADDRESS:
-		spin_lock_bh(&iconn->session->lock);
-		len = sprintf(buf, "%s\n", iconn->portal_address);
-		spin_unlock_bh(&iconn->session->lock);
-		break;
+		if (!cep)
+			return -ENOTCONN;
+
+		csk = cep->csk;
+		if (!csk)
+			return -ENOTCONN;
+
+		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+						 &csk->daddr, param, buf);
 	default:
-		return iscsi_conn_get_param(cls_conn, param, buf);
+		return -ENOSYS;
 	}
 	return len;
 }
-EXPORT_SYMBOL_GPL(cxgbi_get_conn_param);
+EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);
 
 struct iscsi_cls_conn *
 cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
@@ -2289,11 +2282,6 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
 	cxgbi_conn_max_xmit_dlength(conn);
 	cxgbi_conn_max_recv_dlength(conn);
 
-	spin_lock_bh(&conn->session->lock);
-	sprintf(conn->portal_address, "%pI4", &csk->daddr.sin_addr.s_addr);
-	conn->portal_port = ntohs(csk->daddr.sin_port);
-	spin_unlock_bh(&conn->session->lock);
-
 	log_debug(1 << CXGBI_DBG_ISCSI,
 		"cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
 		cls_session, cls_conn, ep, cconn, csk);
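tx_skb_setmode() above packs the ULP mode into the high nibble and the CRC "submode" bits into the low nibble of one control byte; the rework simply stops tagging skbs that need neither digest. The packing itself, self-contained (the ULP2_MODE_ISCSI value of 2 is assumed here for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define ULP2_MODE_ISCSI 2   /* assumed value, for illustration */

    /* Header CRC -> bit 0, data CRC -> bit 1, mode in the high nibble;
     * zero when no digest offload is needed, as in the hunk above. */
    static uint8_t pack_ulp_mode(int hcrc, int dcrc)
    {
            uint8_t submode = 0;

            if (hcrc)
                    submode |= 1;
            if (dcrc)
                    submode |= 2;
            return submode ? (uint8_t)((ULP2_MODE_ISCSI << 4) | submode) : 0;
    }

    int main(void)
    {
            printf("%#x %#x %#x\n", pack_ulp_mode(1, 1), pack_ulp_mode(1, 0),
                   pack_ulp_mode(0, 0));  /* 0x23 0x21 0 */
            return 0;
    }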

+ 1 - 4
drivers/scsi/cxgbi/libcxgbi.h

@@ -131,7 +131,6 @@ struct cxgbi_ddp_info {
 	unsigned int rsvd_tag_mask;
 	spinlock_t map_lock;
 	struct cxgbi_gather_list **gl_map;
-	struct sk_buff **gl_skb;
 };
 
 #define DDP_PGIDX_MAX		4
@@ -536,8 +535,6 @@ struct cxgbi_device {
 	struct cxgbi_ddp_info *ddp;
 
 	void (*dev_ddp_cleanup)(struct cxgbi_device *);
-	void (*csk_ddp_free_gl_skb)(struct cxgbi_ddp_info *, int, int);
-	int (*csk_ddp_alloc_gl_skb)(struct cxgbi_ddp_info *, int, int, gfp_t);
 	int (*csk_ddp_set)(struct cxgbi_sock *, struct cxgbi_pagepod_hdr *,
 				unsigned int, unsigned int,
 				struct cxgbi_gather_list *);
@@ -715,7 +712,7 @@ void cxgbi_cleanup_task(struct iscsi_task *task);
 void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
 int cxgbi_set_conn_param(struct iscsi_cls_conn *,
 			enum iscsi_param, char *, int);
-int cxgbi_get_conn_param(struct iscsi_cls_conn *, enum iscsi_param, char *);
+int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param, char *);
 struct iscsi_cls_conn *cxgbi_create_conn(struct iscsi_cls_session *, u32);
 int cxgbi_bind_conn(struct iscsi_cls_session *,
 			struct iscsi_cls_conn *, u64, int);

+ 37 - 75
drivers/scsi/device_handler/scsi_dh.c

@@ -25,16 +25,9 @@
 #include <scsi/scsi_dh.h>
 #include "../scsi_priv.h"
 
-struct scsi_dh_devinfo_list {
-	struct list_head node;
-	char vendor[9];
-	char model[17];
-	struct scsi_device_handler *handler;
-};
-
 static DEFINE_SPINLOCK(list_lock);
 static LIST_HEAD(scsi_dh_list);
-static LIST_HEAD(scsi_dh_dev_list);
+static int scsi_dh_list_idx = 1;
 
 static struct scsi_device_handler *get_device_handler(const char *name)
 {
@@ -51,40 +44,18 @@ static struct scsi_device_handler *get_device_handler(const char *name)
 	return found;
 }
 
-
-static struct scsi_device_handler *
-scsi_dh_cache_lookup(struct scsi_device *sdev)
+static struct scsi_device_handler *get_device_handler_by_idx(int idx)
 {
-	struct scsi_dh_devinfo_list *tmp;
-	struct scsi_device_handler *found_dh = NULL;
+	struct scsi_device_handler *tmp, *found = NULL;
 
 	spin_lock(&list_lock);
-	list_for_each_entry(tmp, &scsi_dh_dev_list, node) {
-		if (!strncmp(sdev->vendor, tmp->vendor, strlen(tmp->vendor)) &&
-		    !strncmp(sdev->model, tmp->model, strlen(tmp->model))) {
-			found_dh = tmp->handler;
+	list_for_each_entry(tmp, &scsi_dh_list, list) {
+		if (tmp->idx == idx) {
+			found = tmp;
 			break;
 		}
 	}
 	spin_unlock(&list_lock);
-
-	return found_dh;
-}
-
-static int scsi_dh_handler_lookup(struct scsi_device_handler *scsi_dh,
-				  struct scsi_device *sdev)
-{
-	int i, found = 0;
-
-	for(i = 0; scsi_dh->devlist[i].vendor; i++) {
-		if (!strncmp(sdev->vendor, scsi_dh->devlist[i].vendor,
-			     strlen(scsi_dh->devlist[i].vendor)) &&
-		    !strncmp(sdev->model, scsi_dh->devlist[i].model,
-			     strlen(scsi_dh->devlist[i].model))) {
-			found = 1;
-			break;
-		}
-	}
 	return found;
 }
 
@@ -102,41 +73,14 @@ device_handler_match(struct scsi_device_handler *scsi_dh,
 		     struct scsi_device *sdev)
 {
 	struct scsi_device_handler *found_dh = NULL;
-	struct scsi_dh_devinfo_list *tmp;
+	int idx;
 
-	found_dh = scsi_dh_cache_lookup(sdev);
-	if (found_dh)
-		return found_dh;
+	idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model,
+					  SCSI_DEVINFO_DH);
+	found_dh = get_device_handler_by_idx(idx);
 
-	if (scsi_dh) {
-		if (scsi_dh_handler_lookup(scsi_dh, sdev))
-			found_dh = scsi_dh;
-	} else {
-		struct scsi_device_handler *tmp_dh;
-
-		spin_lock(&list_lock);
-		list_for_each_entry(tmp_dh, &scsi_dh_list, list) {
-			if (scsi_dh_handler_lookup(tmp_dh, sdev))
-				found_dh = tmp_dh;
-		}
-		spin_unlock(&list_lock);
-	}
-
-	if (found_dh) { /* If device is found, add it to the cache */
-		tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
-		if (tmp) {
-			strncpy(tmp->vendor, sdev->vendor, 8);
-			strncpy(tmp->model, sdev->model, 16);
-			tmp->vendor[8] = '\0';
-			tmp->model[16] = '\0';
-			tmp->handler = found_dh;
-			spin_lock(&list_lock);
-			list_add(&tmp->node, &scsi_dh_dev_list);
-			spin_unlock(&list_lock);
-		} else {
-			found_dh = NULL;
-		}
-	}
+	if (scsi_dh && found_dh != scsi_dh)
+		found_dh = NULL;
 
 	return found_dh;
 }
@@ -373,12 +317,25 @@ static int scsi_dh_notifier_remove(struct device *dev, void *data)
  */
 int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
 {
+	int i;
+
 	if (get_device_handler(scsi_dh->name))
 		return -EBUSY;
 
 	spin_lock(&list_lock);
+	scsi_dh->idx = scsi_dh_list_idx++;
 	list_add(&scsi_dh->list, &scsi_dh_list);
 	spin_unlock(&list_lock);
+
+	for (i = 0; scsi_dh->devlist[i].vendor; i++) {
+		scsi_dev_info_list_add_keyed(0,
+					scsi_dh->devlist[i].vendor,
+					scsi_dh->devlist[i].model,
+					NULL,
+					scsi_dh->idx,
+					SCSI_DEVINFO_DH);
+	}
+
 	bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
 	printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
 
@@ -395,7 +352,7 @@ EXPORT_SYMBOL_GPL(scsi_register_device_handler);
  */
 int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
 {
-	struct scsi_dh_devinfo_list *tmp, *pos;
+	int i;
 
 	if (!get_device_handler(scsi_dh->name))
 		return -ENODEV;
@@ -403,14 +360,14 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
 	bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
 			 scsi_dh_notifier_remove);
 
+	for (i = 0; scsi_dh->devlist[i].vendor; i++) {
+		scsi_dev_info_list_del_keyed(scsi_dh->devlist[i].vendor,
+					     scsi_dh->devlist[i].model,
+					     SCSI_DEVINFO_DH);
+	}
+
 	spin_lock(&list_lock);
 	list_del(&scsi_dh->list);
-	list_for_each_entry_safe(pos, tmp, &scsi_dh_dev_list, node) {
-		if (pos->handler == scsi_dh) {
-			list_del(&pos->node);
-			kfree(pos);
-		}
-	}
 	spin_unlock(&list_lock);
 	printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name);
 
@@ -576,6 +533,10 @@ static int __init scsi_dh_init(void)
 {
 	int r;
 
+	r = scsi_dev_info_add_list(SCSI_DEVINFO_DH, "SCSI Device Handler");
+	if (r)
+		return r;
+
 	r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
 
 	if (!r)
@@ -590,6 +551,7 @@ static void __exit scsi_dh_exit(void)
 	bus_for_each_dev(&scsi_bus_type, NULL, NULL,
 			 scsi_dh_sysfs_attr_remove);
 	bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
+	scsi_dev_info_remove_list(SCSI_DEVINFO_DH);
 }
 
 module_init(scsi_dh_init);
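The scsi_dh rework above drops the private vendor/model cache in favor of the shared scsi_devinfo table keyed by SCSI_DEVINFO_DH, with each handler identified by a small integer index assigned at registration. The lookup-by-index half of that design, sketched standalone (structures simplified to a singly linked list; the real code walks a kernel list under list_lock):

    #include <stddef.h>

    struct device_handler {
            const char *name;
            int idx;                        /* assigned at registration */
            struct device_handler *next;
    };

    static struct device_handler *dh_list;

    /* Walk the registered handlers for the index the devinfo table
     * returned, as get_device_handler_by_idx() does above. */
    static struct device_handler *find_handler_by_idx(int idx)
    {
            struct device_handler *h;

            for (h = dh_list; h; h = h->next)
                    if (h->idx == idx)
                            return h;
            return NULL;
    }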

+ 11 - 7
drivers/scsi/device_handler/scsi_dh_alua.c

@@ -253,13 +253,15 @@ static void stpg_endio(struct request *req, int error)
 {
 	struct alua_dh_data *h = req->end_io_data;
 	struct scsi_sense_hdr sense_hdr;
-	unsigned err = SCSI_DH_IO;
+	unsigned err = SCSI_DH_OK;
 
 	if (error || host_byte(req->errors) != DID_OK ||
-			msg_byte(req->errors) != COMMAND_COMPLETE)
+			msg_byte(req->errors) != COMMAND_COMPLETE) {
+		err = SCSI_DH_IO;
 		goto done;
+	}
 
-	if (err == SCSI_DH_IO && h->senselen > 0) {
+	if (h->senselen > 0) {
 		err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
 					   &sense_hdr);
 		if (!err) {
@@ -285,7 +287,8 @@ static void stpg_endio(struct request *req, int error)
 			    print_alua_state(h->state));
 	}
 done:
-	blk_put_request(req);
+	req->end_io_data = NULL;
+	__blk_put_request(req->q, req);
 	if (h->callback_fn) {
 		h->callback_fn(h->callback_data, err);
 		h->callback_fn = h->callback_data = NULL;
@@ -303,7 +306,6 @@ done:
 static unsigned submit_stpg(struct alua_dh_data *h)
 {
 	struct request *rq;
-	int err = SCSI_DH_RES_TEMP_UNAVAIL;
 	int stpg_len = 8;
 	struct scsi_device *sdev = h->sdev;
 
@@ -332,7 +334,7 @@ static unsigned submit_stpg(struct alua_dh_data *h)
 	rq->end_io_data = h;
 
 	blk_execute_rq_nowait(rq->q, NULL, rq, 1, stpg_endio);
-	return err;
+	return SCSI_DH_OK;
 }
 
 /*
@@ -730,7 +732,9 @@ static const struct scsi_dh_devlist alua_dev_list[] = {
 	{"Pillar", "Axiom" },
 	{"Intel", "Multi-Flex"},
 	{"NETAPP", "LUN"},
+	{"NETAPP", "LUN C-Mode"},
 	{"AIX", "NVDISK"},
+	{"Promise", "VTrak"},
 	{NULL, NULL}
 };
 
@@ -759,7 +763,7 @@ static int alua_bus_attach(struct scsi_device *sdev)
 	unsigned long flags;
 	int err = SCSI_DH_OK;
 
-	scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+	scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
 			       + sizeof(*h) , GFP_KERNEL);
 	if (!scsi_dh_data) {
 		sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
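The recurring one-line fix in the alua, emc, hp_sw, and rdac attach paths replaces sizeof(struct scsi_device_handler *), which is just the size of a pointer (4 or 8 bytes), with sizeof(*scsi_dh_data), the size of the structure the allocation is actually meant to hold. The difference, demonstrated with a stand-in struct:

    #include <stdio.h>

    struct scsi_dh_data_like {
            long a, b, c;    /* stand-in fields; the real struct is larger */
    };

    int main(void)
    {
            struct scsi_dh_data_like *p = NULL;

            /* Pointer size vs pointed-to size: the bug class fixed above.
             * sizeof(*p) is not evaluated, so p may legally be NULL here. */
            printf("sizeof(ptr type) = %zu, sizeof(*p) = %zu\n",
                   sizeof(struct scsi_dh_data_like *), sizeof(*p));
            return 0;
    }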

+ 1 - 1
drivers/scsi/device_handler/scsi_dh_emc.c

@@ -650,7 +650,7 @@ static int clariion_bus_attach(struct scsi_device *sdev)
 	unsigned long flags;
 	int err;
 
-	scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+	scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
 			       + sizeof(*h) , GFP_KERNEL);
 	if (!scsi_dh_data) {
 		sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",

+ 4 - 3
drivers/scsi/device_handler/scsi_dh_hp_sw.c

@@ -225,7 +225,8 @@ static void start_stop_endio(struct request *req, int error)
 		}
 	}
 done:
-	blk_put_request(req);
+	req->end_io_data = NULL;
+	__blk_put_request(req->q, req);
 	if (h->callback_fn) {
 		h->callback_fn(h->callback_data, err);
 		h->callback_fn = h->callback_data = NULL;
@@ -338,8 +339,8 @@ static int hp_sw_bus_attach(struct scsi_device *sdev)
 	unsigned long flags;
 	int ret;
 
-	scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
-			       + sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
+	scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
+			       + sizeof(*h) , GFP_KERNEL);
 	if (!scsi_dh_data) {
 		sdev_printk(KERN_ERR, sdev, "%s: Attach Failed\n",
 			    HP_SW_NAME);

+ 13 - 13
drivers/scsi/device_handler/scsi_dh_rdac.c

@@ -281,11 +281,13 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
 }
 
 static struct request *rdac_failover_get(struct scsi_device *sdev,
-					 struct rdac_dh_data *h)
+			struct rdac_dh_data *h, struct list_head *list)
 {
 	struct request *rq;
 	struct rdac_mode_common *common;
 	unsigned data_size;
+	struct rdac_queue_data *qdata;
+	u8 *lun_table;
 
 	if (h->ctlr->use_ms10) {
 		struct rdac_pg_expanded *rdac_pg;
@@ -298,6 +300,7 @@ static struct request *rdac_failover_get(struct scsi_device *sdev,
 		rdac_pg->subpage_code = 0x1;
 		rdac_pg->page_len[0] = 0x01;
 		rdac_pg->page_len[1] = 0x28;
+		lun_table = rdac_pg->lun_table;
 	} else {
 		struct rdac_pg_legacy *rdac_pg;
 
@@ -307,11 +310,16 @@ static struct request *rdac_failover_get(struct scsi_device *sdev,
 		common = &rdac_pg->common;
 		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
 		rdac_pg->page_len = 0x68;
+		lun_table = rdac_pg->lun_table;
 	}
 	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
 	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
 	common->rdac_options = RDAC_FORCED_QUIESENCE;
 
+	list_for_each_entry(qdata, list, entry) {
+		lun_table[qdata->h->lun] = 0x81;
+	}
+
 	/* get request for block layer packet command */
 	rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
 	if (!rq)
@@ -565,7 +573,6 @@ static void send_mode_select(struct work_struct *work)
 	int err, retry_cnt = RDAC_RETRY_COUNT;
 	struct rdac_queue_data *tmp, *qdata;
 	LIST_HEAD(list);
-	u8 *lun_table;
 
 	spin_lock(&ctlr->ms_lock);
 	list_splice_init(&ctlr->ms_head, &list);
@@ -573,21 +580,12 @@ static void send_mode_select(struct work_struct *work)
 	ctlr->ms_sdev = NULL;
 	spin_unlock(&ctlr->ms_lock);
 
-	if (ctlr->use_ms10)
-		lun_table = ctlr->mode_select.expanded.lun_table;
-	else
-		lun_table = ctlr->mode_select.legacy.lun_table;
-
 retry:
 	err = SCSI_DH_RES_TEMP_UNAVAIL;
-	rq = rdac_failover_get(sdev, h);
+	rq = rdac_failover_get(sdev, h, &list);
 	if (!rq)
 		goto done;
 
-	list_for_each_entry(qdata, &list, entry) {
-		lun_table[qdata->h->lun] = 0x81;
-	}
-
 	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
 		"%s MODE_SELECT command",
 		(char *) h->ctlr->array_name, h->ctlr->index,
@@ -769,6 +767,7 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
 	{"DELL", "MD32xx"},
 	{"DELL", "MD32xxi"},
 	{"DELL", "MD36xxi"},
+	{"DELL", "MD36xxf"},
 	{"LSI", "INF-01-00"},
 	{"ENGENIO", "INF-01-00"},
 	{"STK", "FLEXLINE 380"},
@@ -800,7 +799,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
 	int err;
 	char array_name[ARRAY_LABEL_LEN];
 
-	scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+	scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
 			       + sizeof(*h) , GFP_KERNEL);
 	if (!scsi_dh_data) {
 		sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
@@ -906,4 +905,5 @@ module_exit(rdac_exit);
 
 
 MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver");
 MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
+MODULE_VERSION("01.00.0000.0000");
 MODULE_LICENSE("GPL");

+ 2 - 0
drivers/scsi/fcoe/Makefile

@@ -1,2 +1,4 @@
 obj-$(CONFIG_FCOE) += fcoe.o
 obj-$(CONFIG_LIBFCOE) += libfcoe.o
+
+libfcoe-objs := fcoe_ctlr.o fcoe_transport.o

File diff suppressed because it is too large
+ 194 - 383
drivers/scsi/fcoe/fcoe.c


+ 3 - 47
drivers/scsi/fcoe/fcoe.h

@@ -24,7 +24,7 @@
 #include <linux/kthread.h>
 
 #define FCOE_MAX_QUEUE_DEPTH	256
-#define FCOE_LOW_QUEUE_DEPTH	32
+#define FCOE_MIN_QUEUE_DEPTH	32
 
 #define FCOE_WORD_TO_BYTE	4
 
@@ -40,12 +40,6 @@
 #define FCOE_MIN_XID		0x0000	/* the min xid supported by fcoe_sw */
 #define FCOE_MAX_XID		0x0FFF	/* the max xid supported by fcoe_sw */
 
-/*
- * Max MTU for FCoE: 14 (FCoE header) + 24 (FC header) + 2112 (max FC payload)
- * + 4 (FC CRC) + 4 (FCoE trailer) =  2158 bytes
- */
-#define FCOE_MTU	2158
-
 unsigned int fcoe_debug_logging;
 module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
@@ -70,21 +64,6 @@ do {                                                            	\
 			   printk(KERN_INFO "fcoe: %s: " fmt,	\
 				  netdev->name, ##args);)
 
-/**
- * struct fcoe_percpu_s - The per-CPU context for FCoE receive threads
- * @thread:	    The thread context
- * @fcoe_rx_list:   The queue of pending packets to process
- * @page:	    The memory page for calculating frame trailer CRCs
- * @crc_eof_offset: The offset into the CRC page pointing to available
- *		    memory for a new trailer
- */
-struct fcoe_percpu_s {
-	struct task_struct *thread;
-	struct sk_buff_head fcoe_rx_list;
-	struct page *crc_eof_page;
-	int crc_eof_offset;
-};
-
 /**
  * struct fcoe_interface - A FCoE interface
  * @list:	      Handle for a list of FCoE interfaces
 	struct kref	   kref;
 	struct kref	   kref;
 };
 
-/**
- * struct fcoe_port - The FCoE private structure
- * @fcoe:		       The associated fcoe interface
- * @lport:		       The associated local port
- * @fcoe_pending_queue:	       The pending Rx queue of skbs
- * @fcoe_pending_queue_active: Indicates if the pending queue is active
- * @timer:		       The queue timer
- * @destroy_work:	       Handle for work context
- *			       (to prevent RTNL deadlocks)
- * @data_srt_addr:	       Source address for data
- *
- * An instance of this structure is to be allocated along with the
- * Scsi_Host and libfc fc_lport structures.
- */
-struct fcoe_port {
-	struct fcoe_interface *fcoe;
-	struct fc_lport	      *lport;
-	struct sk_buff_head   fcoe_pending_queue;
-	u8		      fcoe_pending_queue_active;
-	struct timer_list     timer;
-	struct work_struct    destroy_work;
-	u8		      data_src_addr[ETH_ALEN];
-};
-
 #define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
 
 /**
  */
  */
 static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
 {
-	return ((struct fcoe_port *)lport_priv(lport))->fcoe->netdev;
+	return ((struct fcoe_interface *)
+			((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
 }
 
 #endif /* _FCOE_H_ */

+ 7 - 33
drivers/scsi/fcoe/libfcoe.c → drivers/scsi/fcoe/fcoe_ctlr.c

@@ -44,9 +44,7 @@
 #include <scsi/libfc.h>
 #include <scsi/libfcoe.h>
 
-MODULE_AUTHOR("Open-FCoE.org");
-MODULE_DESCRIPTION("FIP discovery protocol support for FCoE HBAs");
-MODULE_LICENSE("GPL v2");
+#include "libfcoe.h"
 
 #define	FCOE_CTLR_MIN_FKA	500		/* min keep alive (mS) */
 #define	FCOE_CTLR_DEF_FKA	FIP_DEF_FKA	/* default keep alive (mS) */
@@ -66,31 +64,7 @@ static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS;
 static u8 fcoe_all_vn2vn[ETH_ALEN] = FIP_ALL_VN2VN_MACS;
 static u8 fcoe_all_vn2vn[ETH_ALEN] = FIP_ALL_VN2VN_MACS;
 static u8 fcoe_all_p2p[ETH_ALEN] = FIP_ALL_P2P_MACS;
 static u8 fcoe_all_p2p[ETH_ALEN] = FIP_ALL_P2P_MACS;
 
 
-unsigned int libfcoe_debug_logging;
-module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
-
-#define LIBFCOE_LOGGING	    0x01 /* General logging, not categorized */
-#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
-
-#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD)		\
-do {							\
-	if (unlikely(libfcoe_debug_logging & LEVEL))	\
-		do {					\
-			CMD;				\
-		} while (0);				\
-} while (0)
-
-#define LIBFCOE_DBG(fmt, args...)					\
-	LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING,				\
-			      printk(KERN_INFO "libfcoe: " fmt, ##args);)
-
-#define LIBFCOE_FIP_DBG(fip, fmt, args...)				\
-	LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING,			\
-			      printk(KERN_INFO "host%d: fip: " fmt, 	\
-				     (fip)->lp->host->host_no, ##args);)
-
-static const char *fcoe_ctlr_states[] = {
+static const char * const fcoe_ctlr_states[] = {
 	[FIP_ST_DISABLED] =	"DISABLED",
 	[FIP_ST_LINK_WAIT] =	"LINK_WAIT",
 	[FIP_ST_AUTO] =		"AUTO",
@@ -308,8 +282,8 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
 			struct fip_mac_desc mac;
 			struct fip_wwn_desc wwnn;
 			struct fip_size_desc size;
-		} __attribute__((packed)) desc;
-	}  __attribute__((packed)) *sol;
+		} __packed desc;
+	}  __packed * sol;
 	u32 fcoe_size;
 
 	skb = dev_alloc_skb(sizeof(*sol));
@@ -456,7 +430,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
 		struct ethhdr eth;
 		struct fip_header fip;
 		struct fip_mac_desc mac;
-	} __attribute__((packed)) *kal;
+	} __packed * kal;
 	struct fip_vn_desc *vn;
 	u32 len;
 	struct fc_lport *lp;
@@ -527,7 +501,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
 		struct ethhdr eth;
 		struct fip_header fip;
 		struct fip_encaps encaps;
-	} __attribute__((packed)) *cap;
+	} __packed * cap;
 	struct fc_frame_header *fh;
 	struct fip_mac_desc *mac;
 	struct fcoe_fcf *fcf;
@@ -1819,7 +1793,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
 		struct fip_mac_desc mac;
 		struct fip_wwn_desc wwnn;
 		struct fip_vn_desc vn;
-	} __attribute__((packed)) *frame;
+	} __packed * frame;
 	struct fip_fc4_feat *ff;
 	struct fip_size_desc *size;
 	u32 fcp_feat;

+ 770 - 0
drivers/scsi/fcoe/fcoe_transport.c

@@ -0,0 +1,770 @@
+/*
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+#include <linux/crc32.h>
+#include <scsi/libfcoe.h>
+
+#include "libfcoe.h"
+
+MODULE_AUTHOR("Open-FCoE.org");
+MODULE_DESCRIPTION("FIP discovery protocol and FCoE transport for FCoE HBAs");
+MODULE_LICENSE("GPL v2");
+
+static int fcoe_transport_create(const char *, struct kernel_param *);
+static int fcoe_transport_destroy(const char *, struct kernel_param *);
+static int fcoe_transport_show(char *buffer, const struct kernel_param *kp);
+static struct fcoe_transport *fcoe_transport_lookup(struct net_device *device);
+static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *device);
+static int fcoe_transport_enable(const char *, struct kernel_param *);
+static int fcoe_transport_disable(const char *, struct kernel_param *);
+static int libfcoe_device_notification(struct notifier_block *notifier,
+				    ulong event, void *ptr);
+
+static LIST_HEAD(fcoe_transports);
+static DEFINE_MUTEX(ft_mutex);
+static LIST_HEAD(fcoe_netdevs);
+static DEFINE_MUTEX(fn_mutex);
+
+unsigned int libfcoe_debug_logging;
+module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+module_param_call(show, NULL, fcoe_transport_show, NULL, S_IRUSR);
+__MODULE_PARM_TYPE(show, "string");
+MODULE_PARM_DESC(show, " Show attached FCoE transports");
+
+module_param_call(create, fcoe_transport_create, NULL,
+		  (void *)FIP_MODE_FABRIC, S_IWUSR);
+__MODULE_PARM_TYPE(create, "string");
+MODULE_PARM_DESC(create, " Creates fcoe instance on an Ethernet interface");
+
+module_param_call(create_vn2vn, fcoe_transport_create, NULL,
+		  (void *)FIP_MODE_VN2VN, S_IWUSR);
+__MODULE_PARM_TYPE(create_vn2vn, "string");
+MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance "
+		 "on an Ethernet interface");
+
+module_param_call(destroy, fcoe_transport_destroy, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(destroy, "string");
+MODULE_PARM_DESC(destroy, " Destroys fcoe instance on an Ethernet interface");
+
+module_param_call(enable, fcoe_transport_enable, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(enable, "string");
+MODULE_PARM_DESC(enable, " Enables fcoe on an Ethernet interface.");
+
+module_param_call(disable, fcoe_transport_disable, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(disable, "string");
+MODULE_PARM_DESC(disable, " Disables fcoe on an Ethernet interface.");
+
+/* notification function for packets from net device */
+static struct notifier_block libfcoe_notifier = {
+	.notifier_call = libfcoe_device_notification,
+};
+
+/**
+ * fcoe_fc_crc() - Calculates the CRC for a given frame
+ * @fp: The frame to be checksummed
+ *
+ * This uses the crc32() routine to calculate the CRC for a frame
+ *
+ * Return: The 32 bit CRC value
+ */
+u32 fcoe_fc_crc(struct fc_frame *fp)
+{
+	struct sk_buff *skb = fp_skb(fp);
+	struct skb_frag_struct *frag;
+	unsigned char *data;
+	unsigned long off, len, clen;
+	u32 crc;
+	unsigned i;
+
+	crc = crc32(~0, skb->data, skb_headlen(skb));
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		frag = &skb_shinfo(skb)->frags[i];
+		off = frag->page_offset;
+		len = frag->size;
+		while (len > 0) {
+			clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
+			data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
+					   KM_SKB_DATA_SOFTIRQ);
+			crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
+			kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
+			off += clen;
+			len -= clen;
+		}
+	}
+	return crc;
+}
+EXPORT_SYMBOL_GPL(fcoe_fc_crc);
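fcoe_fc_crc() relies on CRC-32 chaining: feeding a frame's regions to crc32() one at a time yields the same value as a single pass over the concatenated bytes, which is what lets the loop above walk the linear skb data and each kmapped page fragment separately. A minimal user-space sketch of that chaining, using zlib's crc32() as a stand-in for the kernel routine (the two differ in seeding and conditioning, so only the equality of the two computations carries over; buffer names are illustrative):

    /* build: cc crc_chain.c -lz */
    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    int main(void)
    {
        unsigned char head[] = "fc frame header";  /* models skb->data */
        unsigned char frag[] = "paged payload";    /* models a page fragment */
        unsigned char all[sizeof(head) + sizeof(frag)];

        /* One-shot CRC over the concatenated buffers. */
        memcpy(all, head, sizeof(head));
        memcpy(all + sizeof(head), frag, sizeof(frag));
        uLong whole = crc32(crc32(0L, Z_NULL, 0), all, sizeof(all));

        /* Incremental CRC, one region at a time, as fcoe_fc_crc() does. */
        uLong inc = crc32(0L, Z_NULL, 0);
        inc = crc32(inc, head, sizeof(head));
        inc = crc32(inc, frag, sizeof(frag));

        printf("one-shot %08lx, incremental %08lx\n", whole, inc);
        return 0;  /* both values are identical */
    }

The chaining is what makes the per-fragment kmap_atomic() walk possible without first linearizing the frame into a bounce buffer.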
+
+/**
+ * fcoe_start_io() - Start FCoE I/O
+ * @skb: The packet to be transmitted
+ *
+ * This routine is called from the net device to start transmitting
+ * FCoE packets.
+ *
+ * Returns: 0 for success
+ */
+int fcoe_start_io(struct sk_buff *skb)
+{
+	struct sk_buff *nskb;
+	int rc;
+
+	nskb = skb_clone(skb, GFP_ATOMIC);
+	if (!nskb)
+		return -ENOMEM;
+	rc = dev_queue_xmit(nskb);
+	if (rc != 0)
+		return rc;
+	kfree_skb(skb);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_start_io);
+
+
+/**
+ * fcoe_clean_pending_queue() - Dequeue a skb and free it
+ * @lport: The local port to dequeue a skb on
+ */
+void fcoe_clean_pending_queue(struct fc_lport *lport)
+{
+	struct fcoe_port  *port = lport_priv(lport);
+	struct sk_buff *skb;
+
+	spin_lock_bh(&port->fcoe_pending_queue.lock);
+	while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
+		spin_unlock_bh(&port->fcoe_pending_queue.lock);
+		kfree_skb(skb);
+		spin_lock_bh(&port->fcoe_pending_queue.lock);
+	}
+	spin_unlock_bh(&port->fcoe_pending_queue.lock);
+}
+EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
+
+/**
+ * fcoe_check_wait_queue() - Attempt to clear the transmit backlog
+ * @lport: The local port whose backlog is to be cleared
+ * @skb:   An skb to queue before draining, or NULL
+ *
+ * This empties the wait_queue, dequeuing each skb in turn and passing
+ * it to fcoe_start_io(). If a transmit fails, the skb is put back at
+ * the head of the queue and the queue timer is armed to retry later.
+ *
+ * The wait_queue is used when the skb transmit fails. The failed skb
+ * will go in the wait_queue which will be emptied by the timer function or
+ * by the next skb transmit.
+ */
+void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb)
+{
+	struct fcoe_port *port = lport_priv(lport);
+	int rc;
+
+	spin_lock_bh(&port->fcoe_pending_queue.lock);
+
+	if (skb)
+		__skb_queue_tail(&port->fcoe_pending_queue, skb);
+
+	if (port->fcoe_pending_queue_active)
+		goto out;
+	port->fcoe_pending_queue_active = 1;
+
+	while (port->fcoe_pending_queue.qlen) {
+		/* keep qlen > 0 until fcoe_start_io succeeds */
+		port->fcoe_pending_queue.qlen++;
+		skb = __skb_dequeue(&port->fcoe_pending_queue);
+
+		spin_unlock_bh(&port->fcoe_pending_queue.lock);
+		rc = fcoe_start_io(skb);
+		spin_lock_bh(&port->fcoe_pending_queue.lock);
+
+		if (rc) {
+			__skb_queue_head(&port->fcoe_pending_queue, skb);
+			/* undo temporary increment above */
+			port->fcoe_pending_queue.qlen--;
+			break;
+		}
+		/* undo temporary increment above */
+		port->fcoe_pending_queue.qlen--;
+	}
+
+	if (port->fcoe_pending_queue.qlen < port->min_queue_depth)
+		lport->qfull = 0;
+	if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
+		mod_timer(&port->timer, jiffies + 2);
+	port->fcoe_pending_queue_active = 0;
+out:
+	if (port->fcoe_pending_queue.qlen > port->max_queue_depth)
+		lport->qfull = 1;
+	spin_unlock_bh(&port->fcoe_pending_queue.lock);
+}
+EXPORT_SYMBOL_GPL(fcoe_check_wait_queue);
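The requeue-on-failure pattern in fcoe_check_wait_queue() can be modeled without skbs or locking; a toy sketch (all names hypothetical) that pops frames from the head, and on a failed transmit puts the frame back and leaves the remainder for the timer to retry:

    #include <stdio.h>

    #define QLEN 8
    static int q[QLEN];
    static int head, tail, count;

    static void push_tail(int v) { q[tail] = v; tail = (tail + 1) % QLEN; count++; }
    static void push_head(int v) { head = (head + QLEN - 1) % QLEN; q[head] = v; count++; }
    static int pop_head(void) { int v = q[head]; head = (head + 1) % QLEN; count--; return v; }

    static int budget = 2;  /* pretend the NIC accepts two frames, then stalls */
    static int xmit(int v) { (void)v; return budget-- > 0 ? 0 : -1; }

    int main(void)
    {
        for (int i = 1; i <= 4; i++)
            push_tail(i);

        while (count) {
            int v = pop_head();
            if (xmit(v)) {      /* failed: requeue at the head and stop */
                push_head(v);
                break;
            }
            printf("sent %d\n", v);
        }
        printf("%d frame(s) left for the timer to retry\n", count);
        return 0;
    }

The real function additionally bumps qlen around the unlocked fcoe_start_io() call so concurrent callers still see a non-empty queue, and toggles lport->qfull against the min/max queue depths.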
+
+/**
+ * fcoe_queue_timer() - The fcoe queue timer
+ * @lport: The local port
+ *
+ * Calls fcoe_check_wait_queue on timeout
+ */
+void fcoe_queue_timer(ulong lport)
+{
+	fcoe_check_wait_queue((struct fc_lport *)lport, NULL);
+}
+EXPORT_SYMBOL_GPL(fcoe_queue_timer);
+
+/**
+ * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC
+ * @skb:  The packet to be transmitted
+ * @tlen: The total length of the trailer
+ * @fps:  The fcoe context
+ *
+ * This routine allocates a page for frame trailers. The page is re-used if
+ * there is enough room left on it for the current trailer. If there isn't
+ * enough buffer left a new page is allocated for the trailer. References to
+ * the page from this function, as well as from the skbs using the page
+ * fragments, ensure that the page is freed at the appropriate time.
+ *
+ * Returns: 0 for success
+ */
+int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
+			   struct fcoe_percpu_s *fps)
+{
+	struct page *page;
+
+	page = fps->crc_eof_page;
+	if (!page) {
+		page = alloc_page(GFP_ATOMIC);
+		if (!page)
+			return -ENOMEM;
+
+		fps->crc_eof_page = page;
+		fps->crc_eof_offset = 0;
+	}
+
+	get_page(page);
+	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
+			   fps->crc_eof_offset, tlen);
+	skb->len += tlen;
+	skb->data_len += tlen;
+	skb->truesize += tlen;
+	fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
+
+	if (fps->crc_eof_offset >= PAGE_SIZE) {
+		fps->crc_eof_page = NULL;
+		fps->crc_eof_offset = 0;
+		put_page(page);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_get_paged_crc_eof);
+
+/**
+ * fcoe_transport_lookup - find an fcoe transport that matches a netdev
+ * @netdev: The netdev to look for from all attached transports
+ *
+ * Returns : ptr to the fcoe transport that supports this netdev or NULL
+ * if not found.
+ *
+ * The ft_mutex should be held when this is called
+ */
+static struct fcoe_transport *fcoe_transport_lookup(struct net_device *netdev)
+{
+	struct fcoe_transport *ft = NULL;
+
+	list_for_each_entry(ft, &fcoe_transports, list)
+		if (ft->match && ft->match(netdev))
+			return ft;
+	return NULL;
+}
+
+/**
+ * fcoe_transport_attach - Attaches an FCoE transport
+ * @ft: The fcoe transport to be attached
+ *
+ * Returns : 0 for success
+ */
+int fcoe_transport_attach(struct fcoe_transport *ft)
+{
+	int rc = 0;
+
+	mutex_lock(&ft_mutex);
+	if (ft->attached) {
+		LIBFCOE_TRANSPORT_DBG("transport %s already attached\n",
+				       ft->name);
+		rc = -EEXIST;
+		goto out_attach;
+	}
+
+	/* Add default transport to the tail */
+	if (strcmp(ft->name, FCOE_TRANSPORT_DEFAULT))
+		list_add(&ft->list, &fcoe_transports);
+	else
+		list_add_tail(&ft->list, &fcoe_transports);
+
+	ft->attached = true;
+	LIBFCOE_TRANSPORT_DBG("attaching transport %s\n", ft->name);
+
+out_attach:
+	mutex_unlock(&ft_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(fcoe_transport_attach);
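Providers such as fcoe.ko or bnx2fc register themselves through this interface. A hedged sketch of how an LLD might attach; the callback signatures are assumed from the ft->match(netdev), ft->create(netdev, fip_mode) and ft->destroy(netdev) call sites in this file, and the provider name and trivial bodies are purely illustrative:

    static bool my_match(struct net_device *netdev)
    {
    	return true;  /* a real driver checks whether it owns this netdev */
    }

    static int my_create(struct net_device *netdev, enum fip_state fip_mode)
    {
    	return 0;     /* set up the lport/ctlr for this netdev */
    }

    static int my_destroy(struct net_device *netdev)
    {
    	return 0;     /* tear the instance back down */
    }

    static struct fcoe_transport my_transport = {
    	.name    = "my_fcoe",   /* hypothetical provider name */
    	.match   = my_match,
    	.create  = my_create,
    	.destroy = my_destroy,
    };

    /* module init:  fcoe_transport_attach(&my_transport);  */
    /* module exit:  fcoe_transport_detach(&my_transport);  */

Because the default transport is appended at the tail while others are added at the head, a specialized provider's match() gets first refusal on a netdev.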
+
+/**
+ * fcoe_transport_detach - Detaches an FCoE transport
+ * @ft: The fcoe transport to be detached
+ *
+ * Returns : 0 for success
+ */
+int fcoe_transport_detach(struct fcoe_transport *ft)
+{
+	int rc = 0;
+
+	mutex_lock(&ft_mutex);
+	if (!ft->attached) {
+		LIBFCOE_TRANSPORT_DBG("transport %s already detached\n",
+			ft->name);
+		rc = -ENODEV;
+		goto out_attach;
+	}
+
+	list_del(&ft->list);
+	ft->attached = false;
+	LIBFCOE_TRANSPORT_DBG("detaching transport %s\n", ft->name);
+
+out_attach:
+	mutex_unlock(&ft_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(fcoe_transport_detach);
+
+static int fcoe_transport_show(char *buffer, const struct kernel_param *kp)
+{
+	int i, j;
+	struct fcoe_transport *ft = NULL;
+
+	i = j = sprintf(buffer, "Attached FCoE transports:");
+	mutex_lock(&ft_mutex);
+	list_for_each_entry(ft, &fcoe_transports, list) {
+		i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name);
+		if (i >= PAGE_SIZE)
+			break;
+	}
+	mutex_unlock(&ft_mutex);
+	if (i == j)
+		i += snprintf(&buffer[i], IFNAMSIZ, "none");
+	return i;
+}
+
+static int __init fcoe_transport_init(void)
+{
+	register_netdevice_notifier(&libfcoe_notifier);
+	return 0;
+}
+
+static int __exit fcoe_transport_exit(void)
+{
+	struct fcoe_transport *ft;
+
+	unregister_netdevice_notifier(&libfcoe_notifier);
+	mutex_lock(&ft_mutex);
+	list_for_each_entry(ft, &fcoe_transports, list)
+		printk(KERN_ERR "FCoE transport %s is still attached!\n",
+		      ft->name);
+	mutex_unlock(&ft_mutex);
+	return 0;
+}
+
+
+static int fcoe_add_netdev_mapping(struct net_device *netdev,
+					struct fcoe_transport *ft)
+{
+	struct fcoe_netdev_mapping *nm;
+
+	nm = kmalloc(sizeof(*nm), GFP_KERNEL);
+	if (!nm) {
+		printk(KERN_ERR "Unable to allocate netdev_mapping\n");
+		return -ENOMEM;
+	}
+
+	nm->netdev = netdev;
+	nm->ft = ft;
+
+	mutex_lock(&fn_mutex);
+	list_add(&nm->list, &fcoe_netdevs);
+	mutex_unlock(&fn_mutex);
+	return 0;
+}
+
+
+static void fcoe_del_netdev_mapping(struct net_device *netdev)
+{
+	struct fcoe_netdev_mapping *nm = NULL, *tmp;
+
+	mutex_lock(&fn_mutex);
+	list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) {
+		if (nm->netdev == netdev) {
+			list_del(&nm->list);
+			kfree(nm);
+			mutex_unlock(&fn_mutex);
+			return;
+		}
+	}
+	mutex_unlock(&fn_mutex);
+}
+
+
+/**
+ * fcoe_netdev_map_lookup - find the fcoe transport mapped to a netdev
+ * @netdev: The net device on which an FCoE instance was created
+ *
+ * Returns : ptr to the fcoe transport that supports this netdev or NULL
+ * if not found.
+ *
+ * The ft_mutex should be held when this is called
+ */
+static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *netdev)
+{
+	struct fcoe_transport *ft = NULL;
+	struct fcoe_netdev_mapping *nm;
+
+	mutex_lock(&fn_mutex);
+	list_for_each_entry(nm, &fcoe_netdevs, list) {
+		if (netdev == nm->netdev) {
+			ft = nm->ft;
+			mutex_unlock(&fn_mutex);
+			return ft;
+		}
+	}
+
+	mutex_unlock(&fn_mutex);
+	return NULL;
+}
+
+/**
+ * fcoe_if_to_netdev() - Parse a name buffer to get a net device
+ * @buffer: The name of the net device
+ *
+ * Returns: NULL or a ptr to net_device
+ */
+static struct net_device *fcoe_if_to_netdev(const char *buffer)
+{
+	char *cp;
+	char ifname[IFNAMSIZ + 2];
+
+	if (buffer) {
+		strlcpy(ifname, buffer, IFNAMSIZ);
+		cp = ifname + strlen(ifname);
+		while (--cp >= ifname && *cp == '\n')
+			*cp = '\0';
+		return dev_get_by_name(&init_net, ifname);
+	}
+	return NULL;
+}
+
+/**
+ * libfcoe_device_notification() - Handler for net device events
+ * @notifier: The context of the notification
+ * @event:    The type of event
+ * @ptr:      The net device that the event was on
+ *
+ * This function is called from the netdevice notifier chain on net device
+ * events; it currently acts on NETDEV_UNREGISTER to drop stale mappings.
+ *
+ * Returns: 0 for success
+ */
+static int libfcoe_device_notification(struct notifier_block *notifier,
+				    ulong event, void *ptr)
+{
+	struct net_device *netdev = ptr;
+
+	switch (event) {
+	case NETDEV_UNREGISTER:
+		printk(KERN_ERR "libfcoe_device_notification: NETDEV_UNREGISTER %s\n",
+				netdev->name);
+		fcoe_del_netdev_mapping(netdev);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+
+/**
+ * fcoe_transport_create() - Create a fcoe interface
+ * @buffer: The name of the Ethernet interface to create on
+ * @kp:	    The associated kernel param
+ *
+ * Called from sysfs. This holds the ft_mutex while calling the
+ * registered fcoe transport's create function.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
+{
+	int rc = -ENODEV;
+	struct net_device *netdev = NULL;
+	struct fcoe_transport *ft = NULL;
+	enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
+
+	if (!mutex_trylock(&ft_mutex))
+		return restart_syscall();
+
+#ifdef CONFIG_LIBFCOE_MODULE
+	/*
+	 * Make sure the module has been initialized, and is not about to be
+	 * removed.  Module parameter sysfs files are writable before the
+	 * module_init function is called and after module_exit.
+	 */
+	if (THIS_MODULE->state != MODULE_STATE_LIVE)
+		goto out_nodev;
+#endif
+
+	netdev = fcoe_if_to_netdev(buffer);
+	if (!netdev) {
+		LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buffer);
+		goto out_nodev;
+	}
+
+	ft = fcoe_netdev_map_lookup(netdev);
+	if (ft) {
+		LIBFCOE_TRANSPORT_DBG("transport %s already has existing "
+				      "FCoE instance on %s.\n",
+				      ft->name, netdev->name);
+		rc = -EEXIST;
+		goto out_putdev;
+	}
+
+	ft = fcoe_transport_lookup(netdev);
+	if (!ft) {
+		LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+				      netdev->name);
+		goto out_putdev;
+	}
+
+	rc = fcoe_add_netdev_mapping(netdev, ft);
+	if (rc) {
+		LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping "
+				      "for FCoE transport %s for %s.\n",
+				      ft->name, netdev->name);
+		goto out_putdev;
+	}
+
+	/* pass to transport create */
+	rc = ft->create ? ft->create(netdev, fip_mode) : -ENODEV;
+	if (rc)
+		fcoe_del_netdev_mapping(netdev);
+
+	LIBFCOE_TRANSPORT_DBG("transport %s %s to create fcoe on %s.\n",
+			      ft->name, (rc) ? "failed" : "succeeded",
+			      netdev->name);
+
+out_putdev:
+	dev_put(netdev);
+out_nodev:
+	mutex_unlock(&ft_mutex);
+	if (rc == -ERESTARTSYS)
+		return restart_syscall();
+	else
+		return rc;
+}
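From user space this handler sits behind /sys/module/libfcoe/parameters/create, so `echo eth0 > /sys/module/libfcoe/parameters/create` starts an instance. A C sketch of the same write (the interface name is illustrative, and the write fails unless a registered transport matches the netdev):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/sys/module/libfcoe/parameters/create";
        const char *ifname = "eth0\n";   /* trailing \n is trimmed in-kernel */
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
            perror("open");              /* libfcoe not loaded? */
            return 1;
        }
        if (write(fd, ifname, strlen(ifname)) < 0)
            perror("write");             /* e.g. ENODEV: no matching transport */
        close(fd);
        return 0;
    }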
+
+/**
+ * fcoe_transport_destroy() - Destroy a FCoE interface
+ * @buffer: The name of the Ethernet interface to be destroyed
+ * @kp:	    The associated kernel parameter
+ *
+ * Called from sysfs. This holds the ft_mutex while calling the
+ * registered fcoe transport's destroy function.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
+{
+	int rc = -ENODEV;
+	struct net_device *netdev = NULL;
+	struct fcoe_transport *ft = NULL;
+
+	if (!mutex_trylock(&ft_mutex))
+		return restart_syscall();
+
+#ifdef CONFIG_LIBFCOE_MODULE
+	/*
+	 * Make sure the module has been initialized, and is not about to be
+	 * removed.  Module parameter sysfs files are writable before the
+	 * module_init function is called and after module_exit.
+	 */
+	if (THIS_MODULE->state != MODULE_STATE_LIVE)
+		goto out_nodev;
+#endif
+
+	netdev = fcoe_if_to_netdev(buffer);
+	if (!netdev) {
+		LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buffer);
+		goto out_nodev;
+	}
+
+	ft = fcoe_netdev_map_lookup(netdev);
+	if (!ft) {
+		LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+				      netdev->name);
+		goto out_putdev;
+	}
+
+	/* pass to transport destroy */
+	rc = ft->destroy ? ft->destroy(netdev) : -ENODEV;
+	fcoe_del_netdev_mapping(netdev);
+	LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n",
+			      ft->name, (rc) ? "failed" : "succeeded",
+			      netdev->name);
+
+out_putdev:
+	dev_put(netdev);
+out_nodev:
+	mutex_unlock(&ft_mutex);
+
+	if (rc == -ERESTARTSYS)
+		return restart_syscall();
+	else
+		return rc;
+}
+
+/**
+ * fcoe_transport_disable() - Disables a FCoE interface
+ * @buffer: The name of the Ethernet interface to be disabled
+ * @kp:	    The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
+{
+	int rc = -ENODEV;
+	struct net_device *netdev = NULL;
+	struct fcoe_transport *ft = NULL;
+
+	if (!mutex_trylock(&ft_mutex))
+		return restart_syscall();
+
+#ifdef CONFIG_LIBFCOE_MODULE
+	/*
+	 * Make sure the module has been initialized, and is not about to be
+	 * removed.  Module parameter sysfs files are writable before the
+	 * module_init function is called and after module_exit.
+	 */
+	if (THIS_MODULE->state != MODULE_STATE_LIVE)
+		goto out_nodev;
+#endif
+
+	netdev = fcoe_if_to_netdev(buffer);
+	if (!netdev)
+		goto out_nodev;
+
+	ft = fcoe_netdev_map_lookup(netdev);
+	if (!ft)
+		goto out_putdev;
+
+	rc = ft->disable ? ft->disable(netdev) : -ENODEV;
+
+out_putdev:
+	dev_put(netdev);
+out_nodev:
+	mutex_unlock(&ft_mutex);
+
+	if (rc == -ERESTARTSYS)
+		return restart_syscall();
+	else
+		return rc;
+}
+
+/**
+ * fcoe_transport_enable() - Enables a FCoE interface
+ * @buffer: The name of the Ethernet interface to be enabled
+ * @kp:     The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
+{
+	int rc = -ENODEV;
+	struct net_device *netdev = NULL;
+	struct fcoe_transport *ft = NULL;
+
+	if (!mutex_trylock(&ft_mutex))
+		return restart_syscall();
+
+#ifdef CONFIG_LIBFCOE_MODULE
+	/*
+	 * Make sure the module has been initialized, and is not about to be
+	 * removed.  Module parameter sysfs files are writable before the
+	 * module_init function is called and after module_exit.
+	 */
+	if (THIS_MODULE->state != MODULE_STATE_LIVE)
+		goto out_nodev;
+#endif
+
+	netdev = fcoe_if_to_netdev(buffer);
+	if (!netdev)
+		goto out_nodev;
+
+	ft = fcoe_netdev_map_lookup(netdev);
+	if (!ft)
+		goto out_putdev;
+
+	rc = ft->enable ? ft->enable(netdev) : -ENODEV;
+
+out_putdev:
+	dev_put(netdev);
+out_nodev:
+	mutex_unlock(&ft_mutex);
+	if (rc == -ERESTARTSYS)
+		return restart_syscall();
+	else
+		return rc;
+}
+
+/**
+ * libfcoe_init() - Initialization routine for libfcoe.ko
+ */
+static int __init libfcoe_init(void)
+{
+	fcoe_transport_init();
+
+	return 0;
+}
+module_init(libfcoe_init);
+
+/**
+ * libfcoe_exit() - Tear down libfcoe.ko
+ */
+static void __exit libfcoe_exit(void)
+{
+	fcoe_transport_exit();
+}
+module_exit(libfcoe_exit);

+ 31 - 0
drivers/scsi/fcoe/libfcoe.h

@@ -0,0 +1,31 @@
+#ifndef _FCOE_LIBFCOE_H_
+#define _FCOE_LIBFCOE_H_
+
+extern unsigned int libfcoe_debug_logging;
+#define LIBFCOE_LOGGING	    0x01 /* General logging, not categorized */
+#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
+#define LIBFCOE_TRANSPORT_LOGGING	0x04 /* FCoE transport logging */
+
+#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD)		\
+do {							\
+	if (unlikely(libfcoe_debug_logging & LEVEL))	\
+		do {					\
+			CMD;				\
+		} while (0);				\
+} while (0)
+
+#define LIBFCOE_DBG(fmt, args...)					\
+	LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING,				\
+			      printk(KERN_INFO "libfcoe: " fmt, ##args);)
+
+#define LIBFCOE_FIP_DBG(fip, fmt, args...)				\
+	LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING,			\
+			      printk(KERN_INFO "host%d: fip: " fmt,	\
+				     (fip)->lp->host->host_no, ##args);)
+
+#define LIBFCOE_TRANSPORT_DBG(fmt, args...)				\
+	LIBFCOE_CHECK_LOGGING(LIBFCOE_TRANSPORT_LOGGING,		\
+			      printk(KERN_INFO "%s: " fmt,		\
+				     __func__, ##args);)
+
+#endif /* _FCOE_LIBFCOE_H_ */
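The three categories are enabled independently through the debug_logging bitmask exported above (for instance, writing 7 to /sys/module/libfcoe/parameters/debug_logging turns them all on). A small user-space sketch of the same gating idiom (macro and symbol names are illustrative; ##__VA_ARGS__ is the GNU extension the kernel macros also rely on):

    #include <stdio.h>

    #define LOG_GENERAL   0x01
    #define LOG_FIP       0x02
    #define LOG_TRANSPORT 0x04

    static unsigned int debug_mask = LOG_FIP | LOG_TRANSPORT;

    #define DBG(level, fmt, ...)                \
        do {                                    \
            if (debug_mask & (level))           \
                printf(fmt, ##__VA_ARGS__);     \
        } while (0)

    int main(void)
    {
        DBG(LOG_GENERAL,   "general: suppressed by the mask\n");
        DBG(LOG_FIP,       "fip: printed\n");
        DBG(LOG_TRANSPORT, "transport: printed\n");
        return 0;
    }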

+ 1 - 1
drivers/scsi/fnic/fnic.h

@@ -37,7 +37,7 @@
 
 #define DRV_NAME		"fnic"
 #define DRV_DESCRIPTION		"Cisco FCoE HBA Driver"
-#define DRV_VERSION		"1.4.0.145"
+#define DRV_VERSION		"1.5.0.1"
 #define PFX			DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 

+ 1 - 1
drivers/scsi/fnic/vnic_dev.c

@@ -654,7 +654,7 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
 				vdev->linkstatus_pa);
 		if (vdev->stats)
 			pci_free_consistent(vdev->pdev,
-				sizeof(struct vnic_dev),
+				sizeof(struct vnic_stats),
 				vdev->stats, vdev->stats_pa);
 		if (vdev->fw_info)
 			pci_free_consistent(vdev->pdev,

+ 326 - 253
drivers/scsi/hpsa.c

@@ -74,6 +74,10 @@ static int hpsa_allow_any;
 module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(hpsa_allow_any,
 		"Allow hpsa driver to access unknown HP Smart Array hardware");
+static int hpsa_simple_mode;
+module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(hpsa_simple_mode,
+	"Use 'simple mode' rather than 'performant mode'");
 
 /* define the PCI info for the cards we can control */
 static const struct pci_device_id hpsa_pci_device_id[] = {
@@ -85,11 +89,13 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324a},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324b},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3250},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3251},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3252},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3253},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3254},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
 	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
 		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
 	{0,}
@@ -109,11 +115,13 @@ static struct board_type products[] = {
 	{0x3249103C, "Smart Array P812", &SA5_access},
 	{0x324a103C, "Smart Array P712m", &SA5_access},
 	{0x324b103C, "Smart Array P711m", &SA5_access},
-	{0x3250103C, "Smart Array", &SA5_access},
-	{0x3250113C, "Smart Array", &SA5_access},
-	{0x3250123C, "Smart Array", &SA5_access},
-	{0x3250133C, "Smart Array", &SA5_access},
-	{0x3250143C, "Smart Array", &SA5_access},
+	{0x3350103C, "Smart Array", &SA5_access},
+	{0x3351103C, "Smart Array", &SA5_access},
+	{0x3352103C, "Smart Array", &SA5_access},
+	{0x3353103C, "Smart Array", &SA5_access},
+	{0x3354103C, "Smart Array", &SA5_access},
+	{0x3355103C, "Smart Array", &SA5_access},
+	{0x3356103C, "Smart Array", &SA5_access},
 	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
 };
 
@@ -147,17 +155,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
 static int hpsa_slave_alloc(struct scsi_device *sdev);
 static void hpsa_slave_destroy(struct scsi_device *sdev);
 
-static ssize_t raid_level_show(struct device *dev,
-	struct device_attribute *attr, char *buf);
-static ssize_t lunid_show(struct device *dev,
-	struct device_attribute *attr, char *buf);
-static ssize_t unique_id_show(struct device *dev,
-	struct device_attribute *attr, char *buf);
-static ssize_t host_show_firmware_revision(struct device *dev,
-	     struct device_attribute *attr, char *buf);
 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
-static ssize_t host_store_rescan(struct device *dev,
-	 struct device_attribute *attr, const char *buf, size_t count);
 static int check_for_unit_attention(struct ctlr_info *h,
 	struct CommandList *c);
 static void check_ioctl_unit_attention(struct ctlr_info *h,
@@ -173,47 +171,10 @@ static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
 static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
 	unsigned long *memory_bar);
 static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
-
-static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
-static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
-static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
-static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
-static DEVICE_ATTR(firmware_revision, S_IRUGO,
-	host_show_firmware_revision, NULL);
-
-static struct device_attribute *hpsa_sdev_attrs[] = {
-	&dev_attr_raid_level,
-	&dev_attr_lunid,
-	&dev_attr_unique_id,
-	NULL,
-};
-
-static struct device_attribute *hpsa_shost_attrs[] = {
-	&dev_attr_rescan,
-	&dev_attr_firmware_revision,
-	NULL,
-};
-
-static struct scsi_host_template hpsa_driver_template = {
-	.module			= THIS_MODULE,
-	.name			= "hpsa",
-	.proc_name		= "hpsa",
-	.queuecommand		= hpsa_scsi_queue_command,
-	.scan_start		= hpsa_scan_start,
-	.scan_finished		= hpsa_scan_finished,
-	.change_queue_depth	= hpsa_change_queue_depth,
-	.this_id		= -1,
-	.use_clustering		= ENABLE_CLUSTERING,
-	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
-	.ioctl			= hpsa_ioctl,
-	.slave_alloc		= hpsa_slave_alloc,
-	.slave_destroy		= hpsa_slave_destroy,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl		= hpsa_compat_ioctl,
-#endif
-	.sdev_attrs = hpsa_sdev_attrs,
-	.shost_attrs = hpsa_shost_attrs,
-};
+static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
+	void __iomem *vaddr, int wait_for_ready);
+#define BOARD_NOT_READY 0
+#define BOARD_READY 1
 
 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
 {
@@ -291,67 +252,63 @@ static ssize_t host_show_firmware_revision(struct device *dev,
 		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
 }
 
-/* Enqueuing and dequeuing functions for cmdlists. */
-static inline void addQ(struct hlist_head *list, struct CommandList *c)
+static ssize_t host_show_commands_outstanding(struct device *dev,
+	     struct device_attribute *attr, char *buf)
 {
-	hlist_add_head(&c->list, list);
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ctlr_info *h = shost_to_hba(shost);
+
+	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
 }
 
-static inline u32 next_command(struct ctlr_info *h)
+static ssize_t host_show_transport_mode(struct device *dev,
+	struct device_attribute *attr, char *buf)
 {
-	u32 a;
-
-	if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
-		return h->access.command_completed(h);
+	struct ctlr_info *h;
+	struct Scsi_Host *shost = class_to_shost(dev);
 
-	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
-		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
-		(h->reply_pool_head)++;
-		h->commands_outstanding--;
-	} else {
-		a = FIFO_EMPTY;
-	}
-	/* Check for wraparound */
-	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
-		h->reply_pool_head = h->reply_pool;
-		h->reply_pool_wraparound ^= 1;
-	}
-	return a;
+	h = shost_to_hba(shost);
+	return snprintf(buf, 20, "%s\n",
+		h->transMethod & CFGTBL_Trans_Performant ?
+			"performant" : "simple");
 }
 
-/* set_performant_mode: Modify the tag for cciss performant
- * set bit 0 for pull model, bits 3-1 for block fetch
- * register number
- */
-static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
-{
-	if (likely(h->transMethod == CFGTBL_Trans_Performant))
-		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
-}
+/* List of controllers which cannot be reset on kexec with reset_devices */
+static u32 unresettable_controller[] = {
+	0x324a103C, /* Smart Array P712m */
+	0x324b103C, /* SmartArray P711m */
+	0x3223103C, /* Smart Array P800 */
+	0x3234103C, /* Smart Array P400 */
+	0x3235103C, /* Smart Array P400i */
+	0x3211103C, /* Smart Array E200i */
+	0x3212103C, /* Smart Array E200 */
+	0x3213103C, /* Smart Array E200i */
+	0x3214103C, /* Smart Array E200i */
+	0x3215103C, /* Smart Array E200i */
+	0x3237103C, /* Smart Array E500 */
+	0x323D103C, /* Smart Array P700m */
+	0x409C0E11, /* Smart Array 6400 */
+	0x409D0E11, /* Smart Array 6400 EM */
+};
 
-static void enqueue_cmd_and_start_io(struct ctlr_info *h,
-	struct CommandList *c)
+static int ctlr_is_resettable(struct ctlr_info *h)
 {
-	unsigned long flags;
+	int i;
 
-	set_performant_mode(h, c);
-	spin_lock_irqsave(&h->lock, flags);
-	addQ(&h->reqQ, c);
-	h->Qdepth++;
-	start_io(h);
-	spin_unlock_irqrestore(&h->lock, flags);
+	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
+		if (unresettable_controller[i] == h->board_id)
+			return 0;
+	return 1;
 }
 
-static inline void removeQ(struct CommandList *c)
+static ssize_t host_show_resettable(struct device *dev,
+	struct device_attribute *attr, char *buf)
 {
-	if (WARN_ON(hlist_unhashed(&c->list)))
-		return;
-	hlist_del_init(&c->list);
-}
+	struct ctlr_info *h;
+	struct Scsi_Host *shost = class_to_shost(dev);
 
-static inline int is_hba_lunid(unsigned char scsi3addr[])
-{
-	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
+	h = shost_to_hba(shost);
+	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h));
 }
 
 static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
@@ -359,15 +316,6 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
 	return (scsi3addr[3] & 0xC0) == 0x40;
 }
 
-static inline int is_scsi_rev_5(struct ctlr_info *h)
-{
-	if (!h->hba_inquiry_data)
-		return 0;
-	if ((h->hba_inquiry_data[2] & 0x07) == 5)
-		return 1;
-	return 0;
-}
-
 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
 	"UNKNOWN"
 };
@@ -459,6 +407,129 @@ static ssize_t unique_id_show(struct device *dev,
 			sn[12], sn[13], sn[14], sn[15]);
 }
 
+static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
+static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
+static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
+static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
+static DEVICE_ATTR(firmware_revision, S_IRUGO,
+	host_show_firmware_revision, NULL);
+static DEVICE_ATTR(commands_outstanding, S_IRUGO,
+	host_show_commands_outstanding, NULL);
+static DEVICE_ATTR(transport_mode, S_IRUGO,
+	host_show_transport_mode, NULL);
+static DEVICE_ATTR(resettable, S_IRUGO,
+	host_show_resettable, NULL);
+
+static struct device_attribute *hpsa_sdev_attrs[] = {
+	&dev_attr_raid_level,
+	&dev_attr_lunid,
+	&dev_attr_unique_id,
+	NULL,
+};
+
+static struct device_attribute *hpsa_shost_attrs[] = {
+	&dev_attr_rescan,
+	&dev_attr_firmware_revision,
+	&dev_attr_commands_outstanding,
+	&dev_attr_transport_mode,
+	&dev_attr_resettable,
+	NULL,
+};
+
+static struct scsi_host_template hpsa_driver_template = {
+	.module			= THIS_MODULE,
+	.name			= "hpsa",
+	.proc_name		= "hpsa",
+	.queuecommand		= hpsa_scsi_queue_command,
+	.scan_start		= hpsa_scan_start,
+	.scan_finished		= hpsa_scan_finished,
+	.change_queue_depth	= hpsa_change_queue_depth,
+	.this_id		= -1,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
+	.ioctl			= hpsa_ioctl,
+	.slave_alloc		= hpsa_slave_alloc,
+	.slave_destroy		= hpsa_slave_destroy,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= hpsa_compat_ioctl,
+#endif
+	.sdev_attrs = hpsa_sdev_attrs,
+	.shost_attrs = hpsa_shost_attrs,
+};
+
+
+/* Enqueuing and dequeuing functions for cmdlists. */
+static inline void addQ(struct list_head *list, struct CommandList *c)
+{
+	list_add_tail(&c->list, list);
+}
+
+static inline u32 next_command(struct ctlr_info *h)
+{
+	u32 a;
+
+	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+		return h->access.command_completed(h);
+
+	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
+		(h->reply_pool_head)++;
+		h->commands_outstanding--;
+	} else {
+		a = FIFO_EMPTY;
+	}
+	/* Check for wraparound */
+	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
+		h->reply_pool_head = h->reply_pool;
+		h->reply_pool_wraparound ^= 1;
+	}
+	return a;
+}
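The reply ring consumed by next_command() encodes freshness in the low bit of each entry: the controller flips that bit on every pass around the ring, so an entry whose bit matches the consumer's expected wraparound value is new, and emptiness needs no shared counter. A toy user-space model of the cycle-bit scheme (ring size and tag values are made up):

    #include <stdio.h>
    #include <stdint.h>

    #define RING 4
    static uint32_t ring[RING];
    static unsigned head, wrap;    /* consumer state, as in ctlr_info */

    static int ring_pop(uint32_t *out)
    {
        if ((ring[head] & 1u) != wrap)
            return 0;              /* stale entry: FIFO is empty */
        *out = ring[head];
        if (++head == RING) {      /* wraparound: expect the flipped bit */
            head = 0;
            wrap ^= 1u;
        }
        return 1;
    }

    int main(void)
    {
        /* Producer fills one pass with the current cycle bit (0 here). */
        for (unsigned i = 0; i < RING; i++)
            ring[i] = (i + 1) << 1;

        uint32_t v;
        while (ring_pop(&v))
            printf("completed tag %u\n", v >> 1);
        return 0;
    }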
+
+/* set_performant_mode: Modify the tag for cciss performant
+ * set bit 0 for pull model, bits 3-1 for block fetch
+ * register number
+ */
+static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
+{
+	if (likely(h->transMethod & CFGTBL_Trans_Performant))
+		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
+}
+
+static void enqueue_cmd_and_start_io(struct ctlr_info *h,
+	struct CommandList *c)
+{
+	unsigned long flags;
+
+	set_performant_mode(h, c);
+	spin_lock_irqsave(&h->lock, flags);
+	addQ(&h->reqQ, c);
+	h->Qdepth++;
+	start_io(h);
+	spin_unlock_irqrestore(&h->lock, flags);
+}
+
+static inline void removeQ(struct CommandList *c)
+{
+	if (WARN_ON(list_empty(&c->list)))
+		return;
+	list_del_init(&c->list);
+}
+
+static inline int is_hba_lunid(unsigned char scsi3addr[])
+{
+	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
+}
+
+static inline int is_scsi_rev_5(struct ctlr_info *h)
+{
+	if (!h->hba_inquiry_data)
+		return 0;
+	if ((h->hba_inquiry_data[2] & 0x07) == 5)
+		return 1;
+	return 0;
+}
+
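set_performant_mode() and hpsa_tag_discard_error_bits() agree on a single tag layout: bit 0 selects the pull model, bits 3-1 carry the block-fetch register index, and in performant mode the low DIRECT_LOOKUP_SHIFT bits are masked off again to recover the bus address. A toy encode/decode round trip (values arbitrary; the mask assumes the shift of 5 used by this driver):

    #include <stdio.h>
    #include <stdint.h>

    #define ERROR_BITS 0x1fu  /* (1 << DIRECT_LOOKUP_SHIFT) - 1 with shift 5 */

    static uint32_t encode_tag(uint32_t busaddr, unsigned fetch_reg)
    {
        /* bit 0: pull model; bits 3-1: block fetch register index */
        return busaddr | 1u | ((uint32_t)fetch_reg << 1);
    }

    int main(void)
    {
        uint32_t tag = encode_tag(0x1000, 3);

        printf("tag %#x: pull=%u fetch_reg=%u busaddr=%#x\n",
               tag, tag & 1u, (tag >> 1) & 0x7u, tag & ~ERROR_BITS);
        return 0;
    }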
 static int hpsa_find_target_lun(struct ctlr_info *h,
 	unsigned char scsi3addr[], int bus, int *target, int *lun)
 {
@@ -1130,6 +1201,10 @@ static void complete_scsi_command(struct CommandList *cp,
 		cmd->result = DID_TIME_OUT << 16;
 		dev_warn(&h->pdev->dev, "cp %p timedout\n", cp);
 		break;
+	case CMD_UNABORTABLE:
+		cmd->result = DID_ERROR << 16;
+		dev_warn(&h->pdev->dev, "Command unabortable\n");
+		break;
 	default:
 		cmd->result = DID_ERROR << 16;
 		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
@@ -1160,7 +1235,7 @@ static int hpsa_scsi_detect(struct ctlr_info *h)
 	sh->sg_tablesize = h->maxsgentries;
 	h->scsi_host = sh;
 	sh->hostdata[0] = (unsigned long) h;
-	sh->irq = h->intr[PERF_MODE_INT];
+	sh->irq = h->intr[h->intr_mode];
 	sh->unique_id = sh->irq;
 	error = scsi_add_host(sh, &h->pdev->dev);
 	if (error)
@@ -1295,6 +1370,9 @@ static void hpsa_scsi_interpret_error(struct CommandList *cp)
 	case CMD_TIMEOUT:
 		dev_warn(d, "cp %p timed out\n", cp);
 		break;
+	case CMD_UNABORTABLE:
+		dev_warn(d, "Command unabortable\n");
+		break;
 	default:
 		dev_warn(d, "cp %p returned unknown status %x\n", cp,
 				ei->CommandStatus);
@@ -1595,6 +1673,8 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
 	if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
 		return 0;
 
+	memset(scsi3addr, 0, 8);
+	scsi3addr[3] = target;
 	if (is_hba_lunid(scsi3addr))
 		return 0; /* Don't add the RAID controller here. */
 
@@ -1609,8 +1689,6 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
 		return 0;
 	}
 
-	memset(scsi3addr, 0, 8);
-	scsi3addr[3] = target;
 	if (hpsa_update_device_info(h, scsi3addr, this_device))
 		return 0;
 	(*nmsa2xxx_enclosures)++;
@@ -2199,7 +2277,7 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
 
 	c->cmdindex = i;
 
-	INIT_HLIST_NODE(&c->list);
+	INIT_LIST_HEAD(&c->list);
 	c->busaddr = (u32) cmd_dma_handle;
 	temp64.val = (u64) err_dma_handle;
 	c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -2237,7 +2315,7 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
 	}
 	memset(c->err_info, 0, sizeof(*c->err_info));
 
-	INIT_HLIST_NODE(&c->list);
+	INIT_LIST_HEAD(&c->list);
 	c->busaddr = (u32) cmd_dma_handle;
 	temp64.val = (u64) err_dma_handle;
 	c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -2267,7 +2345,7 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
 	pci_free_consistent(h->pdev, sizeof(*c->err_info),
 			    c->err_info, (dma_addr_t) temp64.val);
 	pci_free_consistent(h->pdev, sizeof(*c),
-			    c, (dma_addr_t) c->busaddr);
+			    c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
 }
 
 #ifdef CONFIG_COMPAT
@@ -2281,6 +2359,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
 	int err;
 	u32 cp;
 
+	memset(&arg64, 0, sizeof(arg64));
 	err = 0;
 	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
 			   sizeof(arg64.LUN_info));
@@ -2317,6 +2396,7 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
 	int err;
 	u32 cp;
 
+	memset(&arg64, 0, sizeof(arg64));
 	err = 0;
 	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
 			   sizeof(arg64.LUN_info));
@@ -2433,15 +2513,17 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
 		if (buff == NULL)
 			return -EFAULT;
-	}
-	if (iocommand.Request.Type.Direction == XFER_WRITE) {
-		/* Copy the data into the buffer we created */
-		if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
-			kfree(buff);
-			return -EFAULT;
+		if (iocommand.Request.Type.Direction == XFER_WRITE) {
+			/* Copy the data into the buffer we created */
+			if (copy_from_user(buff, iocommand.buf,
+				iocommand.buf_size)) {
+				kfree(buff);
+				return -EFAULT;
+			}
+		} else {
+			memset(buff, 0, iocommand.buf_size);
 		}
-	} else
-		memset(buff, 0, iocommand.buf_size);
+	}
 	c = cmd_special_alloc(h);
 	if (c == NULL) {
 		kfree(buff);
@@ -2487,8 +2569,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 		cmd_special_free(h, c);
 		return -EFAULT;
 	}
-
-	if (iocommand.Request.Type.Direction == XFER_READ) {
+	if (iocommand.Request.Type.Direction == XFER_READ &&
+		iocommand.buf_size > 0) {
 		/* Copy the data out of the buffer we created */
 		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
 			kfree(buff);
@@ -2581,14 +2663,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 	}
 	c->cmd_type = CMD_IOCTL_PEND;
 	c->Header.ReplyQueue = 0;
-
-	if (ioc->buf_size > 0) {
-		c->Header.SGList = sg_used;
-		c->Header.SGTotal = sg_used;
-	} else {
-		c->Header.SGList = 0;
-		c->Header.SGTotal = 0;
-	}
+	c->Header.SGList = c->Header.SGTotal = sg_used;
 	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
 	c->Header.Tag.lower = c->busaddr;
 	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
@@ -2605,7 +2680,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 		}
 	}
 	hpsa_scsi_do_simple_cmd_core(h, c);
-	hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
+	if (sg_used)
+		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
 	check_ioctl_unit_attention(h, c);
 	/* Copy the error information out */
 	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
@@ -2614,7 +2690,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 		status = -EFAULT;
 		goto cleanup1;
 	}
-	if (ioc->Request.Type.Direction == XFER_READ) {
+	if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
 		/* Copy the data out of the buffer we created */
 		BYTE __user *ptr = ioc->buf;
 		for (i = 0; i < sg_used; i++) {
@@ -2810,8 +2886,8 @@ static void start_io(struct ctlr_info *h)
 {
 	struct CommandList *c;
 
-	while (!hlist_empty(&h->reqQ)) {
-		c = hlist_entry(h->reqQ.first, struct CommandList, list);
+	while (!list_empty(&h->reqQ)) {
+		c = list_entry(h->reqQ.next, struct CommandList, list);
 		/* can't do anything if fifo is full */
 		if ((h->access.fifo_full(h))) {
 			dev_warn(&h->pdev->dev, "fifo full\n");
@@ -2867,20 +2943,22 @@ static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
 
 static inline u32 hpsa_tag_contains_index(u32 tag)
 {
-#define DIRECT_LOOKUP_BIT 0x10
 	return tag & DIRECT_LOOKUP_BIT;
 }
 
 static inline u32 hpsa_tag_to_index(u32 tag)
 {
-#define DIRECT_LOOKUP_SHIFT 5
 	return tag >> DIRECT_LOOKUP_SHIFT;
 }
 
-static inline u32 hpsa_tag_discard_error_bits(u32 tag)
+
+static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
 {
-#define HPSA_ERROR_BITS 0x03
-	return tag & ~HPSA_ERROR_BITS;
+#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
+#define HPSA_SIMPLE_ERROR_BITS 0x03
+	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+		return tag & ~HPSA_SIMPLE_ERROR_BITS;
+	return tag & ~HPSA_PERF_ERROR_BITS;
 }
 
 /* process completion of an indexed ("direct lookup") command */
@@ -2904,10 +2982,9 @@ static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
 {
 	u32 tag;
 	struct CommandList *c = NULL;
-	struct hlist_node *tmp;
 
-	tag = hpsa_tag_discard_error_bits(raw_tag);
-	hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+	tag = hpsa_tag_discard_error_bits(h, raw_tag);
+	list_for_each_entry(c, &h->cmpQ, list) {
 		if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
 			finish_cmd(c, raw_tag);
 			return next_command(h);
@@ -2957,7 +3034,10 @@ static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-/* Send a message CDB to the firmware. */
+/* Send a message CDB to the firmware. Careful, this only works
+ * in simple mode, not performant mode due to the tag lookup.
+ * We only ever use this immediately after a controller reset.
+ */
 static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
 						unsigned char type)
 {
@@ -3023,7 +3103,7 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
 
 	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
 		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
-		if (hpsa_tag_discard_error_bits(tag) == paddr32)
+		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
 			break;
 		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
 	}
@@ -3055,38 +3135,6 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
 #define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
 #define hpsa_noop(p) hpsa_message(p, 3, 0)
 
-static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
-{
-/* the #defines are stolen from drivers/pci/msi.h. */
-#define msi_control_reg(base)		(base + PCI_MSI_FLAGS)
-#define PCI_MSIX_FLAGS_ENABLE		(1 << 15)
-
-	int pos;
-	u16 control = 0;
-
-	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
-	if (pos) {
-		pci_read_config_word(pdev, msi_control_reg(pos), &control);
-		if (control & PCI_MSI_FLAGS_ENABLE) {
-			dev_info(&pdev->dev, "resetting MSI\n");
-			pci_write_config_word(pdev, msi_control_reg(pos),
-					control & ~PCI_MSI_FLAGS_ENABLE);
-		}
-	}
-
-	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
-	if (pos) {
-		pci_read_config_word(pdev, msi_control_reg(pos), &control);
-		if (control & PCI_MSIX_FLAGS_ENABLE) {
-			dev_info(&pdev->dev, "resetting MSI-X\n");
-			pci_write_config_word(pdev, msi_control_reg(pos),
-					control & ~PCI_MSIX_FLAGS_ENABLE);
-		}
-	}
-
-	return 0;
-}
-
 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
 	void * __iomem vaddr, bool use_doorbell)
 {
@@ -3142,17 +3190,17 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
  */
 static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
 {
-	u16 saved_config_space[32];
 	u64 cfg_offset;
 	u32 cfg_base_addr;
 	u64 cfg_base_addr_index;
 	void __iomem *vaddr;
 	unsigned long paddr;
 	u32 misc_fw_support, active_transport;
-	int rc, i;
+	int rc;
 	struct CfgTable __iomem *cfgtable;
 	bool use_doorbell;
 	u32 board_id;
+	u16 command_register;
 
 	/* For controllers as old as the P600, this is very nearly
 	 * the same thing as
@@ -3162,14 +3210,6 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
 	 * pci_set_power_state(pci_dev, PCI_D0);
 	 * pci_restore_state(pci_dev);
 	 *
-	 * but we can't use these nice canned kernel routines on
-	 * kexec, because they also check the MSI/MSI-X state in PCI
-	 * configuration space and do the wrong thing when it is
-	 * set/cleared.  Also, the pci_save/restore_state functions
-	 * violate the ordering requirements for restoring the
-	 * configuration space from the CCISS document (see the
-	 * comment below).  So we roll our own ....
-	 *
 	 * For controllers newer than the P600, the pci power state
 	 * method of resetting doesn't work so we have another way
 	 * using the doorbell register.
@@ -3182,13 +3222,21 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
 	 * likely not be happy.  Just forbid resetting this conjoined mess.
 	 * The 640x isn't really supported by hpsa anyway.
 	 */
-	hpsa_lookup_board_id(pdev, &board_id);
+	rc = hpsa_lookup_board_id(pdev, &board_id);
+	if (rc < 0) {
+		dev_warn(&pdev->dev, "Not resetting device.\n");
+		return -ENODEV;
+	}
 	if (board_id == 0x409C0E11 || board_id == 0x409D0E11)
 		return -ENOTSUPP;
 
-	for (i = 0; i < 32; i++)
-		pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
-
+	/* Save the PCI command register */
+	pci_read_config_word(pdev, 4, &command_register);
+	/* Turn the board off.  This is so that later pci_restore_state()
+	 * won't turn the board on before the rest of config space is ready.
+	 */
+	pci_disable_device(pdev);
+	pci_save_state(pdev);
 
 	/* find the first memory BAR, so we can find the cfg table */
 	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
@@ -3214,46 +3262,47 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
 	misc_fw_support = readl(&cfgtable->misc_fw_support);
 	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
 
-	/* The doorbell reset seems to cause lockups on some Smart
-	 * Arrays (e.g. P410, P410i, maybe others).  Until this is
-	 * fixed or at least isolated, avoid the doorbell reset.
-	 */
-	use_doorbell = 0;
-
 	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
 	if (rc)
 		goto unmap_cfgtable;
 
-	/* Restore the PCI configuration space.  The Open CISS
-	 * Specification says, "Restore the PCI Configuration
-	 * Registers, offsets 00h through 60h. It is important to
-	 * restore the command register, 16-bits at offset 04h,
-	 * last. Do not restore the configuration status register,
-	 * 16-bits at offset 06h."  Note that the offset is 2*i.
-	 */
-	for (i = 0; i < 32; i++) {
-		if (i == 2 || i == 3)
-			continue;
-		pci_write_config_word(pdev, 2*i, saved_config_space[i]);
+	pci_restore_state(pdev);
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		dev_warn(&pdev->dev, "failed to enable device.\n");
+		goto unmap_cfgtable;
 	}
 	}
-	wmb();
-	pci_write_config_word(pdev, 4, saved_config_space[2]);
+	pci_write_config_word(pdev, 4, command_register);
 
 
 	/* Some devices (notably the HP Smart Array 5i Controller)
 	/* Some devices (notably the HP Smart Array 5i Controller)
 	   need a little pause here */
 	   need a little pause here */
 	msleep(HPSA_POST_RESET_PAUSE_MSECS);
 	msleep(HPSA_POST_RESET_PAUSE_MSECS);
 
 
+	/* Wait for board to become not ready, then ready. */
+	dev_info(&pdev->dev, "Waiting for board to become ready.\n");
+	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
+	if (rc)
+		dev_warn(&pdev->dev,
+			"failed waiting for board to become not ready\n");
+	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
+	if (rc) {
+		dev_warn(&pdev->dev,
+			"failed waiting for board to become ready\n");
+		goto unmap_cfgtable;
+	}
+	dev_info(&pdev->dev, "board ready.\n");
+
 	/* Controller should be in simple mode at this point.  If it's not,
 	/* Controller should be in simple mode at this point.  If it's not,
 	 * It means we're on one of those controllers which doesn't support
 	 * It means we're on one of those controllers which doesn't support
 	 * the doorbell reset method and on which the PCI power management reset
 	 * the doorbell reset method and on which the PCI power management reset
 	 * method doesn't work (P800, for example.)
 	 * method doesn't work (P800, for example.)
-	 * In those cases, pretend the reset worked and hope for the best.
+	 * In those cases, don't try to proceed, as it generally doesn't work.
 	 */
 	 */
 	active_transport = readl(&cfgtable->TransportActive);
 	active_transport = readl(&cfgtable->TransportActive);
 	if (active_transport & PERFORMANT_MODE) {
 	if (active_transport & PERFORMANT_MODE) {
 		dev_warn(&pdev->dev, "Unable to successfully reset controller,"
 		dev_warn(&pdev->dev, "Unable to successfully reset controller,"
-			" proceeding anyway.\n");
-		rc = -ENOTSUPP;
+			" Ignoring controller.\n");
+		rc = -ENODEV;
 	}
 	}
 
 
 unmap_cfgtable:
 unmap_cfgtable:
@@ -3386,7 +3435,7 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
 default_int_mode:
 default_int_mode:
 #endif				/* CONFIG_PCI_MSI */
 #endif				/* CONFIG_PCI_MSI */
 	/* if we get here we're going to use the default interrupt mode */
 	/* if we get here we're going to use the default interrupt mode */
-	h->intr[PERF_MODE_INT] = h->pdev->irq;
+	h->intr[h->intr_mode] = h->pdev->irq;
 }
 }
 
 
 static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
 static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
@@ -3438,18 +3487,28 @@ static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
 	return -ENODEV;
 	return -ENODEV;
 }
 }
 
 
-static int __devinit hpsa_wait_for_board_ready(struct ctlr_info *h)
+static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
+	void __iomem *vaddr, int wait_for_ready)
 {
 {
-	int i;
+	int i, iterations;
 	u32 scratchpad;
 	u32 scratchpad;
+	if (wait_for_ready)
+		iterations = HPSA_BOARD_READY_ITERATIONS;
+	else
+		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
 
 
-	for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
-		scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
-		if (scratchpad == HPSA_FIRMWARE_READY)
-			return 0;
+	for (i = 0; i < iterations; i++) {
+		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
+		if (wait_for_ready) {
+			if (scratchpad == HPSA_FIRMWARE_READY)
+				return 0;
+		} else {
+			if (scratchpad != HPSA_FIRMWARE_READY)
+				return 0;
+		}
 		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
 		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
 	}
 	}
-	dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
+	dev_warn(&pdev->dev, "board not ready, timed out.\n");
 	return -ENODEV;
 	return -ENODEV;
 }
 }
 
 
@@ -3497,6 +3556,11 @@ static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
 static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
 static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
 {
 {
 	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
 	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+
+	/* Limit commands in memory limited kdump scenario. */
+	if (reset_devices && h->max_commands > 32)
+		h->max_commands = 32;
+
 	if (h->max_commands < 16) {
 	if (h->max_commands < 16) {
 		dev_warn(&h->pdev->dev, "Controller reports "
 		dev_warn(&h->pdev->dev, "Controller reports "
 			"max supported commands of %d, an obvious lie. "
 			"max supported commands of %d, an obvious lie. "
@@ -3571,16 +3635,21 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
 static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
 static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
 {
 {
 	int i;
 	int i;
+	u32 doorbell_value;
+	unsigned long flags;
 
 
 	/* under certain very rare conditions, this can take awhile.
 	/* under certain very rare conditions, this can take awhile.
 	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
 	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
 	 * as we enter this code.)
 	 * as we enter this code.)
 	 */
 	 */
 	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
 	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
-		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
+		spin_lock_irqsave(&h->lock, flags);
+		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
+		spin_unlock_irqrestore(&h->lock, flags);
+		if (!(doorbell_value & CFGTBL_ChangeReq))
 			break;
 			break;
 		/* delay and try again */
 		/* delay and try again */
-		msleep(10);
+		usleep_range(10000, 20000);
 	}
 	}
 }
 }
 
 
@@ -3603,6 +3672,7 @@ static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
 			"unable to get board into simple mode\n");
 			"unable to get board into simple mode\n");
 		return -ENODEV;
 		return -ENODEV;
 	}
 	}
+	h->transMethod = CFGTBL_Trans_Simple;
 	return 0;
 	return 0;
 }
 }
 
 
@@ -3641,7 +3711,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
 		err = -ENOMEM;
 		err = -ENOMEM;
 		goto err_out_free_res;
 		goto err_out_free_res;
 	}
 	}
-	err = hpsa_wait_for_board_ready(h);
+	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
 	if (err)
 	if (err)
 		goto err_out_free_res;
 		goto err_out_free_res;
 	err = hpsa_find_cfgtables(h);
 	err = hpsa_find_cfgtables(h);
@@ -3710,8 +3780,6 @@ static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
 		return 0; /* just try to do the kdump anyhow. */
 		return 0; /* just try to do the kdump anyhow. */
 	if (rc)
 	if (rc)
 		return -ENODEV;
 		return -ENODEV;
-	if (hpsa_reset_msi(pdev))
-		return -ENODEV;
 
 
 	/* Now try to get the controller to respond to a no-op */
 	/* Now try to get the controller to respond to a no-op */
 	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
 	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
@@ -3749,8 +3817,11 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
 
 
 	h->pdev = pdev;
 	h->pdev = pdev;
 	h->busy_initializing = 1;
 	h->busy_initializing = 1;
-	INIT_HLIST_HEAD(&h->cmpQ);
-	INIT_HLIST_HEAD(&h->reqQ);
+	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
+	INIT_LIST_HEAD(&h->cmpQ);
+	INIT_LIST_HEAD(&h->reqQ);
+	spin_lock_init(&h->lock);
+	spin_lock_init(&h->scan_lock);
 	rc = hpsa_pci_init(h);
 	rc = hpsa_pci_init(h);
 	if (rc != 0)
 	if (rc != 0)
 		goto clean1;
 		goto clean1;
@@ -3777,20 +3848,20 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
 
 
 	if (h->msix_vector || h->msi_vector)
 	if (h->msix_vector || h->msi_vector)
-		rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_msi,
+		rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_msi,
 				IRQF_DISABLED, h->devname, h);
 				IRQF_DISABLED, h->devname, h);
 	else
 	else
-		rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_intx,
+		rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_intx,
 				IRQF_DISABLED, h->devname, h);
 				IRQF_DISABLED, h->devname, h);
 	if (rc) {
 	if (rc) {
 		dev_err(&pdev->dev, "unable to get irq %d for %s\n",
 		dev_err(&pdev->dev, "unable to get irq %d for %s\n",
-		       h->intr[PERF_MODE_INT], h->devname);
+		       h->intr[h->intr_mode], h->devname);
 		goto clean2;
 		goto clean2;
 	}
 	}
 
 
 	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
 	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
 	       h->devname, pdev->device,
 	       h->devname, pdev->device,
-	       h->intr[PERF_MODE_INT], dac ? "" : " not");
+	       h->intr[h->intr_mode], dac ? "" : " not");
 
 
 	h->cmd_pool_bits =
 	h->cmd_pool_bits =
 	    kmalloc(((h->nr_cmds + BITS_PER_LONG -
 	    kmalloc(((h->nr_cmds + BITS_PER_LONG -
@@ -3810,8 +3881,6 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
 	}
 	}
 	if (hpsa_allocate_sg_chain_blocks(h))
 	if (hpsa_allocate_sg_chain_blocks(h))
 		goto clean4;
 		goto clean4;
-	spin_lock_init(&h->lock);
-	spin_lock_init(&h->scan_lock);
 	init_waitqueue_head(&h->scan_wait_queue);
 	init_waitqueue_head(&h->scan_wait_queue);
 	h->scan_finished = 1; /* no scan currently in progress */
 	h->scan_finished = 1; /* no scan currently in progress */
 
 
@@ -3843,7 +3912,7 @@ clean4:
 			    h->nr_cmds * sizeof(struct ErrorInfo),
 			    h->nr_cmds * sizeof(struct ErrorInfo),
 			    h->errinfo_pool,
 			    h->errinfo_pool,
 			    h->errinfo_pool_dhandle);
 			    h->errinfo_pool_dhandle);
-	free_irq(h->intr[PERF_MODE_INT], h);
+	free_irq(h->intr[h->intr_mode], h);
 clean2:
 clean2:
 clean1:
 clean1:
 	h->busy_initializing = 0;
 	h->busy_initializing = 0;
@@ -3887,7 +3956,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
 	 */
 	 */
 	hpsa_flush_cache(h);
 	hpsa_flush_cache(h);
 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
-	free_irq(h->intr[PERF_MODE_INT], h);
+	free_irq(h->intr[h->intr_mode], h);
 #ifdef CONFIG_PCI_MSI
 #ifdef CONFIG_PCI_MSI
 	if (h->msix_vector)
 	if (h->msix_vector)
 		pci_disable_msix(h->pdev);
 		pci_disable_msix(h->pdev);
@@ -3989,7 +4058,8 @@ static void  calc_bucket_map(int bucket[], int num_buckets,
 	}
 	}
 }
 }
 
 
-static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
+static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
+	u32 use_short_tags)
 {
 {
 	int i;
 	int i;
 	unsigned long register_value;
 	unsigned long register_value;
@@ -4037,7 +4107,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
 	writel(0, &h->transtable->RepQCtrAddrHigh32);
 	writel(0, &h->transtable->RepQCtrAddrHigh32);
 	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
 	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
 	writel(0, &h->transtable->RepQAddr0High32);
 	writel(0, &h->transtable->RepQAddr0High32);
-	writel(CFGTBL_Trans_Performant,
+	writel(CFGTBL_Trans_Performant | use_short_tags,
 		&(h->cfgtable->HostWrite.TransportRequest));
 		&(h->cfgtable->HostWrite.TransportRequest));
 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
 	hpsa_wait_for_mode_change_ack(h);
 	hpsa_wait_for_mode_change_ack(h);
@@ -4047,12 +4117,18 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
 					" performant mode\n");
 					" performant mode\n");
 		return;
 		return;
 	}
 	}
+	/* Change the access methods to the performant access methods */
+	h->access = SA5_performant_access;
+	h->transMethod = CFGTBL_Trans_Performant;
 }
 }
 
 
 static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
 static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
 {
 {
 	u32 trans_support;
 	u32 trans_support;
 
 
+	if (hpsa_simple_mode)
+		return;
+
 	trans_support = readl(&(h->cfgtable->TransportSupport));
 	trans_support = readl(&(h->cfgtable->TransportSupport));
 	if (!(trans_support & PERFORMANT_MODE))
 	if (!(trans_support & PERFORMANT_MODE))
 		return;
 		return;
@@ -4072,11 +4148,8 @@ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
 		|| (h->blockFetchTable == NULL))
 		|| (h->blockFetchTable == NULL))
 		goto clean_up;
 		goto clean_up;
 
 
-	hpsa_enter_performant_mode(h);
-
-	/* Change the access methods to the performant access methods */
-	h->access = SA5_performant_access;
-	h->transMethod = CFGTBL_Trans_Performant;
+	hpsa_enter_performant_mode(h,
+		trans_support & CFGTBL_Trans_use_short_tags);
 
 
 	return;
 	return;
 
 

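The reworked reset path above waits for the board to leave the ready state and then re-enter it, polling a scratchpad register on a fixed budget. A minimal standalone sketch of that bounded-poll pattern, with read_scratchpad() as a hypothetical stand-in for the readl() of SA5_SCRATCHPAD_OFFSET and constants mirroring the hpsa defaults:

#include <stdbool.h>
#include <unistd.h>

#define FIRMWARE_READY		0xffff0000u	/* stand-in for HPSA_FIRMWARE_READY */
#define POLL_INTERVAL_MS	100
#define READY_ITERATIONS	1200		/* 120 s / 100 ms */
#define NOT_READY_ITERATIONS	100		/* 10 s / 100 ms */

/* Hypothetical register read; models readl(vaddr + SA5_SCRATCHPAD_OFFSET). */
static unsigned int read_scratchpad(void)
{
	return FIRMWARE_READY;
}

/* Poll until the board reaches the requested state or the budget expires. */
static int wait_for_board_state(bool wait_for_ready)
{
	int i;
	int iterations = wait_for_ready ? READY_ITERATIONS : NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		bool ready = (read_scratchpad() == FIRMWARE_READY);
		if (ready == wait_for_ready)
			return 0;
		usleep(POLL_INTERVAL_MS * 1000);
	}
	return -1;	/* timed out, as the -ENODEV path above does */
}

Waiting for not-ready first guards against declaring success while the controller is still tearing down from the reset.
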
+ 7 - 2
drivers/scsi/hpsa.h

@@ -72,11 +72,12 @@ struct ctlr_info {
 	unsigned int intr[4];
 	unsigned int msix_vector;
 	unsigned int msi_vector;
+	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
 	struct access_method access;
 
 	/* queue and queue Info */
-	struct hlist_head reqQ;
-	struct hlist_head cmpQ;
+	struct list_head reqQ;
+	struct list_head cmpQ;
 	unsigned int Qdepth;
 	unsigned int maxQsinceinit;
 	unsigned int maxSG;
@@ -154,12 +155,16 @@ struct ctlr_info {
  * HPSA_BOARD_READY_ITERATIONS are derived from those.
  */
 #define HPSA_BOARD_READY_WAIT_SECS (120)
+#define HPSA_BOARD_NOT_READY_WAIT_SECS (10)
 #define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
 #define HPSA_BOARD_READY_POLL_INTERVAL \
	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
 #define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
+#define HPSA_BOARD_NOT_READY_ITERATIONS \
+	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
+		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
 #define HPSA_POST_RESET_PAUSE_MSECS (3000)
 #define HPSA_POST_RESET_NOOP_RETRIES (12)
 

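As a quick sanity check (mine, not the patch's): the derived iteration counts fall straight out of the macro arithmetic, e.g. the new not-ready budget is (10 * 1000) / 100 = 100 polls of 100 ms each:

#include <stdio.h>

#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (10)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)

int main(void)
{
	/* prints "ready: 1200, not ready: 100" */
	printf("ready: %d, not ready: %d\n",
	       HPSA_BOARD_READY_ITERATIONS, HPSA_BOARD_NOT_READY_ITERATIONS);
	return 0;
}
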
+ 3 - 1
drivers/scsi/hpsa_cmd.h

@@ -104,6 +104,7 @@
 
 #define CFGTBL_Trans_Simple     0x00000002l
 #define CFGTBL_Trans_Performant 0x00000004l
+#define CFGTBL_Trans_use_short_tags 0x20000000l
 
 #define CFGTBL_BusType_Ultra2   0x00000001l
 #define CFGTBL_BusType_Ultra3   0x00000002l
@@ -265,6 +266,7 @@ struct ErrorInfo {
 
 #define DIRECT_LOOKUP_SHIFT 5
 #define DIRECT_LOOKUP_BIT 0x10
+#define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1))
 
 #define HPSA_ERROR_BIT          0x02
 struct ctlr_info; /* defined in hpsa.h */
@@ -291,7 +293,7 @@ struct CommandList {
 	struct ctlr_info	   *h;
 	int			   cmd_type;
 	long			   cmdindex;
-	struct hlist_node list;
+	struct list_head list;
 	struct request *rq;
 	struct completion *waiting;
 	void   *scsi_cmd;

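The new DIRECT_LOOKUP_MASK clears the low DIRECT_LOOKUP_SHIFT bits of a tag, i.e. ~((1 << 5) - 1) == 0xffffffe0. A standalone check of that bit layout (the tag encoding below is illustrative, not lifted from hpsa):

#include <assert.h>
#include <stdio.h>

#define DIRECT_LOOKUP_SHIFT 5
#define DIRECT_LOOKUP_BIT 0x10
#define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1))

int main(void)
{
	/* hypothetical tag: command index in the high bits, lookup bit below */
	unsigned int tag = (1234u << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;

	/* masking strips the flag bits so the index round-trips cleanly */
	assert(((tag & DIRECT_LOOKUP_MASK) >> DIRECT_LOOKUP_SHIFT) == 1234u);
	printf("mask = 0x%x\n", (unsigned int)DIRECT_LOOKUP_MASK);
	return 0;
}
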
+ 6 - 3
drivers/scsi/ipr.c

@@ -1301,7 +1301,7 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
 			ipr_clear_res_target(res);
 			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
 		}
-	} else if (!res->sdev) {
+	} else if (!res->sdev || res->del_from_ml) {
 		res->add_to_ml = 1;
 		if (ioa_cfg->allow_ml_add_del)
 			schedule_work(&ioa_cfg->work_q);
@@ -3104,7 +3104,10 @@ restart:
 				did_work = 1;
 				sdev = res->sdev;
 				if (!scsi_device_get(sdev)) {
-					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+					if (!res->add_to_ml)
+						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+					else
+						res->del_from_ml = 0;
 					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 					scsi_remove_device(sdev);
 					scsi_device_put(sdev);
@@ -8864,7 +8867,7 @@ static void __ipr_remove(struct pci_dev *pdev)
 
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
-	flush_scheduled_work();
+	flush_work_sync(&ioa_cfg->work_q);
 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
 
 	spin_lock(&ipr_driver_lock);

+ 67 - 67
drivers/scsi/iscsi_tcp.c

@@ -608,54 +608,12 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
 	iscsi_sw_tcp_release_conn(conn);
 }
 
-static int iscsi_sw_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
-				 char *buf, int *port,
-				 int (*getname)(struct socket *,
-						struct sockaddr *,
-						int *addrlen))
-{
-	struct sockaddr_storage *addr;
-	struct sockaddr_in6 *sin6;
-	struct sockaddr_in *sin;
-	int rc = 0, len;
-
-	addr = kmalloc(sizeof(*addr), GFP_KERNEL);
-	if (!addr)
-		return -ENOMEM;
-
-	if (getname(sock, (struct sockaddr *) addr, &len)) {
-		rc = -ENODEV;
-		goto free_addr;
-	}
-
-	switch (addr->ss_family) {
-	case AF_INET:
-		sin = (struct sockaddr_in *)addr;
-		spin_lock_bh(&conn->session->lock);
-		sprintf(buf, "%pI4", &sin->sin_addr.s_addr);
-		*port = be16_to_cpu(sin->sin_port);
-		spin_unlock_bh(&conn->session->lock);
-		break;
-	case AF_INET6:
-		sin6 = (struct sockaddr_in6 *)addr;
-		spin_lock_bh(&conn->session->lock);
-		sprintf(buf, "%pI6", &sin6->sin6_addr);
-		*port = be16_to_cpu(sin6->sin6_port);
-		spin_unlock_bh(&conn->session->lock);
-		break;
-	}
-free_addr:
-	kfree(addr);
-	return rc;
-}
-
 static int
 iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
 		       struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
 		       int is_leading)
 {
-	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
-	struct iscsi_host *ihost = shost_priv(shost);
+	struct iscsi_session *session = cls_session->dd_data;
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
@@ -670,27 +628,15 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
 				  "sockfd_lookup failed %d\n", err);
 		return -EEXIST;
 	}
-	/*
-	 * copy these values now because if we drop the session
-	 * userspace may still want to query the values since we will
-	 * be using them for the reconnect
-	 */
-	err = iscsi_sw_tcp_get_addr(conn, sock, conn->portal_address,
-				    &conn->portal_port, kernel_getpeername);
-	if (err)
-		goto free_socket;
-
-	err = iscsi_sw_tcp_get_addr(conn, sock, ihost->local_address,
-				    &ihost->local_port, kernel_getsockname);
-	if (err)
-		goto free_socket;
 
 	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
 	if (err)
 		goto free_socket;
 
+	spin_lock_bh(&session->lock);
 	/* bind iSCSI connection and socket */
 	tcp_sw_conn->sock = sock;
+	spin_unlock_bh(&session->lock);
 
 	/* setup Socket parameters */
 	sk = sock->sk;
@@ -752,24 +698,74 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
 				       enum iscsi_param param, char *buf)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
-	int len;
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+	struct sockaddr_in6 addr;
+	int rc, len;
 
 	switch(param) {
 	case ISCSI_PARAM_CONN_PORT:
-		spin_lock_bh(&conn->session->lock);
-		len = sprintf(buf, "%hu\n", conn->portal_port);
-		spin_unlock_bh(&conn->session->lock);
-		break;
 	case ISCSI_PARAM_CONN_ADDRESS:
 		spin_lock_bh(&conn->session->lock);
-		len = sprintf(buf, "%s\n", conn->portal_address);
+		if (!tcp_sw_conn || !tcp_sw_conn->sock) {
+			spin_unlock_bh(&conn->session->lock);
+			return -ENOTCONN;
+		}
+		rc = kernel_getpeername(tcp_sw_conn->sock,
+					(struct sockaddr *)&addr, &len);
 		spin_unlock_bh(&conn->session->lock);
-		break;
+		if (rc)
+			return rc;
+
+		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+						 &addr, param, buf);
 	default:
 		return iscsi_conn_get_param(cls_conn, param, buf);
 	}
 
-	return len;
+	return 0;
+}
+
+static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
+				       enum iscsi_host_param param, char *buf)
+{
+	struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
+	struct iscsi_session *session = tcp_sw_host->session;
+	struct iscsi_conn *conn;
+	struct iscsi_tcp_conn *tcp_conn;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn;
+	struct sockaddr_in6 addr;
+	int rc, len;
+
+	switch (param) {
+	case ISCSI_HOST_PARAM_IPADDRESS:
+		spin_lock_bh(&session->lock);
+		conn = session->leadconn;
+		if (!conn) {
+			spin_unlock_bh(&session->lock);
+			return -ENOTCONN;
+		}
+		tcp_conn = conn->dd_data;
+
+		tcp_sw_conn = tcp_conn->dd_data;
+		if (!tcp_sw_conn->sock) {
+			spin_unlock_bh(&session->lock);
+			return -ENOTCONN;
+		}
+
+		rc = kernel_getsockname(tcp_sw_conn->sock,
+					(struct sockaddr *)&addr, &len);
+		spin_unlock_bh(&session->lock);
+		if (rc)
+			return rc;
+
+		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+						 &addr, param, buf);
+	default:
+		return iscsi_host_get_param(shost, param, buf);
+	}
+
+	return 0;
 }
 
 static void
@@ -797,6 +793,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
 {
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
+	struct iscsi_sw_tcp_host *tcp_sw_host;
 	struct Scsi_Host *shost;
 
 	if (ep) {
@@ -804,7 +801,8 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
 		return NULL;
 	}
 
-	shost = iscsi_host_alloc(&iscsi_sw_tcp_sht, 0, 1);
+	shost = iscsi_host_alloc(&iscsi_sw_tcp_sht,
+				 sizeof(struct iscsi_sw_tcp_host), 1);
 	if (!shost)
 		return NULL;
 	shost->transportt = iscsi_sw_tcp_scsi_transport;
@@ -825,6 +823,8 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
 	if (!cls_session)
 		goto remove_host;
 	session = cls_session->dd_data;
+	tcp_sw_host = iscsi_host_priv(shost);
+	tcp_sw_host->session = session;
 
 	shost->can_queue = session->scsi_cmds_max;
 	if (iscsi_tcp_r2tpool_alloc(session))
@@ -929,7 +929,7 @@ static struct iscsi_transport iscsi_sw_tcp_transport = {
 	.start_conn		= iscsi_conn_start,
 	.stop_conn		= iscsi_sw_tcp_conn_stop,
 	/* iscsi host params */
-	.get_host_param		= iscsi_host_get_param,
+	.get_host_param		= iscsi_sw_tcp_host_get_param,
 	.set_host_param		= iscsi_host_set_param,
 	/* IO */
 	.send_pdu		= iscsi_conn_send_pdu,

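The iscsi_tcp rework above stops caching the portal address at bind time and instead queries the live socket whenever ISCSI_PARAM_CONN_ADDRESS is read, so a reconnect can never serve stale data. A rough userspace analogue of that fetch-on-read pattern using getpeername(2) (function and buffer names here are made up):

#include <arpa/inet.h>
#include <stdio.h>
#include <sys/socket.h>

/* Fetch the peer address from the live socket instead of a stale cache. */
static int conn_get_address(int sockfd, char *buf, size_t buflen)
{
	struct sockaddr_storage addr;
	socklen_t len = sizeof(addr);

	if (getpeername(sockfd, (struct sockaddr *)&addr, &len))
		return -1;	/* e.g. not connected, like -ENOTCONN above */

	if (addr.ss_family == AF_INET) {
		struct sockaddr_in *sin = (struct sockaddr_in *)&addr;
		inet_ntop(AF_INET, &sin->sin_addr, buf, buflen);
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&addr;
		inet_ntop(AF_INET6, &sin6->sin6_addr, buf, buflen);
	}
	return 0;
}

The kernel version additionally takes the session lock around the lookup, since the socket pointer can be swapped during a reconnect.
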
+ 4 - 0
drivers/scsi/iscsi_tcp.h

@@ -55,6 +55,10 @@ struct iscsi_sw_tcp_conn {
 	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
 };
 
+struct iscsi_sw_tcp_host {
+	struct iscsi_session	*session;
+};
+
 struct iscsi_sw_tcp_hdrbuf {
 	struct iscsi_hdr	hdrbuf;
 	char			hdrextbuf[ISCSI_MAX_AHS_SIZE +

+ 82 - 29
drivers/scsi/libfc/fc_exch.c

@@ -38,7 +38,7 @@ u16	fc_cpu_mask;		/* cpu mask for possible cpus */
 EXPORT_SYMBOL(fc_cpu_mask);
 static u16	fc_cpu_order;	/* 2's power to represent total possible cpus */
 static struct kmem_cache *fc_em_cachep;	       /* cache for exchanges */
-struct workqueue_struct *fc_exch_workqueue;
+static struct workqueue_struct *fc_exch_workqueue;
 
 /*
  * Structure and function definitions for managing Fibre Channel Exchanges
@@ -558,6 +558,22 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
 	return sp;
 }
 
+/*
+ * Set the response handler for the exchange associated with a sequence.
+ */
+static void fc_seq_set_resp(struct fc_seq *sp,
+			    void (*resp)(struct fc_seq *, struct fc_frame *,
+					 void *),
+			    void *arg)
+{
+	struct fc_exch *ep = fc_seq_exch(sp);
+
+	spin_lock_bh(&ep->ex_lock);
+	ep->resp = resp;
+	ep->arg = arg;
+	spin_unlock_bh(&ep->ex_lock);
+}
+
 /**
  * fc_seq_exch_abort() - Abort an exchange and sequence
  * @req_sp:	The sequence to be aborted
@@ -650,13 +666,10 @@ static void fc_exch_timeout(struct work_struct *work)
 		if (e_stat & ESB_ST_ABNORMAL)
 			rc = fc_exch_done_locked(ep);
 		spin_unlock_bh(&ep->ex_lock);
+		if (!rc)
+			fc_exch_delete(ep);
 		if (resp)
 			resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
-		if (!rc) {
-			/* delete the exchange if it's already being aborted */
-			fc_exch_delete(ep);
-			return;
-		}
 		fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
 		goto done;
 	}
@@ -1266,6 +1279,8 @@ free:
  * @fp:    The request frame
 *
 * On success, the sequence pointer will be returned and also in fr_seq(@fp).
+ * A reference will be held on the exchange/sequence for the caller, which
+ * must call fc_seq_release().
 */
 static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
 {
@@ -1282,6 +1297,15 @@ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
 	return fr_seq(fp);
 }
 
+/**
+ * fc_seq_release() - Release the hold
+ * @sp:    The sequence.
+ */
+static void fc_seq_release(struct fc_seq *sp)
+{
+	fc_exch_release(fc_seq_exch(sp));
+}
+
 /**
  * fc_exch_recv_req() - Handler for an incoming request
  * @lport: The local port that received the request
@@ -2151,6 +2175,7 @@ err:
 		fc_exch_mgr_del(ema);
 	return -ENOMEM;
 }
+EXPORT_SYMBOL(fc_exch_mgr_list_clone);
 
 /**
  * fc_exch_mgr_alloc() - Allocate an exchange manager
@@ -2253,17 +2278,46 @@ void fc_exch_mgr_free(struct fc_lport *lport)
 }
 EXPORT_SYMBOL(fc_exch_mgr_free);
 
+/**
+ * fc_find_ema() - Lookup and return appropriate Exchange Manager Anchor depending
+ * upon 'xid'.
+ * @f_ctl: f_ctl
+ * @lport: The local port the frame was received on
+ * @fh: The received frame header
+ */
+static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl,
+					      struct fc_lport *lport,
+					      struct fc_frame_header *fh)
+{
+	struct fc_exch_mgr_anchor *ema;
+	u16 xid;
+
+	if (f_ctl & FC_FC_EX_CTX)
+		xid = ntohs(fh->fh_ox_id);
+	else {
+		xid = ntohs(fh->fh_rx_id);
+		if (xid == FC_XID_UNKNOWN)
+			return list_entry(lport->ema_list.prev,
+					  typeof(*ema), ema_list);
+	}
+
+	list_for_each_entry(ema, &lport->ema_list, ema_list) {
+		if ((xid >= ema->mp->min_xid) &&
+		    (xid <= ema->mp->max_xid))
+			return ema;
+	}
+	return NULL;
+}
 /**
  * fc_exch_recv() - Handler for received frames
  * @lport: The local port the frame was received on
- * @fp:	   The received frame
+ * @fp:	The received frame
 */
 void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
 {
 	struct fc_frame_header *fh = fc_frame_header_get(fp);
 	struct fc_exch_mgr_anchor *ema;
-	u32 f_ctl, found = 0;
-	u16 oxid;
+	u32 f_ctl;
 
 	/* lport lock ? */
 	if (!lport || lport->state == LPORT_ST_DISABLED) {
@@ -2274,24 +2328,17 @@ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
 	}
 
 	f_ctl = ntoh24(fh->fh_f_ctl);
-	oxid = ntohs(fh->fh_ox_id);
-	if (f_ctl & FC_FC_EX_CTX) {
-		list_for_each_entry(ema, &lport->ema_list, ema_list) {
-			if ((oxid >= ema->mp->min_xid) &&
-			    (oxid <= ema->mp->max_xid)) {
-				found = 1;
-				break;
-			}
-		}
-
-		if (!found) {
-			FC_LPORT_DBG(lport, "Received response for out "
-				     "of range oxid:%hx\n", oxid);
-			fc_frame_free(fp);
-			return;
-		}
-	} else
-		ema = list_entry(lport->ema_list.prev, typeof(*ema), ema_list);
+	ema = fc_find_ema(f_ctl, lport, fh);
+	if (!ema) {
+		FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor,"
+				    "fc_ctl <0x%x>, xid <0x%x>\n",
+				     f_ctl,
+				     (f_ctl & FC_FC_EX_CTX) ?
+				     ntohs(fh->fh_ox_id) :
+				     ntohs(fh->fh_rx_id));
+		fc_frame_free(fp);
+		return;
+	}
 
 	/*
 	 * If frame is marked invalid, just drop it.
@@ -2329,6 +2376,9 @@ int fc_exch_init(struct fc_lport *lport)
 	if (!lport->tt.seq_start_next)
 		lport->tt.seq_start_next = fc_seq_start_next;
 
+	if (!lport->tt.seq_set_resp)
+		lport->tt.seq_set_resp = fc_seq_set_resp;
+
 	if (!lport->tt.exch_seq_send)
 		lport->tt.exch_seq_send = fc_exch_seq_send;
 
@@ -2350,6 +2400,9 @@ int fc_exch_init(struct fc_lport *lport)
 	if (!lport->tt.seq_assign)
 		lport->tt.seq_assign = fc_seq_assign;
 
+	if (!lport->tt.seq_release)
+		lport->tt.seq_release = fc_seq_release;
+
 	return 0;
 }
 EXPORT_SYMBOL(fc_exch_init);
@@ -2357,7 +2410,7 @@ EXPORT_SYMBOL(fc_exch_init);
 /**
  * fc_setup_exch_mgr() - Setup an exchange manager
 */
-int fc_setup_exch_mgr()
+int fc_setup_exch_mgr(void)
 {
 	fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
					 0, SLAB_HWCACHE_ALIGN, NULL);
@@ -2395,7 +2448,7 @@ int fc_setup_exch_mgr()
 /**
  * fc_destroy_exch_mgr() - Destroy an exchange manager
 */
-void fc_destroy_exch_mgr()
+void fc_destroy_exch_mgr(void)
 {
 	destroy_workqueue(fc_exch_workqueue);
 	kmem_cache_destroy(fc_em_cachep);

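fc_find_ema() above picks the exchange manager whose [min_xid, max_xid] window contains the frame's exchange ID, replacing the open-coded loop in fc_exch_recv(). The same range scan in miniature (the struct and table below are illustrative, not libfc's):

#include <stddef.h>
#include <stdint.h>

struct xid_range {
	uint16_t min_xid;
	uint16_t max_xid;
};

/* Return the first range containing xid, mirroring the list walk above. */
static const struct xid_range *find_range(const struct xid_range *tbl,
					  size_t n, uint16_t xid)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (xid >= tbl[i].min_xid && xid <= tbl[i].max_xid)
			return &tbl[i];
	return NULL;	/* caller drops the frame, as fc_exch_recv() does */
}
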
+ 15 - 24
drivers/scsi/libfc/fc_fcp.c

@@ -42,7 +42,7 @@
 
 #include "fc_libfc.h"
 
-struct kmem_cache *scsi_pkt_cachep;
+static struct kmem_cache *scsi_pkt_cachep;
 
 /* SRB state definitions */
 #define FC_SRB_FREE		0		/* cmd is free */
@@ -155,6 +155,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
 	if (fsp) {
 		memset(fsp, 0, sizeof(*fsp));
 		fsp->lp = lport;
+		fsp->xfer_ddp = FC_XID_UNKNOWN;
 		atomic_set(&fsp->ref_cnt, 1);
 		init_timer(&fsp->timer);
 		INIT_LIST_HEAD(&fsp->list);
@@ -1201,6 +1202,7 @@ unlock:
 static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
 {
 	int rc = FAILED;
+	unsigned long ticks_left;
 
 	if (fc_fcp_send_abort(fsp))
 		return FAILED;
@@ -1209,13 +1211,13 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
 	fsp->wait_for_comp = 1;
 
 	spin_unlock_bh(&fsp->scsi_pkt_lock);
-	rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
+	ticks_left = wait_for_completion_timeout(&fsp->tm_done,
							FC_SCSI_TM_TOV);
 	spin_lock_bh(&fsp->scsi_pkt_lock);
 	fsp->wait_for_comp = 0;
 
-	if (!rc) {
+	if (!ticks_left) {
 		FC_FCP_DBG(fsp, "target abort cmd  failed\n");
-		rc = FAILED;
 	} else if (fsp->state & FC_SRB_ABORTED) {
 		FC_FCP_DBG(fsp, "target abort cmd  passed\n");
 		rc = SUCCESS;
@@ -1321,7 +1323,7 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 		 *
 		 * scsi-eh will escalate for when either happens.
 		 */
-		goto out;
+		return;
 	}
 
 	if (fc_fcp_lock_pkt(fsp))
@@ -1787,15 +1789,14 @@ static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport)
 
 /**
  * fc_queuecommand() - The queuecommand function of the SCSI template
+ * @shost: The Scsi_Host that the command was issued to
 * @cmd:   The scsi_cmnd to be executed
- * @done:  The callback function to be called when the scsi_cmnd is complete
 *
- * This is the i/o strategy routine, called by the SCSI layer. This routine
- * is called with the host_lock held.
+ * This is the i/o strategy routine, called by the SCSI layer.
 */
-static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
+int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
 {
-	struct fc_lport *lport;
+	struct fc_lport *lport = shost_priv(shost);
 	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
 	struct fc_fcp_pkt *fsp;
 	struct fc_rport_libfc_priv *rpriv;
@@ -1803,15 +1804,12 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
 	int rc = 0;
 	struct fcoe_dev_stats *stats;
 
-	lport = shost_priv(sc_cmd->device->host);
-
 	rval = fc_remote_port_chkready(rport);
 	if (rval) {
 		sc_cmd->result = rval;
-		done(sc_cmd);
+		sc_cmd->scsi_done(sc_cmd);
 		return 0;
 	}
-	spin_unlock_irq(lport->host->host_lock);
 
 	if (!*(struct fc_remote_port **)rport->dd_data) {
 		/*
@@ -1819,7 +1817,7 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
 		 * online
 		 */
 		sc_cmd->result = DID_IMM_RETRY << 16;
-		done(sc_cmd);
+		sc_cmd->scsi_done(sc_cmd);
 		goto out;
 	}
 
@@ -1842,10 +1840,7 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
 	 * build the libfc request pkt
 	 */
 	fsp->cmd = sc_cmd;	/* save the cmd */
-	fsp->lp = lport;	/* save the softc ptr */
 	fsp->rport = rport;	/* set the remote port ptr */
-	fsp->xfer_ddp = FC_XID_UNKNOWN;
-	sc_cmd->scsi_done = done;
 
 	/*
 	 * set up the transfer length
@@ -1886,11 +1881,8 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 	}
 out:
-	spin_lock_irq(lport->host->host_lock);
 	return rc;
 }
-
-DEF_SCSI_QCMD(fc_queuecommand)
 EXPORT_SYMBOL(fc_queuecommand);
 
 /**
@@ -2112,7 +2104,6 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
 	 * the sc passed in is not setup for execution like when sent
 	 * through the queuecommand callout.
 	 */
-	fsp->lp = lport;	/* save the softc ptr */
 	fsp->rport = rport;	/* set the remote port ptr */
 
 	/*
@@ -2245,7 +2236,7 @@ void fc_fcp_destroy(struct fc_lport *lport)
 }
 EXPORT_SYMBOL(fc_fcp_destroy);
 
-int fc_setup_fcp()
+int fc_setup_fcp(void)
 {
 	int rc = 0;
 
@@ -2261,7 +2252,7 @@ int fc_setup_fcp()
 	return rc;
 }
 
-void fc_destroy_fcp()
+void fc_destroy_fcp(void)
 {
 	if (scsi_pkt_cachep)
 		kmem_cache_destroy(scsi_pkt_cachep);

+ 120 - 0
drivers/scsi/libfc/fc_libfc.c

@@ -35,6 +35,27 @@ unsigned int fc_debug_logging;
 module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
 
+DEFINE_MUTEX(fc_prov_mutex);
+static LIST_HEAD(fc_local_ports);
+struct blocking_notifier_head fc_lport_notifier_head =
+		BLOCKING_NOTIFIER_INIT(fc_lport_notifier_head);
+EXPORT_SYMBOL(fc_lport_notifier_head);
+
+/*
+ * Providers which primarily send requests and PRLIs.
+ */
+struct fc4_prov *fc_active_prov[FC_FC4_PROV_SIZE] = {
+	[0] = &fc_rport_t0_prov,
+	[FC_TYPE_FCP] = &fc_rport_fcp_init,
+};
+
+/*
+ * Providers which receive requests.
+ */
+struct fc4_prov *fc_passive_prov[FC_FC4_PROV_SIZE] = {
+	[FC_TYPE_ELS] = &fc_lport_els_prov,
+};
+
 /**
  * libfc_init() - Initialize libfc.ko
 */
@@ -210,3 +231,102 @@ void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
 	fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset);
 }
 EXPORT_SYMBOL(fc_fill_reply_hdr);
+
+/**
+ * fc_fc4_conf_lport_params() - Modify "service_params" of specified lport
+ * if there is service provider (target provider) registered with libfc
+ * for specified "fc_ft_type"
+ * @lport: Local port which service_params needs to be modified
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ */
+void fc_fc4_conf_lport_params(struct fc_lport *lport, enum fc_fh_type type)
+{
+	struct fc4_prov *prov_entry;
+	BUG_ON(type >= FC_FC4_PROV_SIZE);
+	BUG_ON(!lport);
+	prov_entry = fc_passive_prov[type];
+	if (type == FC_TYPE_FCP) {
+		if (prov_entry && prov_entry->recv)
+			lport->service_params |= FCP_SPPF_TARG_FCN;
+	}
+}
+
+void fc_lport_iterate(void (*notify)(struct fc_lport *, void *), void *arg)
+{
+	struct fc_lport *lport;
+
+	mutex_lock(&fc_prov_mutex);
+	list_for_each_entry(lport, &fc_local_ports, lport_list)
+		notify(lport, arg);
+	mutex_unlock(&fc_prov_mutex);
+}
+EXPORT_SYMBOL(fc_lport_iterate);
+
+/**
+ * fc_fc4_register_provider() - register FC-4 upper-level provider.
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ * @prov: structure describing provider including ops vector.
+ *
+ * Returns 0 on success, negative error otherwise.
+ */
+int fc_fc4_register_provider(enum fc_fh_type type, struct fc4_prov *prov)
+{
+	struct fc4_prov **prov_entry;
+	int ret = 0;
+
+	if (type >= FC_FC4_PROV_SIZE)
+		return -EINVAL;
+	mutex_lock(&fc_prov_mutex);
+	prov_entry = (prov->recv ? fc_passive_prov : fc_active_prov) + type;
+	if (*prov_entry)
+		ret = -EBUSY;
+	else
+		*prov_entry = prov;
+	mutex_unlock(&fc_prov_mutex);
+	return ret;
+}
+EXPORT_SYMBOL(fc_fc4_register_provider);
+
+/**
+ * fc_fc4_deregister_provider() - deregister FC-4 upper-level provider.
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ * @prov: structure describing provider including ops vector.
+ */
+void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *prov)
+{
+	BUG_ON(type >= FC_FC4_PROV_SIZE);
+	mutex_lock(&fc_prov_mutex);
+	if (prov->recv)
+		rcu_assign_pointer(fc_passive_prov[type], NULL);
+	else
+		rcu_assign_pointer(fc_active_prov[type], NULL);
+	mutex_unlock(&fc_prov_mutex);
+	synchronize_rcu();
+}
+EXPORT_SYMBOL(fc_fc4_deregister_provider);
+
+/**
+ * fc_fc4_add_lport() - add new local port to list and run notifiers.
+ * @lport:  The new local port.
+ */
+void fc_fc4_add_lport(struct fc_lport *lport)
+{
+	mutex_lock(&fc_prov_mutex);
+	list_add_tail(&lport->lport_list, &fc_local_ports);
+	blocking_notifier_call_chain(&fc_lport_notifier_head,
+				     FC_LPORT_EV_ADD, lport);
+	mutex_unlock(&fc_prov_mutex);
+}
+
+/**
+ * fc_fc4_del_lport() - remove local port from list and run notifiers.
+ * @lport:  The new local port.
+ */
+void fc_fc4_del_lport(struct fc_lport *lport)
+{
+	mutex_lock(&fc_prov_mutex);
+	list_del(&lport->lport_list);
+	blocking_notifier_call_chain(&fc_lport_notifier_head,
+				     FC_LPORT_EV_DEL, lport);
+	mutex_unlock(&fc_prov_mutex);
+}

+ 14 - 0
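The new provider registry in fc_libfc.c is a pair of FC-4-type-indexed pointer tables with busy-checked registration; receive dispatch indexes the passive table by frame type. A stripped-down sketch of that shape, with the mutex/RCU protection the real code uses omitted for brevity (all names below are illustrative):

#include <errno.h>
#include <stddef.h>

#define PROV_SIZE 8

struct prov {
	int (*recv)(void *frame);
};

static struct prov *passive_prov[PROV_SIZE];

/* Register a provider for one type slot; refuse to overwrite. */
static int register_provider(unsigned int type, struct prov *p)
{
	if (type >= PROV_SIZE)
		return -EINVAL;
	if (passive_prov[type])
		return -EBUSY;	/* slot taken, like the -EBUSY above */
	passive_prov[type] = p;
	return 0;
}

/* Dispatch: unknown or unregistered types are dropped. */
static int dispatch(unsigned int type, void *frame)
{
	if (type >= PROV_SIZE || !passive_prov[type])
		return -ENODEV;
	return passive_prov[type]->recv(frame);
}
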
drivers/scsi/libfc/fc_libfc.h

@@ -93,6 +93,17 @@ extern unsigned int fc_debug_logging;
 			 printk(KERN_INFO "host%u: scsi: " fmt,		\
 			 printk(KERN_INFO "host%u: scsi: " fmt,		\
 				(lport)->host->host_no,	##args))
 				(lport)->host->host_no,	##args))
 
 
+/*
+ * FC-4 Providers.
+ */
+extern struct fc4_prov *fc_active_prov[];	/* providers without recv */
+extern struct fc4_prov *fc_passive_prov[];	/* providers with recv */
+extern struct mutex fc_prov_mutex;		/* lock over table changes */
+
+extern struct fc4_prov fc_rport_t0_prov;	/* type 0 provider */
+extern struct fc4_prov fc_lport_els_prov;	/* ELS provider */
+extern struct fc4_prov fc_rport_fcp_init;	/* FCP initiator provider */
+
 /*
 /*
  * Set up direct-data placement for this I/O request
  * Set up direct-data placement for this I/O request
  */
  */
@@ -112,6 +123,9 @@ void fc_destroy_fcp(void);
  * Internal libfc functions
  * Internal libfc functions
  */
  */
 const char *fc_els_resp_type(struct fc_frame *);
 const char *fc_els_resp_type(struct fc_frame *);
+extern void fc_fc4_add_lport(struct fc_lport *);
+extern void fc_fc4_del_lport(struct fc_lport *);
+extern void fc_fc4_conf_lport_params(struct fc_lport *, enum fc_fh_type);
 
 
 /*
 /*
  * Copies a buffer into an sg list
  * Copies a buffer into an sg list

+ 60 - 9
drivers/scsi/libfc/fc_lport.c

@@ -633,6 +633,7 @@ int fc_lport_destroy(struct fc_lport *lport)
 	lport->tt.fcp_abort_io(lport);
 	lport->tt.disc_stop_final(lport);
 	lport->tt.exch_mgr_reset(lport, 0, 0);
+	fc_fc4_del_lport(lport);
 	return 0;
 }
 EXPORT_SYMBOL(fc_lport_destroy);
@@ -849,7 +850,7 @@ out:
 }
 
 /**
- * fc_lport_recv_req() - The generic lport request handler
+ * fc_lport_recv_els_req() - The generic lport ELS request handler
  * @lport: The local port that received the request
 * @fp:	   The request frame
 *
@@ -859,9 +860,9 @@ out:
 * Locking Note: This function should not be called with the lport
 *		 lock held becuase it will grab the lock.
 */
-static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+static void fc_lport_recv_els_req(struct fc_lport *lport,
+				  struct fc_frame *fp)
 {
-	struct fc_frame_header *fh = fc_frame_header_get(fp);
 	void (*recv)(struct fc_lport *, struct fc_frame *);
 
 	mutex_lock(&lport->lp_mutex);
@@ -873,8 +874,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
 	 */
 	if (!lport->link_up)
 		fc_frame_free(fp);
-	else if (fh->fh_type == FC_TYPE_ELS &&
-		 fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
+	else {
 		/*
 		 * Check opcode.
 		 */
@@ -903,14 +903,62 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
 		}
 
 		recv(lport, fp);
-	} else {
-		FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
-			     fr_eof(fp));
-		fc_frame_free(fp);
 	}
 	mutex_unlock(&lport->lp_mutex);
 }
 
+static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len,
+			     const struct fc_els_spp *spp_in,
+			     struct fc_els_spp *spp_out)
+{
+	return FC_SPP_RESP_INVL;
+}
+
+struct fc4_prov fc_lport_els_prov = {
+	.prli = fc_lport_els_prli,
+	.recv = fc_lport_recv_els_req,
+};
+
+/**
+ * fc_lport_recv_req() - The generic lport request handler
+ * @lport: The lport that received the request
+ * @fp: The frame the request is in
+ *
+ * Locking Note: This function should not be called with the lport
+ *		 lock held becuase it may grab the lock.
+ */
+static void fc_lport_recv_req(struct fc_lport *lport,
+			      struct fc_frame *fp)
+{
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	struct fc_seq *sp = fr_seq(fp);
+	struct fc4_prov *prov;
+
+	/*
+	 * Use RCU read lock and module_lock to be sure module doesn't
+	 * deregister and get unloaded while we're calling it.
+	 * try_module_get() is inlined and accepts a NULL parameter.
+	 * Only ELSes and FCP target ops should come through here.
+	 * The locking is unfortunate, and a better scheme is being sought.
+	 */
+
+	rcu_read_lock();
+	if (fh->fh_type >= FC_FC4_PROV_SIZE)
+		goto drop;
+	prov = rcu_dereference(fc_passive_prov[fh->fh_type]);
+	if (!prov || !try_module_get(prov->module))
+		goto drop;
+	rcu_read_unlock();
+	prov->recv(lport, fp);
+	module_put(prov->module);
+	return;
+drop:
+	rcu_read_unlock();
+	FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
+	fc_frame_free(fp);
+	lport->tt.exch_done(sp);
+}
+
 /**
  * fc_lport_reset() - Reset a local port
  * @lport: The local port which should be reset
@@ -1542,6 +1590,7 @@ void fc_lport_enter_flogi(struct fc_lport *lport)
 */
 int fc_lport_config(struct fc_lport *lport)
 {
+	INIT_LIST_HEAD(&lport->ema_list);
 	INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
 	mutex_init(&lport->lp_mutex);
 
@@ -1549,6 +1598,7 @@ int fc_lport_config(struct fc_lport *lport)
 
 	fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
 	fc_lport_add_fc4_type(lport, FC_TYPE_CT);
+	fc_fc4_conf_lport_params(lport, FC_TYPE_FCP);
 
 	return 0;
 }
@@ -1586,6 +1636,7 @@ int fc_lport_init(struct fc_lport *lport)
 		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
 	if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
 		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
+	fc_fc4_add_lport(lport);
 
 	return 0;
 }

+ 2 - 8
drivers/scsi/libfc/fc_npiv.c

@@ -37,9 +37,7 @@ struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize)
 
 	vn_port = libfc_host_alloc(shost->hostt, privsize);
 	if (!vn_port)
-		goto err_out;
-	if (fc_exch_mgr_list_clone(n_port, vn_port))
-		goto err_put;
+		return vn_port;
 
 	vn_port->vport = vport;
 	vport->dd_data = vn_port;
@@ -49,11 +47,6 @@ struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize)
 	mutex_unlock(&n_port->lp_mutex);
 
 	return vn_port;
-
-err_put:
-	scsi_host_put(vn_port->host);
-err_out:
-	return NULL;
 }
 EXPORT_SYMBOL(libfc_vport_create);
 
@@ -86,6 +79,7 @@ struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id)
 
 	return lport;
 }
+EXPORT_SYMBOL(fc_vport_id_lookup);
 
 /*
 * When setting the link state of vports during an lport state change, it's

+ 155 - 36
drivers/scsi/libfc/fc_rport.c

@@ -58,7 +58,7 @@
 
 
 #include "fc_libfc.h"
 #include "fc_libfc.h"
 
 
-struct workqueue_struct *rport_event_queue;
+static struct workqueue_struct *rport_event_queue;
 
 
 static void fc_rport_enter_flogi(struct fc_rport_priv *);
 static void fc_rport_enter_flogi(struct fc_rport_priv *);
 static void fc_rport_enter_plogi(struct fc_rport_priv *);
 static void fc_rport_enter_plogi(struct fc_rport_priv *);
@@ -145,8 +145,10 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
 	rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
 	rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
 	INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
 	INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
 	INIT_WORK(&rdata->event_work, fc_rport_work);
 	INIT_WORK(&rdata->event_work, fc_rport_work);
-	if (port_id != FC_FID_DIR_SERV)
+	if (port_id != FC_FID_DIR_SERV) {
+		rdata->lld_event_callback = lport->tt.rport_event_callback;
 		list_add_rcu(&rdata->peers, &lport->disc.rports);
 		list_add_rcu(&rdata->peers, &lport->disc.rports);
+	}
 	return rdata;
 	return rdata;
 }
 }
 
 
@@ -257,6 +259,8 @@ static void fc_rport_work(struct work_struct *work)
 	struct fc_rport_operations *rport_ops;
 	struct fc_rport_operations *rport_ops;
 	struct fc_rport_identifiers ids;
 	struct fc_rport_identifiers ids;
 	struct fc_rport *rport;
 	struct fc_rport *rport;
+	struct fc4_prov *prov;
+	u8 type;
 
 
 	mutex_lock(&rdata->rp_mutex);
 	mutex_lock(&rdata->rp_mutex);
 	event = rdata->event;
 	event = rdata->event;
@@ -300,12 +304,25 @@ static void fc_rport_work(struct work_struct *work)
 			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
 			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
 			rport_ops->event_callback(lport, rdata, event);
 			rport_ops->event_callback(lport, rdata, event);
 		}
 		}
+		if (rdata->lld_event_callback) {
+			FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
+			rdata->lld_event_callback(lport, rdata, event);
+		}
 		kref_put(&rdata->kref, lport->tt.rport_destroy);
 		kref_put(&rdata->kref, lport->tt.rport_destroy);
 		break;
 		break;
 
 
 	case RPORT_EV_FAILED:
 	case RPORT_EV_FAILED:
 	case RPORT_EV_LOGO:
 	case RPORT_EV_LOGO:
 	case RPORT_EV_STOP:
 	case RPORT_EV_STOP:
+		if (rdata->prli_count) {
+			mutex_lock(&fc_prov_mutex);
+			for (type = 1; type < FC_FC4_PROV_SIZE; type++) {
+				prov = fc_passive_prov[type];
+				if (prov && prov->prlo)
+					prov->prlo(rdata);
+			}
+			mutex_unlock(&fc_prov_mutex);
+		}
 		port_id = rdata->ids.port_id;
 		port_id = rdata->ids.port_id;
 		mutex_unlock(&rdata->rp_mutex);
 		mutex_unlock(&rdata->rp_mutex);
 
 
@@ -313,6 +330,10 @@ static void fc_rport_work(struct work_struct *work)
 			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
 			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
 			rport_ops->event_callback(lport, rdata, event);
 			rport_ops->event_callback(lport, rdata, event);
 		}
 		}
+		if (rdata->lld_event_callback) {
+			FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
+			rdata->lld_event_callback(lport, rdata, event);
+		}
 		cancel_delayed_work_sync(&rdata->retry_work);
 		cancel_delayed_work_sync(&rdata->retry_work);
 
 
 		/*
 		/*
@@ -336,6 +357,7 @@ static void fc_rport_work(struct work_struct *work)
 			if (port_id == FC_FID_DIR_SERV) {
 			if (port_id == FC_FID_DIR_SERV) {
 				rdata->event = RPORT_EV_NONE;
 				rdata->event = RPORT_EV_NONE;
 				mutex_unlock(&rdata->rp_mutex);
 				mutex_unlock(&rdata->rp_mutex);
+				kref_put(&rdata->kref, lport->tt.rport_destroy);
 			} else if ((rdata->flags & FC_RP_STARTED) &&
 			} else if ((rdata->flags & FC_RP_STARTED) &&
 				   rdata->major_retries <
 				   rdata->major_retries <
 				   lport->max_rport_retry_count) {
 				   lport->max_rport_retry_count) {
@@ -575,7 +597,7 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata,
 
 
 	/* make sure this isn't an FC_EX_CLOSED error, never retry those */
 	/* make sure this isn't an FC_EX_CLOSED error, never retry those */
 	if (PTR_ERR(fp) == -FC_EX_CLOSED)
 	if (PTR_ERR(fp) == -FC_EX_CLOSED)
-		return fc_rport_error(rdata, fp);
+		goto out;
 
 
 	if (rdata->retries < rdata->local_port->max_rport_retry_count) {
 	if (rdata->retries < rdata->local_port->max_rport_retry_count) {
 		FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
 		FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
@@ -588,7 +610,8 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata,
 		return;
 		return;
 	}
 	}
 
 
-	return fc_rport_error(rdata, fp);
+out:
+	fc_rport_error(rdata, fp);
 }
 }
 
 
 /**
 /**
@@ -878,6 +901,9 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 		rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
 		rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
 		rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);
 		rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);
 
 
+		/* save plogi response sp_features for further reference */
+		rdata->sp_features = ntohs(plp->fl_csp.sp_features);
+
 		if (lport->point_to_multipoint)
 			fc_rport_login_complete(rdata, fp);
 		csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
@@ -949,6 +975,8 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
 		struct fc_els_prli prli;
 		struct fc_els_spp spp;
 	} *pp;
+	struct fc_els_spp temp_spp;
+	struct fc4_prov *prov;
 	u32 roles = FC_RPORT_ROLE_UNKNOWN;
 	u32 fcp_parm = 0;
 	u8 op;
@@ -983,6 +1011,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
 		resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
 		FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x\n",
 			     pp->spp.spp_flags);
+		rdata->spp_type = pp->spp.spp_type;
 		if (resp_code != FC_SPP_RESP_ACK) {
 			if (resp_code == FC_SPP_RESP_CONF)
 				fc_rport_error(rdata, fp);
@@ -996,6 +1025,15 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
 		fcp_parm = ntohl(pp->spp.spp_params);
 		if (fcp_parm & FCP_SPPF_RETRY)
 			rdata->flags |= FC_RP_FLAGS_RETRY;
+		if (fcp_parm & FCP_SPPF_CONF_COMPL)
+			rdata->flags |= FC_RP_FLAGS_CONF_REQ;
+
+		prov = fc_passive_prov[FC_TYPE_FCP];
+		if (prov) {
+			memset(&temp_spp, 0, sizeof(temp_spp));
+			prov->prli(rdata, pp->prli.prli_spp_len,
+				   &pp->spp, &temp_spp);
+		}

 		rdata->supported_classes = FC_COS_CLASS3;
 		if (fcp_parm & FCP_SPPF_INIT_FCN)
@@ -1033,6 +1071,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
 		struct fc_els_spp spp;
 	} *pp;
 	struct fc_frame *fp;
+	struct fc4_prov *prov;

 	/*
 	 * If the rport is one of the well known addresses
@@ -1054,9 +1093,20 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
 		return;
 	}

-	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
-				  fc_rport_prli_resp, rdata,
-				  2 * lport->r_a_tov))
+	fc_prli_fill(lport, fp);
+
+	prov = fc_passive_prov[FC_TYPE_FCP];
+	if (prov) {
+		pp = fc_frame_payload_get(fp, sizeof(*pp));
+		prov->prli(rdata, sizeof(pp->spp), NULL, &pp->spp);
+	}
+
+	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rdata->ids.port_id,
+		       fc_host_port_id(lport->host), FC_TYPE_ELS,
+		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+	if (!lport->tt.exch_seq_send(lport, fp, fc_rport_prli_resp,
+				    NULL, rdata, 2 * lport->r_a_tov))
 		fc_rport_error_retry(rdata, NULL);
 	else
 		kref_get(&rdata->kref);
@@ -1642,9 +1692,9 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
 	unsigned int len;
 	unsigned int plen;
 	enum fc_els_spp_resp resp;
+	enum fc_els_spp_resp passive;
 	struct fc_seq_els_data rjt_data;
-	u32 fcp_parm;
-	u32 roles = FC_RPORT_ROLE_UNKNOWN;
+	struct fc4_prov *prov;

 	FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
 		     fc_rport_state(rdata));
@@ -1678,46 +1728,42 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
 	pp->prli.prli_len = htons(len);
 	len -= sizeof(struct fc_els_prli);

-	/* reinitialize remote port roles */
-	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
-
 	/*
 	 * Go through all the service parameter pages and build
 	 * response.  If plen indicates longer SPP than standard,
 	 * use that.  The entire response has been pre-cleared above.
 	 */
 	spp = &pp->spp;
+	mutex_lock(&fc_prov_mutex);
 	while (len >= plen) {
+		rdata->spp_type = rspp->spp_type;
 		spp->spp_type = rspp->spp_type;
 		spp->spp_type_ext = rspp->spp_type_ext;
-		spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
-		resp = FC_SPP_RESP_ACK;
-
-		switch (rspp->spp_type) {
-		case 0:	/* common to all FC-4 types */
-			break;
-		case FC_TYPE_FCP:
-			fcp_parm = ntohl(rspp->spp_params);
-			if (fcp_parm & FCP_SPPF_RETRY)
-				rdata->flags |= FC_RP_FLAGS_RETRY;
-			rdata->supported_classes = FC_COS_CLASS3;
-			if (fcp_parm & FCP_SPPF_INIT_FCN)
-				roles |= FC_RPORT_ROLE_FCP_INITIATOR;
-			if (fcp_parm & FCP_SPPF_TARG_FCN)
-				roles |= FC_RPORT_ROLE_FCP_TARGET;
-			rdata->ids.roles = roles;
-
-			spp->spp_params = htonl(lport->service_params);
-			break;
-		default:
-			resp = FC_SPP_RESP_INVL;
-			break;
+		resp = 0;
+
+		if (rspp->spp_type < FC_FC4_PROV_SIZE) {
+			prov = fc_active_prov[rspp->spp_type];
+			if (prov)
+				resp = prov->prli(rdata, plen, rspp, spp);
+			prov = fc_passive_prov[rspp->spp_type];
+			if (prov) {
+				passive = prov->prli(rdata, plen, rspp, spp);
+				if (!resp || passive == FC_SPP_RESP_ACK)
+					resp = passive;
+			}
+		}
+		if (!resp) {
+			if (spp->spp_flags & FC_SPP_EST_IMG_PAIR)
+				resp |= FC_SPP_RESP_CONF;
+			else
+				resp |= FC_SPP_RESP_INVL;
 		}
 		spp->spp_flags |= resp;
 		len -= plen;
 		rspp = (struct fc_els_spp *)((char *)rspp + plen);
 		spp = (struct fc_els_spp *)((char *)spp + plen);
 	}
+	mutex_unlock(&fc_prov_mutex);

 	/*
 	 * Send LS_ACC.	 If this fails, the originator should retry.
@@ -1886,10 +1932,83 @@ int fc_rport_init(struct fc_lport *lport)
 }
 EXPORT_SYMBOL(fc_rport_init);

+/**
+ * fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator.
+ * @rdata: remote port private
+ * @spp_len: service parameter page length
+ * @rspp: received service parameter page
+ * @spp: response service parameter page
+ *
+ * Returns the value for the response code to be placed in spp_flags,
+ * or 0 if not an initiator.
+ */
+static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len,
+			     const struct fc_els_spp *rspp,
+			     struct fc_els_spp *spp)
+{
+	struct fc_lport *lport = rdata->local_port;
+	u32 fcp_parm;
+
+	fcp_parm = ntohl(rspp->spp_params);
+	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
+	if (fcp_parm & FCP_SPPF_INIT_FCN)
+		rdata->ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+	if (fcp_parm & FCP_SPPF_TARG_FCN)
+		rdata->ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+	if (fcp_parm & FCP_SPPF_RETRY)
+		rdata->flags |= FC_RP_FLAGS_RETRY;
+	rdata->supported_classes = FC_COS_CLASS3;
+
+	if (!(lport->service_params & FC_RPORT_ROLE_FCP_INITIATOR))
+		return 0;
+
+	spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
+
+	/*
+	 * OR in our service parameters with other providers (target), if any.
+	 */
+	fcp_parm = ntohl(spp->spp_params);
+	spp->spp_params = htonl(fcp_parm | lport->service_params);
+	return FC_SPP_RESP_ACK;
+}
+
+/*
+ * FC-4 provider ops for FCP initiator.
+ */
+struct fc4_prov fc_rport_fcp_init = {
+	.prli = fc_rport_fcp_prli,
+};
+
+/**
+ * fc_rport_t0_prli() - Handle incoming PRLI parameters for type 0
+ * @rdata: remote port private
+ * @spp_len: service parameter page length
+ * @rspp: received service parameter page
+ * @spp: response service parameter page
+ */
+static int fc_rport_t0_prli(struct fc_rport_priv *rdata, u32 spp_len,
+			    const struct fc_els_spp *rspp,
+			    struct fc_els_spp *spp)
+{
+	if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR)
+		return FC_SPP_RESP_INVL;
+	return FC_SPP_RESP_ACK;
+}
+
+/*
+ * FC-4 provider ops for type 0 service parameters.
+ *
+ * This handles the special case of type 0 which is always successful
+ * but doesn't do anything otherwise.
+ */
+struct fc4_prov fc_rport_t0_prov = {
+	.prli = fc_rport_t0_prli,
+};
+
 /**
  * fc_setup_rport() - Initialize the rport_event_queue
  */
-int fc_setup_rport()
+int fc_setup_rport(void)
 {
 	rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
 	if (!rport_event_queue)
@@ -1900,7 +2019,7 @@ int fc_setup_rport()
 /**
  * fc_destroy_rport() - Destroy the rport_event_queue
  */
-void fc_destroy_rport()
+void fc_destroy_rport(void)
 {
 	destroy_workqueue(rport_event_queue);
 }
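
The hunks above route both sides of PRLI processing through a generic FC-4 provider table: fc_rport_recv_prli_req() consults fc_active_prov[]/fc_passive_prov[] under fc_prov_mutex, and the RPORT_EV_FAILED/LOGO/STOP path fans out to each passive provider's prlo() hook. A minimal sketch of what a passive provider for another FC-4 type could look like, based only on the prli/prlo signatures used above (the registration step itself is not shown in this diff and is assumed):

/* Sketch of a passive FC-4 provider. Everything beyond the prli/prlo
 * callback signatures (taken from the calls above) is an assumption. */
static int my_fc4_prli(struct fc_rport_priv *rdata, u32 spp_len,
		       const struct fc_els_spp *rspp,
		       struct fc_els_spp *spp)
{
	/* rspp is NULL on the outgoing-request path (see
	 * fc_rport_enter_prli() above); fill only the response page. */
	if (rspp && (rspp->spp_flags & FC_SPP_EST_IMG_PAIR))
		spp->spp_flags |= FC_SPP_EST_IMG_PAIR;
	return FC_SPP_RESP_ACK;
}

static void my_fc4_prlo(struct fc_rport_priv *rdata)
{
	/* Release any per-rport state held for this FC-4 type. */
}

static struct fc4_prov my_fc4_prov = {
	.prli = my_fc4_prli,
	.prlo = my_fc4_prlo,
};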

+ 41 - 3
drivers/scsi/libiscsi.c

@@ -3352,6 +3352,47 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
 }
 EXPORT_SYMBOL_GPL(iscsi_session_get_param);

+int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
+			      enum iscsi_param param, char *buf)
+{
+	struct sockaddr_in6 *sin6 = NULL;
+	struct sockaddr_in *sin = NULL;
+	int len;
+
+	switch (addr->ss_family) {
+	case AF_INET:
+		sin = (struct sockaddr_in *)addr;
+		break;
+	case AF_INET6:
+		sin6 = (struct sockaddr_in6 *)addr;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (param) {
+	case ISCSI_PARAM_CONN_ADDRESS:
+	case ISCSI_HOST_PARAM_IPADDRESS:
+		if (sin)
+			len = sprintf(buf, "%pI4\n", &sin->sin_addr.s_addr);
+		else
+			len = sprintf(buf, "%pI6\n", &sin6->sin6_addr);
+		break;
+	case ISCSI_PARAM_CONN_PORT:
+		if (sin)
+			len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port));
+		else
+			len = sprintf(buf, "%hu\n",
+				      be16_to_cpu(sin6->sin6_port));
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_get_addr_param);
+
 int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
 			 enum iscsi_param param, char *buf)
 {
@@ -3416,9 +3457,6 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
 		len = sprintf(buf, "%s\n", ihost->initiatorname);
 		break;
-	case ISCSI_HOST_PARAM_IPADDRESS:
-		len = sprintf(buf, "%s\n", ihost->local_address);
-		break;
 	default:
 		return -ENOSYS;
 	}
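
With the per-host local_address case removed above, a transport that keeps the connection's peer address in a struct sockaddr_storage can answer address and port queries through the new helper. A sketch of such a get_conn_param hook; struct my_conn and its peer_addr field are hypothetical:

/* Sketch of a transport's get_conn_param hook built on the new
 * iscsi_conn_get_addr_param(); my_conn/peer_addr are hypothetical. */
static int my_conn_get_param(struct iscsi_cls_conn *cls_conn,
			     enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct my_conn *tconn = conn->dd_data;

	switch (param) {
	case ISCSI_PARAM_CONN_ADDRESS:
	case ISCSI_PARAM_CONN_PORT:
		/* Formats %pI4/%pI6 or the port, or returns -EINVAL
		 * for an unknown address family. */
		return iscsi_conn_get_addr_param(&tconn->peer_addr,
						 param, buf);
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}
}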

+ 0 - 8
drivers/scsi/libsas/Kconfig

@@ -46,11 +46,3 @@ config SCSI_SAS_HOST_SMP
 		Allows sas hosts to receive SMP frames.  Selecting this
 		option builds an SMP interpreter into libsas.  Say
 		N here if you want to save the few kb this consumes.
-
-config SCSI_SAS_LIBSAS_DEBUG
-	bool "Compile the SAS Domain Transport Attributes in debug mode"
-	default y
-	depends on SCSI_SAS_LIBSAS
-	help
-		Compiles the SAS Layer in debug mode.  In debug mode, the
-		SAS Layer prints diagnostic and debug messages.

+ 0 - 4
drivers/scsi/libsas/Makefile

@@ -21,10 +21,6 @@
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 # USA

-ifeq ($(CONFIG_SCSI_SAS_LIBSAS_DEBUG),y)
-	EXTRA_CFLAGS += -DSAS_DEBUG
-endif
-
 obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas.o
 libsas-y +=  sas_init.o     \
 		sas_phy.o      \

+ 22 - 58
drivers/scsi/libsas/sas_ata.c

@@ -71,13 +71,13 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
 		case SAS_SG_ERR:
 			return AC_ERR_INVALID;

-		case SAM_STAT_CHECK_CONDITION:
 		case SAS_OPEN_TO:
 		case SAS_OPEN_REJECT:
 			SAS_DPRINTK("%s: Saw error %d.  What to do?\n",
 				    __func__, ts->stat);
 			return AC_ERR_OTHER;

+		case SAM_STAT_CHECK_CONDITION:
 		case SAS_ABORTED_TASK:
 			return AC_ERR_DEV;

@@ -107,13 +107,15 @@ static void sas_ata_task_done(struct sas_task *task)
 	sas_ha = dev->port->ha;

 	spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
-	if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD) {
+	if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
+	    ((stat->stat == SAM_STAT_CHECK_CONDITION &&
+	      dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
 		ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
 		qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
 		dev->sata_dev.sstatus = resp->sstatus;
 		dev->sata_dev.serror = resp->serror;
 		dev->sata_dev.scontrol = resp->scontrol;
-	} else if (stat->stat != SAM_STAT_GOOD) {
+	} else {
 		ac = sas_to_ata_err(stat);
 		if (ac) {
 			SAS_DPRINTK("%s: SAS error %x\n", __func__,
@@ -305,55 +307,6 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
 	}
 }

-static int sas_ata_scr_write(struct ata_link *link, unsigned int sc_reg_in,
-			      u32 val)
-{
-	struct domain_device *dev = link->ap->private_data;
-
-	SAS_DPRINTK("STUB %s\n", __func__);
-	switch (sc_reg_in) {
-		case SCR_STATUS:
-			dev->sata_dev.sstatus = val;
-			break;
-		case SCR_CONTROL:
-			dev->sata_dev.scontrol = val;
-			break;
-		case SCR_ERROR:
-			dev->sata_dev.serror = val;
-			break;
-		case SCR_ACTIVE:
-			dev->sata_dev.ap->link.sactive = val;
-			break;
-		default:
-			return -EINVAL;
-	}
-	return 0;
-}
-
-static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
-			    u32 *val)
-{
-	struct domain_device *dev = link->ap->private_data;
-
-	SAS_DPRINTK("STUB %s\n", __func__);
-	switch (sc_reg_in) {
-		case SCR_STATUS:
-			*val = dev->sata_dev.sstatus;
-			return 0;
-		case SCR_CONTROL:
-			*val = dev->sata_dev.scontrol;
-			return 0;
-		case SCR_ERROR:
-			*val = dev->sata_dev.serror;
-			return 0;
-		case SCR_ACTIVE:
-			*val = dev->sata_dev.ap->link.sactive;
-			return 0;
-		default:
-			return -EINVAL;
-	}
-}
-
 static struct ata_port_operations sas_sata_ops = {
 	.prereset		= ata_std_prereset,
 	.softreset		= NULL,
@@ -367,8 +320,6 @@ static struct ata_port_operations sas_sata_ops = {
 	.qc_fill_rtf		= sas_ata_qc_fill_rtf,
 	.port_start		= ata_sas_port_start,
 	.port_stop		= ata_sas_port_stop,
-	.scr_read		= sas_ata_scr_read,
-	.scr_write		= sas_ata_scr_write
 };

 static struct ata_port_info sata_port_info = {
@@ -801,7 +752,7 @@ void sas_ata_strategy_handler(struct Scsi_Host *shost)

 		if (!dev_is_sata(ddev))
 			continue;
-		
+
 		ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata port error handler");
 		ata_scsi_port_error_handler(shost, ap);
 	}
@@ -834,13 +785,13 @@ int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
 		LIST_HEAD(sata_q);

 		ap = NULL;
-		
+
 		list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
 			struct domain_device *ddev = cmd_to_domain_dev(cmd);

 			if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd))
 				continue;
-			if(ap && ap != ddev->sata_dev.ap)
+			if (ap && ap != ddev->sata_dev.ap)
 				continue;
 			ap = ddev->sata_dev.ap;
 			rtn = 1;
@@ -848,8 +799,21 @@ int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
 		}

 		if (!list_empty(&sata_q)) {
-			ata_port_printk(ap, KERN_DEBUG,"sas eh calling libata cmd error handler\n");
+			ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata cmd error handler\n");
 			ata_scsi_cmd_error_handler(shost, ap, &sata_q);
+			/*
+			 * ata's error handler may leave the cmd on the list
+			 * so make sure they don't remain on a stack list
+			 * about to go out of scope.
+			 *
+			 * This looks strange, since the commands are
+			 * now part of no list, but the next error
+			 * action will be ata_port_error_handler()
+			 * which takes no list and sweeps them up
+			 * anyway from the ata tag array.
+			 */
+			while (!list_empty(&sata_q))
+				list_del_init(sata_q.next);
 		}
 	} while (ap);


+ 0 - 4
drivers/scsi/libsas/sas_dump.c

@@ -24,8 +24,6 @@

 #include "sas_dump.h"

-#ifdef SAS_DEBUG
-
 static const char *sas_hae_str[] = {
 	[0] = "HAE_RESET",
 };
@@ -72,5 +70,3 @@ void sas_dump_port(struct asd_sas_port *port)
 	SAS_DPRINTK("port%d: oob_mode:0x%x\n", port->id, port->oob_mode);
 	SAS_DPRINTK("port%d: oob_mode:0x%x\n", port->id, port->oob_mode);
 	SAS_DPRINTK("port%d: num_phys:%d\n", port->id, port->num_phys);
 	SAS_DPRINTK("port%d: num_phys:%d\n", port->id, port->num_phys);
 }
 }
-
-#endif /* SAS_DEBUG */

+ 0 - 12
drivers/scsi/libsas/sas_dump.h

@@ -24,19 +24,7 @@

 #include "sas_internal.h"

-#ifdef SAS_DEBUG
-
 void sas_dprint_porte(int phyid, enum port_event pe);
 void sas_dprint_phye(int phyid, enum phy_event pe);
 void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he);
 void sas_dump_port(struct asd_sas_port *port);
-
-#else /* SAS_DEBUG */
-
-static inline void sas_dprint_porte(int phyid, enum port_event pe) { }
-static inline void sas_dprint_phye(int phyid, enum phy_event pe) { }
-static inline void sas_dprint_hae(struct sas_ha_struct *sas_ha,
-				  enum ha_event he) { }
-static inline void sas_dump_port(struct asd_sas_port *port) { }
-
-#endif /* SAS_DEBUG */

+ 5 - 0
drivers/scsi/libsas/sas_expander.c

@@ -244,6 +244,11 @@ static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
 		 * dev to host FIS as described in section G.5 of
 		 * sas-2 r 04b */
 		dr = &((struct smp_resp *)disc_resp)->disc;
+		if (memcmp(dev->sas_addr, dr->attached_sas_addr,
+			  SAS_ADDR_SIZE) == 0) {
+			sas_printk("Found loopback topology, just ignore it!\n");
+			return 0;
+		}
 		if (!(dr->attached_dev_type == 0 &&
 		      dr->attached_sata_dev))
 			break;

+ 1 - 5
drivers/scsi/libsas/sas_internal.h

@@ -33,11 +33,7 @@

 #define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)

-#ifdef SAS_DEBUG
-#define SAS_DPRINTK(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
-#else
-#define SAS_DPRINTK(fmt, ...)
-#endif
+#define SAS_DPRINTK(fmt, ...) printk(KERN_DEBUG "sas: " fmt, ## __VA_ARGS__)

 #define TO_SAS_TASK(_scsi_cmd)  ((void *)(_scsi_cmd)->host_scribble)
 #define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)

+ 1 - 2
drivers/scsi/libsas/sas_scsi_host.c

@@ -681,11 +681,10 @@ enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
 {
 	struct sas_task *task = TO_SAS_TASK(cmd);
 	unsigned long flags;
-	enum blk_eh_timer_return rtn; 
+	enum blk_eh_timer_return rtn;

 	if (sas_ata_timed_out(cmd, task, &rtn))
 		return rtn;
-

 	if (!task) {
 		cmd->request->timeout /= 2;

+ 13 - 2
drivers/scsi/lpfc/lpfc.h

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -325,6 +325,7 @@ struct lpfc_vport {
 #define FC_VPORT_CVL_RCVD	0x400000 /* VLink failed due to CVL	 */
 #define FC_VFI_REGISTERED	0x800000 /* VFI is registered */
 #define FC_FDISC_COMPLETED	0x1000000/* FDISC completed */
+#define FC_DISC_DELAYED		0x2000000/* Delay NPort discovery */

 	uint32_t ct_flags;
 #define FC_CT_RFF_ID		0x1	 /* RFF_ID accepted by switch */
@@ -348,6 +349,8 @@ struct lpfc_vport {

 	uint32_t fc_myDID;	/* fibre channel S_ID */
 	uint32_t fc_prevDID;	/* previous fibre channel S_ID */
+	struct lpfc_name fabric_portname;
+	struct lpfc_name fabric_nodename;

 	int32_t stopped;   /* HBA has not been restarted since last ERATT */
 	uint8_t fc_linkspeed;	/* Link speed after last READ_LA */
@@ -372,6 +375,7 @@ struct lpfc_vport {
 #define WORKER_DISC_TMO                0x1	/* vport: Discovery timeout */
 #define WORKER_ELS_TMO                 0x2	/* vport: ELS timeout */
 #define WORKER_FDMI_TMO                0x4	/* vport: FDMI timeout */
+#define WORKER_DELAYED_DISC_TMO        0x8	/* vport: delayed discovery */

 #define WORKER_MBOX_TMO                0x100	/* hba: MBOX timeout */
 #define WORKER_HB_TMO                  0x200	/* hba: Heart beat timeout */
@@ -382,6 +386,7 @@ struct lpfc_vport {

 	struct timer_list fc_fdmitmo;
 	struct timer_list els_tmofunc;
+	struct timer_list delayed_disc_tmo;

 	int unreg_vpi_cmpl;

@@ -548,6 +553,8 @@ struct lpfc_hba {
 #define LPFC_SLI3_CRP_ENABLED		0x08
 #define LPFC_SLI3_BG_ENABLED		0x20
 #define LPFC_SLI3_DSS_ENABLED		0x40
+#define LPFC_SLI4_PERFH_ENABLED		0x80
+#define LPFC_SLI4_PHWQ_ENABLED		0x100
 	uint32_t iocb_cmd_size;
 	uint32_t iocb_rsp_size;

@@ -655,7 +662,7 @@ struct lpfc_hba {
 #define LPFC_INITIALIZE_LINK              0	/* do normal init_link mbox */
 #define LPFC_DELAY_INIT_LINK              1	/* layered driver hold off */
 #define LPFC_DELAY_INIT_LINK_INDEFINITELY 2	/* wait, manual intervention */
-
+	uint32_t cfg_enable_dss;
 	lpfc_vpd_t vpd;		/* vital product data */

 	struct pci_dev *pcidev;
@@ -792,6 +799,10 @@ struct lpfc_hba {
 	struct dentry *debug_slow_ring_trc;
 	struct lpfc_debugfs_trc *slow_ring_trc;
 	atomic_t slow_ring_trc_cnt;
+	/* iDiag debugfs sub-directory */
+	struct dentry *idiag_root;
+	struct dentry *idiag_pci_cfg;
+	struct dentry *idiag_que_info;
 #endif

 	/* Used for deferred freeing of ELS data buffers */

+ 93 - 30
drivers/scsi/lpfc/lpfc_attr.c

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -623,10 +623,14 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
 	int status = 0;
 	int cnt = 0;
 	int i;
+	int rc;

 	init_completion(&online_compl);
-	lpfc_workq_post_event(phba, &status, &online_compl,
+	rc = lpfc_workq_post_event(phba, &status, &online_compl,
 			      LPFC_EVT_OFFLINE_PREP);
+	if (rc == 0)
+		return -ENOMEM;
+
 	wait_for_completion(&online_compl);

 	if (status != 0)
@@ -652,7 +656,10 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
 	}

 	init_completion(&online_compl);
-	lpfc_workq_post_event(phba, &status, &online_compl, type);
+	rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
+	if (rc == 0)
+		return -ENOMEM;
+
 	wait_for_completion(&online_compl);

 	if (status != 0)
@@ -671,6 +678,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
  *
  * Notes:
  * Assumes any error from lpfc_do_offline() will be negative.
+ * Do not make this function static.
  *
  * Returns:
  * lpfc_do_offline() return code if not zero
@@ -682,6 +690,7 @@ lpfc_selective_reset(struct lpfc_hba *phba)
 {
 	struct completion online_compl;
 	int status = 0;
+	int rc;

 	if (!phba->cfg_enable_hba_reset)
 		return -EIO;
@@ -692,8 +701,11 @@ lpfc_selective_reset(struct lpfc_hba *phba)
 		return status;

 	init_completion(&online_compl);
-	lpfc_workq_post_event(phba, &status, &online_compl,
+	rc = lpfc_workq_post_event(phba, &status, &online_compl,
 			      LPFC_EVT_ONLINE);
+	if (rc == 0)
+		return -ENOMEM;
+
 	wait_for_completion(&online_compl);

 	if (status != 0)
@@ -812,14 +824,17 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
 	struct lpfc_hba   *phba = vport->phba;
 	struct completion online_compl;
 	int status=0;
+	int rc;

 	if (!phba->cfg_enable_hba_reset)
 		return -EACCES;
 	init_completion(&online_compl);

 	if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
-		lpfc_workq_post_event(phba, &status, &online_compl,
+		rc = lpfc_workq_post_event(phba, &status, &online_compl,
 				      LPFC_EVT_ONLINE);
+		if (rc == 0)
+			return -ENOMEM;
 		wait_for_completion(&online_compl);
 	} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
 		status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
@@ -1278,6 +1293,28 @@ lpfc_fips_rev_show(struct device *dev,  struct device_attribute *attr,
 	return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
 }

+/**
+ * lpfc_dss_show - Return the current state of dss and the configured state
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_dss_show(struct device *dev, struct device_attribute *attr,
+	      char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
+			(phba->cfg_enable_dss) ? "Enabled" : "Disabled",
+			(phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
+				"" : "Not ");
+}
+
 /**
  * lpfc_param_show - Return a cfg attribute value in decimal
  *
@@ -1597,13 +1634,13 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \

 #define LPFC_ATTR(name, defval, minval, maxval, desc) \
 static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_param_init(name, defval, minval, maxval)

 #define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
 static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_param_show(name)\
 lpfc_param_init(name, defval, minval, maxval)\
@@ -1611,7 +1648,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)

 #define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
 static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_param_show(name)\
 lpfc_param_init(name, defval, minval, maxval)\
@@ -1622,7 +1659,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\

 #define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
 static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_param_hex_show(name)\
 lpfc_param_init(name, defval, minval, maxval)\
@@ -1630,7 +1667,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)

 #define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
 static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_param_hex_show(name)\
 lpfc_param_init(name, defval, minval, maxval)\
@@ -1641,13 +1678,13 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\

 #define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \
 static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_vport_param_init(name, defval, minval, maxval)

 #define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \
 static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_vport_param_show(name)\
 lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1655,7 +1692,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)

 #define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
 static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_vport_param_show(name)\
 lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1666,7 +1703,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\

 #define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \
 static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_vport_param_hex_show(name)\
 lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1674,7 +1711,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)

 #define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
 static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_vport_param_hex_show(name)\
 lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1718,7 +1755,7 @@ static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
 static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
 static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
 static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
-
+static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);

 static char *lpfc_soft_wwn_key = "C99G71SL8032A";

@@ -1813,6 +1850,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
 	int stat1=0, stat2=0;
 	unsigned int i, j, cnt=count;
 	u8 wwpn[8];
+	int rc;

 	if (!phba->cfg_enable_hba_reset)
 		return -EACCES;
@@ -1863,7 +1901,11 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
 				"0463 lpfc_soft_wwpn attribute set failed to "
 				"0463 lpfc_soft_wwpn attribute set failed to "
 				"reinit adapter - %d\n", stat1);
 				"reinit adapter - %d\n", stat1);
 	init_completion(&online_compl);
 	init_completion(&online_compl);
-	lpfc_workq_post_event(phba, &stat2, &online_compl, LPFC_EVT_ONLINE);
+	rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
+				   LPFC_EVT_ONLINE);
+	if (rc == 0)
+		return -ENOMEM;
+
 	wait_for_completion(&online_compl);
 	if (stat2)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1954,7 +1996,7 @@ static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\


 static int lpfc_poll = 0;
-module_param(lpfc_poll, int, 0);
+module_param(lpfc_poll, int, S_IRUGO);
 MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
 		 " 0 - none,"
 		 " 1 - poll with interrupts enabled"
@@ -1964,21 +2006,21 @@ static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
 		   lpfc_poll_show, lpfc_poll_store);

 int  lpfc_sli_mode = 0;
-module_param(lpfc_sli_mode, int, 0);
+module_param(lpfc_sli_mode, int, S_IRUGO);
 MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
 		 " 0 - auto (SLI-3 if supported),"
 		 " 2 - select SLI-2 even on SLI-3 capable HBAs,"
 		 " 3 - select SLI-3");

 int lpfc_enable_npiv = 1;
-module_param(lpfc_enable_npiv, int, 0);
+module_param(lpfc_enable_npiv, int, S_IRUGO);
 MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality");
 lpfc_param_show(enable_npiv);
 lpfc_param_init(enable_npiv, 1, 0, 1);
 static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);

 int lpfc_enable_rrq;
-module_param(lpfc_enable_rrq, int, 0);
+module_param(lpfc_enable_rrq, int, S_IRUGO);
 MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
 lpfc_param_show(enable_rrq);
 lpfc_param_init(enable_rrq, 0, 0, 1);
@@ -2040,7 +2082,7 @@ static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
 			 lpfc_txcmplq_hw_show, NULL);

 int lpfc_iocb_cnt = 2;
-module_param(lpfc_iocb_cnt, int, 1);
+module_param(lpfc_iocb_cnt, int, S_IRUGO);
 MODULE_PARM_DESC(lpfc_iocb_cnt,
 	"Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
 lpfc_param_show(iocb_cnt);
@@ -2192,7 +2234,7 @@ static DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
 # disappear until the timer expires. Value range is [0,255]. Default
 # value is 30.
 */
-module_param(lpfc_devloss_tmo, int, 0);
+module_param(lpfc_devloss_tmo, int, S_IRUGO);
 MODULE_PARM_DESC(lpfc_devloss_tmo,
 		 "Seconds driver will hold I/O waiting "
 		 "for a device to come back");
@@ -2302,7 +2344,7 @@ LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
 # Default value of this parameter is 1.
 */
 static int lpfc_restrict_login = 1;
-module_param(lpfc_restrict_login, int, 0);
+module_param(lpfc_restrict_login, int, S_IRUGO);
 MODULE_PARM_DESC(lpfc_restrict_login,
 		 "Restrict virtual ports login to remote initiators.");
 lpfc_vport_param_show(restrict_login);
@@ -2473,7 +2515,7 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
 	return -EINVAL;
 }
 static int lpfc_topology = 0;
-module_param(lpfc_topology, int, 0);
+module_param(lpfc_topology, int, S_IRUGO);
 MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology");
 lpfc_param_show(topology)
 lpfc_param_init(topology, 0, 0, 6)
@@ -2915,7 +2957,7 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
 }

 static int lpfc_link_speed = 0;
-module_param(lpfc_link_speed, int, 0);
+module_param(lpfc_link_speed, int, S_IRUGO);
 MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
 lpfc_param_show(link_speed)

@@ -3043,7 +3085,7 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
 }

 static int lpfc_aer_support = 1;
-module_param(lpfc_aer_support, int, 1);
+module_param(lpfc_aer_support, int, S_IRUGO);
 MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
 lpfc_param_show(aer_support)

@@ -3155,7 +3197,7 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
 # The value is set in milliseconds.
 */
 static int lpfc_max_scsicmpl_time;
-module_param(lpfc_max_scsicmpl_time, int, 0);
+module_param(lpfc_max_scsicmpl_time, int, S_IRUGO);
 MODULE_PARM_DESC(lpfc_max_scsicmpl_time,
 	"Use command completion time to control queue depth");
 lpfc_vport_param_show(max_scsicmpl_time);
@@ -3331,7 +3373,7 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
 */
 unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION;

-module_param(lpfc_prot_mask, uint, 0);
+module_param(lpfc_prot_mask, uint, S_IRUGO);
 MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");

 /*
@@ -3343,9 +3385,28 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
 #
 */
 unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
-module_param(lpfc_prot_guard, byte, 0);
+module_param(lpfc_prot_guard, byte, S_IRUGO);
 MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");

+/*
+ * Delay initial NPort discovery when Clean Address bit is cleared in
+ * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed.
+ * This parameter can have value 0 or 1.
+ * When this parameter is set to 0, no delay is added to the initial
+ * discovery.
+ * When this parameter is set to a non-zero value, initial NPort discovery
+ * is delayed by ra_tov seconds when the Clean Address bit is cleared in the
+ * FLOGI/FDISC accept and the FCID/Fabric name/Fabric portname is changed.
+ * The driver always delays NPort discovery for subsequent FLOGI/FDISC
+ * completions when the Clean Address bit is cleared in the FLOGI/FDISC
+ * accept and the FCID/Fabric name/Fabric portname is changed.
+ * Default value is 0.
+ */
+int lpfc_delay_discovery;
+module_param(lpfc_delay_discovery, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_delay_discovery,
+	"Delay NPort discovery when Clean Address bit is cleared. "
+	"Allowed values: 0,1.");
 
 
 /*
  * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
@@ -3437,6 +3498,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_txcmplq_hw,
 	&dev_attr_lpfc_fips_level,
 	&dev_attr_lpfc_fips_rev,
+	&dev_attr_lpfc_dss,
 	NULL,
 };

@@ -4639,6 +4701,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_aer_support_init(phba, lpfc_aer_support);
 	lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
 	lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
+	phba->cfg_enable_dss = 1;
 	return;
 }


+ 6 - 3
drivers/scsi/lpfc/lpfc_crtn.h

@@ -53,9 +53,9 @@ void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
 void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
 void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
 void lpfc_supported_pages(struct lpfcMboxq *);
-void lpfc_sli4_params(struct lpfcMboxq *);
+void lpfc_pc_sli4_params(struct lpfcMboxq *);
 int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
-
+int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
 struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
 void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
 void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
@@ -167,6 +167,8 @@ int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
 int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
 void lpfc_fdmi_tmo(unsigned long);
 void lpfc_fdmi_timeout_handler(struct lpfc_vport *);
+void lpfc_delayed_disc_tmo(unsigned long);
+void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);

 int lpfc_config_port_prep(struct lpfc_hba *);
 int lpfc_config_port_post(struct lpfc_hba *);
@@ -341,6 +343,7 @@ extern struct fc_function_template lpfc_transport_functions;
 extern struct fc_function_template lpfc_vport_transport_functions;
 extern int lpfc_sli_mode;
 extern int lpfc_enable_npiv;
+extern int lpfc_delay_discovery;

 int  lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
 int  lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *,	size_t);
@@ -423,6 +426,6 @@ int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
 int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
 	uint16_t, uint16_t, uint16_t);
 void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
-void lpfc_cleanup_vports_rrqs(struct lpfc_vport *);
+void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
 struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
 	uint32_t);

+ 49 - 0
drivers/scsi/lpfc/lpfc_ct.c

@@ -1738,6 +1738,55 @@ fdmi_cmd_exit:
 	return 1;
 }

+/**
+ * lpfc_delayed_disc_tmo - Timeout handler for delayed discovery timer.
+ * @ptr: context object of the timer.
+ *
+ * This function sets the WORKER_DELAYED_DISC_TMO flag and wakes up
+ * the worker thread.
+ **/
+void
+lpfc_delayed_disc_tmo(unsigned long ptr)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t tmo_posted;
+	unsigned long iflag;
+
+	spin_lock_irqsave(&vport->work_port_lock, iflag);
+	tmo_posted = vport->work_port_events & WORKER_DELAYED_DISC_TMO;
+	if (!tmo_posted)
+		vport->work_port_events |= WORKER_DELAYED_DISC_TMO;
+	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
+}
+
+/**
+ * lpfc_delayed_disc_timeout_handler - Function called by worker thread to
+ *      handle delayed discovery.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This function starts NPort discovery for the vport.
+ **/
+void
+lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	spin_lock_irq(shost->host_lock);
+	if (!(vport->fc_flag & FC_DISC_DELAYED)) {
+		spin_unlock_irq(shost->host_lock);
+		return;
+	}
+	vport->fc_flag &= ~FC_DISC_DELAYED;
+	spin_unlock_irq(shost->host_lock);
+
+	lpfc_do_scr_ns_plogi(vport->phba, vport);
+}
+
 void
 lpfc_fdmi_tmo(unsigned long ptr)
 {
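
lpfc_delayed_disc_tmo() above only posts WORKER_DELAYED_DISC_TMO and wakes the worker; arming the vport's delayed_disc_tmo timer happens elsewhere in the driver and is not part of this hunk. A sketch of the setup and arming under the init_timer()/mod_timer() style of this kernel, delaying by ra_tov as the lpfc_delay_discovery description states (the placement in lpfc's init and discovery paths, and the phba->fc_ratov field name, are assumptions):

/* Sketch only: where lpfc initializes and arms delayed_disc_tmo is
 * outside this diff; phba->fc_ratov is an assumed field name. */
init_timer(&vport->delayed_disc_tmo);
vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
vport->delayed_disc_tmo.data = (unsigned long)vport;

/* Later, once FC_DISC_DELAYED has been set on the vport: */
mod_timer(&vport->delayed_disc_tmo,
	  jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));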

File diff suppressed because it is too large
+ 882 - 55
drivers/scsi/lpfc/lpfc_debugfs.c


+ 59 - 1
drivers/scsi/lpfc/lpfc_debugfs.h

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2007 Emulex.  All rights reserved.                *
+ * Copyright (C) 2007-2011 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -22,6 +22,44 @@
 #define _H_LPFC_DEBUG_FS

 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+
+/* size of output line, for discovery_trace and slow_ring_trace */
+#define LPFC_DEBUG_TRC_ENTRY_SIZE 100
+
+/* nodelist output buffer size */
+#define LPFC_NODELIST_SIZE 8192
+#define LPFC_NODELIST_ENTRY_SIZE 120
+
+/* dumpHBASlim output buffer size */
+#define LPFC_DUMPHBASLIM_SIZE 4096
+
+/* dumpHostSlim output buffer size */
+#define LPFC_DUMPHOSTSLIM_SIZE 4096
+
+/* hbqinfo output buffer size */
+#define LPFC_HBQINFO_SIZE 8192
+
+/* rdPciConf output buffer size */
+#define LPFC_PCI_CFG_SIZE 4096
+#define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2)
+#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4)
+
+/* queue info output buffer size */
+#define LPFC_QUE_INFO_GET_BUF_SIZE 2048
+
+#define SIZE_U8  sizeof(uint8_t)
+#define SIZE_U16 sizeof(uint16_t)
+#define SIZE_U32 sizeof(uint32_t)
+
+struct lpfc_debug {
+	char *i_private;
+	char op;
+#define LPFC_IDIAG_OP_RD 1
+#define LPFC_IDIAG_OP_WR 2
+	char *buffer;
+	int  len;
+};
+
 struct lpfc_debugfs_trc {
 	char *fmt;
 	uint32_t data1;
@@ -30,6 +68,26 @@ struct lpfc_debugfs_trc {
 	uint32_t seq_cnt;
 	unsigned long jif;
 };
+
+struct lpfc_idiag_offset {
+	uint32_t last_rd;
+};
+
+#define LPFC_IDIAG_CMD_DATA_SIZE 4
+struct lpfc_idiag_cmd {
+	uint32_t opcode;
+#define LPFC_IDIAG_CMD_PCICFG_RD 0x00000001
+#define LPFC_IDIAG_CMD_PCICFG_WR 0x00000002
+#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003
+#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004
+	uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE];
+};
+
+struct lpfc_idiag {
+	uint32_t active;
+	struct lpfc_idiag_cmd cmd;
+	struct lpfc_idiag_offset offset;
+};
 #endif

 /* Mask for discovery_trace */

+ 163 - 10
drivers/scsi/lpfc/lpfc_els.c

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -484,6 +484,59 @@ fail:
 	return rc;
 	return rc;
 }
 }
 
 
+/**
+ * lpfc_check_clean_addr_bit - Check whether the assigned FCID is clean.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @sp: pointer to service parameter data structure.
+ *
+ * This routine is called from the FLOGI/FDISC completion handlers. It
+ * returns 1 when the FCID, Fabric portname, or Fabric nodename has
+ * changed in the completion service parameters; otherwise it returns 0.
+ * It also sets a flag in the vport data structure to delay N_Port
+ * discovery after FLOGI/FDISC completion when the Clean Address bit in
+ * the FLOGI/FDISC response is cleared and the FCID, Fabric portname, or
+ * Fabric nodename has changed.
+ *
+ * Return code
+ *   0 - FCID, Fabric nodename, and Fabric portname are unchanged.
+ *   1 - FCID, Fabric nodename, or Fabric portname has changed.
+ *
+ **/
+static uint8_t
+lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
+		struct serv_parm *sp)
+{
+	uint8_t fabric_param_changed = 0;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if ((vport->fc_prevDID != vport->fc_myDID) ||
+		memcmp(&vport->fabric_portname, &sp->portName,
+			sizeof(struct lpfc_name)) ||
+		memcmp(&vport->fabric_nodename, &sp->nodeName,
+			sizeof(struct lpfc_name)))
+		fabric_param_changed = 1;
+
+	/*
+	 * Word 1 Bit 31 in common service parameter is overloaded.
+	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
+	 * Word 1 Bit 31 in FLOGI response is clean address bit
+	 *
+	 * If fabric parameter is changed and clean address bit is
+	 * cleared delay nport discovery if
+	 * - vport->fc_prevDID != 0 (not initial discovery) OR
+	 * - lpfc_delay_discovery module parameter is set.
+	 */
+	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
+	    (vport->fc_prevDID || lpfc_delay_discovery)) {
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_DISC_DELAYED;
+		spin_unlock_irq(shost->host_lock);
+	}
+
+	return fabric_param_changed;
+}
+
+
 /**
  * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
  * @vport: pointer to a host virtual N_Port data structure.
@@ -512,6 +565,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	struct lpfc_hba  *phba = vport->phba;
 	struct lpfc_nodelist *np;
 	struct lpfc_nodelist *next_np;
+	uint8_t fabric_param_changed;
 
 	spin_lock_irq(shost->host_lock);
 	vport->fc_flag |= FC_FABRIC;
@@ -544,6 +598,12 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		ndlp->nlp_class_sup |= FC_COS_CLASS4;
 	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
 				sp->cmn.bbRcvSizeLsb;
+
+	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
+	memcpy(&vport->fabric_portname, &sp->portName,
+			sizeof(struct lpfc_name));
+	memcpy(&vport->fabric_nodename, &sp->nodeName,
+			sizeof(struct lpfc_name));
 	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
 
 	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
@@ -565,7 +625,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		}
 	}
 
-	if ((vport->fc_prevDID != vport->fc_myDID) &&
+	if (fabric_param_changed &&
 		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
 
 		/* If our NportID changed, we need to ensure all
@@ -2203,6 +2263,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 	IOCB_t *irsp;
 	struct lpfc_sli *psli;
+	struct lpfcMboxq *mbox;
 
 	psli = &phba->sli;
 	/* we pass cmdiocb to state machine which needs rspiocb as well */
@@ -2260,6 +2321,21 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 					NLP_EVT_CMPL_LOGO);
 out:
 	lpfc_els_free_iocb(phba, cmdiocb);
+	/* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
+	if ((vport->fc_flag & FC_PT2PT) &&
+		!(vport->fc_flag & FC_PT2PT_PLOGI)) {
+		phba->pport->fc_myDID = 0;
+		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (mbox) {
+			lpfc_config_link(phba, mbox);
+			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+			mbox->vport = vport;
+			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
+				MBX_NOT_FINISHED) {
+				mempool_free(mbox, phba->mbox_mem_pool);
+			}
+		}
+	}
 	return;
 }
 
@@ -2745,7 +2821,8 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
 		}
 		break;
 	case ELS_CMD_FDISC:
-		lpfc_issue_els_fdisc(vport, ndlp, retry);
+		if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
+			lpfc_issue_els_fdisc(vport, ndlp, retry);
 		break;
 	}
 	return;
@@ -2815,9 +2892,17 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
 	switch (irsp->ulpStatus) {
 	case IOSTAT_FCP_RSP_ERROR:
+		break;
 	case IOSTAT_REMOTE_STOP:
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			/* This IO was aborted by the target, we don't
+			 * know the rxid and because we did not send the
+			 * ABTS we cannot generate an RRQ.
+			 */
+			lpfc_set_rrq_active(phba, ndlp,
+					 cmdiocb->sli4_xritag, 0, 0);
+		}
 		break;
 		break;
-
 	case IOSTAT_LOCAL_REJECT:
 		switch ((irsp->un.ulpWord[4] & 0xff)) {
 		case IOERR_LOOP_OPEN_FAILURE:
@@ -4013,28 +4098,34 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport,
 	uint8_t *pcmd;
 	struct RRQ *rrq;
 	uint16_t rxid;
+	uint16_t xri;
 	struct lpfc_node_rrq *prrq;
 
 
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
 	pcmd += sizeof(uint32_t);
 	rrq = (struct RRQ *)pcmd;
-	rxid = bf_get(rrq_oxid, rrq);
+	rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
+	rxid = be16_to_cpu(bf_get(rrq_rxid, rrq));
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 			"2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
 			" x%x x%x\n",
-			bf_get(rrq_did, rrq),
-			bf_get(rrq_oxid, rrq),
+			be32_to_cpu(bf_get(rrq_did, rrq)),
+			be16_to_cpu(bf_get(rrq_oxid, rrq)),
 			rxid,
 			iocb->iotag, iocb->iocb.ulpContext);
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
 		"Clear RRQ:  did:x%x flg:x%x exchg:x%.08x",
 		ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
-	prrq = lpfc_get_active_rrq(vport, rxid, ndlp->nlp_DID);
+	if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
+		xri = be16_to_cpu(bf_get(rrq_oxid, rrq));
+	else
+		xri = rxid;
+	prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
 	if (prrq)
-		lpfc_clr_rrq_active(phba, rxid, prrq);
+		lpfc_clr_rrq_active(phba, xri, prrq);
 	return;
 }
 
@@ -6166,6 +6257,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	if (vport->load_flag & FC_UNLOADING)
 		goto dropit;
 
+	/* If NPort discovery is delayed drop incoming ELS */
+	if ((vport->fc_flag & FC_DISC_DELAYED) &&
+			(cmd != ELS_CMD_PLOGI))
+		goto dropit;
+
 	ndlp = lpfc_findnode_did(vport, did);
 	if (!ndlp) {
 		/* Cannot find existing Fabric ndlp, so allocate a new one */
@@ -6218,6 +6314,12 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
 
 		lpfc_send_els_event(vport, ndlp, payload);
+
+		/* If Nport discovery is delayed, reject PLOGIs */
+		if (vport->fc_flag & FC_DISC_DELAYED) {
+			rjt_err = LSRJT_UNABLE_TPC;
+			break;
+		}
 		if (vport->port_state < LPFC_DISC_AUTH) {
 			if (!(phba->pport->fc_flag & FC_PT2PT) ||
 				(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -6596,6 +6698,21 @@ void
 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
 {
 	struct lpfc_nodelist *ndlp, *ndlp_fdmi;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	/*
+	 * If the lpfc_delay_discovery parameter is set, the clean address
+	 * bit is cleared, and the FC fabric parameters changed, delay FC
+	 * NPort discovery.
+	 */
+	spin_lock_irq(shost->host_lock);
+	if (vport->fc_flag & FC_DISC_DELAYED) {
+		spin_unlock_irq(shost->host_lock);
+		mod_timer(&vport->delayed_disc_tmo,
+			jiffies + HZ * phba->fc_ratov);
+		return;
+	}
+	spin_unlock_irq(shost->host_lock);
 
 	ndlp = lpfc_findnode_did(vport, NameServer_DID);
 	if (!ndlp) {
@@ -6938,6 +7055,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	struct lpfc_nodelist *next_np;
 	IOCB_t *irsp = &rspiocb->iocb;
 	struct lpfc_iocbq *piocb;
+	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+	struct serv_parm *sp;
+	uint8_t fabric_param_changed;
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 			 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
@@ -6981,7 +7101,14 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
 	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
 	lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
-	if ((vport->fc_prevDID != vport->fc_myDID) &&
+	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+	sp = prsp->virt + sizeof(uint32_t);
+	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
+	memcpy(&vport->fabric_portname, &sp->portName,
+		sizeof(struct lpfc_name));
+	memcpy(&vport->fabric_nodename, &sp->nodeName,
+		sizeof(struct lpfc_name));
+	if (fabric_param_changed &&
 		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
 		/* If our NportID changed, we need to ensure all
 		 * remaining NPORTs get unreg_login'ed so we can
@@ -7581,6 +7708,32 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
 			      IOERR_SLI_ABORTED);
 }
 
+/**
+ * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * This routine is invoked by the vport cleanup for deletions and the cleanup
+ * for an ndlp on removal.
+ **/
+void
+lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+	unsigned long iflag = 0;
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+	list_for_each_entry_safe(sglq_entry, sglq_next,
+			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
+		if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
+			sglq_entry->ndlp = NULL;
+	}
+	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return;
+}
+
 /**
  * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
  * @phba: pointer to lpfc hba data structure.

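The FLOGI/FDISC changes above all key off one overloaded bit: word 1 bit 31 of the common service parameters means "multiple N_Port request" in a FLOGI request but "clean address" in the response. A standalone sketch of that decision, with simplified stand-in types and names rather than the driver's own structures:

#include <stdint.h>
#include <string.h>

struct wwn { uint8_t b[8]; };

struct fabric_state {
	uint32_t prev_did, my_did;
	struct wwn fab_portname, fab_nodename;
	int delay_discovery;	/* stand-in for the module parameter */
	int disc_delayed;	/* stand-in for FC_DISC_DELAYED */
};

/* Returns 1 if the FCID or fabric names changed since the last login. */
static int check_clean_addr_bit(struct fabric_state *st,
				const struct wwn *sp_port,
				const struct wwn *sp_node,
				int clean_address_bit)
{
	int changed = st->prev_did != st->my_did ||
		      memcmp(&st->fab_portname, sp_port, sizeof(*sp_port)) ||
		      memcmp(&st->fab_nodename, sp_node, sizeof(*sp_node));

	/* If the fabric could not guarantee the old address is clean and
	 * the parameters changed, postpone N_Port discovery (unless this
	 * is the very first discovery and the delay knob is off).
	 */
	if (changed && !clean_address_bit &&
	    (st->prev_did || st->delay_discovery))
		st->disc_delayed = 1;

	return changed;
}
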
+ 14 - 4
drivers/scsi/lpfc/lpfc_hbadisc.c

@@ -658,6 +658,8 @@ lpfc_work_done(struct lpfc_hba *phba)
 				lpfc_ramp_down_queue_handler(phba);
 			if (work_port_events & WORKER_RAMP_UP_QUEUE)
 				lpfc_ramp_up_queue_handler(phba);
+			if (work_port_events & WORKER_DELAYED_DISC_TMO)
+				lpfc_delayed_disc_timeout_handler(vport);
 		}
 	lpfc_destroy_vport_work_array(phba, vports);
 
@@ -838,6 +840,11 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
 
 	lpfc_port_link_failure(vport);
 
+	/* Stop delayed Nport discovery */
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_DISC_DELAYED;
+	spin_unlock_irq(shost->host_lock);
+	del_timer_sync(&vport->delayed_disc_tmo);
 }
 
 int
@@ -3160,7 +3167,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	spin_unlock_irq(shost->host_lock);
 	vport->unreg_vpi_cmpl = VPORT_OK;
 	mempool_free(pmb, phba->mbox_mem_pool);
-	lpfc_cleanup_vports_rrqs(vport);
+	lpfc_cleanup_vports_rrqs(vport, NULL);
 	/*
 	 * This shost reference might have been taken at the beginning of
 	 * lpfc_vport_delete()
@@ -3900,6 +3907,8 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
 		return;
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
+	if (vport->phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_cleanup_vports_rrqs(vport, ndlp);
 	lpfc_nlp_put(ndlp);
 	return;
 }
@@ -4289,7 +4298,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 
 	list_del_init(&ndlp->els_retry_evt.evt_listp);
 	list_del_init(&ndlp->dev_loss_evt.evt_listp);
-
+	lpfc_cleanup_vports_rrqs(vport, ndlp);
 	lpfc_unreg_rpi(vport, ndlp);
 
 	return 0;
@@ -4426,10 +4435,11 @@ lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_nodelist *ndlp;
+	unsigned long iflags;
 
-	spin_lock_irq(shost->host_lock);
+	spin_lock_irqsave(shost->host_lock, iflags);
 	ndlp = __lpfc_findnode_did(vport, did);
-	spin_unlock_irq(shost->host_lock);
+	spin_unlock_irqrestore(shost->host_lock, iflags);
 	return ndlp;
 }
 

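The lpfc_findnode_did hunk switches from spin_lock_irq() to spin_lock_irqsave(). The point of that change, sketched here under the assumption that the lookup can now be reached from a caller that already holds interrupts off: spin_unlock_irq() would unconditionally re-enable interrupts, while irqrestore puts back whatever state the caller had.

#include <linux/spinlock.h>

static void lookup_safely(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* saves current IRQ state */
	/* ... search a list protected by @lock ... */
	spin_unlock_irqrestore(lock, flags);	/* restores the saved state */
}
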
+ 10 - 1
drivers/scsi/lpfc/lpfc_hw.h

@@ -341,6 +341,12 @@ struct csp {
 	uint8_t bbCreditMsb;
 	uint8_t bbCreditlsb;	/* FC Word 0, byte 3 */
 
+/*
+ * Word 1 Bit 31 in common service parameter is overloaded.
+ * Word 1 Bit 31 in FLOGI request is multiple NPort request
+ * Word 1 Bit 31 in FLOGI response is clean address bit
+ */
+#define clean_address_bit request_multiple_Nport /* Word 1, bit 31 */
 #ifdef __BIG_ENDIAN_BITFIELD
 	uint16_t request_multiple_Nport:1;	/* FC Word 1, bit 31 */
 	uint16_t randomOffset:1;	/* FC Word 1, bit 30 */
@@ -3198,7 +3204,10 @@ typedef struct {
 #define IOERR_SLER_RRQ_RJT_ERR        0x4C
 #define IOERR_SLER_RRQ_RETRY_ERR      0x4D
 #define IOERR_SLER_ABTS_ERR           0x4E
-
+#define IOERR_ELXSEC_KEY_UNWRAP_ERROR		0xF0
+#define IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR	0xF1
+#define IOERR_ELXSEC_CRYPTO_ERROR		0xF2
+#define IOERR_ELXSEC_CRYPTO_COMPARE_ERROR	0xF3
 #define IOERR_DRVR_MASK               0x100
 #define IOERR_SLI_DOWN                0x101  /* ulpStatus  - Driver defined */
 #define IOERR_SLI_BRESET              0x102

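The clean_address_bit define above is a field-alias idiom: one physical bit carries two meanings depending on direction, so the response-side name is simply defined to the request-side struct member. A condensed illustration with simplified types (the bitfield layout here is only a sketch, not the driver's endian-aware definition):

#include <stdint.h>

struct csp_word1 {
	uint16_t request_multiple_Nport:1;	/* meaning in a FLOGI request */
	uint16_t randomOffset:1;
	uint16_t rsvd:14;
};

/* Same storage bit, response-side meaning: */
#define clean_address_bit request_multiple_Nport

static int response_address_is_clean(const struct csp_word1 *w)
{
	return w->clean_address_bit;	/* reads the aliased bit */
}
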
+ 100 - 4
drivers/scsi/lpfc/lpfc_hw4.h

@@ -778,6 +778,7 @@ struct mbox_header {
 #define LPFC_MBOX_OPCODE_QUERY_FW_CFG		0x3A
 #define LPFC_MBOX_OPCODE_FUNCTION_RESET		0x3D
 #define LPFC_MBOX_OPCODE_MQ_CREATE_EXT		0x5A
+#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS	0xB5
 
 /* FCoE Opcodes */
 #define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE			0x01
@@ -1852,6 +1853,9 @@ struct lpfc_mbx_request_features {
 #define lpfc_mbx_rq_ftr_rq_ifip_SHIFT		7
 #define lpfc_mbx_rq_ftr_rq_ifip_MASK		0x00000001
 #define lpfc_mbx_rq_ftr_rq_ifip_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_perfh_SHIFT		11
+#define lpfc_mbx_rq_ftr_rq_perfh_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_perfh_WORD		word2
 	uint32_t word3;
 #define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT		0
 #define lpfc_mbx_rq_ftr_rsp_iaab_MASK		0x00000001
@@ -1877,6 +1881,9 @@ struct lpfc_mbx_request_features {
 #define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT		7
 #define lpfc_mbx_rq_ftr_rsp_ifip_MASK		0x00000001
 #define lpfc_mbx_rq_ftr_rsp_ifip_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_perfh_SHIFT		11
+#define lpfc_mbx_rq_ftr_rsp_perfh_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_perfh_WORD		word3
 };
 
 struct lpfc_mbx_supp_pages {
@@ -1935,7 +1942,7 @@ struct lpfc_mbx_supp_pages {
 #define LPFC_SLI4_PARAMETERS		2
 };
 
-struct lpfc_mbx_sli4_params {
+struct lpfc_mbx_pc_sli4_params {
 	uint32_t word1;
#define qs_SHIFT				0
 #define qs_MASK					0x00000001
@@ -2051,6 +2058,88 @@ struct lpfc_mbx_sli4_params {
 	uint32_t rsvd_13_63[51];
 };
 
+struct lpfc_sli4_parameters {
+	uint32_t word0;
+#define cfg_prot_type_SHIFT			0
+#define cfg_prot_type_MASK			0x000000FF
+#define cfg_prot_type_WORD			word0
+	uint32_t word1;
+#define cfg_ft_SHIFT				0
+#define cfg_ft_MASK				0x00000001
+#define cfg_ft_WORD				word1
+#define cfg_sli_rev_SHIFT			4
+#define cfg_sli_rev_MASK			0x0000000f
+#define cfg_sli_rev_WORD			word1
+#define cfg_sli_family_SHIFT			8
+#define cfg_sli_family_MASK			0x0000000f
+#define cfg_sli_family_WORD			word1
+#define cfg_if_type_SHIFT			12
+#define cfg_if_type_MASK			0x0000000f
+#define cfg_if_type_WORD			word1
+#define cfg_sli_hint_1_SHIFT			16
+#define cfg_sli_hint_1_MASK			0x000000ff
+#define cfg_sli_hint_1_WORD			word1
+#define cfg_sli_hint_2_SHIFT			24
+#define cfg_sli_hint_2_MASK			0x0000001f
+#define cfg_sli_hint_2_WORD			word1
+	uint32_t word2;
+	uint32_t word3;
+	uint32_t word4;
+#define cfg_cqv_SHIFT				14
+#define cfg_cqv_MASK				0x00000003
+#define cfg_cqv_WORD				word4
+	uint32_t word5;
+	uint32_t word6;
+#define cfg_mqv_SHIFT				14
+#define cfg_mqv_MASK				0x00000003
+#define cfg_mqv_WORD				word6
+	uint32_t word7;
+	uint32_t word8;
+#define cfg_wqv_SHIFT				14
+#define cfg_wqv_MASK				0x00000003
+#define cfg_wqv_WORD				word8
+	uint32_t word9;
+	uint32_t word10;
+#define cfg_rqv_SHIFT				14
+#define cfg_rqv_MASK				0x00000003
+#define cfg_rqv_WORD				word10
+	uint32_t word11;
+#define cfg_rq_db_window_SHIFT			28
+#define cfg_rq_db_window_MASK			0x0000000f
+#define cfg_rq_db_window_WORD			word11
+	uint32_t word12;
+#define cfg_fcoe_SHIFT				0
+#define cfg_fcoe_MASK				0x00000001
+#define cfg_fcoe_WORD				word12
+#define cfg_phwq_SHIFT				15
+#define cfg_phwq_MASK				0x00000001
+#define cfg_phwq_WORD				word12
+#define cfg_loopbk_scope_SHIFT			28
+#define cfg_loopbk_scope_MASK			0x0000000f
+#define cfg_loopbk_scope_WORD			word12
+	uint32_t sge_supp_len;
+	uint32_t word14;
+#define cfg_sgl_page_cnt_SHIFT			0
+#define cfg_sgl_page_cnt_MASK			0x0000000f
+#define cfg_sgl_page_cnt_WORD			word14
+#define cfg_sgl_page_size_SHIFT			8
+#define cfg_sgl_page_size_MASK			0x000000ff
+#define cfg_sgl_page_size_WORD			word14
+#define cfg_sgl_pp_align_SHIFT			16
+#define cfg_sgl_pp_align_MASK			0x000000ff
+#define cfg_sgl_pp_align_WORD			word14
+	uint32_t word15;
+	uint32_t word16;
+	uint32_t word17;
+	uint32_t word18;
+	uint32_t word19;
+};
+
+struct lpfc_mbx_get_sli4_parameters {
+	struct mbox_header header;
+	struct lpfc_sli4_parameters sli4_parameters;
+};
+
 /* Mailbox Completion Queue Error Messages */
 #define MB_CQE_STATUS_SUCCESS 			0x0
 #define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES	0x1
@@ -2103,7 +2192,8 @@ struct lpfc_mqe {
 		struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
 		struct lpfc_mbx_query_fw_cfg query_fw_cfg;
 		struct lpfc_mbx_supp_pages supp_pages;
-		struct lpfc_mbx_sli4_params sli4_params;
+		struct lpfc_mbx_pc_sli4_params sli4_params;
+		struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
 		struct lpfc_mbx_nop nop;
 	} un;
 };
@@ -2381,6 +2471,10 @@ struct wqe_common {
 #define wqe_wqes_SHIFT        15
 #define wqe_wqes_MASK         0x00000001
 #define wqe_wqes_WORD         word10
+/* Note that this field overlaps above fields */
+#define wqe_wqid_SHIFT        1
+#define wqe_wqid_MASK         0x0000007f
+#define wqe_wqid_WORD         word10
 #define wqe_pri_SHIFT         16
 #define wqe_pri_MASK          0x00000007
 #define wqe_pri_WORD          word10
@@ -2599,7 +2693,8 @@ struct fcp_iwrite64_wqe {
 	uint32_t total_xfer_len;
 	uint32_t initial_xfer_len;
 	struct wqe_common wqe_com;     /* words 6-11 */
-	uint32_t rsvd_12_15[4];         /* word 12-15 */
+	uint32_t rsrvd12;
+	struct ulp_bde64 ph_bde;       /* words 13-15 */
 };
 
 struct fcp_iread64_wqe {
@@ -2608,7 +2703,8 @@ struct fcp_iread64_wqe {
 	uint32_t total_xfer_len;       /* word 4 */
 	uint32_t rsrvd5;               /* word 5 */
 	struct wqe_common wqe_com;     /* words 6-11 */
-	uint32_t rsvd_12_15[4];         /* word 12-15 */
+	uint32_t rsrvd12;
+	struct ulp_bde64 ph_bde;       /* words 13-15 */
 };
 
 struct fcp_icmnd64_wqe {

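All of the *_SHIFT/*_MASK/*_WORD triplets added above are consumed by the driver's bf_get()/bf_set() accessors, which paste the three names together with the token-paste operator. The following is a simplified reconstruction of that pattern for illustration, not a verbatim copy of the driver's macros:

#include <stdint.h>

#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = (((ptr)->name##_WORD & \
		~(name##_MASK << name##_SHIFT)) | \
		(((value) & name##_MASK) << name##_SHIFT)))

struct example_params {
	uint32_t word1;
#define cfg_sli_rev_SHIFT	4
#define cfg_sli_rev_MASK	0x0000000f
#define cfg_sli_rev_WORD	word1
};

static uint32_t sli_rev(struct example_params *p)
{
	return bf_get(cfg_sli_rev, p);	/* extracts bits 7:4 of word1 */
}
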
+ 101 - 33
drivers/scsi/lpfc/lpfc_init.c

@@ -460,7 +460,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
 		&& !(phba->lmt & LMT_16Gb))) {
 		/* Reset link speed to auto */
-		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
+		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
 			"1302 Invalid speed for this board: "
 			"Reset link speed to auto: x%x\n",
 			phba->cfg_link_speed);
@@ -945,17 +945,13 @@ static void
 lpfc_rrq_timeout(unsigned long ptr)
 {
 	struct lpfc_hba *phba;
-	uint32_t tmo_posted;
 	unsigned long iflag;
 
 	phba = (struct lpfc_hba *)ptr;
 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
-	tmo_posted = phba->hba_flag & HBA_RRQ_ACTIVE;
-	if (!tmo_posted)
-		phba->hba_flag |= HBA_RRQ_ACTIVE;
+	phba->hba_flag |= HBA_RRQ_ACTIVE;
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
-	if (!tmo_posted)
-		lpfc_worker_wake_up(phba);
+	lpfc_worker_wake_up(phba);
 }
 
 /**
@@ -2280,6 +2276,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
 		/* Wait for any activity on ndlps to settle */
 		msleep(10);
 	}
+	lpfc_cleanup_vports_rrqs(vport, NULL);
 }
 
 /**
@@ -2295,6 +2292,7 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
 {
 	del_timer_sync(&vport->els_tmofunc);
 	del_timer_sync(&vport->fc_fdmitmo);
+	del_timer_sync(&vport->delayed_disc_tmo);
 	lpfc_can_disctmo(vport);
 	return;
 }
@@ -2355,6 +2353,10 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
 	del_timer_sync(&phba->fabric_block_timer);
 	del_timer_sync(&phba->eratt_poll);
 	del_timer_sync(&phba->hb_tmofunc);
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		del_timer_sync(&phba->rrq_tmr);
+		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+	}
 	phba->hb_outstanding = 0;
 
 	switch (phba->pci_dev_grp) {
@@ -2732,6 +2734,11 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	init_timer(&vport->els_tmofunc);
 	vport->els_tmofunc.function = lpfc_els_timeout;
 	vport->els_tmofunc.data = (unsigned long)vport;
+
+	init_timer(&vport->delayed_disc_tmo);
+	vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
+	vport->delayed_disc_tmo.data = (unsigned long)vport;
+
 	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
 	if (error)
 		goto out_put_shost;
@@ -4283,36 +4290,37 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		goto out_free_bsmbx;
 	}
 
-	/* Get the Supported Pages. It is always available. */
+	/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
 	lpfc_supported_pages(mboxq);
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
-	if (unlikely(rc)) {
-		rc = -EIO;
-		mempool_free(mboxq, phba->mbox_mem_pool);
-		goto out_free_bsmbx;
-	}
-
-	mqe = &mboxq->u.mqe;
-	memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
-	       LPFC_MAX_SUPPORTED_PAGES);
-	for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
-		switch (pn_page[i]) {
-		case LPFC_SLI4_PARAMETERS:
-			phba->sli4_hba.pc_sli4_params.supported = 1;
-			break;
-		default:
-			break;
+	if (!rc) {
+		mqe = &mboxq->u.mqe;
+		memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
+		       LPFC_MAX_SUPPORTED_PAGES);
+		for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
+			switch (pn_page[i]) {
+			case LPFC_SLI4_PARAMETERS:
+				phba->sli4_hba.pc_sli4_params.supported = 1;
+				break;
+			default:
+				break;
+			}
+		}
+		/* Read the port's SLI4 Parameters capabilities if supported. */
+		if (phba->sli4_hba.pc_sli4_params.supported)
+			rc = lpfc_pc_sli4_params_get(phba, mboxq);
+		if (rc) {
+			mempool_free(mboxq, phba->mbox_mem_pool);
+			rc = -EIO;
+			goto out_free_bsmbx;
 		}
 	}
-
-	/* Read the port's SLI4 Parameters capabilities if supported. */
-	if (phba->sli4_hba.pc_sli4_params.supported)
-		rc = lpfc_pc_sli4_params_get(phba, mboxq);
+	/*
+	 * Get sli4 parameters that override parameters from Port capabilities.
+	 * If this call fails it is not a critical error so continue loading.
+	 */
+	lpfc_get_sli4_parameters(phba, mboxq);
 	mempool_free(mboxq, phba->mbox_mem_pool);
-	if (rc) {
-		rc = -EIO;
-		goto out_free_bsmbx;
-	}
 	/* Create all the SLI4 queues */
 	rc = lpfc_sli4_queue_create(phba);
 	if (rc)
@@ -7810,7 +7818,7 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	mqe = &mboxq->u.mqe;
 
 	/* Read the port's SLI4 Parameters port capabilities */
-	lpfc_sli4_params(mboxq);
+	lpfc_pc_sli4_params(mboxq);
 	if (!phba->sli4_hba.intr_enable)
 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 	else {
@@ -7853,6 +7861,66 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	return rc;
 }
 
+/**
+ * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
+ *
+ * This function is called in the SLI4 code path to read the port's
+ * sli4 capabilities.
+ *
+ * This function may be called from any context that can block-wait
+ * for the completion.  The expectation is that this routine is called
+ * typically from probe_one or from the online routine.
+ **/
+int
+lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	int rc;
+	struct lpfc_mqe *mqe = &mboxq->u.mqe;
+	struct lpfc_pc_sli4_params *sli4_params;
+	int length;
+	struct lpfc_sli4_parameters *mbx_sli4_parameters;
+
+	/* Read the port's SLI4 Config Parameters */
+	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
+			 length, LPFC_SLI4_MBX_EMBED);
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	else
+		rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
+			lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
+	if (unlikely(rc))
+		return rc;
+	sli4_params = &phba->sli4_hba.pc_sli4_params;
+	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
+	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
+	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
+	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
+	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
+					     mbx_sli4_parameters);
+	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
+					     mbx_sli4_parameters);
+	if (bf_get(cfg_phwq, mbx_sli4_parameters))
+		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
+	else
+		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
+	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
+	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
+	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
+	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
+	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
+	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
+	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
+					    mbx_sli4_parameters);
+	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
+					   mbx_sli4_parameters);
+	return 0;
+}
+
 /**
  * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
  * @pdev: pointer to PCI device

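The lpfc_sli4_driver_resource_setup hunk above turns two formerly fatal mailbox queries into best-effort ones: PORT_CAPABILITIES may simply be unsupported, and the GET_SLI4_PARAMETERS override query is allowed to fail without aborting the load. A condensed sketch of that control flow, with all function names here being hypothetical stand-ins for the mailbox operations:

#include <errno.h>

/* Stubs standing in for the mailbox operations (hypothetical). */
static int issue_mbox_port_capabilities(void) { return 0; }
static void parse_supported_pages(void) { }
static int sli4_params_page_supported(void) { return 1; }
static int read_pc_sli4_params(void) { return 0; }
static void read_get_sli4_parameters(void) { }

static int query_optional_features(void)
{
	int rc = issue_mbox_port_capabilities();	/* may be unsupported */

	if (!rc) {
		parse_supported_pages();
		if (sli4_params_page_supported()) {
			rc = read_pc_sli4_params();
			if (rc)
				return -EIO;	/* a parse error stays fatal */
		}
	}
	/* The override query is optional: ignore failure, keep loading. */
	read_get_sli4_parameters();
	return 0;
}
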
+ 10 - 9
drivers/scsi/lpfc/lpfc_mbox.c

@@ -1263,7 +1263,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
 		if (phba->cfg_enable_bg)
 			mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
-		mb->un.varCfgPort.cdss = 1; /* Configure Security */
+		if (phba->cfg_enable_dss)
+			mb->un.varCfgPort.cdss = 1; /* Configure Security */
 		mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
 		mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
 		mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
@@ -1692,7 +1693,7 @@ lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
  * @mbox: pointer to lpfc mbox command.
  * @subsystem: The sli4 config sub mailbox subsystem.
  * @opcode: The sli4 config sub mailbox command opcode.
- * @length: Length of the sli4 config mailbox command.
+ * @length: Length of the sli4 config mailbox command (including sub-header).
  *
  * This routine sets up the header fields of SLI4 specific mailbox command
 * for sending IOCTL command.
@@ -1723,14 +1724,14 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
 	if (emb) {
 		/* Set up main header fields */
 		bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
-		sli4_config->header.cfg_mhdr.payload_length =
-					LPFC_MBX_CMD_HDR_LENGTH + length;
+		sli4_config->header.cfg_mhdr.payload_length = length;
 		/* Set up sub-header fields following main header */
 		bf_set(lpfc_mbox_hdr_opcode,
 			&sli4_config->header.cfg_shdr.request, opcode);
 		bf_set(lpfc_mbox_hdr_subsystem,
 			&sli4_config->header.cfg_shdr.request, subsystem);
-		sli4_config->header.cfg_shdr.request.request_length = length;
+		sli4_config->header.cfg_shdr.request.request_length =
+			length - LPFC_MBX_CMD_HDR_LENGTH;
 		return length;
 	}
 
@@ -1902,6 +1903,7 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
 
 	/* Set up host requested features. */
 	bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
+	bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1);
 
 	/* Enable DIF (block guard) only if configured to do so. */
 	if (phba->cfg_enable_bg)
@@ -2159,17 +2161,16 @@ lpfc_supported_pages(struct lpfcMboxq *mbox)
 }
 
 /**
- * lpfc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params
- *                    mailbox command.
+ * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd.
  * @mbox: pointer to lpfc mbox command to initialize.
 *
 * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
 * retrieve the particular SLI4 features supported by the port.
 **/
 void
-lpfc_sli4_params(struct lpfcMboxq *mbox)
+lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
 {
-	struct lpfc_mbx_sli4_params *sli4_params;
+	struct lpfc_mbx_pc_sli4_params *sli4_params;
 
 	memset(mbox, 0, sizeof(*mbox));
 	sli4_params = &mbox->u.mqe.un.sli4_params;

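The lpfc_sli4_config hunk changes which header each length field counts: @length now arrives already including the sub-header, so the main-header payload_length takes it whole and the sub-header request_length subtracts the command-header size instead of the old arrangement. A sketch of the new accounting, with a made-up constant standing in for LPFC_MBX_CMD_HDR_LENGTH:

#include <stdint.h>

#define CMD_HDR_LENGTH 16	/* stand-in for LPFC_MBX_CMD_HDR_LENGTH */

struct cfg_lengths {
	uint32_t payload_length;	/* sub-header + payload */
	uint32_t request_length;	/* payload only */
};

/* @length arrives already including the sub-header. */
static void set_lengths(struct cfg_lengths *c, uint32_t length)
{
	c->payload_length = length;			/* was: hdr + length */
	c->request_length = length - CMD_HDR_LENGTH;	/* was: length */
}
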
+ 8 - 3
drivers/scsi/lpfc/lpfc_nportdisc.c

@@ -350,7 +350,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	ndlp->nlp_maxframe =
 		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
 
-	/* no need to reg_login if we are already in one of these states */
+	/*
+	 * Need to unreg_login if we are already in one of these states and
+	 * change to NPR state. This will block the port until after the ACC
+	 * completes and the reg_login is issued and completed.
+	 */
 	switch (ndlp->nlp_state) {
 	case  NLP_STE_NPR_NODE:
 		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
@@ -359,8 +363,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	case  NLP_STE_PRLI_ISSUE:
 	case  NLP_STE_UNMAPPED_NODE:
 	case  NLP_STE_MAPPED_NODE:
-		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
-		return 1;
+		lpfc_unreg_rpi(vport, ndlp);
+		ndlp->nlp_prev_state = ndlp->nlp_state;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 	}
 
 	if ((vport->fc_flag & FC_PT2PT) &&

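The lpfc_rcv_plogi change above alters the re-login path: a PLOGI that arrives while the node is already logged in now forces an unreg_login and a transition back to NPR instead of simply ACCing. A condensed state sketch of that behavior, with simplified stand-in names rather than the driver's node state machine:

enum node_state { NPR, PLOGI_ISSUE, PRLI_ISSUE, UNMAPPED, MAPPED };

struct node { enum node_state state, prev_state; };

static void unreg_rpi(struct node *n) { /* release the RPI login */ }

static void handle_plogi_relogin(struct node *n)
{
	switch (n->state) {
	case PRLI_ISSUE:
	case UNMAPPED:
	case MAPPED:
		/* Was: ACC and return. Now: drop back to NPR so the port
		 * is blocked until the ACC completes and a fresh reg_login
		 * is issued and completed.
		 */
		unreg_rpi(n);
		n->prev_state = n->state;
		n->state = NPR;
		break;
	default:
		break;
	}
}
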
+ 85 - 28
drivers/scsi/lpfc/lpfc_scsi.c

@@ -608,6 +608,32 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
 	return bcnt;
 }
 
+/**
+ * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * This routine is invoked by the vport cleanup for deletions and the cleanup
+ * for an ndlp on removal.
+ **/
+void
+lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_scsi_buf *psb, *next_psb;
+	unsigned long iflag = 0;
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	list_for_each_entry_safe(psb, next_psb,
+				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
+		if (psb->rdata && psb->rdata->pnode
+			&& psb->rdata->pnode->vport == vport)
+			psb->rdata = NULL;
+	}
+	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
+
 /**
  * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
  * @phba: pointer to lpfc hba data structure.
@@ -640,7 +666,11 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
 			psb->status = IOSTAT_SUCCESS;
 			spin_unlock(
 				&phba->sli4_hba.abts_scsi_buf_list_lock);
-			ndlp = psb->rdata->pnode;
+			if (psb->rdata && psb->rdata->pnode)
+				ndlp = psb->rdata->pnode;
+			else
+				ndlp = NULL;
+
 			rrq_empty = list_empty(&phba->active_rrq_list);
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
 			if (ndlp)
@@ -964,36 +994,29 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 static struct lpfc_scsi_buf*
 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
-	struct  lpfc_scsi_buf *lpfc_cmd = NULL;
-	struct  lpfc_scsi_buf *start_lpfc_cmd = NULL;
-	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
+	struct lpfc_scsi_buf *lpfc_cmd;
 	unsigned long iflag = 0;
 	int found = 0;
 
 	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
-	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
-	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
-	while (!found && lpfc_cmd) {
+	list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list,
+							list) {
 		if (lpfc_test_rrq_active(phba, ndlp,
-					 lpfc_cmd->cur_iocbq.sli4_xritag)) {
-			lpfc_release_scsi_buf_s4(phba, lpfc_cmd);
-			spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
-			list_remove_head(scsi_buf_list, lpfc_cmd,
-					 struct lpfc_scsi_buf, list);
-			spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
-						 iflag);
-			if (lpfc_cmd == start_lpfc_cmd) {
-				lpfc_cmd = NULL;
-				break;
-			} else
-				continue;
-		}
+					 lpfc_cmd->cur_iocbq.sli4_xritag))
+			continue;
+		list_del(&lpfc_cmd->list);
 		found = 1;
 		lpfc_cmd->seg_cnt = 0;
 		lpfc_cmd->nonsg_phys = 0;
 		lpfc_cmd->prot_seg_cnt = 0;
+		break;
 	}
-	return  lpfc_cmd;
+	spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
+						 iflag);
+	if (!found)
+		return NULL;
+	else
+		return  lpfc_cmd;
 }
 /**
  * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
@@ -1981,12 +2004,14 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 	struct scatterlist *sgel = NULL;
 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
 	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+	struct sli4_sge *first_data_sgl;
 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
 	dma_addr_t physaddr;
 	uint32_t num_bde = 0;
 	uint32_t dma_len;
 	uint32_t dma_offset = 0;
 	int nseg;
+	struct ulp_bde64 *bde;
 
 	/*
 	 * There are three possibilities here - use scatter-gather segment, use
@@ -2011,7 +2036,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 		bf_set(lpfc_sli4_sge_last, sgl, 0);
 		sgl->word2 = cpu_to_le32(sgl->word2);
 		sgl += 1;
-
+		first_data_sgl = sgl;
 		lpfc_cmd->seg_cnt = nseg;
 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
@@ -2047,6 +2072,17 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 			dma_offset += dma_len;
 			sgl++;
 		}
+		/* setup the performance hint (first data BDE) if enabled */
+		if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
+			bde = (struct ulp_bde64 *)
+					&(iocb_cmd->unsli3.sli3Words[5]);
+			bde->addrLow = first_data_sgl->addr_lo;
+			bde->addrHigh = first_data_sgl->addr_hi;
+			bde->tus.f.bdeSize =
+					le32_to_cpu(first_data_sgl->sge_len);
+			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+			bde->tus.w = cpu_to_le32(bde->tus.w);
+		}
 	} else {
 		sgl += 1;
 		/* clear the last flag in the fcp_rsp map entry */
@@ -2471,6 +2507,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 			lpfc_worker_wake_up(phba);
 			break;
 		case IOSTAT_LOCAL_REJECT:
+		case IOSTAT_REMOTE_STOP:
+			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
+			    lpfc_cmd->result ==
+					IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
+			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
+			    lpfc_cmd->result ==
+					IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
+				cmd->result = ScsiResult(DID_NO_CONNECT, 0);
+				break;
+			}
 			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
 			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
 			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
@@ -2478,7 +2524,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 				cmd->result = ScsiResult(DID_REQUEUE, 0);
 				break;
 			}
-
 			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
 			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
 			     pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
@@ -2497,7 +2542,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 							"on unprotected cmd\n");
 				}
 			}
-
+			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
+				&& (phba->sli_rev == LPFC_SLI_REV4)
+				&& (pnode && NLP_CHK_NODE_ACT(pnode))) {
+				/* This IO was aborted by the target, we don't
+				 * know the rxid and because we did not send the
+				 * ABTS we cannot generate an RRQ.
+				 */
+				lpfc_set_rrq_active(phba, pnode,
+						lpfc_cmd->cur_iocbq.sli4_xritag,
+						0, 0);
+			}
 		/* else: fall through */
 		/* else: fall through */
 		default:
 			cmd->result = ScsiResult(DID_ERROR, 0);
@@ -2508,9 +2563,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
 			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
 						 SAM_STAT_BUSY);
-	} else {
+	} else
 		cmd->result = ScsiResult(DID_OK, 0);
-	}
 
 	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
 		uint32_t *lp = (uint32_t *)cmd->sense_buffer;
@@ -3004,11 +3058,11 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	 * transport is still transitioning.
 	 */
 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
-		cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
+		cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
 		goto out_fail_command;
 	}
 	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
-		goto out_host_busy;
+		goto out_tgt_busy;
 
 	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
 	if (lpfc_cmd == NULL) {
@@ -3125,6 +3179,9 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
  out_host_busy:
 	return SCSI_MLQUEUE_HOST_BUSY;
 
+ out_tgt_busy:
+	return SCSI_MLQUEUE_TARGET_BUSY;
+
  out_fail_command:
 	done(cmnd);
 	return 0;

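The "performance hint" added to the SGL setup above duplicates the first data SGE as a BDE inside the command itself, so the hardware can start the first DMA transfer without first fetching the external SGL. A sketch of just that copy, with simplified stand-in types rather than the driver's structures:

#include <stdint.h>

struct sge { uint32_t addr_lo, addr_hi, len; };
struct bde { uint32_t addr_lo, addr_hi, size_flags; };

static void set_perf_hint(struct bde *hint, const struct sge *first_data)
{
	/* Mirror the first data segment into the in-command BDE. */
	hint->addr_lo = first_data->addr_lo;
	hint->addr_hi = first_data->addr_hi;
	/* The driver also ORs in the 64-bit BDE type flag here. */
	hint->size_flags = first_data->len;
}
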
+ 108 - 109
drivers/scsi/lpfc/lpfc_sli.c

@@ -96,7 +96,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
 	/* set consumption flag every once in a while */
 	if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
 		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
-
+	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
+		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
 	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
 
 	/* Update the host index before invoking device */
@@ -534,15 +535,35 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 	uint16_t adj_xri;
 	struct lpfc_node_rrq *rrq;
 	int empty;
+	uint32_t did = 0;
+
+
+	if (!ndlp)
+		return -EINVAL;
+
+	if (!phba->cfg_enable_rrq)
+		return -EINVAL;
+
+	if (phba->pport->load_flag & FC_UNLOADING) {
+		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+		goto out;
+	}
+	did = ndlp->nlp_DID;
 
 
 	/*
 	/*
 	 * set the active bit even if there is no mem available.
 	 */
 	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
-	if (!ndlp)
-		return -EINVAL;
+
+	if (NLP_CHK_FREE_REQ(ndlp))
+		goto out;
+
+	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
+		goto out;
+
 	if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
-		return -EINVAL;
+		goto out;
+
 	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
 	if (rrq) {
 		rrq->send_rrq = send_rrq;
 		rrq->vport = ndlp->vport;
 		rrq->vport = ndlp->vport;
 		rrq->rxid = rxid;
 		rrq->rxid = rxid;
 		empty = list_empty(&phba->active_rrq_list);
 		empty = list_empty(&phba->active_rrq_list);
-		if (phba->cfg_enable_rrq && send_rrq)
-			/*
-			 * We need the xri before we can add this to the
-			 * phba active rrq list.
-			 */
-			rrq->send_rrq = send_rrq;
-		else
-			rrq->send_rrq = 0;
+		rrq->send_rrq = send_rrq;
 		list_add_tail(&rrq->list, &phba->active_rrq_list);
 		list_add_tail(&rrq->list, &phba->active_rrq_list);
 		if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
 		if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
 			phba->hba_flag |= HBA_RRQ_ACTIVE;
 			phba->hba_flag |= HBA_RRQ_ACTIVE;
@@ -569,40 +583,49 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 		}
 		}
 		return 0;
 		return 0;
 	}
 	}
-	return -ENOMEM;
+out:
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
+			" DID:0x%x Send:%d\n",
+			xritag, rxid, did, send_rrq);
+	return -EINVAL;
 }
 }
 
 
 /**
 /**
- * __lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
+ * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
  * @phba: Pointer to HBA context object.
  * @xritag: xri used in this exchange.
  * @rrq: The RRQ to be cleared.
  *
- * This function is called with hbalock held. This function
  **/
-static void
-__lpfc_clr_rrq_active(struct lpfc_hba *phba,
-			uint16_t xritag,
-			struct lpfc_node_rrq *rrq)
+void
+lpfc_clr_rrq_active(struct lpfc_hba *phba,
+		    uint16_t xritag,
+		    struct lpfc_node_rrq *rrq)
 {
 	uint16_t adj_xri;
-	struct lpfc_nodelist *ndlp;
+	struct lpfc_nodelist *ndlp = NULL;
 
 
-	ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
+	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
+		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
 
 
 	/* The target DID could have been swapped (cable swap)
 	 * we should use the ndlp from the findnode if it is
 	 * available.
 	 */
-	if (!ndlp)
+	if ((!ndlp) && rrq->ndlp)
 		ndlp = rrq->ndlp;
 
 
+	if (!ndlp)
+		goto out;
+
 	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
 	if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
 		rrq->send_rrq = 0;
 		rrq->xritag = 0;
 		rrq->rrq_stop_time = 0;
 	}
+out:
 	mempool_free(rrq, phba->rrq_pool);
 	mempool_free(rrq, phba->rrq_pool);
 }
 }
 
 
@@ -627,34 +650,34 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
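The hunks below rework the RRQ timeout and cleanup paths around one pattern: move or splice the expired entries onto a private list while the lock is held, then walk that private list with the lock dropped, so lpfc_send_rrq() and the free path no longer run under hbalock. A self-contained sketch of the pattern, with stand-in names for the RRQ structures:

#include <linux/list.h>
#include <linux/spinlock.h>

struct rrq_entry { struct list_head list; };

static void drain_and_process(struct list_head *active, spinlock_t *lock)
{
	struct rrq_entry *rrq, *next;
	unsigned long flags;
	LIST_HEAD(local);

	spin_lock_irqsave(lock, flags);
	list_splice_init(active, &local);	/* steal the whole list */
	spin_unlock_irqrestore(lock, flags);

	list_for_each_entry_safe(rrq, next, &local, list) {
		list_del(&rrq->list);
		/* ... send the RRQ or free it; safe without the lock ... */
	}
}
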
 	struct lpfc_node_rrq *nextrrq;
 	struct lpfc_node_rrq *nextrrq;
 	unsigned long next_time;
 	unsigned long next_time;
 	unsigned long iflags;
 	unsigned long iflags;
+	LIST_HEAD(send_rrq);
 
 
 	spin_lock_irqsave(&phba->hbalock, iflags);
 	spin_lock_irqsave(&phba->hbalock, iflags);
 	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
 	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
 	next_time = jiffies + HZ * (phba->fc_ratov + 1);
 	next_time = jiffies + HZ * (phba->fc_ratov + 1);
 	list_for_each_entry_safe(rrq, nextrrq,
 	list_for_each_entry_safe(rrq, nextrrq,
-			&phba->active_rrq_list, list) {
-		if (time_after(jiffies, rrq->rrq_stop_time)) {
-			list_del(&rrq->list);
-			if (!rrq->send_rrq)
-				/* this call will free the rrq */
-				__lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
-			else {
-			/* if we send the rrq then the completion handler
-			 *  will clear the bit in the xribitmap.
-			 */
-				spin_unlock_irqrestore(&phba->hbalock, iflags);
-				if (lpfc_send_rrq(phba, rrq)) {
-					lpfc_clr_rrq_active(phba, rrq->xritag,
-								 rrq);
-				}
-				spin_lock_irqsave(&phba->hbalock, iflags);
-			}
-		} else if  (time_before(rrq->rrq_stop_time, next_time))
+				 &phba->active_rrq_list, list) {
+		if (time_after(jiffies, rrq->rrq_stop_time))
+			list_move(&rrq->list, &send_rrq);
+		else if (time_before(rrq->rrq_stop_time, next_time))
 			next_time = rrq->rrq_stop_time;
 			next_time = rrq->rrq_stop_time;
 	}
 	}
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
 	if (!list_empty(&phba->active_rrq_list))
 	if (!list_empty(&phba->active_rrq_list))
 		mod_timer(&phba->rrq_tmr, next_time);
 		mod_timer(&phba->rrq_tmr, next_time);
+	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
+		list_del(&rrq->list);
+		if (!rrq->send_rrq)
+			/* this call will free the rrq */
+		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+		else if (lpfc_send_rrq(phba, rrq)) {
+			/* if we send the rrq then the completion handler
+			*  will clear the bit in the xribitmap.
+			*/
+			lpfc_clr_rrq_active(phba, rrq->xritag,
+					    rrq);
+		}
+	}
 }
 }
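lpfc_handle_rrq_active() above now expires RRQs in two phases: entries past their stop time are moved onto the stack-local send_rrq list while the hbalock is held, and lpfc_send_rrq()/lpfc_clr_rrq_active() run only after the lock is dropped, replacing the old unlock-and-relock dance inside the iteration. A compilable sketch of this move-then-process idiom under generic names (none of the identifiers are lpfc's):

    #include <linux/jiffies.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct timed_item {                     /* illustrative only */
            struct list_head list;
            unsigned long deadline;
    };

    static void consume(struct timed_item *it);     /* send or free it */

    static void reap_expired(spinlock_t *lock, struct list_head *active)
    {
            struct timed_item *it, *next;
            unsigned long flags;
            LIST_HEAD(expired);             /* private list, no locking needed */

            spin_lock_irqsave(lock, flags);
            list_for_each_entry_safe(it, next, active, list)
                    if (time_after(jiffies, it->deadline))
                            list_move(&it->list, &expired);
            spin_unlock_irqrestore(lock, flags);

            /* callouts run lock-free; the entries are already unlinked */
            list_for_each_entry_safe(it, next, &expired, list) {
                    list_del(&it->list);
                    consume(it);
            }
    }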
 
 /**
@@ -692,29 +715,37 @@ lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
 /**
  * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
  * @vport: Pointer to vport context object.
- *
- * Remove all active RRQs for this vport from the phba->active_rrq_list and
- * clear the rrq.
+ * @ndlp: Pointer to the lpfc_node_list structure.
+ * If ndlp is NULL Remove all active RRQs for this vport from the
+ * phba->active_rrq_list and clear the rrq.
+ * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
  **/
 void
-lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport)
+lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 
 {
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_node_rrq *rrq;
 	struct lpfc_node_rrq *nextrrq;
 	unsigned long iflags;
+	LIST_HEAD(rrq_list);
 
 	if (phba->sli_rev != LPFC_SLI_REV4)
 		return;
-	spin_lock_irqsave(&phba->hbalock, iflags);
-	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
-		if (rrq->vport == vport) {
-			list_del(&rrq->list);
-			__lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
-		}
+	if (!ndlp) {
+		lpfc_sli4_vport_delete_els_xri_aborted(vport);
+		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
 	}
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
+		if ((rrq->vport == vport) && (!ndlp  || rrq->ndlp == ndlp))
+			list_move(&rrq->list, &rrq_list);
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
+		list_del(&rrq->list);
+		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+	}
 }
 
 /**
@@ -732,24 +763,27 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
 	struct lpfc_node_rrq *nextrrq;
 	unsigned long next_time;
 	unsigned long iflags;
+	LIST_HEAD(rrq_list);
 
 	if (phba->sli_rev != LPFC_SLI_REV4)
 		return;
 	spin_lock_irqsave(&phba->hbalock, iflags);
 	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
 	next_time = jiffies + HZ * (phba->fc_ratov * 2);
-	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
+	list_splice_init(&phba->active_rrq_list, &rrq_list);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
 		list_del(&rrq->list);
-		__lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
 	}
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
 	if (!list_empty(&phba->active_rrq_list))
 		mod_timer(&phba->rrq_tmr, next_time);
 }
 
 
 /**
- * __lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
+ * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
  * @phba: Pointer to HBA context object.
  * @ndlp: Targets nodelist pointer for this exchange.
  * @xritag the xri in the bitmap to test.
@@ -758,8 +792,8 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
  * returns 0 = rrq not active for this xri
  *         1 = rrq is valid for this xri.
  **/
-static int
-__lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+int
+lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 			uint16_t  xritag)
 {
 	uint16_t adj_xri;
@@ -801,52 +835,6 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 	return ret;
 }
 
-/**
- * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
- * @phba: Pointer to HBA context object.
- * @xritag: xri used in this exchange.
- * @rrq: The RRQ to be cleared.
- *
- * This function is takes the hbalock.
- **/
-void
-lpfc_clr_rrq_active(struct lpfc_hba *phba,
-			uint16_t xritag,
-			struct lpfc_node_rrq *rrq)
-{
-	unsigned long iflags;
-
-	spin_lock_irqsave(&phba->hbalock, iflags);
-	__lpfc_clr_rrq_active(phba, xritag, rrq);
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
-	return;
-}
-
-
-
-/**
- * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
- * @phba: Pointer to HBA context object.
- * @ndlp: Targets nodelist pointer for this exchange.
- * @xritag the xri in the bitmap to test.
- *
- * This function takes the hbalock.
- * returns 0 = rrq not active for this xri
- *         1 = rrq is valid for this xri.
- **/
-int
-lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
-			uint16_t  xritag)
-{
-	int ret;
-	unsigned long iflags;
-
-	spin_lock_irqsave(&phba->hbalock, iflags);
-	ret = __lpfc_test_rrq_active(phba, ndlp, xritag);
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
-	return ret;
-}
-
 /**
  * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
  * @phba: Pointer to HBA context object.
@@ -884,7 +872,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
 			return NULL;
 		adj_xri = sglq->sli4_xritag -
 				phba->sli4_hba.max_cfg_param.xri_base;
-		if (__lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
+		if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
 			/* This xri has an rrq outstanding for this DID.
 			 * put it back in the list and get another xri.
 			 */
@@ -969,7 +957,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 		} else {
 			sglq->state = SGL_FREED;
 			sglq->ndlp = NULL;
-			list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+			list_add_tail(&sglq->list,
+				&phba->sli4_hba.lpfc_sgl_list);
 
 			/* Check if TXQ queue needs to be serviced */
 			if (pring->txq_cnt)
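Switching the sgl free path from list_add() to list_add_tail() above turns the free list from LIFO into FIFO: a just-released XRI goes to the back of the pool instead of being handed straight back out, so an RRQ still outstanding on that exchange has the longest possible time to clear before the XRI is reused. The distinction in two generic helpers (a sketch, not lpfc code):

    #include <linux/list.h>

    /* LIFO free: the next allocation reuses this entry immediately. */
    static inline void pool_free_lifo(struct list_head *entry,
                                      struct list_head *pool)
    {
            list_add(entry, pool);
    }

    /* FIFO free: every other free entry is reused before this one. */
    static inline void pool_free_fifo(struct list_head *entry,
                                      struct list_head *pool)
    {
            list_add_tail(entry, pool);
    }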
@@ -4817,7 +4806,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 				"0378 No support for fcpi mode.\n");
 				"0378 No support for fcpi mode.\n");
 		ftr_rsp++;
 		ftr_rsp++;
 	}
 	}
-
+	if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
+		phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
+	else
+		phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
 	/*
 	/*
 	 * If the port cannot support the host's requested features
 	 * If the port cannot support the host's requested features
 	 * then turn off the global config parameters to disable the
 	 * then turn off the global config parameters to disable the
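The hunk above mirrors the port's feature response into the driver's option word in both directions: the performance-hint bit is set when the port grants the feature and explicitly cleared when it does not, so a value left over from an earlier link start cannot survive renegotiation. The shape of the update, reduced to essentials (the flag name and value here are invented):

    #define OPT_PERFH       0x400u  /* illustrative bit, not lpfc's value */

    static unsigned int sync_perfh(unsigned int opts, int port_grants_perfh)
    {
            if (port_grants_perfh)
                    opts |= OPT_PERFH;
            else
                    opts &= ~OPT_PERFH;     /* clear any stale state */
            return opts;
    }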
@@ -5004,7 +4996,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	spin_lock_irq(&phba->hbalock);
 	phba->link_state = LPFC_LINK_DOWN;
 	spin_unlock_irq(&phba->hbalock);
-	rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+	if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK)
+		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
 out_unset_queue:
 	/* Unset all the queues set up in this routine when error out */
 	if (rc)
@@ -10478,6 +10471,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
 	cq->type = type;
 	cq->subtype = subtype;
 	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
+	cq->assoc_qid = eq->queue_id;
 	cq->host_index = 0;
 	cq->hba_index = 0;
 
@@ -10672,6 +10666,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
 		goto out;
 	}
 	mq->type = LPFC_MQ;
+	mq->assoc_qid = cq->queue_id;
 	mq->subtype = subtype;
 	mq->host_index = 0;
 	mq->hba_index = 0;
@@ -10759,6 +10754,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
 		goto out;
 	}
 	wq->type = LPFC_WQ;
+	wq->assoc_qid = cq->queue_id;
 	wq->subtype = subtype;
 	wq->host_index = 0;
 	wq->hba_index = 0;
@@ -10876,6 +10872,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
 		goto out;
 	}
 	hrq->type = LPFC_HRQ;
+	hrq->assoc_qid = cq->queue_id;
 	hrq->subtype = subtype;
 	hrq->host_index = 0;
 	hrq->hba_index = 0;
@@ -10936,6 +10933,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
 		goto out;
 	}
 	drq->type = LPFC_DRQ;
+	drq->assoc_qid = cq->queue_id;
 	drq->subtype = subtype;
 	drq->host_index = 0;
 	drq->hba_index = 0;
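Each of the *_create hunks above records the parent queue's id in the new assoc_qid field as soon as the create mailbox succeeds, so teardown and error paths can walk from a child queue back to the EQ or CQ it was bound to without reparsing mailbox state. Schematically (types and helper invented for illustration):

    #include <linux/types.h>

    struct hw_queue {               /* stand-in, not struct lpfc_queue */
            u32 queue_id;           /* id assigned by the hardware */
            u32 assoc_qid;          /* parent: EQ for a CQ; CQ for MQ/WQ/RQ */
    };

    /* record the association right after a successful create */
    static void bind_to_parent(struct hw_queue *child,
                               const struct hw_queue *parent, u32 hw_id)
    {
            child->queue_id = hw_id;
            child->assoc_qid = parent->queue_id;
    }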
@@ -11189,7 +11187,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
 	if (!mbox)
 		return -ENOMEM;
 	length = (sizeof(struct lpfc_mbx_rq_destroy) -
-		  sizeof(struct mbox_header));
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
 			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
 			 length, LPFC_SLI4_MBX_EMBED);
@@ -11279,7 +11277,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
 			LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
 			sizeof(struct lpfc_mbx_post_sgl_pages) -
-			sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
+			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
 
 	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
 				&mbox->u.mqe.un.post_sgl_pages;
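The sizeof substitutions in these mailbox hunks fix a length computation: for an embedded SLI4 config command the opcode-specific length is the request size minus the header that actually prefixes the request, so subtracting sizeof(struct mbox_header) instead of sizeof(struct lpfc_sli4_cfg_mhdr) mis-sizes the payload whenever the two headers differ. A toy version of the arithmetic (the layouts are made up; only the principle carries over):

    #include <linux/types.h>

    struct cfg_hdr {                /* header really embedded in the request */
            u32 words[4];           /* 16 bytes */
    };

    struct destroy_req {
            struct cfg_hdr hdr;
            u32 payload[6];         /* 24 bytes of opcode-specific data */
    };

    /* The length passed to the config call must exclude exactly the header
     * that is present; sizeof() of any other header type corrupts it.
     */
    static u32 destroy_payload_len(void)
    {
            return sizeof(struct destroy_req) - sizeof(struct cfg_hdr);
    }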
@@ -12402,7 +12400,8 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
 			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
 			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
-			 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
+			 sizeof(struct lpfc_sli4_cfg_mhdr),
+			 LPFC_SLI4_MBX_EMBED);
 	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
 	       hdr_tmpl, rpi_page->page_count);
 	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,

+ 7 - 1
drivers/scsi/lpfc/lpfc_sli4.h

@@ -125,9 +125,9 @@ struct lpfc_queue {
 	uint32_t entry_count;	/* Number of entries to support on the queue */
 	uint32_t entry_size;	/* Size of each queue entry. */
 	uint32_t queue_id;	/* Queue ID assigned by the hardware */
+	uint32_t assoc_qid;     /* Queue ID associated with, for CQ/WQ/MQ */
 	struct list_head page_list;
 	uint32_t page_count;	/* Number of pages allocated for this queue */
-
 	uint32_t host_index;	/* The host's index for putting or getting */
 	uint32_t hba_index;	/* The last known hba index for get or put */
 	union sli4_qe qe[1];	/* array to index entries (must be last) */
@@ -359,6 +359,10 @@ struct lpfc_pc_sli4_params {
 	uint32_t hdr_pp_align;
 	uint32_t sgl_pages_max;
 	uint32_t sgl_pp_align;
+	uint8_t cqv;
+	uint8_t mqv;
+	uint8_t wqv;
+	uint8_t rqv;
 };
 
 /* SLI4 HBA data structure entries */
@@ -562,6 +566,8 @@ void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
 			       struct sli4_wcqe_xri_aborted *);
 void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
 			       struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *);
+void lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *);
 int lpfc_sli4_brdreset(struct lpfc_hba *);
 int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
 void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);

+ 2 - 2
drivers/scsi/lpfc/lpfc_version.h

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.20"
+#define LPFC_DRIVER_VERSION "8.3.21"
 #define LPFC_DRIVER_NAME		"lpfc"
 #define LPFC_SP_DRIVER_HANDLER_NAME	"lpfc:sp"
 #define LPFC_FP_DRIVER_HANDLER_NAME	"lpfc:fp"

Some files were not shown because too many files changed in this diff