
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (147 commits)
  [SCSI] arcmsr: fix write to device check
  [SCSI] lpfc: lower stack use in lpfc_fc_frame_check
  [SCSI] eliminate an unnecessary local variable from scsi_remove_target()
  [SCSI] libiscsi: use bh locking instead of irq with session lock
  [SCSI] libiscsi: do not take host lock in queuecommand
  [SCSI] be2iscsi: fix null ptr when accessing task hdr
  [SCSI] be2iscsi: fix gfp use in alloc_pdu
  [SCSI] libiscsi: add more informative failure message during iscsi scsi eh
  [SCSI] gdth: Add missing call to gdth_ioctl_free
  [SCSI] bfa: remove unused definitions and misc cleanups
  [SCSI] bfa: remove inactive functions
  [SCSI] bfa: replace bfa_assert with WARN_ON
  [SCSI] qla2xxx: Use sg_next to fetch next sg element while walking sg list.
  [SCSI] qla2xxx: Fix to avoid recursive lock failure during BSG timeout.
  [SCSI] qla2xxx: Remove code to not reset ISP82xx on failure.
  [SCSI] qla2xxx: Display mailbox register 4 during 8012 AEN for ISP82XX parts.
  [SCSI] qla2xxx: Don't perform a BIG_HAMMER if Get-ID (0x20) mailbox command fails on CNAs.
  [SCSI] qla2xxx: Remove redundant module parameter permission bits
  [SCSI] qla2xxx: Add sysfs node for displaying board temperature.
  [SCSI] qla2xxx: Code cleanup to remove unwanted comments and code.
  ...
Linus Torvalds, 14 years ago, commit da40d036fd
100 changed files with 5459 additions and 5859 deletions
  1. Documentation/scsi/ChangeLog.megaraid_sas (+22, -0)
  2. drivers/s390/scsi/zfcp_aux.c (+4, -5)
  3. drivers/s390/scsi/zfcp_ccw.c (+7, -7)
  4. drivers/s390/scsi/zfcp_cfdc.c (+4, -4)
  5. drivers/s390/scsi/zfcp_dbf.c (+269, -857)
  6. drivers/s390/scsi/zfcp_dbf.h (+258, -239)
  7. drivers/s390/scsi/zfcp_erp.c (+59, -82)
  8. drivers/s390/scsi/zfcp_ext.h (+20, -32)
  9. drivers/s390/scsi/zfcp_fc.c (+10, -10)
  10. drivers/s390/scsi/zfcp_fsf.c (+58, -55)
  11. drivers/s390/scsi/zfcp_qdio.c (+5, -5)
  12. drivers/s390/scsi/zfcp_scsi.c (+23, -20)
  13. drivers/s390/scsi/zfcp_sysfs.c (+4, -5)
  14. drivers/scsi/arcmsr/arcmsr_hba.c (+1, -2)
  15. drivers/scsi/be2iscsi/be_main.c (+3, -2)
  16. drivers/scsi/bfa/Makefile (+1, -3)
  17. drivers/scsi/bfa/bfa.h (+3, -33)
  18. drivers/scsi/bfa/bfa_cb_ioim.h (+0, -169)
  19. drivers/scsi/bfa/bfa_core.c (+104, -269)
  20. drivers/scsi/bfa/bfa_cs.h (+49, -47)
  21. drivers/scsi/bfa/bfa_defs.h (+3, -3)
  22. drivers/scsi/bfa/bfa_defs_svc.h (+4, -4)
  23. drivers/scsi/bfa/bfa_drv.c (+0, -107)
  24. drivers/scsi/bfa/bfa_fc.h (+151, -457)
  25. drivers/scsi/bfa/bfa_fcbuild.c (+52, -57)
  26. drivers/scsi/bfa/bfa_fcbuild.h (+15, -15)
  27. drivers/scsi/bfa/bfa_fcpim.c (+120, -422)
  28. drivers/scsi/bfa/bfa_fcpim.h (+71, -117)
  29. drivers/scsi/bfa/bfa_fcs.c (+34, -172)
  30. drivers/scsi/bfa/bfa_fcs.h (+81, -43)
  31. drivers/scsi/bfa/bfa_fcs_fcpim.c (+15, -15)
  32. drivers/scsi/bfa/bfa_fcs_lport.c (+177, -213)
  33. drivers/scsi/bfa/bfa_fcs_rport.c (+17, -213)
  34. drivers/scsi/bfa/bfa_hw_cb.c (+2, -1)
  35. drivers/scsi/bfa/bfa_hw_ct.c (+3, -2)
  36. drivers/scsi/bfa/bfa_ioc.c (+269, -232)
  37. drivers/scsi/bfa/bfa_ioc.h (+21, -23)
  38. drivers/scsi/bfa/bfa_ioc_cb.c (+92, -5)
  39. drivers/scsi/bfa/bfa_ioc_ct.c (+108, -12)
  40. drivers/scsi/bfa/bfa_modules.h (+0, -3)
  41. drivers/scsi/bfa/bfa_os_inc.h (+0, -143)
  42. drivers/scsi/bfa/bfa_plog.h (+0, -4)
  43. drivers/scsi/bfa/bfa_port.c (+12, -25)
  44. drivers/scsi/bfa/bfa_port.h (+0, -1)
  45. drivers/scsi/bfa/bfa_svc.c (+180, -339)
  46. drivers/scsi/bfa/bfa_svc.h (+14, -49)
  47. drivers/scsi/bfa/bfad.c (+18, -20)
  48. drivers/scsi/bfa/bfad_attr.c (+9, -9)
  49. drivers/scsi/bfa/bfad_debugfs.c (+39, -7)
  50. drivers/scsi/bfa/bfad_drv.h (+27, -18)
  51. drivers/scsi/bfa/bfad_im.c (+30, -29)
  52. drivers/scsi/bfa/bfad_im.h (+8, -8)
  53. drivers/scsi/bfa/bfi.h (+4, -4)
  54. drivers/scsi/bfa/bfi_cbreg.h (+1, -0)
  55. drivers/scsi/bfa/bfi_ctreg.h (+25, -16)
  56. drivers/scsi/bfa/bfi_ms.h (+38, -28)
  57. drivers/scsi/bnx2i/57xx_iscsi_constants.h (+2, -1)
  58. drivers/scsi/bnx2i/57xx_iscsi_hsi.h (+2, -1)
  59. drivers/scsi/bnx2i/bnx2i.h (+7, -8)
  60. drivers/scsi/bnx2i/bnx2i_hwi.c (+69, -27)
  61. drivers/scsi/bnx2i/bnx2i_init.c (+20, -88)
  62. drivers/scsi/bnx2i/bnx2i_iscsi.c (+93, -55)
  63. drivers/scsi/bnx2i/bnx2i_sysfs.c (+2, -1)
  64. drivers/scsi/device_handler/scsi_dh.c (+9, -2)
  65. drivers/scsi/fcoe/fcoe.c (+3, -1)
  66. drivers/scsi/fcoe/libfcoe.c (+241, -59)
  67. drivers/scsi/gdth.c (+3, -1)
  68. drivers/scsi/gdth_proc.c (+12, -4)
  69. drivers/scsi/hpsa.c (+0, -11)
  70. drivers/scsi/hpsa.h (+0, -1)
  71. drivers/scsi/ibmvscsi/ibmvfc.c (+14, -14)
  72. drivers/scsi/ibmvscsi/ibmvfc.h (+1, -1)
  73. drivers/scsi/ipr.c (+44, -10)
  74. drivers/scsi/ipr.h (+2, -0)
  75. drivers/scsi/libfc/fc_exch.c (+40, -8)
  76. drivers/scsi/libfc/fc_fcp.c (+98, -49)
  77. drivers/scsi/libfc/fc_libfc.h (+14, -2)
  78. drivers/scsi/libfc/fc_lport.c (+12, -4)
  79. drivers/scsi/libfc/fc_rport.c (+2, -1)
  80. drivers/scsi/libiscsi.c (+27, -37)
  81. drivers/scsi/libsas/sas_port.c (+13, -5)
  82. drivers/scsi/lpfc/lpfc.h (+24, -2)
  83. drivers/scsi/lpfc/lpfc_attr.c (+54, -50)
  84. drivers/scsi/lpfc/lpfc_bsg.c (+24, -41)
  85. drivers/scsi/lpfc/lpfc_crtn.h (+14, -5)
  86. drivers/scsi/lpfc/lpfc_ct.c (+26, -22)
  87. drivers/scsi/lpfc/lpfc_disc.h (+18, -1)
  88. drivers/scsi/lpfc/lpfc_els.c (+288, -39)
  89. drivers/scsi/lpfc/lpfc_hbadisc.c (+161, -78)
  90. drivers/scsi/lpfc/lpfc_hw.h (+118, -104)
  91. drivers/scsi/lpfc/lpfc_hw4.h (+218, -74)
  92. drivers/scsi/lpfc/lpfc_init.c (+493, -225)
  93. drivers/scsi/lpfc/lpfc_logmsg.h (+1, -1)
  94. drivers/scsi/lpfc/lpfc_mbox.c (+46, -30)
  95. drivers/scsi/lpfc/lpfc_mem.c (+9, -1)
  96. drivers/scsi/lpfc/lpfc_nportdisc.c (+5, -11)
  97. drivers/scsi/lpfc/lpfc_scsi.c (+76, -6)
  98. drivers/scsi/lpfc/lpfc_sli.c (+568, -95)
  99. drivers/scsi/lpfc/lpfc_sli.h (+4, -1)
  100. drivers/scsi/lpfc/lpfc_sli4.h (+38, -20)

+ 22 - 0
Documentation/scsi/ChangeLog.megaraid_sas

@@ -1,3 +1,25 @@
+Release Date    : Tues.  Dec 14, 2010 17:00:00 PST 2010 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Adam Radford
+Current Version : 00.00.05.29-rc1
+Old Version     : 00.00.04.31-rc1
+    1. Rename megaraid_sas.c to megaraid_sas_base.c.
+    2. Update GPL headers.
+    3. Add MSI-X support and 'msix_disable' module parameter.
+    4. Use lowest memory bar (for SR-IOV VF support).
+    5. Add struct megasas_instance_template changes, and change all code to use
+       new instance entries:
+
+       irqreturn_t (*service_isr )(int irq, void *devp);
+       void (*tasklet)(unsigned long);
+       u32 (*init_adapter)(struct megasas_instance *);
+       u32 (*build_and_issue_cmd) (struct megasas_instance *,
+       struct scsi_cmnd *);
+       void (*issue_dcmd) (struct megasas_instance *instance,
+                              struct megasas_cmd *cmd);
+
+   6. Add code to support MegaRAID 9265/9285 controllers device id (0x5b).
+-------------------------------------------------------------------------------
 1 Release Date    : Thur.  May 03, 2010 09:12:45 PST 2009 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Bo Yang
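
Item 5 of the changelog entry above describes collecting per-controller callbacks in struct megasas_instance_template so that common code dispatches through them. The sketch below is a rough, self-contained illustration of that dispatch-table pattern in plain user-space C, with made-up gen2_* names and simplified argument types; it is not the driver's actual structure or code.

#include <stdio.h>

struct instance;

struct instance_template {
	unsigned int (*init_adapter)(struct instance *);
	unsigned int (*build_and_issue_cmd)(struct instance *, const char *cdb);
};

struct instance {
	const char *name;
	const struct instance_template *instancet;	/* picked at probe time */
};

static unsigned int gen2_init_adapter(struct instance *i)
{
	printf("%s: init gen2-style\n", i->name);
	return 0;
}

static unsigned int gen2_build_and_issue_cmd(struct instance *i, const char *cdb)
{
	printf("%s: issue %s via gen2 frame format\n", i->name, cdb);
	return 0;
}

static const struct instance_template gen2_template = {
	.init_adapter		= gen2_init_adapter,
	.build_and_issue_cmd	= gen2_build_and_issue_cmd,
};

int main(void)
{
	struct instance ctrl = { .name = "megaraid0", .instancet = &gen2_template };

	ctrl.instancet->init_adapter(&ctrl);
	ctrl.instancet->build_and_issue_cmd(&ctrl, "INQUIRY");
	return 0;
}

The payoff is the one the changelog is after: code that initializes an adapter or queues a command calls through the instance's template pointer instead of branching on the controller family each time.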

+ 4 - 5
drivers/s390/scsi/zfcp_aux.c

@@ -45,8 +45,8 @@ static char *init_device;
 module_param_named(device, init_device, charp, 0400);
 MODULE_PARM_DESC(device, "specify initial device");
 
-static struct kmem_cache *zfcp_cache_hw_align(const char *name,
-					      unsigned long size)
+static struct kmem_cache * __init zfcp_cache_hw_align(const char *name,
+						      unsigned long size)
 {
 	return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
 }
@@ -311,8 +311,7 @@ int zfcp_status_read_refill(struct zfcp_adapter *adapter)
 		if (zfcp_fsf_status_read(adapter->qdio)) {
 			if (atomic_read(&adapter->stat_miss) >=
 			    adapter->stat_read_buf_num) {
-				zfcp_erp_adapter_reopen(adapter, 0, "axsref1",
-							NULL);
+				zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
 				return 1;
 			}
 			break;
@@ -459,7 +458,7 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
 	sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
 
 	zfcp_erp_thread_kill(adapter);
-	zfcp_dbf_adapter_unregister(adapter->dbf);
+	zfcp_dbf_adapter_unregister(adapter);
 	zfcp_qdio_destroy(adapter->qdio);
 
 	zfcp_ccw_adapter_put(adapter); /* final put to release */

+ 7 - 7
drivers/s390/scsi/zfcp_ccw.c

@@ -48,7 +48,7 @@ static int zfcp_ccw_activate(struct ccw_device *cdev)
 
 	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
-				"ccresu2", NULL);
+				"ccresu2");
 	zfcp_erp_wait(adapter);
 	flush_work(&adapter->scan_work);
 
@@ -182,7 +182,7 @@ static int zfcp_ccw_set_offline(struct ccw_device *cdev)
 	if (!adapter)
 		return 0;
 
-	zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL);
+	zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1");
 	zfcp_erp_wait(adapter);
 
 	zfcp_ccw_adapter_put(adapter);
@@ -207,24 +207,24 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
 	switch (event) {
 	case CIO_GONE:
 		dev_warn(&cdev->dev, "The FCP device has been detached\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1", NULL);
+		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
 		break;
 	case CIO_NO_PATH:
 		dev_warn(&cdev->dev,
 			 "The CHPID for the FCP device is offline\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2", NULL);
+		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
 		break;
 	case CIO_OPER:
 		dev_info(&cdev->dev, "The FCP device is operational again\n");
 		zfcp_erp_set_adapter_status(adapter,
 					    ZFCP_STATUS_COMMON_RUNNING);
 		zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
-					"ccnoti4", NULL);
+					"ccnoti4");
 		break;
 	case CIO_BOXED:
 		dev_warn(&cdev->dev, "The FCP device did not respond within "
 				     "the specified time\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL);
+		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5");
 		break;
 	}
 
@@ -243,7 +243,7 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
 	if (!adapter)
 		return;
 
-	zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL);
+	zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1");
 	zfcp_erp_wait(adapter);
 	zfcp_erp_thread_kill(adapter);
 

+ 4 - 4
drivers/s390/scsi/zfcp_cfdc.c

@@ -288,7 +288,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
 		    (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
 			zfcp_erp_port_reopen(port,
 					     ZFCP_STATUS_COMMON_ERP_FAILED,
-					     "cfaac_1", NULL);
+					     "cfaac_1");
 	}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
@@ -299,7 +299,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
 		    (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
 			zfcp_erp_lun_reopen(sdev,
 					    ZFCP_STATUS_COMMON_ERP_FAILED,
-					    "cfaac_2", NULL);
+					    "cfaac_2");
 	}
 }
 
@@ -426,7 +426,7 @@ int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev,
 			zfcp_scsi_dev_lun(sdev),
 			(unsigned long long)zfcp_sdev->port->wwpn);
 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
-		zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6", NULL);
+		zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6");
 		return -EACCES;
 	}
 
@@ -437,7 +437,7 @@ int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev,
 			zfcp_scsi_dev_lun(sdev),
 			(unsigned long long)zfcp_sdev->port->wwpn);
 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
-		zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8", NULL);
+		zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8");
 		return -EACCES;
 	}
 

File diff suppressed because it is too large
+ 269 - 857
drivers/s390/scsi/zfcp_dbf.c


+ 258 - 239
drivers/s390/scsi/zfcp_dbf.h

@@ -1,22 +1,8 @@
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
+ * debug feature declarations
  *
- * Copyright IBM Corp. 2008, 2009
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corp. 2008, 2010
  */
 
 #ifndef ZFCP_DBF_H
@@ -27,322 +13,350 @@
 #include "zfcp_fsf.h"
 #include "zfcp_def.h"
 
-#define ZFCP_DBF_TAG_SIZE      4
-#define ZFCP_DBF_ID_SIZE       7
+#define ZFCP_DBF_TAG_LEN       7
 
 #define ZFCP_DBF_INVALID_LUN	0xFFFFFFFFFFFFFFFFull
 
-struct zfcp_dbf_dump {
-	u8 tag[ZFCP_DBF_TAG_SIZE];
-	u32 total_size;		/* size of total dump data */
-	u32 offset;		/* how much data has being already dumped */
-	u32 size;		/* how much data comes with this record */
-	u8 data[];		/* dump data */
-} __attribute__ ((packed));
-
-struct zfcp_dbf_rec_record_thread {
-	u32 total;
+/**
+ * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
+ * @ready: number of ready recovery actions
+ * @running: number of running recovery actions
+ * @want: wanted recovery action
+ * @need: needed recovery action
+ */
+struct zfcp_dbf_rec_trigger {
 	u32 ready;
 	u32 running;
-};
-
-struct zfcp_dbf_rec_record_target {
-	u64 ref;
-	u32 status;
-	u32 d_id;
-	u64 wwpn;
-	u64 fcp_lun;
-	u32 erp_count;
-};
-
-struct zfcp_dbf_rec_record_trigger {
 	u8 want;
 	u8 need;
-	u32 as;
-	u32 ps;
-	u32 ls;
-	u64 ref;
-	u64 action;
-	u64 wwpn;
-	u64 fcp_lun;
-};
+} __packed;
 
-struct zfcp_dbf_rec_record_action {
-	u32 status;
-	u32 step;
-	u64 action;
-	u64 fsf_req;
+/**
+ * struct zfcp_dbf_rec_running - trace record for running recovery
+ * @fsf_req_id: request id for fsf requests
+ * @rec_status: status of the fsf request
+ * @rec_step: current step of the recovery action
+ * @rec_count: recovery counter
+ */
+struct zfcp_dbf_rec_running {
+	u64 fsf_req_id;
+	u32 rec_status;
+	u16 rec_step;
+	u8 rec_action;
+	u8 rec_count;
+} __packed;
+
+/**
+ * enum zfcp_dbf_rec_id - recovery trace record id
+ * @ZFCP_DBF_REC_TRIG: triggered recovery identifier
+ * @ZFCP_DBF_REC_RUN: running recovery identifier
+ */
+enum zfcp_dbf_rec_id {
+	ZFCP_DBF_REC_TRIG	= 1,
+	ZFCP_DBF_REC_RUN	= 2,
 };
 
-struct zfcp_dbf_rec_record {
+/**
+ * struct zfcp_dbf_rec - trace record for error recovery actions
+ * @id: unique number of recovery record type
+ * @tag: identifier string specifying the location of initiation
+ * @lun: logical unit number
+ * @wwpn: world wide port number
+ * @d_id: destination ID
+ * @adapter_status: current status of the adapter
+ * @port_status: current status of the port
+ * @lun_status: current status of the lun
+ * @u.trig: structure zfcp_dbf_rec_trigger
+ * @u.run: structure zfcp_dbf_rec_running
+ */
+struct zfcp_dbf_rec {
 	u8 id;
-	char id2[7];
+	char tag[ZFCP_DBF_TAG_LEN];
+	u64 lun;
+	u64 wwpn;
+	u32 d_id;
+	u32 adapter_status;
+	u32 port_status;
+	u32 lun_status;
 	union {
-		struct zfcp_dbf_rec_record_action action;
-		struct zfcp_dbf_rec_record_thread thread;
-		struct zfcp_dbf_rec_record_target target;
-		struct zfcp_dbf_rec_record_trigger trigger;
+		struct zfcp_dbf_rec_trigger trig;
+		struct zfcp_dbf_rec_running run;
 	} u;
-};
+} __packed;
 
-enum {
-	ZFCP_REC_DBF_ID_ACTION,
-	ZFCP_REC_DBF_ID_THREAD,
-	ZFCP_REC_DBF_ID_TARGET,
-	ZFCP_REC_DBF_ID_TRIGGER,
+/**
+ * enum zfcp_dbf_san_id - SAN trace record identifier
+ * @ZFCP_DBF_SAN_REQ: request trace record id
+ * @ZFCP_DBF_SAN_RES: response trace record id
+ * @ZFCP_DBF_SAN_ELS: extended link service record id
+ */
+enum zfcp_dbf_san_id {
+	ZFCP_DBF_SAN_REQ	= 1,
+	ZFCP_DBF_SAN_RES	= 2,
+	ZFCP_DBF_SAN_ELS	= 3,
 };
 
-struct zfcp_dbf_hba_record_response {
-	u32 fsf_command;
-	u64 fsf_reqid;
-	u32 fsf_seqno;
-	u64 fsf_issued;
-	u32 fsf_prot_status;
+/** struct zfcp_dbf_san - trace record for SAN requests and responses
+ * @id: unique number of recovery record type
+ * @tag: identifier string specifying the location of initiation
+ * @fsf_req_id: request id for fsf requests
+ * @payload: unformatted information related to request/response
+ * @d_id: destination id
+ */
+struct zfcp_dbf_san {
+	u8 id;
+	char tag[ZFCP_DBF_TAG_LEN];
+	u64 fsf_req_id;
+	u32 d_id;
+#define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
+	char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
+} __packed;
+
+/**
+ * struct zfcp_dbf_hba_res - trace record for hba responses
+ * @req_issued: timestamp when request was issued
+ * @prot_status: protocol status
+ * @prot_status_qual: protocol status qualifier
+ * @fsf_status: fsf status
+ * @fsf_status_qual: fsf status qualifier
+ */
+struct zfcp_dbf_hba_res {
+	u64 req_issued;
+	u32 prot_status;
+	u8  prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
 	u32 fsf_status;
-	u8 fsf_prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
-	u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
-	u32 fsf_req_status;
-	u8 sbal_first;
-	u8 sbal_last;
-	u8 sbal_response;
-	u8 pool;
-	u64 erp_action;
-	union {
-		struct {
-			u64 cmnd;
-			u32 data_dir;
-		} fcp;
-		struct {
-			u64 wwpn;
-			u32 d_id;
-			u32 port_handle;
-		} port;
-		struct {
-			u64 wwpn;
-			u64 fcp_lun;
-			u32 port_handle;
-			u32 lun_handle;
-		} unit;
-		struct {
-			u32 d_id;
-		} els;
-	} u;
-} __attribute__ ((packed));
+	u8  fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
+} __packed;
 
-struct zfcp_dbf_hba_record_status {
-	u8 failed;
+/**
+ * struct zfcp_dbf_hba_uss - trace record for unsolicited status
+ * @status_type: type of unsolicited status
+ * @status_subtype: subtype of unsolicited status
+ * @d_id: destination ID
+ * @lun: logical unit number
+ * @queue_designator: queue designator
+ */
+struct zfcp_dbf_hba_uss {
 	u32 status_type;
 	u32 status_subtype;
-	struct fsf_queue_designator
-	 queue_designator;
-	u32 payload_size;
-#define ZFCP_DBF_UNSOL_PAYLOAD				80
-#define ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL		32
-#define ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD	56
-#define ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT	2 * sizeof(u32)
-	u8 payload[ZFCP_DBF_UNSOL_PAYLOAD];
-} __attribute__ ((packed));
-
-struct zfcp_dbf_hba_record_qdio {
-	u32 qdio_error;
-	u8 sbal_index;
-	u8 sbal_count;
-} __attribute__ ((packed));
-
-struct zfcp_dbf_hba_record {
-	u8 tag[ZFCP_DBF_TAG_SIZE];
-	u8 tag2[ZFCP_DBF_TAG_SIZE];
-	union {
-		struct zfcp_dbf_hba_record_response response;
-		struct zfcp_dbf_hba_record_status status;
-		struct zfcp_dbf_hba_record_qdio qdio;
-		struct fsf_bit_error_payload berr;
-	} u;
-} __attribute__ ((packed));
-
-struct zfcp_dbf_san_record_ct_request {
-	u16 cmd_req_code;
-	u8 revision;
-	u8 gs_type;
-	u8 gs_subtype;
-	u8 options;
-	u16 max_res_size;
-	u32 len;
-	u32 d_id;
-} __attribute__ ((packed));
-
-struct zfcp_dbf_san_record_ct_response {
-	u16 cmd_rsp_code;
-	u8 revision;
-	u8 reason_code;
-	u8 expl;
-	u8 vendor_unique;
-	u16 max_res_size;
-	u32 len;
-} __attribute__ ((packed));
-
-struct zfcp_dbf_san_record_els {
 	u32 d_id;
-} __attribute__ ((packed));
+	u64 lun;
+	u64 queue_designator;
+} __packed;
 
-struct zfcp_dbf_san_record {
-	u8 tag[ZFCP_DBF_TAG_SIZE];
-	u64 fsf_reqid;
-	u32 fsf_seqno;
+/**
+ * enum zfcp_dbf_hba_id - HBA trace record identifier
+ * @ZFCP_DBF_HBA_RES: response trace record
+ * @ZFCP_DBF_HBA_USS: unsolicited status trace record
+ * @ZFCP_DBF_HBA_BIT: bit error trace record
+ */
+enum zfcp_dbf_hba_id {
+	ZFCP_DBF_HBA_RES	= 1,
+	ZFCP_DBF_HBA_USS	= 2,
+	ZFCP_DBF_HBA_BIT	= 3,
+};
+
+/**
+ * struct zfcp_dbf_hba - common trace record for HBA records
+ * @id: unique number of recovery record type
+ * @tag: identifier string specifying the location of initiation
+ * @fsf_req_id: request id for fsf requests
+ * @fsf_req_status: status of fsf request
+ * @fsf_cmd: fsf command
+ * @fsf_seq_no: fsf sequence number
+ * @pl_len: length of payload stored as zfcp_dbf_pay
+ * @u: record type specific data
+ */
+struct zfcp_dbf_hba {
+	u8 id;
+	char tag[ZFCP_DBF_TAG_LEN];
+	u64 fsf_req_id;
+	u32 fsf_req_status;
+	u32 fsf_cmd;
+	u32 fsf_seq_no;
+	u16 pl_len;
 	union {
-		struct zfcp_dbf_san_record_ct_request ct_req;
-		struct zfcp_dbf_san_record_ct_response ct_resp;
-		struct zfcp_dbf_san_record_els els;
+		struct zfcp_dbf_hba_res res;
+		struct zfcp_dbf_hba_uss uss;
+		struct fsf_bit_error_payload be;
 	} u;
-} __attribute__ ((packed));
+} __packed;
 
-#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
+/**
+ * enum zfcp_dbf_scsi_id - scsi trace record identifier
+ * @ZFCP_DBF_SCSI_CMND: scsi command trace record
+ */
+enum zfcp_dbf_scsi_id {
+	ZFCP_DBF_SCSI_CMND	= 1,
+};
 
-struct zfcp_dbf_scsi_record {
-	u8 tag[ZFCP_DBF_TAG_SIZE];
-	u8 tag2[ZFCP_DBF_TAG_SIZE];
+/**
+ * struct zfcp_dbf_scsi - common trace record for SCSI records
+ * @id: unique number of recovery record type
+ * @tag: identifier string specifying the location of initiation
+ * @scsi_id: scsi device id
+ * @scsi_lun: scsi device logical unit number
+ * @scsi_result: scsi result
+ * @scsi_retries: current retry number of scsi request
+ * @scsi_allowed: allowed retries
+ * @fcp_rsp_info: FCP response info
+ * @scsi_opcode: scsi opcode
+ * @fsf_req_id: request id of fsf request
+ * @host_scribble: LLD specific data attached to SCSI request
+ * @pl_len: length of payload stored as zfcp_dbf_pay
+ * @fsf_rsp: response for fsf request
+ */
+struct zfcp_dbf_scsi {
+	u8 id;
+	char tag[ZFCP_DBF_TAG_LEN];
 	u32 scsi_id;
 	u32 scsi_lun;
 	u32 scsi_result;
-	u64 scsi_cmnd;
-#define ZFCP_DBF_SCSI_OPCODE	16
-	u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
 	u8 scsi_retries;
 	u8 scsi_allowed;
-	u64 fsf_reqid;
-	u32 fsf_seqno;
-	u64 fsf_issued;
-	u64 old_fsf_reqid;
-	u8 rsp_validity;
-	u8 rsp_scsi_status;
-	u32 rsp_resid;
-	u8 rsp_code;
-#define ZFCP_DBF_SCSI_FCP_SNS_INFO	16
-#define ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO	256
-	u32 sns_info_len;
-	u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO];
-} __attribute__ ((packed));
+	u8 fcp_rsp_info;
+#define ZFCP_DBF_SCSI_OPCODE	16
+	u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
+	u64 fsf_req_id;
+	u64 host_scribble;
+	u16 pl_len;
+	struct fcp_resp_with_ext fcp_rsp;
+} __packed;
 
+/**
+ * struct zfcp_dbf_pay - trace record for unformatted payload information
+ * @area: area this record is originated from
+ * @counter: ascending record number
+ * @fsf_req_id: request id of fsf request
+ * @data: unformatted data
+ */
+struct zfcp_dbf_pay {
+	u8 counter;
+	char area[ZFCP_DBF_TAG_LEN];
+	u64 fsf_req_id;
+#define ZFCP_DBF_PAY_MAX_REC 0x100
+	char data[ZFCP_DBF_PAY_MAX_REC];
+} __packed;
+
+/**
+ * struct zfcp_dbf - main dbf trace structure
+ * @pay: reference to payload trace area
+ * @rec: reference to recovery trace area
+ * @hba: reference to hba trace area
+ * @san: reference to san trace area
+ * @scsi: reference to scsi trace area
+ * @pay_lock: lock protecting payload trace buffer
+ * @rec_lock: lock protecting recovery trace buffer
+ * @hba_lock: lock protecting hba trace buffer
+ * @san_lock: lock protecting san trace buffer
+ * @scsi_lock: lock protecting scsi trace buffer
+ * @pay_buf: pre-allocated buffer for payload
+ * @rec_buf: pre-allocated buffer for recovery
+ * @hba_buf: pre-allocated buffer for hba
+ * @san_buf: pre-allocated buffer for san
+ * @scsi_buf: pre-allocated buffer for scsi
+ */
 struct zfcp_dbf {
+	debug_info_t			*pay;
 	debug_info_t			*rec;
 	debug_info_t			*hba;
 	debug_info_t			*san;
 	debug_info_t			*scsi;
+	spinlock_t			pay_lock;
 	spinlock_t			rec_lock;
 	spinlock_t			hba_lock;
 	spinlock_t			san_lock;
 	spinlock_t			scsi_lock;
-	struct zfcp_dbf_rec_record	rec_buf;
-	struct zfcp_dbf_hba_record	hba_buf;
-	struct zfcp_dbf_san_record	san_buf;
-	struct zfcp_dbf_scsi_record	scsi_buf;
-	struct zfcp_adapter		*adapter;
+	struct zfcp_dbf_pay		pay_buf;
+	struct zfcp_dbf_rec		rec_buf;
+	struct zfcp_dbf_hba		hba_buf;
+	struct zfcp_dbf_san		san_buf;
+	struct zfcp_dbf_scsi		scsi_buf;
 };
 
 static inline
-void zfcp_dbf_hba_fsf_resp(const char *tag2, int level,
-			   struct zfcp_fsf_req *req, struct zfcp_dbf *dbf)
+void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
 {
-	if (level <= dbf->hba->level)
-		_zfcp_dbf_hba_fsf_response(tag2, level, req, dbf);
+	if (level <= req->adapter->dbf->hba->level)
+		zfcp_dbf_hba_fsf_res(tag, req);
 }
 
 /**
  * zfcp_dbf_hba_fsf_response - trace event for request completion
- * @fsf_req: request that has been completed
+ * @req: request that has been completed
  */
-static inline void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
+static inline
+void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
 {
-	struct zfcp_dbf *dbf = req->adapter->dbf;
 	struct fsf_qtcb *qtcb = req->qtcb;
 
 	if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
 	    (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
-		zfcp_dbf_hba_fsf_resp("perr", 1, req, dbf);
+		zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
 
 	} else if (qtcb->header.fsf_status != FSF_GOOD) {
-		zfcp_dbf_hba_fsf_resp("ferr", 1, req, dbf);
+		zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req);
 
 	} else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
 		   (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
-		zfcp_dbf_hba_fsf_resp("open", 4, req, dbf);
+		zfcp_dbf_hba_fsf_resp("fs_open", 4, req);
 
 	} else if (qtcb->header.log_length) {
-		zfcp_dbf_hba_fsf_resp("qtcb", 5, req, dbf);
+		zfcp_dbf_hba_fsf_resp("fs_qtcb", 5, req);
 
 	} else {
-		zfcp_dbf_hba_fsf_resp("norm", 6, req, dbf);
+		zfcp_dbf_hba_fsf_resp("fs_norm", 6, req);
 	}
- }
-
-/**
- * zfcp_dbf_hba_fsf_unsol - trace event for an unsolicited status buffer
- * @tag: tag indicating which kind of unsolicited status has been received
- * @dbf: reference to dbf structure
- * @status_buffer: buffer containing payload of unsolicited status
- */
-static inline
-void zfcp_dbf_hba_fsf_unsol(const char *tag, struct zfcp_dbf *dbf,
-			    struct fsf_status_read_buffer *buf)
-{
-	int level = 2;
-
-	if (level <= dbf->hba->level)
-		_zfcp_dbf_hba_fsf_unsol(tag, level, dbf, buf);
 }
 
 static inline
-void zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
-		   struct zfcp_dbf *dbf, struct scsi_cmnd *scmd,
-		   struct zfcp_fsf_req *req, unsigned long old_id)
+void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
+		   struct zfcp_fsf_req *req)
 {
-	if (level <= dbf->scsi->level)
-		_zfcp_dbf_scsi(tag, tag2, level, dbf, scmd, req, old_id);
+	struct zfcp_adapter *adapter = (struct zfcp_adapter *)
+					scmd->device->host->hostdata[0];
+
+	if (level <= adapter->dbf->scsi->level)
+		zfcp_dbf_scsi(tag, scmd, req);
 }
 
 /**
  * zfcp_dbf_scsi_result - trace event for SCSI command completion
- * @dbf: adapter dbf trace
  * @scmd: SCSI command pointer
  * @req: FSF request used to issue SCSI command
  */
 static inline
-void zfcp_dbf_scsi_result(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd,
-			  struct zfcp_fsf_req *req)
+void zfcp_dbf_scsi_result(struct scsi_cmnd *scmd, struct zfcp_fsf_req *req)
 {
 	if (scmd->result != 0)
-		zfcp_dbf_scsi("rslt", "erro", 3, dbf, scmd, req, 0);
+		_zfcp_dbf_scsi("rsl_err", 3, scmd, req);
 	else if (scmd->retries > 0)
-		zfcp_dbf_scsi("rslt", "retr", 4, dbf, scmd, req, 0);
+		_zfcp_dbf_scsi("rsl_ret", 4, scmd, req);
 	else
-		zfcp_dbf_scsi("rslt", "norm", 6, dbf, scmd, req, 0);
+		_zfcp_dbf_scsi("rsl_nor", 6, scmd, req);
 }
 
 /**
  * zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command
- * @dbf: adapter dbf trace
  * @scmd: SCSI command pointer
  */
 static inline
-void zfcp_dbf_scsi_fail_send(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd)
+void zfcp_dbf_scsi_fail_send(struct scsi_cmnd *scmd)
 {
-	zfcp_dbf_scsi("rslt", "fail", 4, dbf, scmd, NULL, 0);
+	_zfcp_dbf_scsi("rsl_fai", 4, scmd, NULL);
 }
 
 /**
  * zfcp_dbf_scsi_abort - trace event for SCSI command abort
  * @tag: tag indicating success or failure of abort operation
- * @adapter: adapter thas has been used to issue SCSI command to be aborted
  * @scmd: SCSI command to be aborted
- * @new_req: request containing abort (might be NULL)
- * @old_id: identifier of request containg SCSI command to be aborted
+ * @fsf_req: request containing abort (might be NULL)
  */
 static inline
-void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf,
-			 struct scsi_cmnd *scmd, struct zfcp_fsf_req *new_req,
-			 unsigned long old_id)
+void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
+			 struct zfcp_fsf_req *fsf_req)
 {
-	zfcp_dbf_scsi("abrt", tag, 1, dbf, scmd, new_req, old_id);
+	_zfcp_dbf_scsi(tag, 1, scmd, fsf_req);
 }
 
 /**
@@ -352,12 +366,17 @@ void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf,
  * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
  */
 static inline
-void zfcp_dbf_scsi_devreset(const char *tag, struct scsi_cmnd *scmnd, u8 flag)
+void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
 {
-	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
+	char tmp_tag[ZFCP_DBF_TAG_LEN];
+
+	if (flag == FCP_TMF_TGT_RESET)
+		memcpy(tmp_tag, "tr_", 3);
+	else
+		memcpy(tmp_tag, "lr_", 3);
 
-	zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1,
-		      zfcp_sdev->port->adapter->dbf, scmnd, NULL, 0);
+	memcpy(&tmp_tag[3], tag, 4);
+	_zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
 }
 
 #endif /* ZFCP_DBF_H */
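
Two conventions recur through the new trace layout above: every record carries a fixed ZFCP_DBF_TAG_LEN (7-byte) tag, often built from a 3-character prefix plus a 4-character caller tag as in zfcp_dbf_scsi_devreset(), and the inline wrappers compare the requested level against the trace area's current level before assembling a record. A minimal stand-alone sketch of both, in user-space C with a stub dbf_area standing in for debug_info_t (illustrative only, not driver code):

#include <stdio.h>
#include <string.h>

#define ZFCP_DBF_TAG_LEN 7

struct dbf_area { int level; };			/* stand-in for debug_info_t */

static void dbf_scsi(const struct dbf_area *area, const char *tag)
{
	/* the driver copies tag, LUN, opcode etc. into a packed record here */
	printf("area level %d, record tag %.7s\n", area->level, tag);
}

static void _dbf_scsi(const struct dbf_area *area, int level, const char *tag)
{
	if (level <= area->level)		/* same gate as the inline wrappers above */
		dbf_scsi(area, tag);
}

static void dbf_scsi_devreset(const struct dbf_area *area, int tgt_reset,
			      const char *tag)
{
	char tmp_tag[ZFCP_DBF_TAG_LEN];

	memcpy(tmp_tag, tgt_reset ? "tr_" : "lr_", 3);	/* target vs logical unit reset */
	memcpy(&tmp_tag[3], tag, 4);			/* 4-char caller tag, no NUL needed */
	_dbf_scsi(area, 1, tmp_tag);
}

int main(void)
{
	struct dbf_area scsi_area = { .level = 3 };

	dbf_scsi_devreset(&scsi_area, 1, "okay");	/* level 1 <= 3: traced as "tr_okay" */
	_dbf_scsi(&scsi_area, 6, "rsl_nor");		/* level 6 > 3: suppressed */
	return 0;
}

Keeping the tag at a fixed length with no terminator is what lets the record structures above stay __packed without a separate tag-length field.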

+ 59 - 82
drivers/s390/scsi/zfcp_erp.c

@@ -76,9 +76,9 @@ static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
 	struct zfcp_adapter *adapter = act->adapter;
 
 	list_move(&act->list, &act->adapter->erp_ready_head);
-	zfcp_dbf_rec_action("erardy1", act);
+	zfcp_dbf_rec_run("erardy1", act);
 	wake_up(&adapter->erp_ready_wq);
-	zfcp_dbf_rec_thread("erardy2", adapter->dbf);
+	zfcp_dbf_rec_run("erardy2", act);
 }
 
 static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
@@ -236,10 +236,10 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
 static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
 				   struct zfcp_port *port,
 				   struct scsi_device *sdev,
-				   char *id, void *ref, u32 act_status)
+				   char *id, u32 act_status)
 {
 	int retval = 1, need;
-	struct zfcp_erp_action *act = NULL;
+	struct zfcp_erp_action *act;
 
 	if (!adapter->erp_thread)
 		return -EIO;
@@ -255,15 +255,14 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
 	++adapter->erp_total_count;
 	list_add_tail(&act->list, &adapter->erp_ready_head);
 	wake_up(&adapter->erp_ready_wq);
-	zfcp_dbf_rec_thread("eracte1", adapter->dbf);
 	retval = 0;
  out:
-	zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, sdev);
+	zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need);
 	return retval;
 }
 
 static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
-				    int clear_mask, char *id, void *ref)
+				    int clear_mask, char *id)
 {
 	zfcp_erp_adapter_block(adapter, clear_mask);
 	zfcp_scsi_schedule_rports_block(adapter);
@@ -275,7 +274,7 @@ static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
 		return -EIO;
 	}
 	return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
-				       adapter, NULL, NULL, id, ref, 0);
+				       adapter, NULL, NULL, id, 0);
 }
 
 /**
@@ -283,10 +282,8 @@ static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
  * @adapter: Adapter to reopen.
  * @clear: Status flags to clear.
  * @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
  */
-void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
-			     char *id, void *ref)
+void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
 {
 	unsigned long flags;
 
@@ -299,7 +296,7 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
 					    ZFCP_STATUS_COMMON_ERP_FAILED);
 	else
 		zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
-					NULL, NULL, id, ref, 0);
+					NULL, NULL, id, 0);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
@@ -308,13 +305,12 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
  * @adapter: Adapter to shut down.
  * @clear: Status flags to clear.
  * @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
  */
 void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
-			       char *id, void *ref)
+			       char *id)
 {
 	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
-	zfcp_erp_adapter_reopen(adapter, clear | flags, id, ref);
+	zfcp_erp_adapter_reopen(adapter, clear | flags, id);
 }
 
 /**
@@ -322,13 +318,11 @@ void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
  * @port: Port to shut down.
  * @clear: Status flags to clear.
  * @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
  */
-void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id,
-			    void *ref)
+void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id)
 {
 	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
-	zfcp_erp_port_reopen(port, clear | flags, id, ref);
+	zfcp_erp_port_reopen(port, clear | flags, id);
 }
 
 static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
@@ -337,8 +331,8 @@ static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
 				    ZFCP_STATUS_COMMON_UNBLOCKED | clear);
 }
 
-static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
-					 int clear, char *id, void *ref)
+static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
+					 char *id)
 {
 	zfcp_erp_port_block(port, clear);
 	zfcp_scsi_schedule_rport_block(port);
@@ -347,28 +341,26 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
 		return;
 
 	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
-				port->adapter, port, NULL, id, ref, 0);
+				port->adapter, port, NULL, id, 0);
 }
 
 /**
  * zfcp_erp_port_forced_reopen - Forced close of port and open again
  * @port: Port to force close and to reopen.
+ * @clear: Status flags to clear.
  * @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
  */
-void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id,
-				 void *ref)
+void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id)
 {
 	unsigned long flags;
 	struct zfcp_adapter *adapter = port->adapter;
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	_zfcp_erp_port_forced_reopen(port, clear, id, ref);
+	_zfcp_erp_port_forced_reopen(port, clear, id);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
-static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
-				 void *ref)
+static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
 {
 	zfcp_erp_port_block(port, clear);
 	zfcp_scsi_schedule_rport_block(port);
@@ -380,24 +372,25 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
 	}
 
 	return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
-				       port->adapter, port, NULL, id, ref, 0);
+				       port->adapter, port, NULL, id, 0);
 }
 
 /**
  * zfcp_erp_port_reopen - trigger remote port recovery
  * @port: port to recover
  * @clear_mask: flags in port status to be cleared
+ * @id: Id for debug trace event.
  *
  * Returns 0 if recovery has been triggered, < 0 if not.
  */
-int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref)
+int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
 {
 	int retval;
 	unsigned long flags;
 	struct zfcp_adapter *adapter = port->adapter;
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	retval = _zfcp_erp_port_reopen(port, clear, id, ref);
+	retval = _zfcp_erp_port_reopen(port, clear, id);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 
 	return retval;
@@ -410,7 +403,7 @@ static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask)
 }
 
 static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
-				 void *ref, u32 act_status)
+				 u32 act_status)
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
@@ -421,17 +414,18 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
 		return;
 
 	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
-				zfcp_sdev->port, sdev, id, ref, act_status);
+				zfcp_sdev->port, sdev, id, act_status);
 }
 
 /**
  * zfcp_erp_lun_reopen - initiate reopen of a LUN
  * @sdev: SCSI device / LUN to be reopened
  * @clear_mask: specifies flags in LUN status to be cleared
+ * @id: Id for debug trace event.
+ *
  * Return: 0 on success, < 0 on error
  */
-void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
-			 void *ref)
+void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
 {
 	unsigned long flags;
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -439,7 +433,7 @@ void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
 	struct zfcp_adapter *adapter = port->adapter;
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	_zfcp_erp_lun_reopen(sdev, clear, id, ref, 0);
+	_zfcp_erp_lun_reopen(sdev, clear, id, 0);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
@@ -448,13 +442,11 @@ void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
  * @sdev: SCSI device / LUN to shut down.
  * @clear: Status flags to clear.
  * @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
  */
-void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id,
-			   void *ref)
+void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id)
 {
 	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
-	zfcp_erp_lun_reopen(sdev, clear | flags, id, ref);
+	zfcp_erp_lun_reopen(sdev, clear | flags, id);
 }
 
 /**
@@ -476,7 +468,7 @@ void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
 	int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	_zfcp_erp_lun_reopen(sdev, clear, id, NULL, ZFCP_STATUS_ERP_NO_REF);
+	_zfcp_erp_lun_reopen(sdev, clear, id, ZFCP_STATUS_ERP_NO_REF);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 
 	zfcp_erp_wait(adapter);
@@ -490,14 +482,14 @@ static int status_change_set(unsigned long mask, atomic_t *status)
 static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
 {
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
-		zfcp_dbf_rec_adapter("eraubl1", NULL, adapter->dbf);
+		zfcp_dbf_rec_run("eraubl1", &adapter->erp_action);
 	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
 }
 
 static void zfcp_erp_port_unblock(struct zfcp_port *port)
 {
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
-		zfcp_dbf_rec_port("erpubl1", NULL, port);
+		zfcp_dbf_rec_run("erpubl1", &port->erp_action);
 	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
 }
 
@@ -506,14 +498,14 @@ static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
-		zfcp_dbf_rec_lun("erlubl1", NULL, sdev);
+		zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
 	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
 }
 
 static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
 {
 	list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
-	zfcp_dbf_rec_action("erator1", erp_action);
+	zfcp_dbf_rec_run("erator1", erp_action);
 }
 
 static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
@@ -530,11 +522,11 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
 		if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
 				   ZFCP_STATUS_ERP_TIMEDOUT)) {
 			req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
-			zfcp_dbf_rec_action("erscf_1", act);
+			zfcp_dbf_rec_run("erscf_1", act);
 			req->erp_action = NULL;
 		}
 		if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
-			zfcp_dbf_rec_action("erscf_2", act);
+			zfcp_dbf_rec_run("erscf_2", act);
 		if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
 			act->fsf_req_id = 0;
 	} else
@@ -585,40 +577,40 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
 }
 
 static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
-				      int clear, char *id, void *ref)
+				      int clear, char *id)
 {
 	struct zfcp_port *port;
 
 	read_lock(&adapter->port_list_lock);
 	list_for_each_entry(port, &adapter->port_list, list)
-		_zfcp_erp_port_reopen(port, clear, id, ref);
+		_zfcp_erp_port_reopen(port, clear, id);
 	read_unlock(&adapter->port_list_lock);
 }
 
 static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
-				     char *id, void *ref)
+				     char *id)
 {
 	struct scsi_device *sdev;
 
 	shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port)
-			_zfcp_erp_lun_reopen(sdev, clear, id, ref, 0);
+			_zfcp_erp_lun_reopen(sdev, clear, id, 0);
 }
 
 static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
 {
 	switch (act->action) {
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
-		_zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1", NULL);
+		_zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1");
 		break;
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
-		_zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2", NULL);
+		_zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2");
 		break;
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
-		_zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL);
+		_zfcp_erp_port_reopen(act->port, 0, "ersff_3");
 		break;
 	case ZFCP_ERP_ACTION_REOPEN_LUN:
-		_zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", NULL, 0);
+		_zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", 0);
 		break;
 	}
 }
@@ -627,13 +619,13 @@ static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
 {
 	switch (act->action) {
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
-		_zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1", NULL);
+		_zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1");
 		break;
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
-		_zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL);
+		_zfcp_erp_port_reopen(act->port, 0, "ersfs_2");
 		break;
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
-		_zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3", NULL);
+		_zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3");
 		break;
 	}
 }
@@ -652,17 +644,6 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
 	read_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
-static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act)
-{
-	struct zfcp_qdio *qdio = act->adapter->qdio;
-
-	if (zfcp_qdio_open(qdio))
-		return ZFCP_ERP_FAILED;
-	init_waitqueue_head(&qdio->req_q_wq);
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status);
-	return ZFCP_ERP_SUCCEEDED;
-}
-
 static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
 {
 	struct zfcp_port *port;
@@ -670,7 +651,7 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
 				 adapter->peer_d_id);
 	if (IS_ERR(port)) /* error or port already attached */
 		return;
-	_zfcp_erp_port_reopen(port, 0, "ereptp1", NULL);
+	_zfcp_erp_port_reopen(port, 0, "ereptp1");
 }
 
 static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
@@ -693,10 +674,8 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
 			return ZFCP_ERP_FAILED;
 		}
 
-		zfcp_dbf_rec_thread_lock("erasfx1", adapter->dbf);
 		wait_event(adapter->erp_ready_wq,
 			   !list_empty(&adapter->erp_ready_head));
-		zfcp_dbf_rec_thread_lock("erasfx2", adapter->dbf);
 		if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT)
 			break;
 
@@ -735,10 +714,10 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
 	if (ret)
 		return ZFCP_ERP_FAILED;
 
-	zfcp_dbf_rec_thread_lock("erasox1", adapter->dbf);
+	zfcp_dbf_rec_run("erasox1", act);
 	wait_event(adapter->erp_ready_wq,
 		   !list_empty(&adapter->erp_ready_head));
-	zfcp_dbf_rec_thread_lock("erasox2", adapter->dbf);
+	zfcp_dbf_rec_run("erasox2", act);
 	if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
 		return ZFCP_ERP_FAILED;
 
@@ -788,7 +767,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
 {
 	struct zfcp_adapter *adapter = act->adapter;
 
-	if (zfcp_erp_adapter_strategy_open_qdio(act)) {
+	if (zfcp_qdio_open(adapter->qdio)) {
 		atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
 				  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
 				  &adapter->status);
@@ -1166,7 +1145,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
 		if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
 			_zfcp_erp_adapter_reopen(adapter,
 						 ZFCP_STATUS_COMMON_ERP_FAILED,
-						 "ersscg1", NULL);
+						 "ersscg1");
 			return ZFCP_ERP_EXIT;
 		}
 		break;
@@ -1176,7 +1155,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
 		if (zfcp_erp_strat_change_det(&port->status, erp_status)) {
 			_zfcp_erp_port_reopen(port,
 					      ZFCP_STATUS_COMMON_ERP_FAILED,
-					      "ersscg2", NULL);
+					      "ersscg2");
 			return ZFCP_ERP_EXIT;
 		}
 		break;
@@ -1186,7 +1165,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
 		if (zfcp_erp_strat_change_det(&zfcp_sdev->status, erp_status)) {
 			_zfcp_erp_lun_reopen(sdev,
 					     ZFCP_STATUS_COMMON_ERP_FAILED,
-					     "ersscg3", NULL, 0);
+					     "ersscg3", 0);
 			return ZFCP_ERP_EXIT;
 		}
 		break;
@@ -1206,7 +1185,7 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
 	}
 
 	list_del(&erp_action->list);
-	zfcp_dbf_rec_action("eractd1", erp_action);
+	zfcp_dbf_rec_run("eractd1", erp_action);
 
 	switch (erp_action->action) {
 	case ZFCP_ERP_ACTION_REOPEN_LUN:
@@ -1313,7 +1292,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
 			erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
 		}
 		if (adapter->erp_total_count == adapter->erp_low_mem_count)
-			_zfcp_erp_adapter_reopen(adapter, 0, "erstgy1", NULL);
+			_zfcp_erp_adapter_reopen(adapter, 0, "erstgy1");
 		else {
 			zfcp_erp_strategy_memwait(erp_action);
 			retval = ZFCP_ERP_CONTINUES;
@@ -1357,11 +1336,9 @@ static int zfcp_erp_thread(void *data)
 	unsigned long flags;
 
 	for (;;) {
-		zfcp_dbf_rec_thread_lock("erthrd1", adapter->dbf);
 		wait_event_interruptible(adapter->erp_ready_wq,
 			   !list_empty(&adapter->erp_ready_head) ||
 			   kthread_should_stop());
-		zfcp_dbf_rec_thread_lock("erthrd2", adapter->dbf);
 
 		if (kthread_should_stop())
 			break;
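
The hunks above leave the basic ERP flow intact: a trigger enqueues a recovery action on erp_ready_head, traces it via zfcp_dbf_rec_trig() with the wanted and needed action, and wakes erp_ready_wq; the ERP thread sleeps in wait_event_interruptible() until an action is ready or the thread is told to stop. Below is a user-space pthread analogue of that enqueue/wake/wait loop, purely for illustration (the kernel code uses wait queues and kthread_should_stop(), not condition variables):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct erp_action {
	struct erp_action *next;
	const char *tag;
};

static struct erp_action *ready_head;		/* models adapter->erp_ready_head */
static bool stop_thread;			/* models kthread_should_stop() */
static pthread_mutex_t erp_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t erp_ready = PTHREAD_COND_INITIALIZER;

static void erp_action_enqueue(struct erp_action *act)
{
	pthread_mutex_lock(&erp_lock);
	act->next = ready_head;
	ready_head = act;
	pthread_cond_signal(&erp_ready);	/* wake_up(&adapter->erp_ready_wq) */
	pthread_mutex_unlock(&erp_lock);
}

static void *erp_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&erp_lock);
	for (;;) {
		while (!ready_head && !stop_thread)
			pthread_cond_wait(&erp_ready, &erp_lock);
		if (!ready_head)		/* woken only to stop, list drained */
			break;
		struct erp_action *act = ready_head;
		ready_head = act->next;
		printf("running recovery action %s\n", act->tag);
	}
	pthread_mutex_unlock(&erp_lock);
	return NULL;
}

int main(void)
{
	static struct erp_action act = { .tag = "erardy1" };	/* tag borrowed from the hunk above */
	pthread_t tid;

	pthread_create(&tid, NULL, erp_thread, NULL);
	erp_action_enqueue(&act);

	pthread_mutex_lock(&erp_lock);
	stop_thread = true;
	pthread_cond_signal(&erp_ready);
	pthread_mutex_unlock(&erp_lock);

	pthread_join(tid, NULL);
	return 0;
}

Build with -pthread; as in the driver, the ready list and the stop condition are only touched under the same lock that guards the wait.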

+ 20 - 32
drivers/s390/scsi/zfcp_ext.h

@@ -45,47 +45,33 @@ extern void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *);
 
 /* zfcp_dbf.c */
 extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
-extern void zfcp_dbf_adapter_unregister(struct zfcp_dbf *);
-extern void zfcp_dbf_rec_thread(char *, struct zfcp_dbf *);
-extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *);
-extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *);
-extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *);
-extern void zfcp_dbf_rec_lun(char *, void *, struct scsi_device *);
-extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *,
-				 struct zfcp_adapter *, struct zfcp_port *,
-				 struct scsi_device *);
-extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *);
-extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *,
-				       struct zfcp_dbf *);
-extern void _zfcp_dbf_hba_fsf_unsol(const char *, int level, struct zfcp_dbf *,
-					  struct fsf_status_read_buffer *);
-extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int);
+extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
+extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
+			      struct zfcp_port *, struct scsi_device *, u8, u8);
+extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *, u32);
-extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *);
-extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *);
-extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *);
-extern void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *);
-extern void _zfcp_dbf_scsi(const char *, const char *, int, struct zfcp_dbf *,
-			   struct scsi_cmnd *, struct zfcp_fsf_req *,
-			   unsigned long);
+extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
+extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *);
 
 /* zfcp_erp.c */
 extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
 extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
-extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *, void *);
-extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *,
-				      void *);
+extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
+extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
 extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
 extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
-extern int  zfcp_erp_port_reopen(struct zfcp_port *, int, char *, void *);
-extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *);
-extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *,
-					void *);
+extern int  zfcp_erp_port_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
 extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
 extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
-extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *, void *);
-extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *, void *);
+extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
+extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *);
 extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *);
 extern int  zfcp_erp_thread_setup(struct zfcp_adapter *);
 extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
@@ -149,6 +135,8 @@ extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
 extern int zfcp_qdio_open(struct zfcp_qdio *);
 extern void zfcp_qdio_close(struct zfcp_qdio *);
 extern void zfcp_qdio_siosl(struct zfcp_adapter *);
+extern struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *,
+					     struct qdio_buffer *);
 
 /* zfcp_scsi.c */
 extern struct zfcp_data zfcp_data;

+ 10 - 10
drivers/s390/scsi/zfcp_fc.c

@@ -174,7 +174,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
 		if (!port->d_id)
 			zfcp_erp_port_reopen(port,
 					     ZFCP_STATUS_COMMON_ERP_FAILED,
-					     "fcrscn1", NULL);
+					     "fcrscn1");
 	}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 }
@@ -215,7 +215,7 @@ static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
 	read_lock_irqsave(&adapter->port_list_lock, flags);
 	list_for_each_entry(port, &adapter->port_list, list)
 		if (port->wwpn == wwpn) {
-			zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req);
+			zfcp_erp_port_forced_reopen(port, 0, "fciwwp1");
 			break;
 		}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
@@ -251,7 +251,7 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
 		(struct fsf_status_read_buffer *) fsf_req->data;
 	unsigned int els_type = status_buffer->payload.data[0];
 
-	zfcp_dbf_san_incoming_els(fsf_req);
+	zfcp_dbf_san_in_els("fciels1", fsf_req);
 	if (els_type == ELS_PLOGI)
 		zfcp_fc_incoming_plogi(fsf_req);
 	else if (els_type == ELS_LOGO)
@@ -360,7 +360,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
 	ret = zfcp_fc_ns_gid_pn(port);
 	if (ret) {
 		/* could not issue gid_pn for some reason */
-		zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1", NULL);
+		zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1");
 		goto out;
 	}
 
@@ -369,7 +369,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
 		goto out;
 	}
 
-	zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL);
+	zfcp_erp_port_reopen(port, 0, "fcgpn_3");
 out:
 	put_device(&port->dev);
 }
@@ -426,7 +426,7 @@ static void zfcp_fc_adisc_handler(void *data)
 	if (adisc->els.status) {
 		/* request rejected or timed out */
 		zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
-					    "fcadh_1", NULL);
+					    "fcadh_1");
 		goto out;
 	}
 
@@ -436,7 +436,7 @@ static void zfcp_fc_adisc_handler(void *data)
 	if ((port->wwpn != adisc_resp->adisc_wwpn) ||
 	    !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
 		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
-				     "fcadh_2", NULL);
+				     "fcadh_2");
 		goto out;
 	}
 
@@ -507,7 +507,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
 
 	/* send of ADISC was not possible */
 	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
-	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL);
+	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");
 
 out:
 	put_device(&port->dev);
@@ -659,7 +659,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
 		port = zfcp_port_enqueue(adapter, acc->fp_wwpn,
 					 ZFCP_STATUS_COMMON_NOESC, d_id);
 		if (!IS_ERR(port))
-			zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL);
+			zfcp_erp_port_reopen(port, 0, "fcegpf1");
 		else if (PTR_ERR(port) != -EEXIST)
 			ret = PTR_ERR(port);
 	}
@@ -671,7 +671,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
 	write_unlock_irqrestore(&adapter->port_list_lock, flags);
 
 	list_for_each_entry_safe(port, tmp, &remove_lh, list) {
-		zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL);
+		zfcp_erp_port_shutdown(port, 0, "fcegpf2");
 		zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
 	}
 

+ 58 - 55
drivers/s390/scsi/zfcp_fsf.c

@@ -23,7 +23,7 @@ static void zfcp_fsf_request_timeout_handler(unsigned long data)
 	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
 	zfcp_qdio_siosl(adapter);
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
-				"fsrth_1", NULL);
+				"fsrth_1");
 }
 
 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
@@ -65,7 +65,7 @@ static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
 {
 	dev_err(&req->adapter->ccw_device->dev, "FCP device not "
 		"operational because of an unsupported FC class\n");
-	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1", req);
+	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 }
 
@@ -98,7 +98,7 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
 	read_lock_irqsave(&adapter->port_list_lock, flags);
 	list_for_each_entry(port, &adapter->port_list, list)
 		if (port->d_id == d_id) {
-			zfcp_erp_port_reopen(port, 0, "fssrpc1", req);
+			zfcp_erp_port_reopen(port, 0, "fssrpc1");
 			break;
 		}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
@@ -211,13 +211,13 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 	struct fsf_status_read_buffer *sr_buf = req->data;
 
 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
-		zfcp_dbf_hba_fsf_unsol("dism", adapter->dbf, sr_buf);
+		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
 		mempool_free(sr_buf, adapter->pool.status_read_data);
 		zfcp_fsf_req_free(req);
 		return;
 	}
 
-	zfcp_dbf_hba_fsf_unsol("read", adapter->dbf, sr_buf);
+	zfcp_dbf_hba_fsf_uss("fssrh_2", req);
 
 	switch (sr_buf->status_type) {
 	case FSF_STATUS_READ_PORT_CLOSED:
@@ -232,7 +232,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 		dev_warn(&adapter->ccw_device->dev,
 			 "The error threshold for checksum statistics "
 			 "has been exceeded\n");
-		zfcp_dbf_hba_berr(adapter->dbf, req);
+		zfcp_dbf_hba_bit_err("fssrh_3", req);
 		break;
 	case FSF_STATUS_READ_LINK_DOWN:
 		zfcp_fsf_status_read_link_down(req);
@@ -247,7 +247,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 		zfcp_erp_adapter_reopen(adapter,
 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
 					ZFCP_STATUS_COMMON_ERP_FAILED,
-					"fssrh_2", req);
+					"fssrh_2");
 		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
 
 		break;
@@ -287,7 +287,7 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
 			"The FCP adapter reported a problem "
 			"that cannot be recovered\n");
 		zfcp_qdio_siosl(req->adapter);
-		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req);
+		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
 		break;
 	}
 	/* all non-return stats set FSFREQ_ERROR*/
@@ -304,7 +304,7 @@ static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
 		dev_err(&req->adapter->ccw_device->dev,
 			"The FCP adapter does not recognize the command 0x%x\n",
 			req->qtcb->header.fsf_command);
-		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1", req);
+		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -335,17 +335,17 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
 			"QTCB version 0x%x not supported by FCP adapter "
 			"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
 			psq->word[0], psq->word[1]);
-		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
 		break;
 	case FSF_PROT_ERROR_STATE:
 	case FSF_PROT_SEQ_NUMB_ERROR:
-		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req);
+		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_PROT_UNSUPP_QTCB_TYPE:
 		dev_err(&adapter->ccw_device->dev,
 			"The QTCB type is not supported by the FCP adapter\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
 		break;
 	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
 		atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
@@ -355,12 +355,12 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
 		dev_err(&adapter->ccw_device->dev,
 			"0x%Lx is an ambiguous request identifier\n",
 			(unsigned long long)qtcb->bottom.support.req_handle);
-		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
 		break;
 	case FSF_PROT_LINK_DOWN:
 		zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
 		/* go through reopen to flush pending requests */
-		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
+		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
 		break;
 	case FSF_PROT_REEST_QUEUE:
 		/* All ports should be marked as ready to run again */
@@ -369,14 +369,14 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
 		zfcp_erp_adapter_reopen(adapter,
 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
 					ZFCP_STATUS_COMMON_ERP_FAILED,
-					"fspse_8", req);
+					"fspse_8");
 		break;
 	default:
 		dev_err(&adapter->ccw_device->dev,
 			"0x%x is not a valid transfer protocol status\n",
 			qtcb->prefix.prot_status);
 		zfcp_qdio_siosl(adapter);
-		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
 	}
 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 }
@@ -482,7 +482,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 		dev_err(&adapter->ccw_device->dev,
 			"Unknown or unsupported arbitrated loop "
 			"fibre channel topology detected\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
 		return -EIO;
 	}
 
@@ -518,7 +518,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
 				"FCP adapter maximum QTCB size (%d bytes) "
 				"is too small\n",
 				bottom->max_qtcb_size);
-			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1", req);
+			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
 			return;
 		}
 		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
@@ -536,7 +536,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
 			&qtcb->header.fsf_status_qual.link_down_info);
 		break;
 	default:
-		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
 		return;
 	}
 
@@ -552,14 +552,14 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
 		dev_err(&adapter->ccw_device->dev,
 			"The FCP adapter only supports newer "
 			"control block versions\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
 		return;
 	}
 	if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
 		dev_err(&adapter->ccw_device->dev,
 			"The FCP adapter only supports older "
 			"control block versions\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
 	}
 }
 
@@ -700,7 +700,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
 		del_timer(&req->timer);
 		/* lookup request again, list might have changed */
 		zfcp_reqlist_find_rm(adapter->req_list, req_id);
-		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req);
+		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
 		return -EIO;
 	}
 
@@ -754,10 +754,11 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 	goto out;
 
 failed_req_send:
+	req->data = NULL;
 	mempool_free(sr_buf, adapter->pool.status_read_data);
 failed_buf:
+	zfcp_dbf_hba_fsf_uss("fssr__1", req);
 	zfcp_fsf_req_free(req);
-	zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL);
 out:
 	spin_unlock_irq(&qdio->req_q_lock);
 	return retval;
@@ -776,14 +777,13 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
 	case FSF_PORT_HANDLE_NOT_VALID:
 		if (fsq->word[0] == fsq->word[1]) {
 			zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
-						"fsafch1", req);
+						"fsafch1");
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		}
 		break;
 	case FSF_LUN_HANDLE_NOT_VALID:
 		if (fsq->word[0] == fsq->word[1]) {
-			zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2",
-					     req);
+			zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		}
 		break;
@@ -794,14 +794,13 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
 		zfcp_erp_set_port_status(zfcp_sdev->port,
 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_port_reopen(zfcp_sdev->port,
-				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3",
-				     req);
+				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_LUN_BOXED:
 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
-				    "fsafch4", req);
+				    "fsafch4");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                 break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -882,7 +881,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
 
 	switch (header->fsf_status) {
         case FSF_GOOD:
-		zfcp_dbf_san_ct_response(req);
+		zfcp_dbf_san_res("fsscth1", req);
 		ct->status = 0;
 		break;
         case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -902,7 +901,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_PORT_HANDLE_NOT_VALID:
-		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
+		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
 		/* fall through */
 	case FSF_GENERIC_COMMAND_REJECTED:
 	case FSF_PAYLOAD_SIZE_MISMATCH:
@@ -1025,7 +1024,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
 	req->qtcb->header.port_handle = wka_port->handle;
 	req->data = ct;
 
-	zfcp_dbf_san_ct_request(req, wka_port->d_id);
+	zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
 
 	ret = zfcp_fsf_req_send(req);
 	if (ret)
@@ -1053,7 +1052,7 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
 
 	switch (header->fsf_status) {
 	case FSF_GOOD:
-		zfcp_dbf_san_els_response(req);
+		zfcp_dbf_san_res("fsselh1", req);
 		send_els->status = 0;
 		break;
 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -1127,7 +1126,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
 	req->handler = zfcp_fsf_send_els_handler;
 	req->data = els;
 
-	zfcp_dbf_san_els_request(req);
+	zfcp_dbf_san_req("fssels1", req, d_id);
 
 	ret = zfcp_fsf_req_send(req);
 	if (ret)
@@ -1448,7 +1447,7 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
 
 	switch (req->qtcb->header.fsf_status) {
 	case FSF_PORT_HANDLE_NOT_VALID:
-		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1", req);
+		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -1580,7 +1579,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
 
 	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req);
+		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
 	}
 
 	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
@@ -1638,7 +1637,7 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
 
 	switch (header->fsf_status) {
 	case FSF_PORT_HANDLE_NOT_VALID:
-		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1", req);
+		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ACCESS_DENIED:
@@ -1654,7 +1653,7 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
 						  &sdev_to_zfcp(sdev)->status);
 		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
-				     "fscpph2", req);
+				     "fscpph2");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -1743,7 +1742,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
 	switch (header->fsf_status) {
 
 	case FSF_PORT_HANDLE_NOT_VALID:
-		zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1", req);
+		zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
 		/* fall through */
 	case FSF_LUN_ALREADY_OPEN:
 		break;
@@ -1755,8 +1754,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
 		zfcp_erp_set_port_status(zfcp_sdev->port,
 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_port_reopen(zfcp_sdev->port,
-				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2",
-				     req);
+				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_LUN_SHARING_VIOLATION:
@@ -1852,20 +1850,18 @@ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
 
 	switch (req->qtcb->header.fsf_status) {
 	case FSF_PORT_HANDLE_NOT_VALID:
-		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1",
-					req);
+		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_LUN_HANDLE_NOT_VALID:
-		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2", req);
+		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_PORT_BOXED:
 		zfcp_erp_set_port_status(zfcp_sdev->port,
 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_port_reopen(zfcp_sdev->port,
-				     ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3",
-				     req);
+				     ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -2002,13 +1998,12 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
 	switch (header->fsf_status) {
 	case FSF_HANDLE_MISMATCH:
 	case FSF_PORT_HANDLE_NOT_VALID:
-		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1",
-					req);
+		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_FCPLUN_NOT_VALID:
 	case FSF_LUN_HANDLE_NOT_VALID:
-		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2", req);
+		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -2026,7 +2021,7 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
 			(unsigned long long)zfcp_scsi_dev_lun(sdev),
 			(unsigned long long)zfcp_sdev->port->wwpn);
 		zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
-					  "fssfch3", req);
+					  "fssfch3");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_CMND_LENGTH_NOT_VALID:
@@ -2037,21 +2032,20 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
 			(unsigned long long)zfcp_scsi_dev_lun(sdev),
 			(unsigned long long)zfcp_sdev->port->wwpn);
 		zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
-					  "fssfch4", req);
+					  "fssfch4");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_PORT_BOXED:
 		zfcp_erp_set_port_status(zfcp_sdev->port,
 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_port_reopen(zfcp_sdev->port,
-				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5",
-				     req);
+				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_LUN_BOXED:
 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
-				    "fssfch6", req);
+				    "fssfch6");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -2104,7 +2098,7 @@ static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
 
 skip_fsfstatus:
 	zfcp_fsf_req_trace(req, scpnt);
-	zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req);
+	zfcp_dbf_scsi_result(scpnt, req);
 
 	scpnt->host_scribble = NULL;
 	(scpnt->scsi_done) (scpnt);
@@ -2420,3 +2414,12 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
 			break;
 	}
 }
+
+struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *qdio,
+				      struct qdio_buffer *sbal)
+{
+	struct qdio_buffer_element *sbale = &sbal->element[0];
+	u64 req_id = (unsigned long) sbale->addr;
+
+	return zfcp_reqlist_find(qdio->adapter->req_list, req_id);
+}
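The new zfcp_fsf_get_req() maps a QDIO buffer back to its pending FSF request: the request id stored in the first SBALE's addr field at submission time is looked up again in the adapter's request list, and NULL is returned when no matching request is pending. A hypothetical caller (the function name and error handling are illustration only, not part of this patch) might look like:

	static void example_inspect_sbal(struct zfcp_qdio *qdio,
					 struct qdio_buffer *sbal)
	{
		struct zfcp_fsf_req *req = zfcp_fsf_get_req(qdio, sbal);

		if (!req)
			return;	/* already completed or dismissed */

		/* req->qtcb and req->status can now be inspected */
	}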

+ 5 - 5
drivers/s390/scsi/zfcp_qdio.c

@@ -41,7 +41,7 @@ static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
 		zfcp_qdio_siosl(adapter);
 	zfcp_erp_adapter_reopen(adapter,
 				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
-				ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL);
+				ZFCP_STATUS_COMMON_ERP_FAILED, id);
 }
 
 static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
@@ -74,7 +74,6 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
 	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
 
 	if (unlikely(qdio_err)) {
-		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count);
 		zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
 		return;
 	}
@@ -97,7 +96,6 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
 	int sbal_idx, sbal_no;
 
 	if (unlikely(qdio_err)) {
-		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count);
 		zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
 		return;
 	}
@@ -116,7 +114,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
 	 * put SBALs back to response queue
 	 */
 	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
-		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2", NULL);
+		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
 }
 
 static struct qdio_buffer_element *
@@ -236,7 +234,7 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
 	if (!ret) {
 		atomic_inc(&qdio->req_q_full);
 		/* assume hanging outbound queue, try queue recovery */
-		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL);
+		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
 	}
 
 	spin_lock_irq(&qdio->req_q_lock);
@@ -309,6 +307,7 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
 		return -ENOMEM;
 
 	zfcp_qdio_setup_init_data(&init_data, qdio);
+	init_waitqueue_head(&qdio->req_q_wq);
 
 	return qdio_allocate(&init_data);
 }
@@ -393,6 +392,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
 	/* set index of first avalable SBALS / number of available SBALS */
 	qdio->req_q_idx = 0;
 	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
+	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
 
 	return 0;
 

+ 23 - 20
drivers/s390/scsi/zfcp_scsi.c

@@ -30,6 +30,10 @@ module_param_named(dif, enable_dif, bool, 0600);
 MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
 #endif
 
+static bool allow_lun_scan = 1;
+module_param(allow_lun_scan, bool, 0600);
+MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs");
+
 static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
 					int reason)
 {
@@ -68,11 +72,8 @@ static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
 
 static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
 {
-	struct zfcp_adapter *adapter =
-		(struct zfcp_adapter *) scpnt->device->host->hostdata[0];
-
 	set_host_byte(scpnt, result);
-	zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt);
+	zfcp_dbf_scsi_fail_send(scpnt);
 	scpnt->scsi_done(scpnt);
 }
 
@@ -80,7 +81,6 @@ static
 int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
-	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
 	struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
 	int    status, scsi_result, ret;
 
@@ -91,7 +91,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
 	scsi_result = fc_remote_port_chkready(rport);
 	if (unlikely(scsi_result)) {
 		scpnt->result = scsi_result;
-		zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt);
+		zfcp_dbf_scsi_fail_send(scpnt);
 		scpnt->scsi_done(scpnt);
 		return 0;
 	}
@@ -134,6 +134,7 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 	struct zfcp_port *port;
 	struct zfcp_unit *unit;
+	int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
 
 	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
 	if (!port)
@@ -143,7 +144,7 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
 	if (unit)
 		put_device(&unit->dev);
 
-	if (!unit && !(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
+	if (!unit && !(allow_lun_scan && npiv)) {
 		put_device(&port->dev);
 		return -ENXIO;
 	}
@@ -158,7 +159,7 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
 	spin_lock_init(&zfcp_sdev->latencies.lock);
 
 	zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
-	zfcp_erp_lun_reopen(sdev, 0, "scsla_1", NULL);
+	zfcp_erp_lun_reopen(sdev, 0, "scsla_1");
 	zfcp_erp_wait(port->adapter);
 
 	return 0;
@@ -182,8 +183,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 	old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
 	if (!old_req) {
 		write_unlock_irqrestore(&adapter->abort_lock, flags);
-		zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL,
-				    old_reqid);
+		zfcp_dbf_scsi_abort("abrt_or", scpnt, NULL);
 		return FAILED; /* completion could be in progress */
 	}
 	old_req->data = NULL;
@@ -198,29 +198,32 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 
 		zfcp_erp_wait(adapter);
 		ret = fc_block_scsi_eh(scpnt);
-		if (ret)
+		if (ret) {
+			zfcp_dbf_scsi_abort("abrt_bl", scpnt, NULL);
 			return ret;
+		}
 		if (!(atomic_read(&adapter->status) &
 		      ZFCP_STATUS_COMMON_RUNNING)) {
-			zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL,
-					    old_reqid);
+			zfcp_dbf_scsi_abort("abrt_ru", scpnt, NULL);
 			return SUCCESS;
 		}
 	}
-	if (!abrt_req)
+	if (!abrt_req) {
+		zfcp_dbf_scsi_abort("abrt_ar", scpnt, NULL);
 		return FAILED;
+	}
 
 	wait_for_completion(&abrt_req->completion);
 
 	if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
-		dbf_tag = "okay";
+		dbf_tag = "abrt_ok";
 	else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED)
-		dbf_tag = "lte2";
+		dbf_tag = "abrt_nn";
 	else {
-		dbf_tag = "fail";
+		dbf_tag = "abrt_fa";
 		retval = FAILED;
 	}
-	zfcp_dbf_scsi_abort(dbf_tag, adapter->dbf, scpnt, abrt_req, old_reqid);
+	zfcp_dbf_scsi_abort(dbf_tag, scpnt, abrt_req);
 	zfcp_fsf_req_free(abrt_req);
 	return retval;
 }
@@ -280,7 +283,7 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
 	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
 	int ret;
 
-	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
+	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
 	zfcp_erp_wait(adapter);
 	ret = fc_block_scsi_eh(scpnt);
 	if (ret)
@@ -518,7 +521,7 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
 	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
 
 	if (port) {
-		zfcp_erp_port_forced_reopen(port, 0, "sctrpi1", NULL);
+		zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
 		put_device(&port->dev);
 	}
 }
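Taken together, the new allow_lun_scan module parameter and the NPIV check in slave_alloc above decide whether a LUN without a user-configured zfcp_unit gets attached automatically. A compressed sketch of that gating, using the names from the hunk:

	/* Attach an unconfigured LUN only when automatic LUN scanning is
	 * allowed and the adapter runs in NPIV mode; otherwise insist on a
	 * configured unit and reject the device.
	 */
	bool attach = unit || (allow_lun_scan && npiv);

	if (!attach) {
		put_device(&port->dev);
		return -ENXIO;
	}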

+ 4 - 5
drivers/s390/scsi/zfcp_sysfs.c

@@ -105,8 +105,7 @@ static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
 		return -EINVAL;
 
 	zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING);
-	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2",
-			     NULL);
+	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2");
 	zfcp_erp_wait(port->adapter);
 
 	return count;
@@ -148,7 +147,7 @@ static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev,
 	if (sdev) {
 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
-				    "syufai2", NULL);
+				    "syufai2");
 		zfcp_erp_wait(unit->port->adapter);
 	} else
 		zfcp_unit_scsi_scan(unit);
@@ -198,7 +197,7 @@ static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
 
 	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
-				"syafai2", NULL);
+				"syafai2");
 	zfcp_erp_wait(adapter);
 out:
 	zfcp_ccw_adapter_put(adapter);
@@ -256,7 +255,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
 
 	put_device(&port->dev);
 
-	zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL);
+	zfcp_erp_port_shutdown(port, 0, "syprs_1");
 	zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
  out:
 	zfcp_ccw_adapter_put(adapter);

+ 1 - 2
drivers/scsi/arcmsr/arcmsr_hba.c

@@ -1171,9 +1171,8 @@ static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
 	arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
 	if ( arccdbsize > 256)
 		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
-	if (pcmd->cmnd[0]|WRITE_6 || pcmd->cmnd[0]|WRITE_10 || pcmd->cmnd[0]|WRITE_12 ){
+	if (pcmd->sc_data_direction == DMA_TO_DEVICE)
 		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
-	}
 	ccb->arc_cdb_size = arccdbsize;
 	return SUCCESS;
 }
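The replaced arcmsr check OR'ed the first CDB byte with the WRITE_6/WRITE_10/WRITE_12 opcode values; since WRITE_6 is 0x0a, that expression is non-zero for every opcode, so essentially every command was flagged as a write. Testing the DMA data direction instead marks exactly the commands that move data to the device, including write opcodes the old list never covered. A minimal sketch of the corrected test, assuming only the fields visible in the hunk:

	/* The direction of the data transfer, not the opcode byte, decides
	 * whether the controller may write to the device.
	 */
	if (pcmd->sc_data_direction == DMA_TO_DEVICE)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;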

+ 3 - 2
drivers/scsi/be2iscsi/be_main.c

@@ -3785,7 +3785,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
 	dma_addr_t paddr;
 
 	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
-					  GFP_KERNEL, &paddr);
+					  GFP_ATOMIC, &paddr);
 	if (!io_task->cmd_bhs)
 		return -ENOMEM;
 	io_task->bhs_pa.u.a64.address = paddr;
@@ -3914,7 +3914,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
 			io_task->psgl_handle = NULL;
 		}
 	} else {
-		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
+		if (task->hdr &&
+		   ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN))
 			return;
 		if (io_task->psgl_handle) {
 			spin_lock(&phba->mgmt_sgl_lock);

+ 1 - 3
drivers/scsi/bfa/Makefile

@@ -3,6 +3,4 @@ obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
 bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o
 bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o
 bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o
-bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_drv.o bfa_svc.o
-
-ccflags-y := -DBFA_PERF_BUILD
+bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_svc.o

+ 3 - 33
drivers/scsi/bfa/bfa.h

@@ -17,7 +17,7 @@
 #ifndef __BFA_H__
 #define __BFA_H__
 
-#include "bfa_os_inc.h"
+#include "bfad_drv.h"
 #include "bfa_cs.h"
 #include "bfa_plog.h"
 #include "bfa_defs_svc.h"
@@ -33,7 +33,6 @@ typedef void    (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
  * Interrupt message handlers
  */
 void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
-void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
 
 /*
  * Request and response queue related defines
@@ -121,8 +120,8 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
 									\
 		struct list_head *waitq = bfa_reqq(__bfa, __reqq);      \
 									\
-		bfa_assert(((__reqq) < BFI_IOC_MAX_CQS));      \
-		bfa_assert((__wqe)->qresume && (__wqe)->cbarg);      \
+		WARN_ON(((__reqq) >= BFI_IOC_MAX_CQS));			\
+		WARN_ON(!((__wqe)->qresume && (__wqe)->cbarg));		\
 									\
 		list_add_tail(&(__wqe)->qe, waitq);      \
 	} while (0)
@@ -297,7 +296,6 @@ void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
 		      struct bfa_iocfc_cfg_s *cfg,
 		      struct bfa_meminfo_s *meminfo,
 		      struct bfa_pcidev_s *pcidev);
-void bfa_iocfc_detach(struct bfa_s *bfa);
 void bfa_iocfc_init(struct bfa_s *bfa);
 void bfa_iocfc_start(struct bfa_s *bfa);
 void bfa_iocfc_stop(struct bfa_s *bfa);
@@ -333,12 +331,9 @@ void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
 			   u32 *maxvec);
 void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
 				 u32 *end);
-void bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi);
 void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
 wwn_t bfa_iocfc_get_pwwn(struct bfa_s *bfa);
 wwn_t bfa_iocfc_get_nwwn(struct bfa_s *bfa);
-void bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa,
-				struct bfa_boot_pbc_s *pbcfg);
 int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
 				struct bfi_pbc_vport_s *pbc_vport);
 
@@ -386,19 +381,11 @@ void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg,
 void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 		struct bfa_meminfo_s *meminfo,
 		struct bfa_pcidev_s *pcidev);
-void bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod);
-void bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog);
 void bfa_detach(struct bfa_s *bfa);
-void bfa_init(struct bfa_s *bfa);
-void bfa_start(struct bfa_s *bfa);
-void bfa_stop(struct bfa_s *bfa);
-void bfa_attach_fcs(struct bfa_s *bfa);
 void bfa_cb_init(void *bfad, bfa_status_t status);
 void bfa_cb_updateq(void *bfad, bfa_status_t status);
 
 bfa_boolean_t bfa_intx(struct bfa_s *bfa);
-void bfa_intx_disable(struct bfa_s *bfa);
-void bfa_intx_enable(struct bfa_s *bfa);
 void bfa_isr_enable(struct bfa_s *bfa);
 void bfa_isr_disable(struct bfa_s *bfa);
 
@@ -408,31 +395,14 @@ void bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q);
 
 typedef void (*bfa_cb_ioc_t) (void *cbarg, enum bfa_status status);
 void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr);
-void bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr);
 
-void bfa_adapter_get_attr(struct bfa_s *bfa,
-			  struct bfa_adapter_attr_s *ad_attr);
-u64 bfa_adapter_get_id(struct bfa_s *bfa);
 
 bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
 				   struct bfa_iocfc_intr_attr_s *attr);
 
 void bfa_iocfc_enable(struct bfa_s *bfa);
 void bfa_iocfc_disable(struct bfa_s *bfa);
-void bfa_chip_reset(struct bfa_s *bfa);
-void bfa_timer_tick(struct bfa_s *bfa);
 #define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout)		\
 	bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
 
-/*
- * BFA debug API functions
- */
-bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen);
-bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen);
-bfa_status_t bfa_debug_fwcore(struct bfa_s *bfa, void *buf,
-			      u32 *offset, int *buflen);
-void bfa_debug_fwsave_clear(struct bfa_s *bfa);
-bfa_status_t bfa_fw_stats_get(struct bfa_s *bfa, void *data);
-bfa_status_t bfa_fw_stats_clear(struct bfa_s *bfa);
-
 #endif /* __BFA_H__ */

+ 0 - 169
drivers/scsi/bfa/bfa_cb_ioim.h

@@ -1,169 +0,0 @@
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFA_HCB_IOIM_H__
-#define __BFA_HCB_IOIM_H__
-
-#include "bfa_os_inc.h"
-/*
- * task attribute values in FCP-2 FCP_CMND IU
- */
-#define SIMPLE_Q    0
-#define HEAD_OF_Q   1
-#define ORDERED_Q   2
-#define ACA_Q	    4
-#define UNTAGGED    5
-
-static inline lun_t
-bfad_int_to_lun(u32 luno)
-{
-	union {
-		u16	scsi_lun[4];
-		lun_t		bfa_lun;
-	} lun;
-
-	lun.bfa_lun     = 0;
-	lun.scsi_lun[0] = cpu_to_be16(luno);
-
-	return lun.bfa_lun;
-}
-
-/*
- * Get LUN for the I/O request
- */
-#define bfa_cb_ioim_get_lun(__dio)	\
-	bfad_int_to_lun(((struct scsi_cmnd *)__dio)->device->lun)
-
-/*
- * Get CDB for the I/O request
- */
-static inline u8 *
-bfa_cb_ioim_get_cdb(struct bfad_ioim_s *dio)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
-
-	return (u8 *) cmnd->cmnd;
-}
-
-/*
- * Get I/O direction (read/write) for the I/O request
- */
-static inline enum fcp_iodir
-bfa_cb_ioim_get_iodir(struct bfad_ioim_s *dio)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
-	enum dma_data_direction dmadir;
-
-	dmadir = cmnd->sc_data_direction;
-	if (dmadir == DMA_TO_DEVICE)
-		return FCP_IODIR_WRITE;
-	else if (dmadir == DMA_FROM_DEVICE)
-		return FCP_IODIR_READ;
-	else
-		return FCP_IODIR_NONE;
-}
-
-/*
- * Get IO size in bytes for the I/O request
- */
-static inline u32
-bfa_cb_ioim_get_size(struct bfad_ioim_s *dio)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
-
-	return scsi_bufflen(cmnd);
-}
-
-/*
- * Get timeout for the I/O request
- */
-static inline u8
-bfa_cb_ioim_get_timeout(struct bfad_ioim_s *dio)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
-	/*
-	 * TBD: need a timeout for scsi passthru
-	 */
-	if (cmnd->device->host == NULL)
-		return 4;
-
-	return 0;
-}
-
-/*
- * Get Command Reference Number for the I/O request. 0 if none.
- */
-static inline u8
-bfa_cb_ioim_get_crn(struct bfad_ioim_s *dio)
-{
-	return 0;
-}
-
-/*
- * Get SAM-3 priority for the I/O request. 0 is default.
- */
-static inline u8
-bfa_cb_ioim_get_priority(struct bfad_ioim_s *dio)
-{
-	return 0;
-}
-
-/*
- * Get task attributes for the I/O request. Default is FCP_TASK_ATTR_SIMPLE(0).
- */
-static inline u8
-bfa_cb_ioim_get_taskattr(struct bfad_ioim_s *dio)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
-	u8	task_attr = UNTAGGED;
-
-	if (cmnd->device->tagged_supported) {
-		switch (cmnd->tag) {
-		case HEAD_OF_QUEUE_TAG:
-			task_attr = HEAD_OF_Q;
-			break;
-		case ORDERED_QUEUE_TAG:
-			task_attr = ORDERED_Q;
-			break;
-		default:
-			task_attr = SIMPLE_Q;
-			break;
-		}
-	}
-
-	return task_attr;
-}
-
-/*
- * Get CDB length in bytes for the I/O request. Default is FCP_CMND_CDB_LEN(16).
- */
-static inline u8
-bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
-
-	return cmnd->cmd_len;
-}
-
-/*
- * Assign queue to be used for the I/O request. This value depends on whether
- * the driver wants to use the queues via any specific algorithm. Currently,
- * this is not supported.
- */
-#define bfa_cb_ioim_get_reqq(__dio) BFA_FALSE
-
-#endif /* __BFA_HCB_IOIM_H__ */

+ 104 - 269
drivers/scsi/bfa/bfa_core.c

@@ -15,12 +15,99 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_modules.h"
 #include "bfi_ctreg.h"
-#include "bfad_drv.h"
 
 BFA_TRC_FILE(HAL, CORE);
 
+/*
+ * BFA module list terminated by NULL
+ */
+static struct bfa_module_s *hal_mods[] = {
+	&hal_mod_sgpg,
+	&hal_mod_fcport,
+	&hal_mod_fcxp,
+	&hal_mod_lps,
+	&hal_mod_uf,
+	&hal_mod_rport,
+	&hal_mod_fcpim,
+	NULL
+};
+
+/*
+ * Message handlers for various modules.
+ */
+static bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
+	bfa_isr_unhandled,	/* NONE */
+	bfa_isr_unhandled,	/* BFI_MC_IOC */
+	bfa_isr_unhandled,	/* BFI_MC_DIAG */
+	bfa_isr_unhandled,	/* BFI_MC_FLASH */
+	bfa_isr_unhandled,	/* BFI_MC_CEE */
+	bfa_fcport_isr,		/* BFI_MC_FCPORT */
+	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
+	bfa_isr_unhandled,	/* BFI_MC_LL */
+	bfa_uf_isr,		/* BFI_MC_UF */
+	bfa_fcxp_isr,		/* BFI_MC_FCXP */
+	bfa_lps_isr,		/* BFI_MC_LPS */
+	bfa_rport_isr,		/* BFI_MC_RPORT */
+	bfa_itnim_isr,		/* BFI_MC_ITNIM */
+	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
+	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
+	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
+	bfa_ioim_isr,		/* BFI_MC_IOIM */
+	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
+	bfa_tskim_isr,		/* BFI_MC_TSKIM */
+	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
+	bfa_isr_unhandled,	/* BFI_MC_IPFC */
+	bfa_isr_unhandled,	/* BFI_MC_PORT */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+};
+/*
+ * Message handlers for mailbox command classes
+ */
+static bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
+	NULL,
+	NULL,		/* BFI_MC_IOC   */
+	NULL,		/* BFI_MC_DIAG  */
+	NULL,		/* BFI_MC_FLASH */
+	NULL,		/* BFI_MC_CEE   */
+	NULL,		/* BFI_MC_PORT  */
+	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
+	NULL,
+};
+
+
+
+static void
+bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
+{
+	struct bfa_port_s	*port = &bfa->modules.port;
+	u32			dm_len;
+	u8			*dm_kva;
+	u64			dm_pa;
+
+	dm_len = bfa_port_meminfo();
+	dm_kva = bfa_meminfo_dma_virt(mi);
+	dm_pa  = bfa_meminfo_dma_phys(mi);
+
+	memset(port, 0, sizeof(struct bfa_port_s));
+	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
+	bfa_port_mem_claim(port, dm_kva, dm_pa);
+
+	bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
+	bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
+}
+
 /*
  * BFA IOC FC related definitions
  */
@@ -66,18 +153,6 @@ static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
 /*
  * BFA Interrupt handling functions
  */
-static void
-bfa_msix_errint(struct bfa_s *bfa, u32 intr)
-{
-	bfa_ioc_error_isr(&bfa->ioc);
-}
-
-static void
-bfa_msix_lpu(struct bfa_s *bfa)
-{
-	bfa_ioc_mbox_isr(&bfa->ioc);
-}
-
 static void
 bfa_reqq_resume(struct bfa_s *bfa, int qid)
 {
@@ -104,9 +179,6 @@ bfa_msix_all(struct bfa_s *bfa, int vec)
 	bfa_intx(bfa);
 }
 
-/*
- *  hal_intr_api
- */
 bfa_boolean_t
 bfa_intx(struct bfa_s *bfa)
 {
@@ -150,18 +222,6 @@ bfa_intx(struct bfa_s *bfa)
 	return BFA_TRUE;
 }
 
-void
-bfa_intx_enable(struct bfa_s *bfa)
-{
-	writel(bfa->iocfc.intr_mask, bfa->iocfc.bfa_regs.intr_mask);
-}
-
-void
-bfa_intx_disable(struct bfa_s *bfa)
-{
-	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
-}
-
 void
 bfa_isr_enable(struct bfa_s *bfa)
 {
@@ -225,7 +285,7 @@ bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
 	bfa_trc(bfa, m->mhdr.msg_class);
 	bfa_trc(bfa, m->mhdr.msg_id);
 	bfa_trc(bfa, m->mhdr.mtag.i2htok);
-	bfa_assert(0);
+	WARN_ON(1);
 	bfa_trc_stop(bfa->trcmod);
 }
 
@@ -236,8 +296,6 @@ bfa_msix_rspq(struct bfa_s *bfa, int qid)
 	u32 pi, ci;
 	struct list_head *waitq;
 
-	bfa_trc_fp(bfa, qid);
-
 	qid &= (BFI_IOC_MAX_CQS - 1);
 
 	bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
@@ -245,16 +303,10 @@ bfa_msix_rspq(struct bfa_s *bfa, int qid)
 	ci = bfa_rspq_ci(bfa, qid);
 	pi = bfa_rspq_pi(bfa, qid);
 
-	bfa_trc_fp(bfa, ci);
-	bfa_trc_fp(bfa, pi);
-
 	if (bfa->rme_process) {
 		while (ci != pi) {
 			m = bfa_rspq_elem(bfa, qid, ci);
-			bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);
-
 			bfa_isrs[m->mhdr.msg_class] (bfa, m);
-
 			CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
 		}
 	}
@@ -282,7 +334,7 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
 	intr = readl(bfa->iocfc.bfa_regs.intr_status);
 
 	if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
-		bfa_msix_lpu(bfa);
+		bfa_ioc_mbox_isr(&bfa->ioc);
 
 	intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
 		__HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
@@ -313,22 +365,16 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
 		}
 
 		writel(intr, bfa->iocfc.bfa_regs.intr_status);
-		bfa_msix_errint(bfa, intr);
+		bfa_ioc_error_isr(&bfa->ioc);
 	}
 }
 
-void
-bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
-{
-	bfa_isrs[mc] = isr_func;
-}
-
 /*
  * BFA IOC FC related functions
  */
 
 /*
- *  hal_ioc_pvt BFA IOC private functions
+ *  BFA IOC private functions
  */
 
 static void
@@ -379,7 +425,7 @@ bfa_iocfc_send_cfg(void *bfa_arg)
 	struct bfa_iocfc_cfg_s	*cfg = &iocfc->cfg;
 	int		i;
 
-	bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
+	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
 	bfa_trc(bfa, cfg->fwcfg.num_cqs);
 
 	bfa_iocfc_reset_queues(bfa);
@@ -488,8 +534,8 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
 	 * First allocate dma memory for IOC.
 	 */
 	bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
-	dm_kva += bfa_ioc_meminfo();
-	dm_pa  += bfa_ioc_meminfo();
+	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
+	dm_pa  += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
 
 	/*
 	 * Claim DMA-able memory for the request/response queues and for shadow
@@ -552,7 +598,7 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
 	bfa_meminfo_dma_virt(meminfo) = dm_kva;
 	bfa_meminfo_dma_phys(meminfo) = dm_pa;
 
-	dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover);
+	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
 	if (dbgsz > 0) {
 		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
 		bfa_meminfo_kva(meminfo) += dbgsz;
@@ -699,7 +745,7 @@ bfa_iocfc_disable_cbfn(void *bfa_arg)
 		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
 			     bfa);
 	else {
-		bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE);
+		WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
 		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
 			     bfa);
 	}
@@ -735,9 +781,6 @@ bfa_iocfc_reset_cbfn(void *bfa_arg)
 	bfa_isr_enable(bfa);
 }
 
-/*
- *  hal_ioc_public
- */
 
 /*
  * Query IOC memory requirement information.
@@ -747,11 +790,11 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
 		  u32 *dm_len)
 {
 	/* dma memory for IOC */
-	*dm_len += bfa_ioc_meminfo();
+	*dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
 
 	bfa_iocfc_fw_cfg_sz(cfg, dm_len);
 	bfa_iocfc_cqs_sz(cfg, dm_len);
-	*km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
+	*km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
 }
 
 /*
@@ -783,22 +826,13 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 
 	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
 	bfa_iocfc_mem_claim(bfa, cfg, meminfo);
-	bfa_timer_init(&bfa->timer_mod);
+	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);
 
 	INIT_LIST_HEAD(&bfa->comp_q);
 	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
 		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
 }
 
-/*
- * Query IOC memory requirement information.
- */
-void
-bfa_iocfc_detach(struct bfa_s *bfa)
-{
-	bfa_ioc_detach(&bfa->ioc);
-}
-
 /*
  * Query IOC memory requirement information.
  */
@@ -852,22 +886,10 @@ bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
 		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
 		break;
 	default:
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 }
 
-void
-bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr)
-{
-	bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr);
-}
-
-u64
-bfa_adapter_get_id(struct bfa_s *bfa)
-{
-	return bfa_ioc_get_adid(&bfa->ioc);
-}
-
 void
 bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
 {
@@ -976,18 +998,6 @@ bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
 	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
 }
 
-void
-bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg)
-{
-	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
-	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
-
-	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
-	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
-	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
-	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
-}
-
 int
 bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
 {
@@ -998,9 +1008,6 @@ bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
 	return cfgrsp->pbc_cfg.nvports;
 }
 
-/*
- *  hal_api
- */
 
 /*
  * Use this function query the memory requirement of the BFA library.
@@ -1036,7 +1043,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
 	int		i;
 	u32	km_len = 0, dm_len = 0;
 
-	bfa_assert((cfg != NULL) && (meminfo != NULL));
+	WARN_ON((cfg == NULL) || (meminfo == NULL));
 
 	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
 	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
@@ -1090,7 +1097,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 
 	bfa->fcs = BFA_FALSE;
 
-	bfa_assert((cfg != NULL) && (meminfo != NULL));
+	WARN_ON((cfg == NULL) || (meminfo == NULL));
 
 	/*
 	 * initialize all memory pointers for iterative allocation
@@ -1129,79 +1136,7 @@ bfa_detach(struct bfa_s *bfa)
 
 	for (i = 0; hal_mods[i]; i++)
 		hal_mods[i]->detach(bfa);
-
-	bfa_iocfc_detach(bfa);
-}
-
-
-void
-bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod)
-{
-	bfa->trcmod = trcmod;
-}
-
-void
-bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog)
-{
-	bfa->plog = plog;
-}
-
-/*
- * Initialize IOC.
- *
- * This function will return immediately, when the IOC initialization is
- * completed, the bfa_cb_init() will be called.
- *
- * @param[in]	bfa	instance
- *
- * @return void
- *
- * Special Considerations:
- *
- * @note
- * When this function returns, the driver should register the interrupt service
- * routine(s) and enable the device interrupts. If this is not done,
- * bfa_cb_init() will never get called
- */
-void
-bfa_init(struct bfa_s *bfa)
-{
-	bfa_iocfc_init(bfa);
-}
-
-/*
- * Use this function initiate the IOC configuration setup. This function
- * will return immediately.
- *
- * @param[in]	bfa	instance
- *
- * @return None
- */
-void
-bfa_start(struct bfa_s *bfa)
-{
-	bfa_iocfc_start(bfa);
-}
-
-/*
- * Use this function quiese the IOC. This function will return immediately,
- * when the IOC is actually stopped, the bfad->comp will be set.
- *
- * @param[in]bfa - pointer to bfa_t.
- *
- * @return None
- *
- * Special Considerations:
- * bfad->comp can be set before or after bfa_stop() returns.
- *
- * @note
- * In case of any failure, we could handle it automatically by doing a
- * reset and then succeed the bfa_stop() call.
- */
-void
-bfa_stop(struct bfa_s *bfa)
-{
-	bfa_iocfc_stop(bfa);
+	bfa_ioc_detach(&bfa->ioc);
 }
 
 void
@@ -1237,20 +1172,6 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
 	}
 }
 
-void
-bfa_attach_fcs(struct bfa_s *bfa)
-{
-	bfa->fcs = BFA_TRUE;
-}
-
-/*
- * Periodic timer heart beat from driver
- */
-void
-bfa_timer_tick(struct bfa_s *bfa)
-{
-	bfa_timer_beat(&bfa->timer_mod);
-}
 
 /*
  * Return the list of PCI vendor/device id lists supported by this
@@ -1321,89 +1242,3 @@ bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
 	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
 	cfg->drvcfg.min_cfg	   = BFA_TRUE;
 }
-
-void
-bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr)
-{
-	bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
-}
-
-/*
- * Retrieve firmware trace information on IOC failure.
- */
-bfa_status_t
-bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen)
-{
-	return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen);
-}
-
-/*
- * Clear the saved firmware trace information of an IOC.
- */
-void
-bfa_debug_fwsave_clear(struct bfa_s *bfa)
-{
-	bfa_ioc_debug_fwsave_clear(&bfa->ioc);
-}
-
-/*
- * Fetch firmware trace data.
- *
- * @param[in]		bfa			BFA instance
- * @param[out]		trcdata		Firmware trace buffer
- * @param[in,out]	trclen		Firmware trace buffer len
- *
- * @retval BFA_STATUS_OK			Firmware trace is fetched.
- * @retval BFA_STATUS_INPROGRESS	Firmware trace fetch is in progress.
- */
-bfa_status_t
-bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen)
-{
-	return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
-}
-
-/*
- * Dump firmware memory.
- *
- * @param[in]		bfa		BFA instance
- * @param[out]		buf		buffer for dump
- * @param[in,out]	offset		smem offset to start read
- * @param[in,out]	buflen		length of buffer
- *
- * @retval BFA_STATUS_OK		Firmware memory is dumped.
- * @retval BFA_STATUS_INPROGRESS	Firmware memory dump is in progress.
- */
-bfa_status_t
-bfa_debug_fwcore(struct bfa_s *bfa, void *buf, u32 *offset, int *buflen)
-{
-	return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen);
-}
-/*
- * Reset hw semaphore & usage cnt regs and initialize.
- */
-void
-bfa_chip_reset(struct bfa_s *bfa)
-{
-	bfa_ioc_ownership_reset(&bfa->ioc);
-	bfa_ioc_pll_init(&bfa->ioc);
-}
-
-/*
- * Fetch firmware statistics data.
- *
- * @param[in]		bfa		BFA instance
- * @param[out]		data		Firmware stats buffer
- *
- * @retval BFA_STATUS_OK		Firmware trace is fetched.
- */
-bfa_status_t
-bfa_fw_stats_get(struct bfa_s *bfa, void *data)
-{
-	return bfa_ioc_fw_stats_get(&bfa->ioc, data);
-}
-
-bfa_status_t
-bfa_fw_stats_clear(struct bfa_s *bfa)
-{
-	return bfa_ioc_fw_stats_clear(&bfa->ioc);
-}

+ 49 - 47
drivers/scsi/bfa/bfa_cs.h

@@ -22,7 +22,7 @@
 #ifndef __BFA_CS_H__
 #define __BFA_CS_H__
 
-#include "bfa_os_inc.h"
+#include "bfad_drv.h"
 
 /*
  * BFA TRC
@@ -32,12 +32,20 @@
 #define BFA_TRC_MAX	(4 * 1024)
 #endif
 
+#define BFA_TRC_TS(_trcm)                               \
+	({                                              \
+		struct timeval tv;                      \
+							\
+		do_gettimeofday(&tv);                   \
+		(tv.tv_sec*1000000+tv.tv_usec);         \
+	})
+
 #ifndef BFA_TRC_TS
 #define BFA_TRC_TS(_trcm)	((_trcm)->ticks++)
 #endif
 
 struct bfa_trc_s {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 	u16	fileno;
 	u16	line;
 #else
@@ -99,13 +107,6 @@ bfa_trc_stop(struct bfa_trc_mod_s *trcm)
 	trcm->stopped = 1;
 }
 
-#ifdef FWTRC
-extern void dc_flush(void *data);
-#else
-#define dc_flush(data)
-#endif
-
-
 static inline void
 __bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
 {
@@ -119,12 +120,10 @@ __bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
 	trc->line = (u16) line;
 	trc->data.u64 = data;
 	trc->timestamp = BFA_TRC_TS(trcm);
-	dc_flush(trc);
 
 	trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
 	if (trcm->tail == trcm->head)
 		trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
-	dc_flush(trcm);
 }
 
 
@@ -141,42 +140,18 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
 	trc->line = (u16) line;
 	trc->data.u32.u32 = data;
 	trc->timestamp = BFA_TRC_TS(trcm);
-	dc_flush(trc);
 
 	trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
 	if (trcm->tail == trcm->head)
 		trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
-	dc_flush(trcm);
 }
 
-#ifndef BFA_PERF_BUILD
-#define bfa_trc_fp(_trcp, _data)	bfa_trc(_trcp, _data)
-#else
-#define bfa_trc_fp(_trcp, _data)
-#endif
-
-/*
- * @ BFA LOG interfaces
- */
-#define bfa_assert(__cond)	do {					\
-	if (!(__cond)) {						\
-		printk(KERN_ERR "assert(%s) failed at %s:%d\n",         \
-		#__cond, __FILE__, __LINE__);				\
-	}								\
-} while (0)
-
 #define bfa_sm_fault(__mod, __event)	do {				\
 	bfa_trc(__mod, (((u32)0xDEAD << 16) | __event));		\
 	printk(KERN_ERR	"Assertion failure: %s:%d: %d",			\
 		__FILE__, __LINE__, (__event));				\
 } while (0)
 
-#ifndef BFA_PERF_BUILD
-#define bfa_assert_fp(__cond)	bfa_assert(__cond)
-#else
-#define bfa_assert_fp(__cond)
-#endif
-
 /* BFA queue definitions */
 #define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
 #define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
@@ -199,7 +174,6 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
 		bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) =	\
 				(struct list_head *) (_q);		\
 		bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe));\
-		BFA_Q_DBG_INIT(*((struct list_head **) _qe));		\
 	} else {							\
 		*((struct list_head **) (_qe)) = (struct list_head *) NULL;\
 	}								\
@@ -214,7 +188,6 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
 		bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) =	\
 			(struct list_head *) (_q);			\
 		bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
-		BFA_Q_DBG_INIT(*((struct list_head **) _qe));		\
 	} else {							\
 		*((struct list_head **) (_qe)) = (struct list_head *) NULL;\
 	}								\
@@ -236,16 +209,6 @@ bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
 	return 0;
 }
 
-/*
- * #ifdef BFA_DEBUG (Using bfa_assert to check for debug_build is not
- * consistent across modules)
- */
-#ifndef BFA_PERF_BUILD
-#define BFA_Q_DBG_INIT(_qe) bfa_q_qe_init(_qe)
-#else
-#define BFA_Q_DBG_INIT(_qe)
-#endif
-
 #define bfa_q_is_on_q(_q, _qe)      \
 	bfa_q_is_on_q_func(_q, (struct list_head *)(_qe))
 
@@ -361,4 +324,43 @@ bfa_wc_wait(struct bfa_wc_s *wc)
 	bfa_wc_down(wc);
 }
 
+static inline void
+wwn2str(char *wwn_str, u64 wwn)
+{
+	union {
+		u64 wwn;
+		u8 byte[8];
+	} w;
+
+	w.wwn = wwn;
+	sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", w.byte[0],
+		w.byte[1], w.byte[2], w.byte[3], w.byte[4], w.byte[5],
+		w.byte[6], w.byte[7]);
+}
+
+static inline void
+fcid2str(char *fcid_str, u32 fcid)
+{
+	union {
+		u32 fcid;
+		u8 byte[4];
+	} f;
+
+	f.fcid = fcid;
+	sprintf(fcid_str, "%02x:%02x:%02x", f.byte[1], f.byte[2], f.byte[3]);
+}
+
+#define bfa_swap_3b(_x)				\
+	((((_x) & 0xff) << 16) |		\
+	((_x) & 0x00ff00) |			\
+	(((_x) & 0xff0000) >> 16))
+
+#ifndef __BIG_ENDIAN
+#define bfa_hton3b(_x)  bfa_swap_3b(_x)
+#else
+#define bfa_hton3b(_x)  (_x)
+#endif
+
+#define bfa_ntoh3b(_x)  bfa_hton3b(_x)
+
 #endif /* __BFA_CS_H__ */
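The helpers appended to bfa_cs.h above appear to take over from wrappers in the removed bfa_os_inc.h: wwn2str()/fcid2str() format identifiers from their stored bytes, and bfa_swap_3b() reverses the three low-order bytes of a 24-bit Fibre Channel address, so bfa_hton3b() swaps on little-endian hosts and is a no-op on big-endian ones. A small worked example with made-up values:

	u64 wwn = 0x1000000512345678ULL;	/* example WWN */
	u32 fcid = 0x0a1b2c;			/* example 24-bit FC address */
	char wwn_buf[24];			/* 8 hex pairs + 7 colons + NUL */

	u32 wire = bfa_hton3b(fcid);	/* 0x2c1b0a on little-endian hosts,
					 * unchanged on big-endian hosts    */
	wwn2str(wwn_buf, wwn);		/* prints the eight stored bytes of
					 * wwn as colon-separated hex pairs */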

+ 3 - 3
drivers/scsi/bfa/bfa_defs.h

@@ -19,7 +19,7 @@
 #define __BFA_DEFS_H__
 
 #include "bfa_fc.h"
-#include "bfa_os_inc.h"
+#include "bfad_drv.h"
 
 #define BFA_MFG_SERIALNUM_SIZE                  11
 #define STRSZ(_n)                               (((_n) + 4) & ~3)
@@ -446,8 +446,8 @@ enum bfa_boot_bootopt {
  * Boot lun information.
  */
 struct bfa_boot_bootlun_s {
-	wwn_t   pwwn;   /*  port wwn of target */
-	lun_t   lun;    /*  64-bit lun */
+	wwn_t   pwwn;		/*  port wwn of target */
+	struct scsi_lun   lun;  /*  64-bit lun */
 };
 #pragma pack()
 

+ 4 - 4
drivers/scsi/bfa/bfa_defs_svc.h

@@ -34,8 +34,8 @@
 struct bfa_iocfc_intr_attr_s {
 	u8		coalesce;	/*  enable/disable coalescing */
 	u8		rsvd[3];
-	u16	latency;	/*  latency in microseconds   */
-	u16	delay;		/*  delay in microseconds     */
+	__be16	latency;	/*  latency in microseconds   */
+	__be16	delay;		/*  delay in microseconds     */
 };
 
 /*
@@ -743,7 +743,7 @@ struct bfa_port_cfg_s {
 	u8	 qos_enabled;	/*  qos enabled or not		*/
 	u8	 cfg_hardalpa;	/*  is hard alpa configured	*/
 	u8	 hardalpa;	/*  configured hard alpa	*/
-	u16 maxfrsize;	/*  maximum frame size		*/
+	__be16	 maxfrsize;	/*  maximum frame size		*/
 	u8	 rx_bbcredit;	/*  receive buffer credits	*/
 	u8	 tx_bbcredit;	/*  transmit buffer credits	*/
 	u8	 ratelimit;	/*  ratelimit enabled or not	*/
@@ -843,7 +843,7 @@ struct bfa_fcport_fcf_s {
 	u8	 fka_disabled;   /*  FKA is disabled	  */
 	u8	 maxsz_verified; /*  FCoE max size verified   */
 	u8	 fc_map[3];      /*  FC map		   */
-	u16	vlan;	   /*  FCoE vlan tag/priority   */
+	__be16	 vlan;	   /*  FCoE vlan tag/priority   */
 	u32	fka_adv_per;    /*  FIP  ka advert. period   */
 	mac_t	   mac;	    /*  FCF mac		  */
 };

+ 0 - 107
drivers/scsi/bfa/bfa_drv.c

@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include "bfa_modules.h"
-
-/*
- * BFA module list terminated by NULL
- */
-struct bfa_module_s *hal_mods[] = {
-	&hal_mod_sgpg,
-	&hal_mod_fcport,
-	&hal_mod_fcxp,
-	&hal_mod_lps,
-	&hal_mod_uf,
-	&hal_mod_rport,
-	&hal_mod_fcpim,
-	NULL
-};
-
-/*
- * Message handlers for various modules.
- */
-bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
-	bfa_isr_unhandled,	/* NONE */
-	bfa_isr_unhandled,	/* BFI_MC_IOC */
-	bfa_isr_unhandled,	/* BFI_MC_DIAG */
-	bfa_isr_unhandled,	/* BFI_MC_FLASH */
-	bfa_isr_unhandled,	/* BFI_MC_CEE */
-	bfa_fcport_isr,		/* BFI_MC_FCPORT */
-	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
-	bfa_isr_unhandled,	/* BFI_MC_LL */
-	bfa_uf_isr,		/* BFI_MC_UF */
-	bfa_fcxp_isr,		/* BFI_MC_FCXP */
-	bfa_lps_isr,		/* BFI_MC_LPS */
-	bfa_rport_isr,		/* BFI_MC_RPORT */
-	bfa_itnim_isr,		/* BFI_MC_ITNIM */
-	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
-	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
-	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
-	bfa_ioim_isr,		/* BFI_MC_IOIM */
-	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
-	bfa_tskim_isr,		/* BFI_MC_TSKIM */
-	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
-	bfa_isr_unhandled,	/* BFI_MC_IPFC */
-	bfa_isr_unhandled,	/* BFI_MC_PORT */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-};
-
-
-/*
- * Message handlers for mailbox command classes
- */
-bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
-	NULL,
-	NULL,			/* BFI_MC_IOC   */
-	NULL,			/* BFI_MC_DIAG  */
-	NULL,		/* BFI_MC_FLASH */
-	NULL,			/* BFI_MC_CEE   */
-	NULL,			/* BFI_MC_PORT  */
-	bfa_iocfc_isr,		/* BFI_MC_IOCFC */
-	NULL,
-};
-
-
-
-void
-bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
-{
-	struct bfa_port_s	*port = &bfa->modules.port;
-	u32		dm_len;
-	u8			*dm_kva;
-	u64		dm_pa;
-
-	dm_len = bfa_port_meminfo();
-	dm_kva = bfa_meminfo_dma_virt(mi);
-	dm_pa  = bfa_meminfo_dma_phys(mi);
-
-	memset(port, 0, sizeof(struct bfa_port_s));
-	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
-	bfa_port_mem_claim(port, dm_kva, dm_pa);
-
-	bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
-	bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
-}

File diff suppressed because it is too large
+ 151 - 457
drivers/scsi/bfa/bfa_fc.h


+ 52 - 57
drivers/scsi/bfa/bfa_fcbuild.c

@@ -18,16 +18,16 @@
  * fcbuild.c - FC link service frame building and parsing routines
  */
 
-#include "bfa_os_inc.h"
+#include "bfad_drv.h"
 #include "bfa_fcbuild.h"
 
 /*
  * static build functions
  */
 static void     fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-				 u16 ox_id);
+				 __be16 ox_id);
 static void     fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-				 u16 ox_id);
+				 __be16 ox_id);
 static struct fchs_s fc_els_req_tmpl;
 static struct fchs_s fc_els_rsp_tmpl;
 static struct fchs_s fc_bls_req_tmpl;
@@ -48,7 +48,7 @@ fcbuild_init(void)
 	fc_els_req_tmpl.cat_info = FC_CAT_LD_REQUEST;
 	fc_els_req_tmpl.type = FC_TYPE_ELS;
 	fc_els_req_tmpl.f_ctl =
-		bfa_os_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
+		bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
 			      FCTL_SI_XFER);
 	fc_els_req_tmpl.rx_id = FC_RXID_ANY;
 
@@ -59,7 +59,7 @@ fcbuild_init(void)
 	fc_els_rsp_tmpl.cat_info = FC_CAT_LD_REPLY;
 	fc_els_rsp_tmpl.type = FC_TYPE_ELS;
 	fc_els_rsp_tmpl.f_ctl =
-		bfa_os_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
+		bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
 			      FCTL_END_SEQ | FCTL_SI_XFER);
 	fc_els_rsp_tmpl.rx_id = FC_RXID_ANY;
 
@@ -68,7 +68,7 @@ fcbuild_init(void)
 	 */
 	fc_bls_req_tmpl.routing = FC_RTG_BASIC_LINK;
 	fc_bls_req_tmpl.type = FC_TYPE_BLS;
-	fc_bls_req_tmpl.f_ctl = bfa_os_hton3b(FCTL_END_SEQ | FCTL_SI_XFER);
+	fc_bls_req_tmpl.f_ctl = bfa_hton3b(FCTL_END_SEQ | FCTL_SI_XFER);
 	fc_bls_req_tmpl.rx_id = FC_RXID_ANY;
 
 	/*
@@ -78,7 +78,7 @@ fcbuild_init(void)
 	fc_bls_rsp_tmpl.cat_info = FC_CAT_BA_ACC;
 	fc_bls_rsp_tmpl.type = FC_TYPE_BLS;
 	fc_bls_rsp_tmpl.f_ctl =
-		bfa_os_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
+		bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
 			      FCTL_END_SEQ | FCTL_SI_XFER);
 	fc_bls_rsp_tmpl.rx_id = FC_RXID_ANY;
 
@@ -129,7 +129,7 @@ fcbuild_init(void)
 	fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD;
 	fcp_fchs_tmpl.type = FC_TYPE_FCP;
 	fcp_fchs_tmpl.f_ctl =
-		bfa_os_hton3b(FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER);
+		bfa_hton3b(FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER);
 	fcp_fchs_tmpl.seq_id = 1;
 	fcp_fchs_tmpl.rx_id = FC_RXID_ANY;
 }
@@ -143,7 +143,7 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
 	fchs->cat_info = FC_CAT_UNSOLICIT_CTRL;
 	fchs->type = FC_TYPE_SERVICES;
 	fchs->f_ctl =
-		bfa_os_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
+		bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
 			      FCTL_SI_XFER);
 	fchs->rx_id = FC_RXID_ANY;
 	fchs->d_id = (d_id);
@@ -157,7 +157,7 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
 }
 
 void
-fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
+fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
 {
 	memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
 	fchs->d_id = (d_id);
@@ -166,7 +166,7 @@ fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
 }
 
 static void
-fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
+fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
 {
 	memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
 	fchs->d_id = d_id;
@@ -196,7 +196,7 @@ fc_els_rsp_parse(struct fchs_s *fchs, int len)
 }
 
 static void
-fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
+fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
 {
 	memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
 	fchs->d_id = d_id;
@@ -206,7 +206,7 @@ fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
 
 static          u16
 fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
-		 u16 ox_id, wwn_t port_name, wwn_t node_name,
+		 __be16 ox_id, wwn_t port_name, wwn_t node_name,
 		 u16 pdu_size, u8 els_code)
 {
 	struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);
@@ -232,8 +232,8 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 		u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size,
 	       u8 set_npiv, u8 set_auth, u16 local_bb_credits)
 {
-	u32        d_id = bfa_os_hton3b(FC_FABRIC_PORT);
-	u32	*vvl_info;
+	u32        d_id = bfa_hton3b(FC_FABRIC_PORT);
+	__be32	*vvl_info;
 
 	memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
 
@@ -267,7 +267,7 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 
 u16
 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
-		   u16 ox_id, wwn_t port_name, wwn_t node_name,
+		   __be16 ox_id, wwn_t port_name, wwn_t node_name,
 		   u16 pdu_size, u16 local_bb_credits)
 {
 	u32        d_id = 0;
@@ -289,7 +289,7 @@ u16
 fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 		u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size)
 {
-	u32        d_id = bfa_os_hton3b(FC_FABRIC_PORT);
+	u32        d_id = bfa_hton3b(FC_FABRIC_PORT);
 
 	memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
 
@@ -392,7 +392,7 @@ fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 
 u16
 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
-		  u16 ox_id, enum bfa_lport_role role)
+		  __be16 ox_id, enum bfa_lport_role role)
 {
 	struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
 
@@ -456,9 +456,9 @@ fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id,
 	return sizeof(struct fc_logo_s);
 }
 
-static          u16
+static u16
 fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
-		 u32 s_id, u16 ox_id, wwn_t port_name,
+		 u32 s_id, __be16 ox_id, wwn_t port_name,
 		 wwn_t node_name, u8 els_code)
 {
 	memset(adisc, '\0', sizeof(struct fc_adisc_s));
@@ -480,7 +480,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
 
 u16
 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
-		u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name)
+		u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name)
 {
 	return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name,
 				node_name, FC_ELS_ADISC);
@@ -488,7 +488,7 @@ fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
 
 u16
 fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
-		   u32 s_id, u16 ox_id, wwn_t port_name,
+		   u32 s_id, __be16 ox_id, wwn_t port_name,
 		   wwn_t node_name)
 {
 	return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name,
@@ -592,7 +592,7 @@ fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, u32 s_id,
 
 u16
 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
-		  u16 ox_id)
+		  __be16 ox_id)
 {
 	struct fc_els_cmd_s *acc = pld;
 
@@ -606,7 +606,7 @@ fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 
 u16
 fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id,
-		u32 s_id, u16 ox_id, u8 reason_code,
+		u32 s_id, __be16 ox_id, u8 reason_code,
 		u8 reason_code_expl)
 {
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
@@ -622,7 +622,7 @@ fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id,
 
 u16
 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
-		u32 s_id, u16 ox_id, u16 rx_id)
+		u32 s_id, __be16 ox_id, u16 rx_id)
 {
 	fc_bls_rsp_build(fchs, d_id, s_id, ox_id);
 
@@ -638,7 +638,7 @@ fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
 
 u16
 fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id,
-		u32 s_id, u16 ox_id)
+		u32 s_id, __be16 ox_id)
 {
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 	memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
@@ -666,7 +666,7 @@ fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code)
 
 u16
 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
-		u32 d_id, u32 s_id, u16 ox_id, int num_pages)
+		u32 d_id, u32 s_id, __be16 ox_id, int num_pages)
 {
 	int             page;
 
@@ -690,7 +690,7 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
 
 u16
 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id,
-		  u32 s_id, u16 ox_id, int num_pages)
+		  u32 s_id, __be16 ox_id, int num_pages)
 {
 	int             page;
 
@@ -728,7 +728,7 @@ fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
 
 u16
 fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id,
-		  u32 s_id, u16 ox_id, u32 data_format,
+		  u32 s_id, __be16 ox_id, u32 data_format,
 		  struct fc_rnid_common_id_data_s *common_id_data,
 		  struct fc_rnid_general_topology_data_s *gen_topo_data)
 {
@@ -770,10 +770,10 @@ u16
 fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id,
 		u32 s_id, u32 *pid_list, u16 npids)
 {
-	u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_os_hton3b(d_id));
+	u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_hton3b(d_id));
 	int i = 0;
 
-	fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0);
+	fc_els_req_build(fchs, bfa_hton3b(dctlr_id), s_id, 0);
 
 	memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));
 
@@ -788,7 +788,7 @@ fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id,
 
 u16
 fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
-		u32 d_id, u32 s_id, u16 ox_id,
+		u32 d_id, u32 s_id, __be16 ox_id,
 		  struct fc_rpsc_speed_info_s *oper_speed)
 {
 	memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
@@ -807,11 +807,6 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
 	return sizeof(struct fc_rpsc_acc_s);
 }
 
-/*
- * TBD -
- * . get rid of unnecessary memsets
- */
-
 u16
 fc_logo_rsp_parse(struct fchs_s *fchs, int len)
 {
@@ -995,7 +990,7 @@ fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
 }
 
 u16
-fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
+fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id,
 		u32 reason_code, u32 reason_expl)
 {
 	struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1);
@@ -1045,7 +1040,7 @@ fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_gidpn_req_s *gidpn = (struct fcgs_gidpn_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN);
@@ -1061,7 +1056,7 @@ fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID);
@@ -1077,7 +1072,7 @@ fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID);
@@ -1104,7 +1099,7 @@ u16
 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
 		u8 set_br_reg, u32 s_id, u16 ox_id)
 {
-	u32        d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER);
+	u32        d_id = bfa_hton3b(FC_FABRIC_CONTROLLER);
 
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 
@@ -1121,7 +1116,7 @@ u16
 fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn,
 		u32 s_id, u16 ox_id)
 {
-	u32        d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER);
+	u32        d_id = bfa_hton3b(FC_FABRIC_CONTROLLER);
 	u16        payldlen;
 
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
@@ -1143,7 +1138,7 @@ fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1);
-	u32        type_value, d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        type_value, d_id = bfa_hton3b(FC_NAME_SERVER);
 	u8         index;
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
@@ -1167,7 +1162,7 @@ fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID);
@@ -1187,7 +1182,7 @@ fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rffid_req_s *rffid = (struct fcgs_rffid_req_s *)(cthdr + 1);
-	u32         d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32         d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID);
@@ -1209,7 +1204,7 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rspnid_req_s *rspnid =
 			(struct fcgs_rspnid_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID);
@@ -1229,7 +1224,7 @@ fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type)
 
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_gidft_req_s *gidft = (struct fcgs_gidft_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 
@@ -1249,7 +1244,7 @@ fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rpnid_req_s *rpnid = (struct fcgs_rpnid_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID);
@@ -1267,7 +1262,7 @@ fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rnnid_req_s *rnnid = (struct fcgs_rnnid_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID);
@@ -1286,7 +1281,7 @@ fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rcsid_req_s *rcsid =
 			(struct fcgs_rcsid_req_s *) (cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID);
@@ -1304,7 +1299,7 @@ fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rptid_req_s *rptid = (struct fcgs_rptid_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID);
@@ -1321,7 +1316,7 @@ fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id)
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_ganxt_req_s *ganxt = (struct fcgs_ganxt_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT);
@@ -1341,7 +1336,7 @@ fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
 {
 
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
-	u32        d_id = bfa_os_hton3b(FC_MGMT_SERVER);
+	u32        d_id = bfa_hton3b(FC_MGMT_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_fdmi_cthdr_build(cthdr, s_id, cmd_code);
@@ -1356,7 +1351,7 @@ void
 fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask)
 {
 	u8         index;
-	u32       *ptr = (u32 *) bit_mask;
+	__be32       *ptr = (__be32 *) bit_mask;
 	u32        type_value;
 
 	/*
@@ -1377,7 +1372,7 @@ fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_MGMT_SERVER);
+	u32        d_id = bfa_hton3b(FC_MGMT_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD,
@@ -1397,7 +1392,7 @@ fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_MGMT_SERVER);
+	u32        d_id = bfa_hton3b(FC_MGMT_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD,

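Two mechanical changes dominate the bfa_fcbuild.c hunks: the bfa_os_hton3b()
calls become bfa_hton3b() now that the OS-abstraction header is gone, and the
ox_id parameters become __be16 so sparse can verify byte order. The helper's
job is unchanged: FC S_ID/D_ID values are 24 bits wide and big-endian on the
wire, so a little-endian host has to swap the three bytes. A rough,
functionally equivalent sketch (not the in-tree macro):

	/*
	 * Illustrative 24-bit host-to-wire conversion for a little-endian
	 * host; a big-endian host would return the value unchanged.
	 */
	static inline u32 hton3b_sketch(u32 fcid)
	{
		return ((fcid & 0x0000ff) << 16) |
		        (fcid & 0x00ff00)        |
		       ((fcid & 0xff0000) >> 16);
	}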
+ 15 - 15
drivers/scsi/bfa/bfa_fcbuild.h

@@ -21,7 +21,7 @@
 #ifndef __FCBUILD_H__
 #define __FCBUILD_H__
 
-#include "bfa_os_inc.h"
+#include "bfad_drv.h"
 #include "bfa_fc.h"
 #include "bfa_defs_fcs.h"
 
@@ -138,7 +138,7 @@ u16        fc_fdisc_build(struct fchs_s *buf, struct fc_logi_s *flogi, u32 s_id,
 			       u16 pdu_size);
 
 u16        fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
-				   u32 s_id, u16 ox_id,
+				   u32 s_id, __be16 ox_id,
 				   wwn_t port_name, wwn_t node_name,
 				   u16 pdu_size,
 				   u16 local_bb_credits);
@@ -186,7 +186,7 @@ u16        fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
 				   u16 pdu_size);
 
 u16        fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
-			u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name,
+			u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name,
 			       wwn_t node_name);
 
 enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld,
@@ -196,20 +196,20 @@ enum fc_parse_status fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len,
 				 wwn_t port_name, wwn_t node_name);
 
 u16        fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
-				   u32 d_id, u32 s_id, u16 ox_id,
+				   u32 d_id, u32 s_id, __be16 ox_id,
 				   wwn_t port_name, wwn_t node_name);
 u16        fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt,
-				u32 d_id, u32 s_id, u16 ox_id,
+				u32 d_id, u32 s_id, __be16 ox_id,
 				u8 reason_code, u8 reason_code_expl);
 u16        fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd,
-				u32 d_id, u32 s_id, u16 ox_id);
+				u32 d_id, u32 s_id, __be16 ox_id);
 u16        fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id,
 			      u32 s_id, u16 ox_id);
 
 enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len);
 
 u16        fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
-				  u32 s_id, u16 ox_id,
+				  u32 s_id, __be16 ox_id,
 				  enum bfa_lport_role role);
 
 u16        fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid,
@@ -218,7 +218,7 @@ u16        fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid,
 
 u16        fc_rnid_acc_build(struct fchs_s *fchs,
 			struct fc_rnid_acc_s *rnid_acc, u32 d_id, u32 s_id,
-			u16 ox_id, u32 data_format,
+			__be16 ox_id, u32 data_format,
 			struct fc_rnid_common_id_data_s *common_id_data,
 			struct fc_rnid_general_topology_data_s *gen_topo_data);
 
@@ -228,7 +228,7 @@ u16        fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc,
 			      u32 d_id, u32 s_id, u16 ox_id);
 u16        fc_rpsc_acc_build(struct fchs_s *fchs,
 			struct fc_rpsc_acc_s *rpsc_acc, u32 d_id, u32 s_id,
-			u16 ox_id, struct fc_rpsc_speed_info_s *oper_speed);
+			__be16 ox_id, struct fc_rpsc_speed_info_s *oper_speed);
 u16        fc_gid_ft_build(struct fchs_s *fchs, void *pld, u32 s_id,
 				u8 fc4_type);
 
@@ -251,7 +251,7 @@ u16        fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id,
 			      u32 s_id, u16 ox_id, wwn_t port_name);
 
 u16        fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
-				  u32 s_id, u16 ox_id);
+				  u32 s_id, __be16 ox_id);
 
 u16        fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
 				     u16 cmd_code);
@@ -261,7 +261,7 @@ u16	fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn);
 void		fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask);
 
 void		fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-					 u16 ox_id);
+					 __be16 ox_id);
 
 enum fc_parse_status	fc_els_rsp_parse(struct fchs_s *fchs, int len);
 
@@ -274,15 +274,15 @@ enum fc_parse_status	fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name,
 					wwn_t port_name);
 
 u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
-		u32 s_id, u16 ox_id, u16 rx_id);
+		u32 s_id, __be16 ox_id, u16 rx_id);
 
 int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code);
 
 u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
-		u32 d_id, u32 s_id, u16 ox_id, int num_pages);
+		u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
 
 u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
-		u32 d_id, u32 s_id, u16 ox_id, int num_pages);
+		u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
 
 u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len);
 
@@ -304,7 +304,7 @@ u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
 u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len);
 
 u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-		u16 ox_id, u32 reason_code, u32 reason_expl);
+		__be16 ox_id, u32 reason_code, u32 reason_expl);
 
 u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 		u32 port_id);

File diff suppressed because it is too large
+ 120 - 422
drivers/scsi/bfa/bfa_fcpim.c


+ 71 - 117
drivers/scsi/bfa/bfa_fcpim.h

@@ -41,7 +41,7 @@
 	(__itnim->ioprofile.iocomps[__index]++)
 
 #define BFA_IOIM_RETRY_TAG_OFFSET 11
-#define BFA_IOIM_RETRY_TAG_MASK 0x07ff /* 2K IOs */
+#define BFA_IOIM_IOTAG_MASK 0x07ff /* 2K IOs */
 #define BFA_IOIM_RETRY_MAX 7
 
 /* Buckets are 512 bytes to 2MB */
@@ -94,12 +94,12 @@ struct bfa_fcpim_mod_s {
 	struct list_head	ioim_resfree_q; /*  IOs waiting for f/w */
 	struct list_head	ioim_comp_q;	/*  IO global comp Q	*/
 	struct list_head	tskim_free_q;
-	u32		ios_active;	/*  current active IOs	*/
-	u32		delay_comp;
+	u32			ios_active;	/*  current active IOs	*/
+	u32			delay_comp;
 	struct bfa_fcpim_del_itn_stats_s del_itn_stats;
 	bfa_boolean_t		ioredirect;
 	bfa_boolean_t		io_profile;
-	u32		io_profile_start_time;
+	u32			io_profile_start_time;
 	bfa_fcpim_profile_t     profile_comp;
 	bfa_fcpim_profile_t     profile_start;
 };
@@ -114,25 +114,24 @@ struct bfa_ioim_s {
 	struct bfa_fcpim_mod_s	*fcpim;		/*  parent fcpim module */
 	struct bfa_itnim_s	*itnim;		/*  i-t-n nexus for this IO  */
 	struct bfad_ioim_s	*dio;		/*  driver IO handle	*/
-	u16		iotag;		/*  FWI IO tag	*/
-	u16		abort_tag;	/*  unique abort request tag */
-	u16		nsges;		/*  number of SG elements */
-	u16		nsgpgs;		/*  number of SG pages	*/
+	u16			iotag;		/*  FWI IO tag	*/
+	u16			abort_tag;	/*  unique abort request tag */
+	u16			nsges;		/*  number of SG elements */
+	u16			nsgpgs;		/*  number of SG pages	*/
 	struct bfa_sgpg_s	*sgpg;		/*  first SG page	*/
 	struct list_head	sgpg_q;		/*  allocated SG pages	*/
 	struct bfa_cb_qe_s	hcb_qe;		/*  bfa callback qelem	*/
 	bfa_cb_cbfn_t		io_cbfn;	/*  IO completion handler */
-	struct bfa_ioim_sp_s *iosp;		/*  slow-path IO handling */
-	u8		reqq;		/*  Request queue for I/O */
-	u64 start_time;			/*  IO's Profile start val */
+	struct bfa_ioim_sp_s	*iosp;		/*  slow-path IO handling */
+	u8			reqq;		/*  Request queue for I/O */
+	u64			start_time;	/*  IO's Profile start val */
 };
 
-
 struct bfa_ioim_sp_s {
 	struct bfi_msg_s	comp_rspmsg;	/*  IO comp f/w response */
 	u8			*snsinfo;	/*  sense info for this IO   */
-	struct bfa_sgpg_wqe_s sgpg_wqe;	/*  waitq elem for sgpg	*/
-	struct bfa_reqq_wait_s reqq_wait;	/*  to wait for room in reqq */
+	struct bfa_sgpg_wqe_s	sgpg_wqe;	/*  waitq elem for sgpg	*/
+	struct bfa_reqq_wait_s	reqq_wait;	/*  to wait for room in reqq */
 	bfa_boolean_t		abort_explicit;	/*  aborted by OS	*/
 	struct bfa_tskim_s	*tskim;		/*  Relevant TM cmd	*/
 };
@@ -143,35 +142,34 @@ struct bfa_ioim_sp_s {
 struct bfa_tskim_s {
 	struct list_head	qe;
 	bfa_sm_t		sm;
-	struct bfa_s	*bfa;	/*  BFA module  */
+	struct bfa_s		*bfa;	/*  BFA module  */
 	struct bfa_fcpim_mod_s  *fcpim;	/*  parent fcpim module	*/
 	struct bfa_itnim_s	*itnim;	/*  i-t-n nexus for this IO  */
-	struct bfad_tskim_s	*dtsk;   /*  driver task mgmt cmnd	*/
-	bfa_boolean_t	notify;	/*  notify itnim on TM comp  */
-	lun_t	lun;	/*  lun if applicable	*/
-	enum fcp_tm_cmnd	tm_cmnd;	/*  task management command  */
-	u16	tsk_tag;	/*  FWI IO tag	*/
-	u8	tsecs;	/*  timeout in seconds	*/
+	struct bfad_tskim_s	*dtsk;  /*  driver task mgmt cmnd	*/
+	bfa_boolean_t		notify;	/*  notify itnim on TM comp  */
+	struct scsi_lun		lun;	/*  lun if applicable	*/
+	enum fcp_tm_cmnd	tm_cmnd; /*  task management command  */
+	u16			tsk_tag; /*  FWI IO tag	*/
+	u8			tsecs;	/*  timeout in seconds	*/
 	struct bfa_reqq_wait_s  reqq_wait;   /*  to wait for room in reqq */
 	struct list_head	io_q;	/*  queue of affected IOs	*/
-	struct bfa_wc_s	wc;	/*  waiting counter	*/
+	struct bfa_wc_s		wc;	/*  waiting counter	*/
 	struct bfa_cb_qe_s	hcb_qe;	/*  bfa callback qelem	*/
 	enum bfi_tskim_status   tsk_status;  /*  TM status	*/
 };
 
-
 /*
  * BFA i-t-n (initiator mode)
  */
 struct bfa_itnim_s {
-	struct list_head	qe;		/*  queue element	*/
-	bfa_sm_t	  sm;		/*  i-t-n im BFA state machine  */
-	struct bfa_s	*bfa;		/*  bfa instance	*/
-	struct bfa_rport_s *rport;	/*  bfa rport	*/
-	void	*ditn;		/*  driver i-t-n structure	*/
+	struct list_head	qe;	/*  queue element	*/
+	bfa_sm_t		sm;	/*  i-t-n im BFA state machine  */
+	struct bfa_s		*bfa;	/*  bfa instance	*/
+	struct bfa_rport_s	*rport;	/*  bfa rport	*/
+	void			*ditn;	/*  driver i-t-n structure	*/
 	struct bfi_mhdr_s	mhdr;	/*  pre-built mhdr	*/
-	u8	msg_no;		/*  itnim/rport firmware handle */
-	u8	reqq;		/*  CQ for requests	*/
+	u8			msg_no;	/*  itnim/rport firmware handle */
+	u8			reqq;	/*  CQ for requests	*/
 	struct bfa_cb_qe_s	hcb_qe;	/*  bfa callback qelem	*/
 	struct list_head pending_q;	/*  queue of pending IO requests */
 	struct list_head io_q;		/*  queue of active IO requests */
@@ -181,19 +179,19 @@ struct bfa_itnim_s {
 	bfa_boolean_t   seq_rec;	/*  SQER supported	*/
 	bfa_boolean_t   is_online;	/*  itnim is ONLINE for IO	*/
 	bfa_boolean_t   iotov_active;	/*  IO TOV timer is active	 */
-	struct bfa_wc_s	wc;	/*  waiting counter	*/
-	struct bfa_timer_s timer;	/*  pending IO TOV		 */
+	struct bfa_wc_s	wc;		/*  waiting counter	*/
+	struct bfa_timer_s timer;	/*  pending IO TOV	 */
 	struct bfa_reqq_wait_s reqq_wait; /*  to wait for room in reqq */
 	struct bfa_fcpim_mod_s *fcpim;	/*  fcpim module	*/
 	struct bfa_itnim_iostats_s	stats;
 	struct bfa_itnim_ioprofile_s  ioprofile;
 };
 
-
 #define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
 #define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod)
+#define BFA_IOIM_TAG_2_ID(_iotag)	((_iotag) & BFA_IOIM_IOTAG_MASK)
 #define BFA_IOIM_FROM_TAG(_fcpim, _iotag)	\
-	(&fcpim->ioim_arr[(_iotag & BFA_IOIM_RETRY_TAG_MASK)])
+	(&fcpim->ioim_arr[(_iotag & BFA_IOIM_IOTAG_MASK)])
 #define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag)	\
 	(&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)])
 
@@ -201,26 +199,26 @@ struct bfa_itnim_s {
 	(_bfa->modules.fcpim_mod.io_profile_start_time)
 #define bfa_fcpim_get_io_profile(_bfa)	\
 	(_bfa->modules.fcpim_mod.io_profile)
+#define bfa_ioim_update_iotag(__ioim) do {				\
+	uint16_t k = (__ioim)->iotag >> BFA_IOIM_RETRY_TAG_OFFSET;	\
+	k++; (__ioim)->iotag &= BFA_IOIM_IOTAG_MASK;			\
+	(__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET;		\
+} while (0)
 
 static inline bfa_boolean_t
-bfa_ioim_get_iotag(struct bfa_ioim_s *ioim)
+bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
 {
-	u16 k = ioim->iotag;
-
-	k >>= BFA_IOIM_RETRY_TAG_OFFSET; k++;
-
-	if (k > BFA_IOIM_RETRY_MAX)
+	uint16_t k = ioim->iotag >> BFA_IOIM_RETRY_TAG_OFFSET;
+	if (k < BFA_IOIM_RETRY_MAX)
 		return BFA_FALSE;
-	ioim->iotag &= BFA_IOIM_RETRY_TAG_MASK;
-	ioim->iotag |= k<<BFA_IOIM_RETRY_TAG_OFFSET;
 	return BFA_TRUE;
 }
+
 /*
  * function prototypes
  */
 void	bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim,
 					struct bfa_meminfo_s *minfo);
-void	bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim);
 void	bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 void	bfa_ioim_good_comp_isr(struct bfa_s *bfa,
 					struct bfi_msg_s *msg);
@@ -232,7 +230,6 @@ void	bfa_ioim_tov(struct bfa_ioim_s *ioim);
 
 void	bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim,
 					struct bfa_meminfo_s *minfo);
-void	bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim);
 void	bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 void	bfa_tskim_iodone(struct bfa_tskim_s *tskim);
 void	bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
@@ -248,32 +245,14 @@ void	bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 void	bfa_itnim_iodone(struct bfa_itnim_s *itnim);
 void	bfa_itnim_tskdone(struct bfa_itnim_s *itnim);
 bfa_boolean_t   bfa_itnim_hold_io(struct bfa_itnim_s *itnim);
-void bfa_ioim_profile_comp(struct bfa_ioim_s *ioim);
-void bfa_ioim_profile_start(struct bfa_ioim_s *ioim);
-
 
 /*
  * bfa fcpim module API functions
  */
-void		bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov);
+void	bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov);
 u16	bfa_fcpim_path_tov_get(struct bfa_s *bfa);
-void		bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth);
 u16	bfa_fcpim_qdepth_get(struct bfa_s *bfa);
-bfa_status_t bfa_fcpim_get_modstats(struct bfa_s *bfa,
-	 struct bfa_itnim_iostats_s *modstats);
-bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
-		struct bfa_itnim_iostats_s *stats, u8 lp_tag);
-bfa_status_t bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa,
-	 struct bfa_fcpim_del_itn_stats_s *modstats);
-bfa_status_t bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag);
-void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
-		struct bfa_itnim_iostats_s *itnim_stats);
-bfa_status_t bfa_fcpim_clr_modstats(struct bfa_s *bfa);
-void		bfa_fcpim_set_ioredirect(struct bfa_s *bfa,
-				bfa_boolean_t state);
-void		bfa_fcpim_update_ioredirect(struct bfa_s *bfa);
-bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
-bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
+
 #define bfa_fcpim_ioredirect_enabled(__bfa)				\
 	(((struct bfa_fcpim_mod_s *)(BFA_FCPIM_MOD(__bfa)))->ioredirect)
 
@@ -291,48 +270,33 @@ bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
  * bfa itnim API functions
  */
 struct bfa_itnim_s *bfa_itnim_create(struct bfa_s *bfa,
-					struct bfa_rport_s *rport, void *itnim);
-void		bfa_itnim_delete(struct bfa_itnim_s *itnim);
-void		bfa_itnim_online(struct bfa_itnim_s *itnim,
-				 bfa_boolean_t seq_rec);
-void		bfa_itnim_offline(struct bfa_itnim_s *itnim);
-void		bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
-			struct bfa_itnim_iostats_s *stats);
-void		bfa_itnim_clear_stats(struct bfa_itnim_s *itnim);
-bfa_status_t	bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
-		struct bfa_itnim_ioprofile_s *ioprofile);
+		struct bfa_rport_s *rport, void *itnim);
+void bfa_itnim_delete(struct bfa_itnim_s *itnim);
+void bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec);
+void bfa_itnim_offline(struct bfa_itnim_s *itnim);
+void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim);
+bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
+			struct bfa_itnim_ioprofile_s *ioprofile);
+
 #define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq)
 
 /*
- *	BFA completion callback for bfa_itnim_online().
- *
- * @param[in]		itnim		FCS or driver itnim instance
- *
- * return None
+ * BFA completion callback for bfa_itnim_online().
  */
 void	bfa_cb_itnim_online(void *itnim);
 
 /*
- *	BFA completion callback for bfa_itnim_offline().
- *
- * @param[in]		itnim		FCS or driver itnim instance
- *
- * return None
+ * BFA completion callback for bfa_itnim_offline().
  */
 void	bfa_cb_itnim_offline(void *itnim);
 void	bfa_cb_itnim_tov_begin(void *itnim);
 void	bfa_cb_itnim_tov(void *itnim);
 
 /*
- *	BFA notification to FCS/driver for second level error recovery.
- *
+ * BFA notification to FCS/driver for second level error recovery.
  * At least one I/O request has timed out and the target is unresponsive to
  * repeated abort requests. Second level error recovery should be initiated
  * by starting implicit logout and recovery procedures.
- *
- * @param[in]		itnim		FCS or driver itnim instance
- *
- * return None
  */
 void	bfa_cb_itnim_sler(void *itnim);
 
@@ -349,10 +313,8 @@ void		bfa_ioim_start(struct bfa_ioim_s *ioim);
 bfa_status_t	bfa_ioim_abort(struct bfa_ioim_s *ioim);
 void		bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim,
 				      bfa_boolean_t iotov);
-
-
 /*
- *	I/O completion notification.
+ * I/O completion notification.
  *
  * @param[in]		dio			driver IO structure
  * @param[in]		io_status		IO completion status
@@ -363,39 +325,31 @@ void		bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim,
  *
  * @return None
  */
-void	bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
-				  enum bfi_ioim_status io_status,
-				  u8 scsi_status, int sns_len,
-				  u8 *sns_info, s32 residue);
+void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
+			enum bfi_ioim_status io_status,
+			u8 scsi_status, int sns_len,
+			u8 *sns_info, s32 residue);
 
 /*
- *	I/O good completion notification.
- *
- * @param[in]		dio			driver IO structure
- *
- * @return None
+ * I/O good completion notification.
  */
-void	bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
+void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
 
 /*
- *	I/O abort completion notification
- *
- * @param[in]		dio			driver IO that was aborted
- *
- * @return None
+ * I/O abort completion notification
  */
-void	bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio);
+void bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio);
 
 /*
  * bfa tskim API functions
  */
-struct bfa_tskim_s	*bfa_tskim_alloc(struct bfa_s *bfa,
-					struct bfad_tskim_s *dtsk);
-void		bfa_tskim_free(struct bfa_tskim_s *tskim);
-void		bfa_tskim_start(struct bfa_tskim_s *tskim,
-				struct bfa_itnim_s *itnim, lun_t lun,
-				enum fcp_tm_cmnd tm, u8 t_secs);
-void		bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
-				  enum bfi_tskim_status tsk_status);
+struct bfa_tskim_s *bfa_tskim_alloc(struct bfa_s *bfa,
+			struct bfad_tskim_s *dtsk);
+void bfa_tskim_free(struct bfa_tskim_s *tskim);
+void bfa_tskim_start(struct bfa_tskim_s *tskim,
+			struct bfa_itnim_s *itnim, struct scsi_lun lun,
+			enum fcp_tm_cmnd tm, u8 t_secs);
+void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
+			enum bfi_tskim_status tsk_status);
 
 #endif /* __BFA_FCPIM_H__ */

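The iotag rework above reads most easily with the bit layout in mind: the
16-bit iotag keeps the real I/O tag in its low 11 bits (BFA_IOIM_IOTAG_MASK,
hence "2K IOs") and a retry counter above BFA_IOIM_RETRY_TAG_OFFSET.
bfa_ioim_update_iotag() bumps the counter without disturbing the tag, and the
renamed bfa_ioim_maxretry_reached() only compares the counter against
BFA_IOIM_RETRY_MAX. A small self-contained sketch of the same packing, with
hypothetical helper names but the constants from the header:

	#define IOTAG_MASK	0x07ff	/* low 11 bits: the actual I/O tag */
	#define RETRY_SHIFT	11	/* retry count lives above the tag */
	#define RETRY_MAX	7

	/* Bump the retry counter packed into the upper bits of 'iotag'. */
	static inline u16 iotag_bump_retry(u16 iotag)
	{
		u16 retries = (iotag >> RETRY_SHIFT) + 1;

		return (iotag & IOTAG_MASK) | (retries << RETRY_SHIFT);
	}

	/* True once the packed retry counter has reached the limit. */
	static inline int iotag_maxretry_reached(u16 iotag)
	{
		return (iotag >> RETRY_SHIFT) >= RETRY_MAX;
	}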
+ 34 - 172
drivers/scsi/bfa/bfa_fcs.c

@@ -19,9 +19,9 @@
  *  bfa_fcs.c BFA FCS main
  */
 
+#include "bfad_drv.h"
 #include "bfa_fcs.h"
 #include "bfa_fcbuild.h"
-#include "bfad_drv.h"
 
 BFA_TRC_FILE(FCS, FCS);
 
@@ -76,7 +76,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
 	fcs->bfad = bfad;
 	fcs->min_cfg = min_cfg;
 
-	bfa_attach_fcs(bfa);
+	bfa->fcs = BFA_TRUE;
 	fcbuild_init();
 
 	for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
@@ -110,14 +110,6 @@ bfa_fcs_init(struct bfa_fcs_s *fcs)
 	}
 }
 
-/*
- * Start FCS operations.
- */
-void
-bfa_fcs_start(struct bfa_fcs_s *fcs)
-{
-	bfa_fcs_fabric_modstart(fcs);
-}
 
 /*
  *	brief
@@ -138,22 +130,6 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
 	bfa_fcs_fabric_psymb_init(&fcs->fabric);
 }
 
-/*
- *	brief
- *		FCS FDMI Driver Parameter Initialization
- *
- *	param[in]		fcs		FCS instance
- *	param[in]		fdmi_enable	TRUE/FALSE
- *
- *	return None
- */
-void
-bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable)
-{
-
-	fcs->fdmi_enabled = fdmi_enable;
-
-}
 /*
  *	brief
  *		FCS instance cleanup and exit.
@@ -184,18 +160,6 @@ bfa_fcs_exit(struct bfa_fcs_s *fcs)
 }
 
 
-void
-bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod)
-{
-	fcs->trcmod = trcmod;
-}
-
-void
-bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs)
-{
-	bfa_wc_down(&fcs->wc);
-}
-
 /*
  * Fabric module implementation.
  */
@@ -232,31 +196,6 @@ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
 					 u32 rsp_len,
 					 u32 resid_len,
 					 struct fchs_s *rspfchs);
-/*
- *  fcs_fabric_sm fabric state machine functions
- */
-
-/*
- * Fabric state machine events
- */
-enum bfa_fcs_fabric_event {
-	BFA_FCS_FABRIC_SM_CREATE	= 1,	/*  create from driver	      */
-	BFA_FCS_FABRIC_SM_DELETE	= 2,	/*  delete from driver	      */
-	BFA_FCS_FABRIC_SM_LINK_DOWN	= 3,	/*  link down from port      */
-	BFA_FCS_FABRIC_SM_LINK_UP	= 4,	/*  link up from port	      */
-	BFA_FCS_FABRIC_SM_CONT_OP	= 5,	/*  flogi/auth continue op   */
-	BFA_FCS_FABRIC_SM_RETRY_OP	= 6,	/*  flogi/auth retry op      */
-	BFA_FCS_FABRIC_SM_NO_FABRIC	= 7,	/*  from flogi/auth	      */
-	BFA_FCS_FABRIC_SM_PERF_EVFP	= 8,	/*  from flogi/auth	      */
-	BFA_FCS_FABRIC_SM_ISOLATE	= 9,	/*  from EVFP processing     */
-	BFA_FCS_FABRIC_SM_NO_TAGGING	= 10,	/*  no VFT tagging from EVFP */
-	BFA_FCS_FABRIC_SM_DELAYED	= 11,	/*  timeout delay event      */
-	BFA_FCS_FABRIC_SM_AUTH_FAILED	= 12,	/*  auth failed	      */
-	BFA_FCS_FABRIC_SM_AUTH_SUCCESS	= 13,	/*  auth successful	      */
-	BFA_FCS_FABRIC_SM_DELCOMP	= 14,	/*  all vports deleted event */
-	BFA_FCS_FABRIC_SM_LOOPBACK	= 15,	/*  Received our own FLOGI   */
-	BFA_FCS_FABRIC_SM_START		= 16,	/*  from driver	      */
-};
 
 static void	bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
 					 enum bfa_fcs_fabric_event event);
@@ -270,14 +209,8 @@ static void	bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
 					      enum bfa_fcs_fabric_event event);
 static void	bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
 				       enum bfa_fcs_fabric_event event);
-static void	bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
-					      enum bfa_fcs_fabric_event event);
-static void	bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
-					   enum bfa_fcs_fabric_event event);
 static void	bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
 					   enum bfa_fcs_fabric_event event);
-static void	bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
-					 enum bfa_fcs_fabric_event event);
 static void	bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
 				       enum bfa_fcs_fabric_event event);
 static void	bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
@@ -337,7 +270,7 @@ bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
 
 	case BFA_FCS_FABRIC_SM_DELETE:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
-		bfa_fcs_modexit_comp(fabric->fcs);
+		bfa_wc_down(&fabric->fcs->wc);
 		break;
 
 	default:
@@ -410,7 +343,7 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
 
 	case BFA_FCS_FABRIC_SM_LOOPBACK:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		bfa_fcs_fabric_set_opertype(fabric);
 		break;
 
@@ -424,12 +357,12 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
 
 	case BFA_FCS_FABRIC_SM_LINK_DOWN:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		break;
 
 	case BFA_FCS_FABRIC_SM_DELETE:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		bfa_fcs_fabric_delete(fabric);
 		break;
 
@@ -481,7 +414,7 @@ bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
 	switch (event) {
 	case BFA_FCS_FABRIC_SM_AUTH_FAILED:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		break;
 
 	case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
@@ -495,7 +428,7 @@ bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
 
 	case BFA_FCS_FABRIC_SM_LINK_DOWN:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		break;
 
 	case BFA_FCS_FABRIC_SM_DELETE:
@@ -511,7 +444,7 @@ bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
 /*
  *   Authentication failed
  */
-static void
+void
 bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
 			      enum bfa_fcs_fabric_event event)
 {
@@ -537,7 +470,7 @@ bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
 /*
  *   Port is in loopback mode.
  */
-static void
+void
 bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
 			   enum bfa_fcs_fabric_event event)
 {
@@ -573,7 +506,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
 	switch (event) {
 	case BFA_FCS_FABRIC_SM_LINK_DOWN:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		bfa_fcs_fabric_notify_offline(fabric);
 		break;
 
@@ -596,7 +529,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
 /*
  *   Fabric is online - normal operating state.
  */
-static void
+void
 bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
 			 enum bfa_fcs_fabric_event event)
 {
@@ -606,7 +539,7 @@ bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
 	switch (event) {
 	case BFA_FCS_FABRIC_SM_LINK_DOWN:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		bfa_fcs_fabric_notify_offline(fabric);
 		break;
 
@@ -617,7 +550,7 @@ bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
 
 	case BFA_FCS_FABRIC_SM_AUTH_FAILED:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		break;
 
 	case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
@@ -697,7 +630,7 @@ bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
 	switch (event) {
 	case BFA_FCS_FABRIC_SM_DELCOMP:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
-		bfa_fcs_modexit_comp(fabric->fcs);
+		bfa_wc_down(&fabric->fcs->wc);
 		break;
 
 	case BFA_FCS_FABRIC_SM_LINK_UP:
@@ -724,8 +657,8 @@ bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric)
 	struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
 
 	port_cfg->roles = BFA_LPORT_ROLE_FCP_IM;
-	port_cfg->nwwn = bfa_ioc_get_nwwn(&fabric->fcs->bfa->ioc);
-	port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc);
+	port_cfg->nwwn = fabric->fcs->bfa->ioc.attr->nwwn;
+	port_cfg->pwwn = fabric->fcs->bfa->ioc.attr->pwwn;
 }
 
 /*
@@ -813,7 +746,7 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
 		return;
 
 	case BFA_STATUS_EPROTOCOL:
-		switch (bfa_lps_get_extstatus(fabric->lps)) {
+		switch (fabric->lps->ext_status) {
 		case BFA_EPROTO_BAD_ACCEPT:
 			fabric->stats.flogi_acc_err++;
 			break;
@@ -840,26 +773,26 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
 		return;
 	}
 
-	fabric->bb_credit = bfa_lps_get_peer_bbcredit(fabric->lps);
+	fabric->bb_credit = fabric->lps->pr_bbcred;
 	bfa_trc(fabric->fcs, fabric->bb_credit);
 
-	if (!bfa_lps_is_brcd_fabric(fabric->lps))
-		fabric->fabric_name =  bfa_lps_get_peer_nwwn(fabric->lps);
+	if (!(fabric->lps->brcd_switch))
+		fabric->fabric_name =  fabric->lps->pr_nwwn;
 
 	/*
 	 * Check port type. It should be 1 = F-port.
 	 */
-	if (bfa_lps_is_fport(fabric->lps)) {
-		fabric->bport.pid = bfa_lps_get_pid(fabric->lps);
-		fabric->is_npiv = bfa_lps_is_npiv_en(fabric->lps);
-		fabric->is_auth = bfa_lps_is_authreq(fabric->lps);
+	if (fabric->lps->fport) {
+		fabric->bport.pid = fabric->lps->lp_pid;
+		fabric->is_npiv = fabric->lps->npiv_en;
+		fabric->is_auth = fabric->lps->auth_req;
 		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP);
 	} else {
 		/*
 		 * Nport-2-Nport direct attached
 		 */
 		fabric->bport.port_topo.pn2n.rem_port_wwn =
-			bfa_lps_get_peer_pwwn(fabric->lps);
+			fabric->lps->pr_pwwn;
 		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
 	}
 
@@ -987,7 +920,7 @@ bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
 	INIT_LIST_HEAD(&fabric->vport_q);
 	INIT_LIST_HEAD(&fabric->vf_q);
 	fabric->lps = bfa_lps_alloc(fcs->bfa);
-	bfa_assert(fabric->lps);
+	WARN_ON(!fabric->lps);
 
 	/*
 	 * Initialize fabric delete completion handler. Fabric deletion is
@@ -1038,31 +971,6 @@ bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs)
 	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
 }
 
-/*
- *   Suspend fabric activity as part of driver suspend.
- */
-void
-bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs)
-{
-}
-
-bfa_boolean_t
-bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric)
-{
-	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback);
-}
-
-bfa_boolean_t
-bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric)
-{
-	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_auth_failed);
-}
-
-enum bfa_port_type
-bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric)
-{
-	return fabric->oper_type;
-}
 
 /*
  *   Link up notification from BFA physical port module.
@@ -1123,40 +1031,6 @@ bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
 	bfa_wc_down(&fabric->wc);
 }
 
-/*
- *   Base port is deleted.
- */
-void
-bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric)
-{
-	bfa_wc_down(&fabric->wc);
-}
-
-
-/*
- *    Check if fabric is online.
- *
- *   param[in] fabric - Fabric instance. This can be a base fabric or vf.
- *
- *   @return  TRUE/FALSE
- */
-int
-bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric)
-{
-	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online);
-}
-
-/*
- *	brief
- *
- */
-bfa_status_t
-bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, struct bfa_fcs_s *fcs,
-		     struct bfa_lport_cfg_s *port_cfg, struct bfad_vf_s *vf_drv)
-{
-	bfa_sm_set_state(vf, bfa_fcs_fabric_sm_uninit);
-	return BFA_STATUS_OK;
-}
 
 /*
  * Lookup for a vport withing a fabric given its pwwn
@@ -1176,18 +1050,6 @@ bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
 	return NULL;
 }
 
-/*
- *    In a given fabric, return the number of lports.
- *
- *   param[in] fabric - Fabric instance. This can be a base fabric or vf.
- *
- *   @return : 1 or more.
- */
-u16
-bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric)
-{
-	return fabric->num_vports;
-}
 
 /*
  *  Get OUI of the attached switch.
@@ -1207,7 +1069,7 @@ bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric)
 	u8 *tmp;
 	u16 oui;
 
-	fab_nwwn = bfa_lps_get_peer_nwwn(fabric->lps);
+	fab_nwwn = fabric->lps->pr_nwwn;
 
 	tmp = (u8 *)&fab_nwwn;
 	oui = (tmp[3] << 8) | tmp[4];
@@ -1235,7 +1097,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
 	 * external loopback cable is in place. Our own FLOGI frames are
 	 * sometimes looped back when switch port gets temporarily bypassed.
 	 */
-	if ((pid == bfa_os_ntoh3b(FC_FABRIC_PORT)) &&
+	if ((pid == bfa_ntoh3b(FC_FABRIC_PORT)) &&
 	    (els_cmd->els_code == FC_ELS_FLOGI) &&
 	    (flogi->port_name == bfa_fcs_lport_get_pwwn(&fabric->bport))) {
 		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK);
@@ -1245,7 +1107,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
 	/*
 	 * FLOGI/EVFP exchanges should be consumed by base fabric.
 	 */
-	if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) {
+	if (fchs->d_id == bfa_hton3b(FC_FABRIC_PORT)) {
 		bfa_trc(fabric->fcs, pid);
 		bfa_fcs_fabric_process_uf(fabric, fchs, len);
 		return;
@@ -1358,13 +1220,13 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
 		return;
 
 	reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
-				    bfa_os_hton3b(FC_FABRIC_PORT),
+				    bfa_hton3b(FC_FABRIC_PORT),
 				    n2n_port->reply_oxid, pcfg->pwwn,
 				    pcfg->nwwn,
 				    bfa_fcport_get_maxfrsize(bfa),
 				    bfa_fcport_get_rx_bbcredit(bfa));
 
-	bfa_fcxp_send(fcxp, NULL, fabric->vf_id, bfa_lps_get_tag(fabric->lps),
+	bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->lp_tag,
 		      BFA_FALSE, FC_CLASS_3,
 		      reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric,
 		      FC_MAX_PDUSZ, 0);
@@ -1455,7 +1317,7 @@ bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event)
 		break;
 
 	default:
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 }
 
@@ -1502,7 +1364,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
 		 * drop frame if vfid is unknown
 		 */
 		if (!fabric) {
-			bfa_assert(0);
+			WARN_ON(1);
 			bfa_stats(fcs, uf.vfid_unknown);
 			bfa_uf_free(uf);
 			return;

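Several bfa_fcs.c hunks replace small wrappers such as bfa_lps_discard() with
a direct bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE). Both forms drive
the same function-pointer state machine used throughout BFA: the object's sm
member points at the current state handler, bfa_sm_set_state() swaps that
pointer, and bfa_sm_send_event() invokes it with an event. A toy version of
the idiom with simplified types (the driver's own bfa_sm_* macros play this
role for the real structures):

	struct toy_sm {
		void (*sm)(struct toy_sm *obj, int event);
	};

	#define toy_sm_set_state(_obj, _state)	((_obj)->sm = (_state))
	#define toy_sm_send_event(_obj, _event)	((_obj)->sm((_obj), (_event)))

	static void toy_sm_offline(struct toy_sm *obj, int event);

	static void toy_sm_online(struct toy_sm *obj, int event)
	{
		if (event == 6)		/* e.g. an "offline" event code */
			toy_sm_set_state(obj, toy_sm_offline);
	}

	static void toy_sm_offline(struct toy_sm *obj, int event)
	{
		/* ignore everything while offline */
	}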
+ 81 - 43
drivers/scsi/bfa/bfa_fcs.h

@@ -26,6 +26,22 @@
 
 #define BFA_FCS_OS_STR_LEN		64
 
+/*
+ *  lps_pvt BFA LPS private functions
+ */
+
+enum bfa_lps_event {
+	BFA_LPS_SM_LOGIN	= 1,	/* login request from user      */
+	BFA_LPS_SM_LOGOUT	= 2,	/* logout request from user     */
+	BFA_LPS_SM_FWRSP	= 3,	/* f/w response to login/logout */
+	BFA_LPS_SM_RESUME	= 4,	/* space present in reqq queue  */
+	BFA_LPS_SM_DELETE	= 5,	/* lps delete from user         */
+	BFA_LPS_SM_OFFLINE	= 6,	/* Link is offline              */
+	BFA_LPS_SM_RX_CVL	= 7,	/* Rx clear virtual link        */
+	BFA_LPS_SM_SET_N2N_PID  = 8,	/* Set assigned PID for n2n */
+};
+
+
 /*
  * !!! Only append to the enums defined here to avoid any versioning
  * !!! needed between trace utility and driver version
@@ -41,13 +57,12 @@ enum {
 struct bfa_fcs_s;
 
 #define __fcs_min_cfg(__fcs)       ((__fcs)->min_cfg)
-void bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs);
 
 #define BFA_FCS_BRCD_SWITCH_OUI  0x051e
 #define N2N_LOCAL_PID	    0x010000
 #define N2N_REMOTE_PID		0x020000
 #define	BFA_FCS_RETRY_TIMEOUT 2000
-#define BFA_FCS_PID_IS_WKA(pid)  ((bfa_os_ntoh3b(pid) > 0xFFF000) ?  1 : 0)
+#define BFA_FCS_PID_IS_WKA(pid)  ((bfa_ntoh3b(pid) > 0xFFF000) ?  1 : 0)
 
 
 
@@ -109,7 +124,7 @@ struct bfa_fcs_lport_loop_s {
 
 struct bfa_fcs_lport_n2n_s {
 	u32        rsvd;
-	u16        reply_oxid;	/*  ox_id from the req flogi to be
+	__be16     reply_oxid;	/*  ox_id from the req flogi to be
 					 *used in flogi acc */
 	wwn_t           rem_port_wwn;	/*  Attached port's wwn */
 };
@@ -316,8 +331,6 @@ void            bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port,
 				       struct bfa_fcs_rport_s *rport);
 void            bfa_fcs_lport_del_rport(struct bfa_fcs_lport_s *port,
 				       struct bfa_fcs_rport_s *rport);
-void bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs);
 void            bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *vport);
 void            bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *vport);
 void            bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *vport);
@@ -359,9 +372,6 @@ bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport);
 bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport);
 void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
 			    struct bfa_vport_attr_s *vport_attr);
-void bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
-			     struct bfa_vport_stats_s *vport_stats);
-void bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport);
 struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs,
 					     u16 vf_id, wwn_t vpwwn);
 void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport);
@@ -406,7 +416,7 @@ struct bfa_fcs_rport_s {
 	struct bfad_rport_s	*rp_drv;	/*  driver peer instance */
 	u32	pid;	/*  port ID of rport */
 	u16	maxfrsize;	/*  maximum frame size */
-	u16	reply_oxid;	/*  OX_ID of inbound requests */
+	__be16	reply_oxid;	/*  OX_ID of inbound requests */
 	enum fc_cos	fc_cos;	/*  FC classes of service supp */
 	bfa_boolean_t	cisc;	/*  CISC capable device */
 	bfa_boolean_t	prlo;	/*  processing prlo or LOGO */
@@ -437,32 +447,18 @@ bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport)
 /*
  * bfa fcs rport API functions
  */
-bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
-			       struct bfa_fcs_rport_s *rport,
-			       struct bfad_rport_s *rport_drv);
-bfa_status_t bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport);
-void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
-			    struct bfa_rport_attr_s *attr);
-void bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
-			     struct bfa_rport_stats_s *stats);
-void bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport);
 struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port,
 					     wwn_t rpwwn);
 struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
 	struct bfa_fcs_lport_s *port, wwn_t rnwwn);
 void bfa_fcs_rport_set_del_timeout(u8 rport_tmo);
 
-void bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport,
-			     enum bfa_port_speed speed);
 void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
 	 struct fchs_s *fchs, u16 len);
 void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
 
 struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_lport_s *port,
 	 u32 pid);
-void bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport);
-void bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport);
-void bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport);
 void bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
 			 struct fc_logi_s *plogi_rsp);
 void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port,
@@ -470,10 +466,8 @@ void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port,
 				struct fc_logi_s *plogi);
 void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
 			 struct fc_logi_s *plogi);
-void bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport);
-void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id);
+void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id);
 
-void bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport);
 void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport);
 void bfa_fcs_rport_fcptm_offline_done(struct bfa_fcs_rport_s *rport);
 int  bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport);
@@ -618,7 +612,7 @@ struct bfa_fcs_fdmi_hba_attr_s {
 	u8         option_rom_ver[BFA_VERSION_LEN];
 	u8         fw_version[8];
 	u8         os_name[256];
-	u32        max_ct_pyld;
+	__be32        max_ct_pyld;
 };
 
 /*
@@ -626,9 +620,9 @@ struct bfa_fcs_fdmi_hba_attr_s {
  */
 struct bfa_fcs_fdmi_port_attr_s {
 	u8         supp_fc4_types[32];	/* supported FC4 types */
-	u32        supp_speed;	/* supported speed */
-	u32        curr_speed;	/* current Speed */
-	u32        max_frm_size;	/* max frame size */
+	__be32        supp_speed;	/* supported speed */
+	__be32        curr_speed;	/* current Speed */
+	__be32        max_frm_size;	/* max frame size */
 	u8         os_device_name[256];	/* OS device Name */
 	u8         host_name[256];	/* host name */
 };
@@ -663,6 +657,57 @@ struct bfa_fcs_s {
 	struct bfa_wc_s		wc;	/*  waiting counter */
 };
 
+/*
+ *  fcs_fabric_sm fabric state machine functions
+ */
+
+/*
+ * Fabric state machine events
+ */
+enum bfa_fcs_fabric_event {
+	BFA_FCS_FABRIC_SM_CREATE        = 1,    /*  create from driver        */
+	BFA_FCS_FABRIC_SM_DELETE        = 2,    /*  delete from driver        */
+	BFA_FCS_FABRIC_SM_LINK_DOWN     = 3,    /*  link down from port      */
+	BFA_FCS_FABRIC_SM_LINK_UP       = 4,    /*  link up from port         */
+	BFA_FCS_FABRIC_SM_CONT_OP       = 5,    /*  flogi/auth continue op   */
+	BFA_FCS_FABRIC_SM_RETRY_OP      = 6,    /*  flogi/auth retry op      */
+	BFA_FCS_FABRIC_SM_NO_FABRIC     = 7,    /*  from flogi/auth           */
+	BFA_FCS_FABRIC_SM_PERF_EVFP     = 8,    /*  from flogi/auth           */
+	BFA_FCS_FABRIC_SM_ISOLATE       = 9,    /*  from EVFP processing     */
+	BFA_FCS_FABRIC_SM_NO_TAGGING    = 10,   /*  no VFT tagging from EVFP */
+	BFA_FCS_FABRIC_SM_DELAYED       = 11,   /*  timeout delay event      */
+	BFA_FCS_FABRIC_SM_AUTH_FAILED   = 12,   /*  auth failed       */
+	BFA_FCS_FABRIC_SM_AUTH_SUCCESS  = 13,   /*  auth successful           */
+	BFA_FCS_FABRIC_SM_DELCOMP       = 14,   /*  all vports deleted event */
+	BFA_FCS_FABRIC_SM_LOOPBACK      = 15,   /*  Received our own FLOGI   */
+	BFA_FCS_FABRIC_SM_START         = 16,   /*  from driver       */
+};
+
+/*
+ *  fcs_rport_sm FCS rport state machine events
+ */
+
+enum rport_event {
+	RPSM_EVENT_PLOGI_SEND   = 1,    /*  new rport; start with PLOGI */
+	RPSM_EVENT_PLOGI_RCVD   = 2,    /*  Inbound PLOGI from remote port */
+	RPSM_EVENT_PLOGI_COMP   = 3,    /*  PLOGI completed to rport    */
+	RPSM_EVENT_LOGO_RCVD    = 4,    /*  LOGO from remote device     */
+	RPSM_EVENT_LOGO_IMP     = 5,    /*  implicit logo for SLER      */
+	RPSM_EVENT_FCXP_SENT    = 6,    /*  Frame has been sent    */
+	RPSM_EVENT_DELETE       = 7,    /*  RPORT delete request        */
+	RPSM_EVENT_SCN          = 8,    /*  state change notification   */
+	RPSM_EVENT_ACCEPTED     = 9,    /*  Good response from remote device */
+	RPSM_EVENT_FAILED       = 10,   /*  Request to rport failed.    */
+	RPSM_EVENT_TIMEOUT      = 11,   /*  Rport SM timeout event      */
+	RPSM_EVENT_HCB_ONLINE  = 12,    /*  BFA rport online callback   */
+	RPSM_EVENT_HCB_OFFLINE = 13,    /*  BFA rport offline callback  */
+	RPSM_EVENT_FC4_OFFLINE = 14,    /*  FC-4 offline complete       */
+	RPSM_EVENT_ADDRESS_CHANGE = 15, /*  Rport's PID has changed     */
+	RPSM_EVENT_ADDRESS_DISC = 16,   /*  Need to Discover rport's PID */
+	RPSM_EVENT_PRLO_RCVD   = 17,    /*  PRLO from remote device     */
+	RPSM_EVENT_PLOGI_RETRY = 18,    /*  Retry PLOGI continuously */
+};
+
 /*
  * bfa fcs API functions
  */
@@ -672,16 +717,12 @@ void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
 void bfa_fcs_init(struct bfa_fcs_s *fcs);
 void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
 			      struct bfa_fcs_driver_info_s *driver_info);
-void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable);
 void bfa_fcs_exit(struct bfa_fcs_s *fcs);
-void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod);
-void		bfa_fcs_start(struct bfa_fcs_s *fcs);
 
 /*
  * bfa fcs vf public functions
  */
 bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
-u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
 
 /*
  * fabric protected interface functions
@@ -689,32 +730,29 @@ u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs);
-void bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
 	struct bfa_fcs_vport_s *vport);
 void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
 	struct bfa_fcs_vport_s *vport);
-int bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric);
 struct bfa_fcs_vport_s *bfa_fcs_fabric_vport_lookup(
 		struct bfa_fcs_fabric_s *fabric, wwn_t pwwn);
 void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric,
 		struct fchs_s *fchs, u16 len);
-bfa_boolean_t	bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric);
-bfa_boolean_t	bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric);
-enum bfa_port_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric);
 void	bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
-void	bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric);
-bfa_status_t	bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf,
-			struct bfa_fcs_s *fcs, struct bfa_lport_cfg_s *port_cfg,
-			struct bfad_vf_s *vf_drv);
 void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
 	       wwn_t fabric_name);
 u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
 void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);
+void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
+			enum bfa_fcs_fabric_event event);
+void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
+			enum bfa_fcs_fabric_event event);
+void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
+			enum bfa_fcs_fabric_event event);
 
 /*
  * BFA FCS callback interfaces

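A note on the enum moves above: bfa_fcs_fabric_event and rport_event used to be private to the fabric and rport source files; placing them in this header lets sibling FCS modules (fcpim, lport) post events straight to a state machine with bfa_sm_send_event(), which is what allows the one-line wrapper functions (bfa_fcs_rport_logo_imp(), bfa_fcs_rport_itnim_ack(), and similar) to be deleted further down. The dispatch pattern, as a minimal sketch only, not the driver's exact macro or structure layout:

    /* Simplified: a state is a function pointer, an event is its argument. */
    struct bfa_fcs_rport_s;

    typedef void (*bfa_fcs_rport_sm_fn)(struct bfa_fcs_rport_s *rport,
                                        enum rport_event event);

    struct bfa_fcs_rport_s {
            bfa_fcs_rport_sm_fn sm;         /* current state handler */
            /* ... remaining rport fields elided ... */
    };

    #define bfa_sm_send_event(_sm, _event)  ((_sm)->sm((_sm), (_event)))

    /*
     * So instead of the removed bfa_fcs_rport_logo_imp(rport) wrapper,
     * callers post the event directly:
     *
     *      bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
     */
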
+ 15 - 15
drivers/scsi/bfa/bfa_fcs_fcpim.c

@@ -19,9 +19,9 @@
  *  fcpim.c - FCP initiator mode i-t nexus state machine
  */
 
+#include "bfad_drv.h"
 #include "bfa_fcs.h"
 #include "bfa_fcbuild.h"
-#include "bfad_drv.h"
 #include "bfad_im.h"
 
 BFA_TRC_FILE(FCS, FCPIM);
@@ -103,7 +103,7 @@ bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
 		break;
 
 	case BFA_FCS_ITNIM_SM_OFFLINE:
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_INITIATOR:
@@ -140,7 +140,7 @@ bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
 	case BFA_FCS_ITNIM_SM_OFFLINE:
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
 		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_DELETE:
@@ -181,7 +181,7 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
 	case BFA_FCS_ITNIM_SM_OFFLINE:
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
 		bfa_fcxp_discard(itnim->fcxp);
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_INITIATOR:
@@ -217,7 +217,7 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
 		} else {
 			/* invoke target offline */
 			bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
-			bfa_fcs_rport_logo_imp(itnim->rport);
+			bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP);
 		}
 		break;
 
@@ -225,7 +225,7 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
 	case BFA_FCS_ITNIM_SM_OFFLINE:
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
 		bfa_timer_stop(&itnim->timer);
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_INITIATOR:
@@ -269,7 +269,7 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
 	case BFA_FCS_ITNIM_SM_OFFLINE:
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
 		bfa_itnim_offline(itnim->bfa_itnim);
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_DELETE:
@@ -330,7 +330,7 @@ bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
 	switch (event) {
 	case BFA_FCS_ITNIM_SM_HCB_OFFLINE:
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_DELETE:
@@ -358,7 +358,7 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
 	switch (event) {
 	case BFA_FCS_ITNIM_SM_OFFLINE:
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_RSP_ERROR:
@@ -536,7 +536,7 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
 	if (bfa_itnim == NULL) {
 		bfa_trc(port->fcs, rport->pwwn);
 		bfa_fcb_itnim_free(port->fcs->bfad, itnim_drv);
-		bfa_assert(0);
+		WARN_ON(1);
 		return NULL;
 	}
 
@@ -688,7 +688,7 @@ bfa_cb_itnim_sler(void *cb_arg)
 
 	itnim->stats.sler++;
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
-	bfa_fcs_rport_logo_imp(itnim->rport);
+	bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP);
 }
 
 struct bfa_fcs_itnim_s *
@@ -700,7 +700,7 @@ bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
 	if (!rport)
 		return NULL;
 
-	bfa_assert(rport->itnim != NULL);
+	WARN_ON(rport->itnim == NULL);
 	return rport->itnim;
 }
 
@@ -729,7 +729,7 @@ bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
 {
 	struct bfa_fcs_itnim_s *itnim = NULL;
 
-	bfa_assert(port != NULL);
+	WARN_ON(port == NULL);
 
 	itnim = bfa_fcs_itnim_lookup(port, rpwwn);
 
@@ -746,7 +746,7 @@ bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
 {
 	struct bfa_fcs_itnim_s *itnim = NULL;
 
-	bfa_assert(port != NULL);
+	WARN_ON(port == NULL);
 
 	itnim = bfa_fcs_itnim_lookup(port, rpwwn);
 
@@ -778,6 +778,6 @@ bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
 		break;
 
 	default:
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 }

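The bfa_assert() to WARN_ON() conversions in this file (and in bfa_hw_cb.c / bfa_hw_ct.c later in the diff) invert the predicate: bfa_assert(expr) complained when expr was false, while WARN_ON(cond) dumps a stack trace when cond is true and then carries on. Representative mappings taken from the hunks above, shown side by side for illustration:

    WARN_ON(rport->itnim == NULL);  /* was: bfa_assert(rport->itnim != NULL); */
    WARN_ON(port == NULL);          /* was: bfa_assert(port != NULL);         */
    WARN_ON(1);                     /* was: bfa_assert(0); unreachable branch */

    /* Compound conditions need De Morgan, as in bfa_hw_cb.c: */
    WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS));
    /* was: bfa_assert((nvecs == 1) || (nvecs == __HFN_NUMINTS)); */
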
+ 177 - 213
drivers/scsi/bfa/bfa_fcs_lport.c

@@ -15,10 +15,10 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_fcs.h"
 #include "bfa_fcbuild.h"
 #include "bfa_fc.h"
-#include "bfad_drv.h"
 
 BFA_TRC_FILE(FCS, PORT);
 
@@ -159,7 +159,7 @@ bfa_fcs_lport_sm_online(
 			bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting);
 			list_for_each_safe(qe, qen, &port->rport_q) {
 				rport = (struct bfa_fcs_rport_s *) qe;
-				bfa_fcs_rport_delete(rport);
+				bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
 			}
 		}
 		break;
@@ -197,7 +197,7 @@ bfa_fcs_lport_sm_offline(
 			bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting);
 			list_for_each_safe(qe, qen, &port->rport_q) {
 				rport = (struct bfa_fcs_rport_s *) qe;
-				bfa_fcs_rport_delete(rport);
+				bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
 			}
 		}
 		break;
@@ -309,6 +309,7 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
 			return;
 		}
 		port->pid  = rx_fchs->d_id;
+		bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id);
 	}
 
 	/*
@@ -323,6 +324,7 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
 			(memcmp((void *)&bfa_fcs_lport_get_pwwn(port),
 			(void *)&plogi->port_name, sizeof(wwn_t)) < 0)) {
 			port->pid  = rx_fchs->d_id;
+			bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id);
 			rport->pid = rx_fchs->s_id;
 		}
 		bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
@@ -349,8 +351,8 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
 		 * This is a different device with the same pid. Old device
 		 * disappeared. Send implicit LOGO to old device.
 		 */
-		bfa_assert(rport->pwwn != plogi->port_name);
-		bfa_fcs_rport_logo_imp(rport);
+		WARN_ON(rport->pwwn == plogi->port_name);
+		bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
 
 		/*
 		 * Inbound PLOGI from a new device (with old PID).
@@ -362,7 +364,7 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
 	/*
 	 * PLOGI crossing each other.
 	 */
-	bfa_assert(rport->pwwn == WWN_NULL);
+	WARN_ON(rport->pwwn != WWN_NULL);
 	bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
 }
 
@@ -511,7 +513,8 @@ bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
 	__port_action[port->fabric->fab_type].offline(port);
 
 	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
-	if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE)
+	if (bfa_sm_cmp_state(port->fabric,
+			bfa_fcs_fabric_sm_online) == BFA_TRUE)
 		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 		"Logical port lost fabric connectivity: WWN = %s Role = %s\n",
 		lpwwn_buf, "Initiator");
@@ -522,26 +525,26 @@ bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
 
 	list_for_each_safe(qe, qen, &port->rport_q) {
 		rport = (struct bfa_fcs_rport_s *) qe;
-		bfa_fcs_rport_offline(rport);
+		bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
 	}
 }
 
 static void
 bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port)
 {
-	bfa_assert(0);
+	WARN_ON(1);
 }
 
 static void
 bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port)
 {
-	bfa_assert(0);
+	WARN_ON(1);
 }
 
 static void
 bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port)
 {
-	bfa_assert(0);
+	WARN_ON(1);
 }
 
 static void
@@ -584,33 +587,11 @@ bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
 				port->vport ? port->vport->vport_drv : NULL);
 		bfa_fcs_vport_delete_comp(port->vport);
 	} else {
-		 bfa_fcs_fabric_port_delete_comp(port->fabric);
+		bfa_wc_down(&port->fabric->wc);
 	}
 }
 
 
-
-/*
- *  fcs_lport_api BFA FCS port API
- */
-/*
- *   Module initialization
- */
-void
-bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs)
-{
-
-}
-
-/*
- *   Module cleanup
- */
-void
-bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs)
-{
-	bfa_fcs_modexit_comp(fcs);
-}
-
 /*
  * Unsolicited frame receive handling.
  */
@@ -623,6 +604,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
 	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
 
 	bfa_stats(lport, uf_recvs);
+	bfa_trc(lport->fcs, fchs->type);
 
 	if (!bfa_fcs_lport_is_online(lport)) {
 		bfa_stats(lport, uf_recv_drops);
@@ -682,8 +664,11 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
 	 * Only handles ELS frames for now.
 	 */
 	if (fchs->type != FC_TYPE_ELS) {
-		bfa_trc(lport->fcs, fchs->type);
-		bfa_assert(0);
+		bfa_trc(lport->fcs, fchs->s_id);
+		bfa_trc(lport->fcs, fchs->d_id);
+		/* ignore type FC_TYPE_FC_FSS */
+		if (fchs->type != FC_TYPE_FC_FSS)
+			bfa_sm_fault(lport->fcs, fchs->type);
 		return;
 	}
 
@@ -792,7 +777,7 @@ bfa_fcs_lport_del_rport(
 	struct bfa_fcs_lport_s *port,
 	struct bfa_fcs_rport_s *rport)
 {
-	bfa_assert(bfa_q_is_on_q(&port->rport_q, rport));
+	WARN_ON(!bfa_q_is_on_q(&port->rport_q, rport));
 	list_del(&rport->qe);
 	port->num_rports--;
 
@@ -850,8 +835,8 @@ bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
 	lport->fcs = fcs;
 	lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id);
 	lport->vport = vport;
-	lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) :
-				  bfa_lps_get_tag(lport->fabric->lps);
+	lport->lp_tag = (vport) ? vport->lps->lp_tag :
+				  lport->fabric->lps->lp_tag;
 
 	INIT_LIST_HEAD(&lport->rport_q);
 	lport->num_rports = 0;
@@ -903,10 +888,12 @@ bfa_fcs_lport_get_attr(
 	port_attr->port_cfg = port->port_cfg;
 
 	if (port->fabric) {
-		port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric);
-		port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric);
+		port_attr->port_type = port->fabric->oper_type;
+		port_attr->loopback = bfa_sm_cmp_state(port->fabric,
+				bfa_fcs_fabric_sm_loopback);
 		port_attr->authfail =
-			bfa_fcs_fabric_is_auth_failed(port->fabric);
+			bfa_sm_cmp_state(port->fabric,
+				bfa_fcs_fabric_sm_auth_failed);
 		port_attr->fabric_name  = bfa_fcs_lport_get_fabric_name(port);
 		memcpy(port_attr->fabric_ip_addr,
 			bfa_fcs_lport_get_fabric_ipaddr(port),
@@ -915,10 +902,10 @@ bfa_fcs_lport_get_attr(
 		if (port->vport != NULL) {
 			port_attr->port_type = BFA_PORT_TYPE_VPORT;
 			port_attr->fpma_mac =
-				bfa_lps_get_lp_mac(port->vport->lps);
+				port->vport->lps->lp_mac;
 		} else {
 			port_attr->fpma_mac =
-				bfa_lps_get_lp_mac(port->fabric->lps);
+				port->fabric->lps->lp_mac;
 		}
 	} else {
 		port_attr->port_type = BFA_PORT_TYPE_UNKNOWN;
@@ -998,6 +985,7 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
 	    ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn,
 	     sizeof(wwn_t)) > 0) {
 		port->pid = N2N_LOCAL_PID;
+		bfa_lps_set_n2n_pid(port->fabric->lps, N2N_LOCAL_PID);
 		/*
 		 * First, check if we know the device by pwwn.
 		 */
@@ -1007,7 +995,7 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
 			bfa_trc(port->fcs, rport->pid);
 			bfa_trc(port->fcs, rport->pwwn);
 			rport->pid = N2N_REMOTE_PID;
-			bfa_fcs_rport_online(rport);
+			bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
 			return;
 		}
 
@@ -1017,10 +1005,10 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
 		 */
 		if (port->num_rports > 0) {
 			rport = bfa_fcs_lport_get_rport_by_pid(port, 0);
-			bfa_assert(rport != NULL);
+			WARN_ON(rport == NULL);
 			if (rport) {
 				bfa_trc(port->fcs, rport->pwwn);
-				bfa_fcs_rport_delete(rport);
+				bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
 			}
 		}
 		bfa_fcs_rport_create(port, N2N_REMOTE_PID);
@@ -1569,6 +1557,7 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 	struct fdmi_attr_s *attr;
 	u8        *curr_ptr;
 	u16        len, count;
+	u16	templen;
 
 	/*
 	 * get hba attributes
@@ -1594,69 +1583,69 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODENAME);
-	attr->len = sizeof(wwn_t);
-	memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), attr->len);
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = sizeof(wwn_t);
+	memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), templen);
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Manufacturer
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER);
-	attr->len = (u16) strlen(fcs_hba_attr->manufacturer);
-	memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = (u16) strlen(fcs_hba_attr->manufacturer);
+	memcpy(attr->value, fcs_hba_attr->manufacturer, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Serial Number
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM);
-	attr->len = (u16) strlen(fcs_hba_attr->serial_num);
-	memcpy(attr->value, fcs_hba_attr->serial_num, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = (u16) strlen(fcs_hba_attr->serial_num);
+	memcpy(attr->value, fcs_hba_attr->serial_num, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Model
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL);
-	attr->len = (u16) strlen(fcs_hba_attr->model);
-	memcpy(attr->value, fcs_hba_attr->model, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = (u16) strlen(fcs_hba_attr->model);
+	memcpy(attr->value, fcs_hba_attr->model, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Model Desc
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC);
-	attr->len = (u16) strlen(fcs_hba_attr->model_desc);
-	memcpy(attr->value, fcs_hba_attr->model_desc, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = (u16) strlen(fcs_hba_attr->model_desc);
+	memcpy(attr->value, fcs_hba_attr->model_desc, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * H/W Version
@@ -1664,14 +1653,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 	if (fcs_hba_attr->hw_version[0] != '\0') {
 		attr = (struct fdmi_attr_s *) curr_ptr;
 		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION);
-		attr->len = (u16) strlen(fcs_hba_attr->hw_version);
-		memcpy(attr->value, fcs_hba_attr->hw_version, attr->len);
-		attr->len = fc_roundup(attr->len, sizeof(u32));
-		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-		len += attr->len;
+		templen = (u16) strlen(fcs_hba_attr->hw_version);
+		memcpy(attr->value, fcs_hba_attr->hw_version, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
 		count++;
-		attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-					 sizeof(attr->len));
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					 sizeof(templen));
 	}
 
 	/*
@@ -1679,14 +1668,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION);
-	attr->len = (u16) strlen(fcs_hba_attr->driver_version);
-	memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;;
+	templen = (u16) strlen(fcs_hba_attr->driver_version);
+	memcpy(attr->value, fcs_hba_attr->driver_version, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Option Rom Version
@@ -1694,14 +1683,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 	if (fcs_hba_attr->option_rom_ver[0] != '\0') {
 		attr = (struct fdmi_attr_s *) curr_ptr;
 		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION);
-		attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver);
-		memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len);
-		attr->len = fc_roundup(attr->len, sizeof(u32));
-		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-		len += attr->len;
+		templen = (u16) strlen(fcs_hba_attr->option_rom_ver);
+		memcpy(attr->value, fcs_hba_attr->option_rom_ver, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
 		count++;
-		attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-					 sizeof(attr->len));
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					 sizeof(templen));
 	}
 
 	/*
@@ -1709,14 +1698,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION);
-	attr->len = (u16) strlen(fcs_hba_attr->driver_version);
-	memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = (u16) strlen(fcs_hba_attr->driver_version);
+	memcpy(attr->value, fcs_hba_attr->driver_version, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * OS Name
@@ -1724,14 +1713,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 	if (fcs_hba_attr->os_name[0] != '\0') {
 		attr = (struct fdmi_attr_s *) curr_ptr;
 		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME);
-		attr->len = (u16) strlen(fcs_hba_attr->os_name);
-		memcpy(attr->value, fcs_hba_attr->os_name, attr->len);
-		attr->len = fc_roundup(attr->len, sizeof(u32));
-		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-		len += attr->len;
+		templen = (u16) strlen(fcs_hba_attr->os_name);
+		memcpy(attr->value, fcs_hba_attr->os_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
 		count++;
-		attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-					sizeof(attr->len));
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					sizeof(templen));
 	}
 
 	/*
@@ -1739,12 +1728,12 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT);
-	attr->len = sizeof(fcs_hba_attr->max_ct_pyld);
-	memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len);
-	len += attr->len;
+	templen = sizeof(fcs_hba_attr->max_ct_pyld);
+	memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, templen);
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Update size of payload
@@ -1845,6 +1834,7 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
 	u8        *curr_ptr;
 	u16        len;
 	u8	count = 0;
+	u16	templen;
 
 	/*
 	 * get port attributes
@@ -1863,54 +1853,54 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FC4_TYPES);
-	attr->len = sizeof(fcs_port_attr.supp_fc4_types);
-	memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len);
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = sizeof(fcs_port_attr.supp_fc4_types);
+	memcpy(attr->value, fcs_port_attr.supp_fc4_types, templen);
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	++count;
 	attr->len =
-		cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+		cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Supported Speed
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_SPEED);
-	attr->len = sizeof(fcs_port_attr.supp_speed);
-	memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len);
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = sizeof(fcs_port_attr.supp_speed);
+	memcpy(attr->value, &fcs_port_attr.supp_speed, templen);
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	++count;
 	attr->len =
-		cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+		cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * current Port Speed
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SPEED);
-	attr->len = sizeof(fcs_port_attr.curr_speed);
-	memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len);
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = sizeof(fcs_port_attr.curr_speed);
+	memcpy(attr->value, &fcs_port_attr.curr_speed, templen);
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	++count;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * max frame size
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FRAME_SIZE);
-	attr->len = sizeof(fcs_port_attr.max_frm_size);
-	memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len);
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = sizeof(fcs_port_attr.max_frm_size);
+	memcpy(attr->value, &fcs_port_attr.max_frm_size, templen);
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	++count;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * OS Device Name
@@ -1918,14 +1908,14 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
 	if (fcs_port_attr.os_device_name[0] != '\0') {
 		attr = (struct fdmi_attr_s *) curr_ptr;
 		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME);
-		attr->len = (u16) strlen(fcs_port_attr.os_device_name);
-		memcpy(attr->value, fcs_port_attr.os_device_name, attr->len);
-		attr->len = fc_roundup(attr->len, sizeof(u32));
-		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-		len += attr->len;
+		templen = (u16) strlen(fcs_port_attr.os_device_name);
+		memcpy(attr->value, fcs_port_attr.os_device_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
 		++count;
-		attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-					sizeof(attr->len));
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					sizeof(templen));
 	}
 	/*
 	 * Host Name
@@ -1933,14 +1923,14 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
 	if (fcs_port_attr.host_name[0] != '\0') {
 		attr = (struct fdmi_attr_s *) curr_ptr;
 		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME);
-		attr->len = (u16) strlen(fcs_port_attr.host_name);
-		memcpy(attr->value, fcs_port_attr.host_name, attr->len);
-		attr->len = fc_roundup(attr->len, sizeof(u32));
-		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-		len += attr->len;
+		templen = (u16) strlen(fcs_port_attr.host_name);
+		memcpy(attr->value, fcs_port_attr.host_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
 		++count;
-		attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-				sizeof(attr->len));
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				sizeof(templen));
 	}
 
 	/*
@@ -2103,7 +2093,7 @@ bfa_fcs_lport_fdmi_timeout(void *arg)
 	bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT);
 }
 
-void
+static void
 bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
 			 struct bfa_fcs_fdmi_hba_attr_s *hba_attr)
 {
@@ -2147,7 +2137,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
 	hba_attr->max_ct_pyld = cpu_to_be32(FC_MAX_PDUSZ);
 }
 
-void
+static void
 bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
 			  struct bfa_fcs_fdmi_port_attr_s *port_attr)
 {
@@ -2560,7 +2550,7 @@ bfa_fcs_lport_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
 	len = fc_gmal_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
 			     bfa_fcs_lport_get_fcid(port),
-				 bfa_lps_get_peer_nwwn(port->fabric->lps));
+				 port->fabric->lps->pr_nwwn);
 
 	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
 			  FC_CLASS_3, len, &fchs,
@@ -2760,7 +2750,7 @@ bfa_fcs_lport_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
 	len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
 			     bfa_fcs_lport_get_fcid(port),
-				 bfa_lps_get_peer_nwwn(port->fabric->lps));
+				 port->fabric->lps->pr_nwwn);
 
 	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
 			  FC_CLASS_3, len, &fchs,
@@ -2836,7 +2826,7 @@ bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 	ms->fcxp = fcxp;
 
 	len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
-			     bfa_os_hton3b(FC_MGMT_SERVER),
+			     bfa_hton3b(FC_MGMT_SERVER),
 			     bfa_fcs_lport_get_fcid(port), 0,
 			     port->port_cfg.pwwn, port->port_cfg.nwwn,
 				 bfa_fcport_get_maxfrsize(port->fcs->bfa));
@@ -3593,7 +3583,7 @@ fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
 	ns->fcxp = fcxp;
 
 	len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
-			     bfa_os_hton3b(FC_NAME_SERVER),
+			     bfa_hton3b(FC_NAME_SERVER),
 			     bfa_fcs_lport_get_fcid(port), 0,
 			     port->port_cfg.pwwn, port->port_cfg.nwwn,
 				 bfa_fcport_get_maxfrsize(port->fcs->bfa));
@@ -4150,7 +4140,7 @@ bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port)
 	bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
 }
 
-void
+static void
 bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
 {
 
@@ -4163,7 +4153,7 @@ bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
 
 	for (ii = 0 ; ii < nwwns; ++ii) {
 		rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]);
-		bfa_assert(rport);
+		WARN_ON(!rport);
 	}
 }
 
@@ -4352,8 +4342,8 @@ bfa_fcs_lport_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 	/* Handle VU registrations for Base port only */
 	if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) {
 		len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
-				bfa_lps_is_brcd_fabric(port->fabric->lps),
-							port->pid, 0);
+				port->fabric->lps->brcd_switch,
+				port->pid, 0);
 	} else {
 	    len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
 				    BFA_FALSE,
@@ -4626,7 +4616,7 @@ bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
 
 
 		default:
-			bfa_assert(0);
+			WARN_ON(1);
 			nsquery = BFA_TRUE;
 		}
 	}
@@ -4672,7 +4662,7 @@ bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, int index,
 
 	while ((qe != qh) && (i < nrports)) {
 		rport = (struct bfa_fcs_rport_s *) qe;
-		if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) {
+		if (bfa_ntoh3b(rport->pid) > 0xFFF000) {
 			qe = bfa_q_next(qe);
 			bfa_trc(fcs, (u32) rport->pwwn);
 			bfa_trc(fcs, rport->pid);
@@ -4720,7 +4710,7 @@ bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port,
 
 	while ((qe != qh) && (i < *nrports)) {
 		rport = (struct bfa_fcs_rport_s *) qe;
-		if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) {
+		if (bfa_ntoh3b(rport->pid) > 0xFFF000) {
 			qe = bfa_q_next(qe);
 			bfa_trc(fcs, (u32) rport->pwwn);
 			bfa_trc(fcs, rport->pid);
@@ -4771,7 +4761,7 @@ bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port)
 
 	while (qe != qh) {
 		rport = (struct bfa_fcs_rport_s *) qe;
-		if ((bfa_os_ntoh3b(rport->pid) > 0xFFF000) ||
+		if ((bfa_ntoh3b(rport->pid) > 0xFFF000) ||
 			(bfa_fcs_rport_get_state(rport) ==
 			  BFA_RPORT_OFFLINE)) {
 			qe = bfa_q_next(qe);
@@ -4807,7 +4797,7 @@ bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn)
 	struct bfa_fcs_vport_s *vport;
 	bfa_fcs_vf_t   *vf;
 
-	bfa_assert(fcs != NULL);
+	WARN_ON(fcs == NULL);
 
 	vf = bfa_fcs_vf_lookup(fcs, vf_id);
 	if (vf == NULL) {
@@ -4853,7 +4843,7 @@ bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port,
 		port_info->max_vports_supp =
 			bfa_lps_get_max_vport(port->fcs->bfa);
 		port_info->num_vports_inuse =
-			bfa_fcs_fabric_vport_count(port->fabric);
+			port->fabric->num_vports;
 		port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP;
 		port_info->num_rports_inuse = port->num_rports;
 	} else {
@@ -4997,7 +4987,8 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
 
 	switch (event) {
 	case BFA_FCS_VPORT_SM_START:
-		if (bfa_fcs_fabric_is_online(__vport_fabric(vport))
+		if (bfa_sm_cmp_state(__vport_fabric(vport),
+					bfa_fcs_fabric_sm_online)
 		    && bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) {
 			bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
 			bfa_fcs_vport_do_fdisc(vport);
@@ -5080,13 +5071,13 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
 	switch (event) {
 	case BFA_FCS_VPORT_SM_DELETE:
 		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
-		bfa_lps_discard(vport->lps);
+		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
 		bfa_fcs_lport_delete(&vport->lport);
 		break;
 
 	case BFA_FCS_VPORT_SM_OFFLINE:
 		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
-		bfa_lps_discard(vport->lps);
+		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
 		break;
 
 	case BFA_FCS_VPORT_SM_RSP_OK:
@@ -5166,7 +5157,7 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
 
 	case BFA_FCS_VPORT_SM_OFFLINE:
 		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
-		bfa_lps_discard(vport->lps);
+		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
 		bfa_fcs_lport_offline(&vport->lport);
 		break;
 
@@ -5266,7 +5257,7 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
 
 	switch (event) {
 	case BFA_FCS_VPORT_SM_OFFLINE:
-		bfa_lps_discard(vport->lps);
+		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
 		/*
 		 * !!! fall through !!!
 		 */
@@ -5305,14 +5296,14 @@ bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport)
 static void
 bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
 {
-	u8		lsrjt_rsn = bfa_lps_get_lsrjt_rsn(vport->lps);
-	u8		lsrjt_expl = bfa_lps_get_lsrjt_expl(vport->lps);
+	u8		lsrjt_rsn = vport->lps->lsrjt_rsn;
+	u8		lsrjt_expl = vport->lps->lsrjt_expl;
 
 	bfa_trc(__vport_fcs(vport), lsrjt_rsn);
 	bfa_trc(__vport_fcs(vport), lsrjt_expl);
 
 	/* For certain reason codes, we don't want to retry. */
-	switch (bfa_lps_get_lsrjt_expl(vport->lps)) {
+	switch (vport->lps->lsrjt_expl) {
 	case FC_LS_RJT_EXP_INV_PORT_NAME: /* by brocade */
 	case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */
 		if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
@@ -5476,7 +5467,7 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
 	if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL)
 		return BFA_STATUS_VPORT_EXISTS;
 
-	if (bfa_fcs_fabric_vport_count(&fcs->fabric) ==
+	if (fcs->fabric.num_vports ==
 			bfa_lps_get_max_vport(fcs->bfa))
 		return BFA_STATUS_VPORT_MAX;
 
@@ -5618,33 +5609,6 @@ bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
 	attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
 }
 
-/*
- *	Use this function to get vport's statistics.
- *
- *	param[in]	vport	pointer to bfa_fcs_vport_t.
- *	param[out]	stats	pointer to return vport statistics in
- *
- *	return None
- */
-void
-bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
-			struct bfa_vport_stats_s *stats)
-{
-	*stats = vport->vport_stats;
-}
-
-/*
- *	Use this function to clear vport's statistics.
- *
- *	param[in]	vport	pointer to bfa_fcs_vport_t.
- *
- *	return None
- */
-void
-bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport)
-{
-	memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
-}
 
 /*
  *	Lookup a virtual port. Excludes base port from lookup.
@@ -5684,7 +5648,7 @@ bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
 		/*
 		 * Initialize the V-Port fields
 		 */
-		__vport_fcid(vport) = bfa_lps_get_pid(vport->lps);
+		__vport_fcid(vport) = vport->lps->lp_pid;
 		vport->vport_stats.fdisc_accepts++;
 		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
 		break;
@@ -5697,7 +5661,7 @@ bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
 		break;
 
 	case BFA_STATUS_EPROTOCOL:
-		switch (bfa_lps_get_extstatus(vport->lps)) {
+		switch (vport->lps->ext_status) {
 		case BFA_EPROTO_BAD_ACCEPT:
 			vport->vport_stats.fdisc_acc_bad++;
 			break;

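The attr->len churn in the FDMI payload builders above is more than a rename: attr->len is the on-wire length field and ends up big-endian, but the old code also used it as a CPU-order running counter and only byte-swapped it at the end, so the same field held values in two byte orders during the loop. Keeping the scratch length in a local templen means attr->len is written exactly once, already converted. The per-attribute pattern after the change, condensed from the hunks above (buffer setup and surrounding error handling elided):

    u16 templen;

    attr = (struct fdmi_attr_s *) curr_ptr;
    attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER);
    templen = (u16) strlen(fcs_hba_attr->manufacturer);
    memcpy(attr->value, fcs_hba_attr->manufacturer, templen);
    templen = fc_roundup(templen, sizeof(u32));       /* pad to a 32-bit word */
    curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
    len += templen;
    count++;
    /* wire-format length, stored once and never read back */
    attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen));
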
+ 17 - 213
drivers/scsi/bfa/bfa_fcs_rport.c

@@ -19,9 +19,9 @@
  *  rport.c Remote port implementation.
  */
 
+#include "bfad_drv.h"
 #include "bfa_fcs.h"
 #include "bfa_fcbuild.h"
-#include "bfad_drv.h"
 
 BFA_TRC_FILE(FCS, RPORT);
 
@@ -75,30 +75,6 @@ static void	bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport,
 static void	bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
 				struct fchs_s *rx_fchs, u16 len);
 static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
-/*
- *  fcs_rport_sm FCS rport state machine events
- */
-
-enum rport_event {
-	RPSM_EVENT_PLOGI_SEND	= 1,	/*  new rport; start with PLOGI */
-	RPSM_EVENT_PLOGI_RCVD	= 2,	/*  Inbound PLOGI from remote port */
-	RPSM_EVENT_PLOGI_COMP	= 3,	/*  PLOGI completed to rport	*/
-	RPSM_EVENT_LOGO_RCVD	= 4,	/*  LOGO from remote device	*/
-	RPSM_EVENT_LOGO_IMP	= 5,	/*  implicit logo for SLER	*/
-	RPSM_EVENT_FCXP_SENT	= 6,	/*  Frame from has been sent	*/
-	RPSM_EVENT_DELETE	= 7,	/*  RPORT delete request	*/
-	RPSM_EVENT_SCN		= 8,	/*  state change notification	*/
-	RPSM_EVENT_ACCEPTED	= 9,	/*  Good response from remote device */
-	RPSM_EVENT_FAILED	= 10,	/*  Request to rport failed.	*/
-	RPSM_EVENT_TIMEOUT	= 11,	/*  Rport SM timeout event	*/
-	RPSM_EVENT_HCB_ONLINE  = 12,	/*  BFA rport online callback	*/
-	RPSM_EVENT_HCB_OFFLINE = 13,	/*  BFA rport offline callback	*/
-	RPSM_EVENT_FC4_OFFLINE = 14,	/*  FC-4 offline complete	*/
-	RPSM_EVENT_ADDRESS_CHANGE = 15,	/*  Rport's PID has changed	*/
-	RPSM_EVENT_ADDRESS_DISC = 16,	/*  Need to Discover rport's PID */
-	RPSM_EVENT_PRLO_RCVD   = 17,	/*  PRLO from remote device	*/
-	RPSM_EVENT_PLOGI_RETRY = 18,	/*  Retry PLOGI continously */
-};
 
 static void	bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport,
 					enum rport_event event);
@@ -498,24 +474,24 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
 
 	case RPSM_EVENT_LOGO_RCVD:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		break;
 
 	case RPSM_EVENT_LOGO_IMP:
 	case RPSM_EVENT_ADDRESS_CHANGE:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		break;
 
 	case RPSM_EVENT_PLOGI_RCVD:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		bfa_fcs_rport_send_plogiacc(rport, NULL);
 		break;
 
 	case RPSM_EVENT_DELETE:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		break;
 
 	case RPSM_EVENT_SCN:
@@ -824,7 +800,7 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
 	switch (event) {
 	case RPSM_EVENT_FC4_OFFLINE:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		break;
 
 	case RPSM_EVENT_DELETE:
@@ -856,7 +832,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
 	switch (event) {
 	case RPSM_EVENT_FC4_OFFLINE:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		break;
 
 	default:
@@ -878,7 +854,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
 	switch (event) {
 	case RPSM_EVENT_FC4_OFFLINE:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		break;
 
 	case RPSM_EVENT_SCN:
@@ -1459,7 +1435,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
 			twin->stats.plogi_rcvd	  += rport->stats.plogi_rcvd;
 			twin->stats.plogi_accs++;
 
-			bfa_fcs_rport_delete(rport);
+			bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
 
 			bfa_fcs_rport_update(twin, plogi_rsp);
 			twin->pid = rsp_fchs->s_id;
@@ -1992,13 +1968,14 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
 	/*
 	 * allocate FC-4s
 	 */
-	bfa_assert(bfa_fcs_lport_is_initiator(port));
+	WARN_ON(!bfa_fcs_lport_is_initiator(port));
 
 	if (bfa_fcs_lport_is_initiator(port)) {
 		rport->itnim = bfa_fcs_itnim_create(rport);
 		if (!rport->itnim) {
 			bfa_trc(fcs, rpid);
-			bfa_rport_delete(rport->bfa_rport);
+			bfa_sm_send_event(rport->bfa_rport,
+						BFA_RPORT_SM_DELETE);
 			kfree(rport_drv);
 			return NULL;
 		}
@@ -2032,7 +2009,7 @@ bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
 			bfa_fcs_rpf_rport_offline(rport);
 	}
 
-	bfa_rport_delete(rport->bfa_rport);
+	bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_DELETE);
 	bfa_fcs_lport_del_rport(port, rport);
 	kfree(rport->rp_drv);
 }
@@ -2307,39 +2284,7 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
 	bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
 }
 
-/*
- * Called by bport/vport to delete a remote port instance.
- *
- * Rport delete is called under the following conditions:
- *		- vport is deleted
- *		- vf is deleted
- *		- explicit request from OS to delete rport
- */
-void
-bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport)
-{
-	bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
-}
 
-/*
- * Called by bport/vport to  when a target goes offline.
- *
- */
-void
-bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport)
-{
-	bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
-}
-
-/*
- * Called by bport in n2n when a target (attached port) becomes online.
- *
- */
-void
-bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport)
-{
-	bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
-}
 /*
  *	Called by bport/vport to notify SCN for the remote port
  */
@@ -2350,23 +2295,6 @@ bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
 	bfa_sm_send_event(rport, RPSM_EVENT_SCN);
 }
 
-/*
- *	Called by	fcpim to notify that the ITN cleanup is done.
- */
-void
-bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport)
-{
-	bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
-}
-
-/*
- *	Called by fcptm to notify that the ITN cleanup is done.
- */
-void
-bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport)
-{
-	bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
-}
 
 /*
  *	brief
@@ -2461,15 +2389,6 @@ bfa_cb_rport_qos_scn_prio(void *cbarg,
 	bfa_trc(rport->fcs, rport->pwwn);
 }
 
-/*
- *		Called to process any unsolicted frames from this remote port
- */
-void
-bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport)
-{
-	bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
-}
-
 /*
  *		Called to process any unsolicited frames from this remote port
  */
@@ -2586,6 +2505,7 @@ bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport)
 	return bfa_sm_to_state(rport_sm_table, rport->sm);
 }
 
+
 /*
  *	brief
  *		 Called by the Driver to set rport delete/ageout timeout
@@ -2602,7 +2522,7 @@ bfa_fcs_rport_set_del_timeout(u8 rport_tmo)
 		bfa_fcs_rport_del_timeout = rport_tmo * 1000;
 }
 void
-bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id)
+bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id)
 {
 	bfa_trc(rport->fcs, rport->pid);
 
@@ -2621,106 +2541,6 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id)
  *  fcs_rport_api FCS rport API.
  */
 
-/*
- *	Direct API to add a target by port wwn. This interface is used, for
- *	example, by bios when target pwwn is known from boot lun configuration.
- */
-bfa_status_t
-bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
-		struct bfa_fcs_rport_s *rport, struct bfad_rport_s *rport_drv)
-{
-	bfa_trc(port->fcs, *pwwn);
-
-	return BFA_STATUS_OK;
-}
-
-/*
- *	Direct API to remove a target and its associated resources. This
- *	interface is used, for example, by driver to remove target
- *	ports from the target list for a VM.
- */
-bfa_status_t
-bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport_in)
-{
-
-	struct bfa_fcs_rport_s *rport;
-
-	bfa_trc(rport_in->fcs, rport_in->pwwn);
-
-	rport = bfa_fcs_lport_get_rport_by_pwwn(rport_in->port, rport_in->pwwn);
-	if (rport == NULL) {
-		/*
-		 * TBD Error handling
-		 */
-		bfa_trc(rport_in->fcs, rport_in->pid);
-		return BFA_STATUS_UNKNOWN_RWWN;
-	}
-
-	/*
-	 * TBD if this remote port is online, send a logo
-	 */
-	return BFA_STATUS_OK;
-
-}
-
-/*
- *	Remote device status for display/debug.
- */
-void
-bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
-			struct bfa_rport_attr_s *rport_attr)
-{
-	struct bfa_rport_qos_attr_s qos_attr;
-	bfa_fcs_lport_t *port = rport->port;
-	bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;
-
-	memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
-
-	rport_attr->pid = rport->pid;
-	rport_attr->pwwn = rport->pwwn;
-	rport_attr->nwwn = rport->nwwn;
-	rport_attr->cos_supported = rport->fc_cos;
-	rport_attr->df_sz = rport->maxfrsize;
-	rport_attr->state = bfa_fcs_rport_get_state(rport);
-	rport_attr->fc_cos = rport->fc_cos;
-	rport_attr->cisc = rport->cisc;
-	rport_attr->scsi_function = rport->scsi_function;
-	rport_attr->curr_speed  = rport->rpf.rpsc_speed;
-	rport_attr->assigned_speed  = rport->rpf.assigned_speed;
-
-	bfa_rport_get_qos_attr(rport->bfa_rport, &qos_attr);
-	rport_attr->qos_attr = qos_attr;
-
-	rport_attr->trl_enforced = BFA_FALSE;
-	if (bfa_fcport_is_ratelim(port->fcs->bfa)) {
-		if (rport_speed == BFA_PORT_SPEED_UNKNOWN) {
-			/* Use default ratelim speed setting */
-			rport_speed =
-				bfa_fcport_get_ratelim_speed(rport->fcs->bfa);
-		}
-
-		if (rport_speed < bfa_fcs_lport_get_rport_max_speed(port))
-			rport_attr->trl_enforced = BFA_TRUE;
-	}
-}
-
-/*
- *	Per remote device statistics.
- */
-void
-bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
-			struct bfa_rport_stats_s *stats)
-{
-	*stats = rport->stats;
-}
-
-void
-bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport)
-{
-	memset((char *)&rport->stats, 0,
-			sizeof(struct bfa_rport_stats_s));
-}
-
 struct bfa_fcs_rport_s *
 bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
 {
@@ -2751,22 +2571,6 @@ bfa_fcs_rport_lookup_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t rnwwn)
 	return rport;
 }
 
-/*
- * This API is to set the Rport's speed. Should be used when RPSC is not
- * supported by the rport.
- */
-void
-bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport, bfa_port_speed_t speed)
-{
-	rport->rpf.assigned_speed  = speed;
-
-	/* Set this speed in f/w only if the RPSC speed is not available */
-	if (rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
-		bfa_rport_speed(rport->bfa_rport, speed);
-}
-
-
-
 /*
  * Remote port features (RPF) implementation.
  */
@@ -2827,7 +2631,7 @@ bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
 	case RPFSM_EVENT_RPORT_ONLINE:
 		/* Send RPSC2 to a Brocade fabric only. */
 		if ((!BFA_FCS_PID_IS_WKA(rport->pid)) &&
-			((bfa_lps_is_brcd_fabric(rport->port->fabric->lps)) ||
+			((rport->port->fabric->lps->brcd_switch) ||
 			(bfa_fcs_fabric_get_switch_oui(fabric) ==
 						BFA_FCS_BRCD_SWITCH_OUI))) {
 			bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
@@ -3093,7 +2897,7 @@ bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
 		num_ents = be16_to_cpu(rpsc2_acc->num_pids);
 		bfa_trc(rport->fcs, num_ents);
 		if (num_ents > 0) {
-			bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid);
+			WARN_ON(rpsc2_acc->port_info[0].pid == rport->pid);
 			bfa_trc(rport->fcs,
 				be16_to_cpu(rpsc2_acc->port_info[0].pid));
 			bfa_trc(rport->fcs,

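The u16 to __be16 and u32 to __be32 changes scattered through this series (bfa_fcs_rport_prlo()'s ox_id, max_ct_pyld and the FDMI speed / frame-size fields in bfa_fcs.h) are sparse endianness annotations: those values are kept in wire order, and the annotation lets `make C=1` flag any assignment that skips the byte swap. A small self-contained illustration; the struct and function names here are invented for the example:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct demo_wire_hdr {                       /* hypothetical */
            __be16  ox_id;                       /* kept in big-endian wire order */
            __be32  max_frm_size;
    };

    static void demo_fill(struct demo_wire_hdr *h, u16 ox_id, u32 frm_size)
    {
            h->ox_id        = cpu_to_be16(ox_id);     /* correct */
            h->max_frm_size = cpu_to_be32(frm_size);  /* correct */
            /* h->ox_id = ox_id;  <-- sparse would warn: missing byte swap */
    }
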
+ 2 - 1
drivers/scsi/bfa/bfa_hw_cb.c

@@ -15,6 +15,7 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_modules.h"
 #include "bfi_cbreg.h"
 
@@ -110,7 +111,7 @@ bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
 {
 	int i;
 
-	bfa_assert((nvecs == 1) || (nvecs == __HFN_NUMINTS));
+	WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS));
 
 	bfa->msix.nvecs = nvecs;
 	if (nvecs == 1) {

+ 3 - 2
drivers/scsi/bfa/bfa_hw_ct.c

@@ -15,6 +15,7 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_modules.h"
 #include "bfi_ctreg.h"
 
@@ -116,7 +117,7 @@ bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
 void
 bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs)
 {
-	bfa_assert((nvecs == 1) || (nvecs == BFA_MSIX_CT_MAX));
+	WARN_ON((nvecs != 1) && (nvecs != BFA_MSIX_CT_MAX));
 	bfa_trc(bfa, nvecs);
 
 	bfa->msix.nvecs = nvecs;
@@ -143,7 +144,7 @@ bfa_hwct_msix_install(struct bfa_s *bfa)
 	for (; i <= BFA_MSIX_RME_Q3; i++)
 		bfa->msix.handler[i] = bfa_msix_rspq;
 
-	bfa_assert(i == BFA_MSIX_LPU_ERR);
+	WARN_ON(i != BFA_MSIX_LPU_ERR);
 	bfa->msix.handler[BFA_MSIX_LPU_ERR] = bfa_msix_lpu_err;
 }
 

File diff suppressed because it is too large
+ 269 - 232
drivers/scsi/bfa/bfa_ioc.c


+ 21 - 23
drivers/scsi/bfa/bfa_ioc.h

@@ -18,10 +18,15 @@
 #ifndef __BFA_IOC_H__
 #define __BFA_IOC_H__
 
-#include "bfa_os_inc.h"
+#include "bfad_drv.h"
 #include "bfa_cs.h"
 #include "bfi.h"
 
+#define BFA_DBG_FWTRC_ENTS	(BFI_IOC_TRC_ENTS)
+#define BFA_DBG_FWTRC_LEN					\
+	(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +	\
+	(sizeof(struct bfa_trc_mod_s) -				\
+	BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
 /*
  * BFA timer declarations
  */
@@ -47,7 +52,6 @@ struct bfa_timer_mod_s {
 #define BFA_TIMER_FREQ 200 /* specified in millisecs */
 
 void bfa_timer_beat(struct bfa_timer_mod_s *mod);
-void bfa_timer_init(struct bfa_timer_mod_s *mod);
 void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
 			bfa_timer_cbfn_t timercb, void *arg,
 			unsigned int timeout);
@@ -70,7 +74,7 @@ struct bfa_sge_s {
 #define bfa_swap_words(_x)  (	\
 	((_x) << 32) | ((_x) >> 32))
 
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 #define bfa_sge_to_be(_x)
 #define bfa_sge_to_le(_x)	bfa_sge_word_swap(_x)
 #define bfa_sgaddr_le(_x)	bfa_swap_words(_x)
@@ -115,8 +119,8 @@ struct bfa_dma_s {
 static inline void
 __bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
 {
-	dma_addr->a32.addr_lo = (u32) pa;
-	dma_addr->a32.addr_hi = (u32) (bfa_os_u32(pa));
+	dma_addr->a32.addr_lo = (__be32) pa;
+	dma_addr->a32.addr_hi = (__be32) (pa >> 32);
 }
 
 
@@ -125,8 +129,8 @@ __bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
 static inline void
 __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
 {
-	dma_addr->a32.addr_lo = (u32) cpu_to_be32(pa);
-	dma_addr->a32.addr_hi = (u32) cpu_to_be32(bfa_os_u32(pa));
+	dma_addr->a32.addr_lo = cpu_to_be32(pa);
+	dma_addr->a32.addr_hi = cpu_to_be32(pa >> 32);
 }
 
 struct bfa_ioc_regs_s {
@@ -145,8 +149,11 @@ struct bfa_ioc_regs_s {
 	void __iomem *host_page_num_fn;
 	void __iomem *heartbeat;
 	void __iomem *ioc_fwstate;
+	void __iomem *alt_ioc_fwstate;
 	void __iomem *ll_halt;
+	void __iomem *alt_ll_halt;
 	void __iomem *err_set;
+	void __iomem *ioc_fail_sync;
 	void __iomem *shirq_isr_next;
 	void __iomem *shirq_msk_next;
 	void __iomem *smem_page_start;
@@ -254,8 +261,12 @@ struct bfa_ioc_hwif_s {
 	void		(*ioc_map_port)	(struct bfa_ioc_s *ioc);
 	void		(*ioc_isr_mode_set)	(struct bfa_ioc_s *ioc,
 					bfa_boolean_t msix);
-	void		(*ioc_notify_hbfail)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_notify_fail)	(struct bfa_ioc_s *ioc);
 	void		(*ioc_ownership_reset)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_sync_join)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_sync_leave)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_sync_ack)		(struct bfa_ioc_s *ioc);
+	bfa_boolean_t	(*ioc_sync_complete)	(struct bfa_ioc_s *ioc);
 };
 
 #define bfa_ioc_pcifn(__ioc)		((__ioc)->pcidev.pci_func)
@@ -325,7 +336,6 @@ void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
 void bfa_ioc_detach(struct bfa_ioc_s *ioc);
 void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
 		enum bfi_mclass mc);
-u32 bfa_ioc_meminfo(void);
 void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa);
 void bfa_ioc_enable(struct bfa_ioc_s *ioc);
 void bfa_ioc_disable(struct bfa_ioc_s *ioc);
@@ -340,6 +350,7 @@ bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
+void bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc);
 enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc);
 void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num);
 void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver);
@@ -353,24 +364,16 @@ enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc);
 void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr);
 void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
 		struct bfa_adapter_attr_s *ad_attr);
-int bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover);
 void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave);
 bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata,
 		int *trclen);
-void bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc);
 bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
 				 int *trclen);
 bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
 	u32 *offset, int *buflen);
-u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr);
-u32 bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr);
 void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc);
-void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
-	struct bfa_ioc_hbfail_notify_s *notify);
 bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg);
-void bfa_ioc_sem_release(void __iomem *sem_reg);
-void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
 void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
 			struct bfi_ioc_image_hdr_s *fwhdr);
 bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
@@ -381,13 +384,8 @@ bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
 /*
  * bfa mfg wwn API functions
  */
-wwn_t bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc);
-wwn_t bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc);
 mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc);
-wwn_t bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc);
-wwn_t bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc);
 mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc);
-u64 bfa_ioc_get_adid(struct bfa_ioc_s *ioc);
 
 /*
  * F/W Image Size & Chunk
@@ -421,7 +419,7 @@ bfa_cb_image_get_chunk(int type, u32 off)
 		return bfi_image_ct_cna_get_chunk(off);	break;
 	case BFI_IMAGE_CB_FC:
 		return bfi_image_cb_fc_get_chunk(off);	break;
-	default: return 0;
+	default: return NULL;
 	}
 }
 

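In the bfa_ioc.h hunk above, __bfa_dma_be_addr_set() now open-codes the 64-bit split instead of using the removed bfa_os_u32() helper: the low word is the truncated physical address, the high word is pa >> 32, and both are stored byte-swapped. A standalone restatement of just that arithmetic, with the helper name invented for the example (the real function writes into union bfi_addr_u):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    static inline void demo_dma_addr_split_be(u64 pa, __be32 *lo, __be32 *hi)
    {
            *lo = cpu_to_be32((u32) pa);          /* low 32 bits */
            *hi = cpu_to_be32((u32) (pa >> 32));  /* high 32 bits, previously bfa_os_u32(pa) */
    }
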
+ 92 - 5
drivers/scsi/bfa/bfa_ioc_cb.c

@@ -15,6 +15,7 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_ioc.h"
 #include "bfi_cbreg.h"
 #include "bfa_defs.h"
@@ -29,10 +30,14 @@ static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
-static void bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc);
 
-struct bfa_ioc_hwif_s hwif_cb;
+static struct bfa_ioc_hwif_s hwif_cb;
 
 /*
  * Called from bfa_ioc_attach() to map asic specific calls.
@@ -46,8 +51,12 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
 	hwif_cb.ioc_reg_init = bfa_ioc_cb_reg_init;
 	hwif_cb.ioc_map_port = bfa_ioc_cb_map_port;
 	hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set;
-	hwif_cb.ioc_notify_hbfail = bfa_ioc_cb_notify_hbfail;
+	hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail;
 	hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset;
+	hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join;
+	hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave;
+	hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack;
+	hwif_cb.ioc_sync_complete = bfa_ioc_cb_sync_complete;
 
 	ioc->ioc_hwif = &hwif_cb;
 }
@@ -58,6 +67,21 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
 static bfa_boolean_t
 bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc)
 {
+	struct bfi_ioc_image_hdr_s fwhdr;
+	uint32_t fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+	if (fwstate == BFI_IOC_UNINIT)
+		return BFA_TRUE;
+
+	bfa_ioc_fwver_get(ioc, &fwhdr);
+
+	if (swab32(fwhdr.exec) == BFI_BOOT_TYPE_NORMAL)
+		return BFA_TRUE;
+
+	bfa_trc(ioc, fwstate);
+	bfa_trc(ioc, fwhdr.exec);
+	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+
 	return BFA_TRUE;
 }
 
@@ -70,7 +94,7 @@ bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
  * Notify other functions on HB failure.
  */
 static void
-bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc)
+bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc)
 {
 	writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
 	readl(ioc->ioc_regs.err_set);
@@ -108,9 +132,11 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
 	if (ioc->port_id == 0) {
 		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
 		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
 	} else {
 		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
 		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+		ioc->ioc_regs.alt_ioc_fwstate = (rb + BFA_IOC0_STATE_REG);
 	}
 
 	/*
@@ -181,10 +207,71 @@ bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
 	 * will lock it instead of clearing it.
 	 */
 	readl(ioc->ioc_regs.ioc_sem_reg);
-	bfa_ioc_hw_sem_release(ioc);
+	writel(1, ioc->ioc_regs.ioc_sem_reg);
 }
 
+/*
+ * Synchronized IOC failure processing routines
+ */
+static void
+bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc)
+{
+}
 
+static void
+bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc)
+{
+}
+
+static void
+bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc)
+{
+	writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+}
+
+static bfa_boolean_t
+bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc)
+{
+	uint32_t fwstate, alt_fwstate;
+	fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+	/*
+	 * At this point, this IOC is holding the hw sem in the
+	 * start path (fwcheck) OR in the disable/enable path
+	 * OR to check if the other IOC has acknowledged failure.
+	 *
+	 * So, this IOC can be in UNINIT, INITING, DISABLED, FAIL
+	 * or in MEMTEST states. In a normal scenario, this IOC
+	 * can not be in OP state when this function is called.
+	 *
+	 * However, this IOC could still be in OP state when
+	 * the OS driver is starting up, if the OptROM code has
+	 * left it in that state.
+	 *
+	 * If we had marked this IOC's fwstate as BFI_IOC_FAIL
+	 * in the failure case and now, if the fwstate is not
+	 * BFI_IOC_FAIL it implies that the other PCI fn has
+	 * reinitialized the ASIC or this IOC got disabled, so
+	 * return TRUE.
+	 */
+	if (fwstate == BFI_IOC_UNINIT ||
+		fwstate == BFI_IOC_INITING ||
+		fwstate == BFI_IOC_DISABLED ||
+		fwstate == BFI_IOC_MEMTEST ||
+		fwstate == BFI_IOC_OP)
+		return BFA_TRUE;
+	else {
+		alt_fwstate = readl(ioc->ioc_regs.alt_ioc_fwstate);
+		if (alt_fwstate == BFI_IOC_FAIL ||
+			alt_fwstate == BFI_IOC_DISABLED ||
+			alt_fwstate == BFI_IOC_UNINIT ||
+			alt_fwstate == BFI_IOC_INITING ||
+			alt_fwstate == BFI_IOC_MEMTEST)
+			return BFA_TRUE;
+		else
+			return BFA_FALSE;
+	}
+}
 
 bfa_status_t
 bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
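
bfa_ioc_cb_sync_complete() above makes its decision purely from the two per-function fwstate registers (the CB sync_join/sync_leave hooks are deliberately empty). A minimal userspace model of that decision table follows; the enum values are placeholders, since the real BFI_IOC_* state list lives in bfi.h and is larger than the handful of states checked here.

#include <stdbool.h>
#include <stdio.h>

enum fwstate { UNINIT, INITING, MEMTEST, OP, DISABLED, FAIL };

static bool cb_sync_complete(enum fwstate state, enum fwstate alt_state)
{
	/* Anything but a recorded failure on this function means the
	 * peer reinitialized the ASIC or this IOC was disabled. */
	if (state == UNINIT || state == INITING || state == DISABLED ||
	    state == MEMTEST || state == OP)
		return true;

	/* Otherwise defer to the peer function's fwstate. */
	return (alt_state == FAIL || alt_state == DISABLED ||
		alt_state == UNINIT || alt_state == INITING ||
		alt_state == MEMTEST);
}

int main(void)
{
	printf("%d\n", cb_sync_complete(FAIL, OP));		/* 0 - wait */
	printf("%d\n", cb_sync_complete(FAIL, DISABLED));	/* 1 - proceed */
	return 0;
}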

+ 108 - 12
drivers/scsi/bfa/bfa_ioc_ct.c

@@ -15,12 +15,22 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_ioc.h"
 #include "bfi_ctreg.h"
 #include "bfa_defs.h"
 
 BFA_TRC_FILE(CNA, IOC_CT);
 
+#define bfa_ioc_ct_sync_pos(__ioc)      \
+		((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
+#define BFA_IOC_SYNC_REQD_SH    16
+#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
+#define bfa_ioc_ct_clear_sync_ackd(__val)       (__val & 0xffff0000)
+#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
+#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
+			(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
+
 /*
  * forward declarations
  */
@@ -29,10 +39,14 @@ static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
-static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
 
-struct bfa_ioc_hwif_s hwif_ct;
+static struct bfa_ioc_hwif_s hwif_ct;
 
 /*
  * Called from bfa_ioc_attach() to map asic specific calls.
@@ -46,8 +60,12 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
 	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
 	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
 	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
-	hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
+	hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
 	hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+	hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
+	hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
+	hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
+	hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
 
 	ioc->ioc_hwif = &hwif_ct;
 }
@@ -83,7 +101,8 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
 	 */
 	if (usecnt == 0) {
 		writel(1, ioc->ioc_regs.ioc_usage_reg);
-		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
+		writel(0, ioc->ioc_regs.ioc_fail_sync);
 		bfa_trc(ioc, usecnt);
 		return BFA_TRUE;
 	}
@@ -94,14 +113,14 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
 	/*
 	 * Use count cannot be non-zero and chip in uninitialized state.
 	 */
-	bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
+	WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);
 
 	/*
 	 * Check if another driver with a different firmware is active
 	 */
 	bfa_ioc_fwver_get(ioc, &fwhdr);
 	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
-		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
 		bfa_trc(ioc, usecnt);
 		return BFA_FALSE;
 	}
@@ -111,7 +130,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
 	 */
 	usecnt++;
 	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
-	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
 	bfa_trc(ioc, usecnt);
 	return BFA_TRUE;
 }
@@ -139,25 +158,27 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
 	 */
 	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
 	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
-	bfa_assert(usecnt > 0);
+	WARN_ON(usecnt <= 0);
 
 	usecnt--;
 	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
 	bfa_trc(ioc, usecnt);
 
-	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
 }
 
 /*
  * Notify other functions on HB failure.
  */
 static void
-bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc)
+bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
 {
 	if (ioc->cna) {
 		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
+		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
 		/* Wait for halt to take effect */
 		readl(ioc->ioc_regs.ll_halt);
+		readl(ioc->ioc_regs.alt_ll_halt);
 	} else {
 		writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
 		readl(ioc->ioc_regs.err_set);
@@ -209,15 +230,19 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
 	if (ioc->port_id == 0) {
 		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
 		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
 		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
 		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
 		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
 	} else {
 		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
 		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
 		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
 		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
 		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
 	}
 
 	/*
@@ -235,6 +260,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
 	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
 	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
 	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
+	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
 
 	/*
 	 * sram memory access
@@ -313,7 +339,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
 	if (ioc->cna) {
 		bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
 		writel(0, ioc->ioc_regs.ioc_usage_reg);
-		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
 	}
 
 	/*
@@ -322,10 +348,80 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
 	 * will lock it instead of clearing it.
 	 */
 	readl(ioc->ioc_regs.ioc_sem_reg);
-	bfa_ioc_hw_sem_release(ioc);
+	writel(1, ioc->ioc_regs.ioc_sem_reg);
+}
+
+/*
+ * Synchronized IOC failure processing routines
+ */
+static void
+bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
+
+	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static void
+bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
+					bfa_ioc_ct_sync_pos(ioc);
+
+	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static void
+bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+
+	writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
+		ioc->ioc_regs.ioc_fail_sync);
 }
 
+static bfa_boolean_t
+bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+	uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
+	uint32_t tmp_ackd;
+
+	if (sync_ackd == 0)
+		return BFA_TRUE;
+
+	/*
+	 * The check below is to see whether any other PCI fn
+	 * has reinitialized the ASIC (reset sync_ackd bits)
+	 * and failed again while this IOC was waiting for hw
+	 * semaphore (in bfa_iocpf_sm_semwait()).
+	 */
+	tmp_ackd = sync_ackd;
+	if ((sync_reqd &  bfa_ioc_ct_sync_pos(ioc)) &&
+		!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
+		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
+
+	if (sync_reqd == sync_ackd) {
+		writel(bfa_ioc_ct_clear_sync_ackd(r32),
+			ioc->ioc_regs.ioc_fail_sync);
+		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
+		return BFA_TRUE;
+	}
+
+	/*
+	 * If another PCI fn reinitialized and failed again while
+	 * this IOC was waiting for hw sem, the sync_ackd bit for
+	 * this IOC needs to be set again to allow reinitialization.
+	 */
+	if (tmp_ackd != sync_ackd)
+		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
 
+	return BFA_FALSE;
+}
 
 /*
  * Check the firmware state to know if pll_init has been completed already
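
On CT hardware the handshake above is kept in the shared BFA_IOC_FAIL_SYNC scratch register: one bit per PCI function, with "sync requested" bits in the upper 16 bits and "failure acknowledged" bits in the lower 16, exactly as the bfa_ioc_ct_sync_* macros lay it out. A standalone sketch of that bookkeeping, with the register modelled as a plain variable:

#include <stdio.h>

#define SYNC_REQD_SH		16
#define sync_pos(pcifn)		(1u << (pcifn))
#define sync_reqd_pos(pcifn)	(sync_pos(pcifn) << SYNC_REQD_SH)
#define get_sync_ackd(v)	((v) & 0x0000ffff)
#define get_sync_reqd(v)	((v) >> SYNC_REQD_SH)

int main(void)
{
	unsigned int fail_sync = 0;

	/* functions 0 and 1 join the failure sync (sync_join) */
	fail_sync |= sync_reqd_pos(0);
	fail_sync |= sync_reqd_pos(1);

	/* function 0 acknowledges the failure (sync_ack) */
	fail_sync |= sync_pos(0);
	printf("reqd=%#x ackd=%#x complete=%d\n",
	       get_sync_reqd(fail_sync), get_sync_ackd(fail_sync),
	       get_sync_reqd(fail_sync) == get_sync_ackd(fail_sync));

	/* function 1 acknowledges as well: every requester has acked,
	 * so sync_complete() would let reinitialization proceed */
	fail_sync |= sync_pos(1);
	printf("reqd=%#x ackd=%#x complete=%d\n",
	       get_sync_reqd(fail_sync), get_sync_ackd(fail_sync),
	       get_sync_reqd(fail_sync) == get_sync_ackd(fail_sync));
	return 0;
}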

+ 0 - 3
drivers/scsi/bfa/bfa_modules.h

@@ -99,7 +99,6 @@ struct bfa_module_s {
 	void (*iocdisable) (struct bfa_s *bfa);
 };
 
-extern struct bfa_module_s *hal_mods[];
 
 struct bfa_s {
 	void			*bfad;		/*  BFA driver instance    */
@@ -116,8 +115,6 @@ struct bfa_s {
 	struct bfa_msix_s	msix;
 };
 
-extern bfa_isr_func_t bfa_isrs[BFI_MC_MAX];
-extern bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[];
 extern bfa_boolean_t bfa_auto_recover;
 extern struct bfa_module_s hal_mod_sgpg;
 extern struct bfa_module_s hal_mod_fcport;

+ 0 - 143
drivers/scsi/bfa/bfa_os_inc.h

@@ -1,143 +0,0 @@
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFA_OS_INC_H__
-#define __BFA_OS_INC_H__
-
-#include <linux/types.h>
-#include <linux/version.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/idr.h>
-#include <linux/interrupt.h>
-#include <linux/cdev.h>
-#include <linux/fs.h>
-#include <linux/delay.h>
-#include <linux/vmalloc.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsi_transport_fc.h>
-#include <scsi/scsi_transport.h>
-
-#ifdef __BIG_ENDIAN
-#define __BIGENDIAN
-#endif
-
-static inline u64 bfa_os_get_log_time(void)
-{
-	u64 system_time = 0;
-	struct timeval tv;
-	do_gettimeofday(&tv);
-
-	/* We are interested in seconds only. */
-	system_time = tv.tv_sec;
-	return system_time;
-}
-
-#define bfa_io_lat_clock_res_div HZ
-#define bfa_io_lat_clock_res_mul 1000
-
-#define BFA_LOG(level, bfad, mask, fmt, arg...)				\
-do {									\
-	if (((mask) == 4) || (level[1] <= '4'))				\
-		dev_printk(level, &((bfad)->pcidev)->dev, fmt, ##arg);	\
-} while (0)
-
-#define bfa_swap_3b(_x)				\
-	((((_x) & 0xff) << 16) |		\
-	((_x) & 0x00ff00) |			\
-	(((_x) & 0xff0000) >> 16))
-
-#define bfa_os_swap_sgaddr(_x)  ((u64)(                                 \
-	(((u64)(_x) & (u64)0x00000000000000ffull) << 32)        |       \
-	(((u64)(_x) & (u64)0x000000000000ff00ull) << 32)        |       \
-	(((u64)(_x) & (u64)0x0000000000ff0000ull) << 32)        |       \
-	(((u64)(_x) & (u64)0x00000000ff000000ull) << 32)        |       \
-	(((u64)(_x) & (u64)0x000000ff00000000ull) >> 32)        |       \
-	(((u64)(_x) & (u64)0x0000ff0000000000ull) >> 32)        |       \
-	(((u64)(_x) & (u64)0x00ff000000000000ull) >> 32)        |       \
-	(((u64)(_x) & (u64)0xff00000000000000ull) >> 32)))
-
-#ifndef __BIGENDIAN
-#define bfa_os_hton3b(_x)  bfa_swap_3b(_x)
-#define bfa_os_sgaddr(_x)  (_x)
-#else
-#define bfa_os_hton3b(_x)  (_x)
-#define bfa_os_sgaddr(_x)  bfa_os_swap_sgaddr(_x)
-#endif
-
-#define bfa_os_ntoh3b(_x)  bfa_os_hton3b(_x)
-#define bfa_os_u32(__pa64) ((__pa64) >> 32)
-
-#define BFA_TRC_TS(_trcm)				\
-	({						\
-		struct timeval tv;			\
-							\
-		do_gettimeofday(&tv);			\
-		(tv.tv_sec*1000000+tv.tv_usec);		\
-	 })
-
-#define boolean_t int
-
-/*
- * For current time stamp, OS API will fill-in
- */
-struct bfa_timeval_s {
-	u32	tv_sec;		/*  seconds        */
-	u32	tv_usec;	/*  microseconds   */
-};
-
-static inline void
-bfa_os_gettimeofday(struct bfa_timeval_s *tv)
-{
-	struct timeval  tmp_tv;
-
-	do_gettimeofday(&tmp_tv);
-	tv->tv_sec = (u32) tmp_tv.tv_sec;
-	tv->tv_usec = (u32) tmp_tv.tv_usec;
-}
-
-static inline void
-wwn2str(char *wwn_str, u64 wwn)
-{
-	union {
-		u64 wwn;
-		u8 byte[8];
-	} w;
-
-	w.wwn = wwn;
-	sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", w.byte[0],
-		w.byte[1], w.byte[2], w.byte[3], w.byte[4], w.byte[5],
-		w.byte[6], w.byte[7]);
-}
-
-static inline void
-fcid2str(char *fcid_str, u32 fcid)
-{
-	union {
-		u32 fcid;
-		u8 byte[4];
-	} f;
-
-	f.fcid = fcid;
-	sprintf(fcid_str, "%02x:%02x:%02x", f.byte[1], f.byte[2], f.byte[3]);
-}
-
-#endif /* __BFA_OS_INC_H__ */
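
For reference, the deleted wwn2str() helper above formats a 64-bit WWN by viewing it through a union as eight bytes and printing them in memory order. Reproduced as a runnable userspace snippet (the sample WWN value is made up):

#include <stdint.h>
#include <stdio.h>

static void wwn2str(char *wwn_str, uint64_t wwn)
{
	union {
		uint64_t wwn;
		uint8_t byte[8];
	} w;

	w.wwn = wwn;
	sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
		w.byte[0], w.byte[1], w.byte[2], w.byte[3],
		w.byte[4], w.byte[5], w.byte[6], w.byte[7]);
}

int main(void)
{
	char buf[24];

	wwn2str(buf, 0x200000051e123456ull);	/* hypothetical WWN */
	printf("%s\n", buf);
	return 0;
}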

+ 0 - 4
drivers/scsi/bfa/bfa_plog.h

@@ -151,9 +151,5 @@ void bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
 void bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
 			enum bfa_plog_eid event, u16 misc,
 			struct fchs_s *fchdr, u32 pld_w0);
-void bfa_plog_clear(struct bfa_plog_s *plog);
-void bfa_plog_enable(struct bfa_plog_s *plog);
-void bfa_plog_disable(struct bfa_plog_s *plog);
-bfa_boolean_t	bfa_plog_get_setting(struct bfa_plog_s *plog);
 
 #endif /* __BFA_PORTLOG_H__ */

+ 12 - 25
drivers/scsi/bfa/bfa_port.c

@@ -15,6 +15,7 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_defs_svc.h"
 #include "bfa_port.h"
 #include "bfi.h"
@@ -29,14 +30,14 @@ static void
 bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
 {
 	u32    *dip = (u32 *) stats;
-	u32    t0, t1;
+	__be32    t0, t1;
 	int	    i;
 
 	for (i = 0; i < sizeof(union bfa_port_stats_u)/sizeof(u32);
 		i += 2) {
 		t0 = dip[i];
 		t1 = dip[i + 1];
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 		dip[i] = be32_to_cpu(t0);
 		dip[i + 1] = be32_to_cpu(t1);
 #else
@@ -96,13 +97,13 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
 	port->stats_busy = BFA_FALSE;
 
 	if (status == BFA_STATUS_OK) {
-		struct bfa_timeval_s tv;
+		struct timeval tv;
 
 		memcpy(port->stats, port->stats_dma.kva,
 		       sizeof(union bfa_port_stats_u));
 		bfa_port_stats_swap(port, port->stats);
 
-		bfa_os_gettimeofday(&tv);
+		do_gettimeofday(&tv);
 		port->stats->fc.secs_reset = tv.tv_sec - port->stats_reset_time;
 	}
 
@@ -124,7 +125,7 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
 static void
 bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
 {
-	struct bfa_timeval_s tv;
+	struct timeval tv;
 
 	port->stats_status = status;
 	port->stats_busy   = BFA_FALSE;
@@ -132,7 +133,7 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
 	/*
 	* re-initialize time stamp for stats reset
 	*/
-	bfa_os_gettimeofday(&tv);
+	do_gettimeofday(&tv);
 	port->stats_reset_time = tv.tv_sec;
 
 	if (port->stats_cbfn) {
@@ -185,7 +186,7 @@ bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
 		break;
 
 	default:
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 }
 
@@ -432,9 +433,9 @@ void
 bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
 		 void *dev, struct bfa_trc_mod_s *trcmod)
 {
-	struct bfa_timeval_s tv;
+	struct timeval tv;
 
-	bfa_assert(port);
+	WARN_ON(!port);
 
 	port->dev    = dev;
 	port->ioc    = ioc;
@@ -447,27 +448,13 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
 
 	bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port);
 	bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port);
-	bfa_ioc_hbfail_register(port->ioc, &port->hbfail);
+	list_add_tail(&port->hbfail.qe, &port->ioc->hb_notify_q);
 
 	/*
 	 * initialize time stamp for stats reset
 	 */
-	bfa_os_gettimeofday(&tv);
+	do_gettimeofday(&tv);
 	port->stats_reset_time = tv.tv_sec;
 
 	bfa_trc(port, 0);
 }
-
-/*
- * bfa_port_detach()
- *
- *
- * @param[in] port - Pointer to the Port module data structure
- *
- * @return void
- */
-void
-bfa_port_detach(struct bfa_port_s *port)
-{
-	bfa_trc(port, 0);
-}

+ 0 - 1
drivers/scsi/bfa/bfa_port.h

@@ -48,7 +48,6 @@ struct bfa_port_s {
 
 void	     bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
 				void *dev, struct bfa_trc_mod_s *trcmod);
-void	     bfa_port_detach(struct bfa_port_s *port);
 void	     bfa_port_hbfail(void *arg);
 
 bfa_status_t bfa_port_get_stats(struct bfa_port_s *port,

File diff suppressed because it is too large
+ 180 - 339
drivers/scsi/bfa/bfa_svc.c


+ 14 - 49
drivers/scsi/bfa/bfa_svc.h

@@ -220,6 +220,18 @@ void	bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 /*
  * RPORT related defines
  */
+enum bfa_rport_event {
+	BFA_RPORT_SM_CREATE	= 1,	/*  rport create event          */
+	BFA_RPORT_SM_DELETE	= 2,	/*  deleting an existing rport  */
+	BFA_RPORT_SM_ONLINE	= 3,	/*  rport is online             */
+	BFA_RPORT_SM_OFFLINE	= 4,	/*  rport is offline            */
+	BFA_RPORT_SM_FWRSP	= 5,	/*  firmware response           */
+	BFA_RPORT_SM_HWFAIL	= 6,	/*  IOC h/w failure             */
+	BFA_RPORT_SM_QOS_SCN	= 7,	/*  QoS SCN from firmware       */
+	BFA_RPORT_SM_SET_SPEED	= 8,	/*  Set Rport Speed             */
+	BFA_RPORT_SM_QRESUME	= 9,	/*  space in requeue queue      */
+};
+
 #define BFA_RPORT_MIN	4
 
 struct bfa_rport_mod_s {
@@ -432,6 +444,7 @@ struct bfa_fcport_s {
 	u8			myalpa;	/*  my ALPA in LOOP topology */
 	u8			rsvd[3];
 	struct bfa_port_cfg_s	cfg;	/*  current port configuration */
+	bfa_boolean_t		use_flash_cfg; /* get port cfg from flash */
 	struct bfa_qos_attr_s  qos_attr;   /* QoS Attributes */
 	struct bfa_qos_vc_attr_s qos_vc_attr;  /*  VC info from ELP */
 	struct bfa_reqq_wait_s	reqq_wait;
@@ -500,30 +513,9 @@ void bfa_fcport_event_register(struct bfa_s *bfa,
 			void (*event_cbfn) (void *cbarg,
 			enum bfa_port_linkstate event), void *event_cbarg);
 bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
-void bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off);
-void bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off);
-bfa_status_t bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa,
-					  enum bfa_port_speed speed);
 enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
 
 void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
-void bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status);
-void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
-		       bfa_boolean_t link_e2e_beacon);
-void bfa_fcport_qos_get_attr(struct bfa_s *bfa,
-			     struct bfa_qos_attr_s *qos_attr);
-void bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
-				struct bfa_qos_vc_attr_s *qos_vc_attr);
-bfa_status_t bfa_fcport_get_qos_stats(struct bfa_s *bfa,
-				      union bfa_fcport_stats_u *stats,
-				      bfa_cb_port_t cbfn, void *cbarg);
-bfa_status_t bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
-					void *cbarg);
-bfa_status_t bfa_fcport_get_fcoe_stats(struct bfa_s *bfa,
-				       union bfa_fcport_stats_u *stats,
-				       bfa_cb_port_t cbfn, void *cbarg);
-bfa_status_t bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
-					 void *cbarg);
 bfa_boolean_t     bfa_fcport_is_ratelim(struct bfa_s *bfa);
 bfa_boolean_t	bfa_fcport_is_linkup(struct bfa_s *bfa);
 bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
@@ -537,14 +529,9 @@ bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
  * bfa rport API functions
  */
 struct bfa_rport_s *bfa_rport_create(struct bfa_s *bfa, void *rport_drv);
-void bfa_rport_delete(struct bfa_rport_s *rport);
 void bfa_rport_online(struct bfa_rport_s *rport,
 		      struct bfa_rport_info_s *rport_info);
-void bfa_rport_offline(struct bfa_rport_s *rport);
 void bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed);
-void bfa_rport_get_stats(struct bfa_rport_s *rport,
-			 struct bfa_rport_hal_stats_s *stats);
-void bfa_rport_clear_stats(struct bfa_rport_s *rport);
 void bfa_cb_rport_online(void *rport);
 void bfa_cb_rport_offline(void *rport);
 void bfa_cb_rport_qos_scn_flowid(void *rport,
@@ -553,8 +540,6 @@ void bfa_cb_rport_qos_scn_flowid(void *rport,
 void bfa_cb_rport_qos_scn_prio(void *rport,
 			       struct bfa_rport_qos_attr_s old_qos_attr,
 			       struct bfa_rport_qos_attr_s new_qos_attr);
-void bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
-			    struct bfa_rport_qos_attr_s *qos_attr);
 
 /*
  * bfa fcxp API functions
@@ -619,38 +604,18 @@ void bfa_uf_free(struct bfa_uf_s *uf);
 u32 bfa_lps_get_max_vport(struct bfa_s *bfa);
 struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
 void bfa_lps_delete(struct bfa_lps_s *lps);
-void bfa_lps_discard(struct bfa_lps_s *lps);
 void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa,
 		   u16 pdusz, wwn_t pwwn, wwn_t nwwn,
 		   bfa_boolean_t auth_en);
 void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz,
 		   wwn_t pwwn, wwn_t nwwn);
-void bfa_lps_flogo(struct bfa_lps_s *lps);
 void bfa_lps_fdisclogo(struct bfa_lps_s *lps);
-u8 bfa_lps_get_tag(struct bfa_lps_s *lps);
-bfa_boolean_t bfa_lps_is_npiv_en(struct bfa_lps_s *lps);
-bfa_boolean_t bfa_lps_is_fport(struct bfa_lps_s *lps);
-bfa_boolean_t bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps);
-bfa_boolean_t bfa_lps_is_authreq(struct bfa_lps_s *lps);
-bfa_eproto_status_t bfa_lps_get_extstatus(struct bfa_lps_s *lps);
-u32 bfa_lps_get_pid(struct bfa_lps_s *lps);
+void bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, u32 n2n_pid);
 u32 bfa_lps_get_base_pid(struct bfa_s *bfa);
 u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
-u16 bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps);
-wwn_t bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps);
-wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps);
-u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps);
-u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps);
-mac_t bfa_lps_get_lp_mac(struct bfa_lps_s *lps);
 void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
 void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
 void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
 void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
 
-void bfa_trunk_enable_cfg(struct bfa_s *bfa);
-bfa_status_t bfa_trunk_enable(struct bfa_s *bfa);
-bfa_status_t bfa_trunk_disable(struct bfa_s *bfa);
-bfa_status_t bfa_trunk_get_attr(struct bfa_s *bfa,
-		struct bfa_trunk_attr_s *attr);
-
 #endif /* __BFA_SVC_H__ */

+ 18 - 20
drivers/scsi/bfa/bfad.c

@@ -32,7 +32,6 @@
 #include "bfad_drv.h"
 #include "bfad_im.h"
 #include "bfa_fcs.h"
-#include "bfa_os_inc.h"
 #include "bfa_defs.h"
 #include "bfa.h"
 
@@ -61,12 +60,12 @@ int		msix_disable_cb = 0, msix_disable_ct = 0;
 u32	bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size;
 u32     *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc;
 
-const char *msix_name_ct[] = {
+static const char *msix_name_ct[] = {
 	"cpe0", "cpe1", "cpe2", "cpe3",
 	"rme0", "rme1", "rme2", "rme3",
 	"ctrl" };
 
-const char *msix_name_cb[] = {
+static const char *msix_name_cb[] = {
 	"cpe0", "cpe1", "cpe2", "cpe3",
 	"rme0", "rme1", "rme2", "rme3",
 	"eemc", "elpu0", "elpu1", "epss", "mlpu" };
@@ -206,7 +205,7 @@ bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
 		}
 
 		spin_lock_irqsave(&bfad->bfad_lock, flags);
-		bfa_init(&bfad->bfa);
+		bfa_iocfc_init(&bfad->bfa);
 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
 		/* Set up interrupt handler for each vectors */
@@ -533,7 +532,7 @@ bfad_hal_mem_release(struct bfad_s *bfad)
 					(dma_addr_t) meminfo_elem->dma);
 				break;
 			default:
-				bfa_assert(0);
+				WARN_ON(1);
 				break;
 			}
 		}
@@ -725,7 +724,7 @@ bfad_bfa_tmo(unsigned long data)
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 
-	bfa_timer_tick(&bfad->bfa);
+	bfa_timer_beat(&bfad->bfa.timer_mod);
 
 	bfa_comp_deq(&bfad->bfa, &doneq);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -882,8 +881,8 @@ bfad_drv_init(struct bfad_s *bfad)
 		goto out_hal_mem_alloc_failure;
 	}
 
-	bfa_init_trc(&bfad->bfa, bfad->trcmod);
-	bfa_init_plog(&bfad->bfa, &bfad->plog_buf);
+	bfad->bfa.trcmod = bfad->trcmod;
+	bfad->bfa.plog = &bfad->plog_buf;
 	bfa_plog_init(&bfad->plog_buf);
 	bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
 		     0, "Driver Attach");
@@ -893,9 +892,9 @@ bfad_drv_init(struct bfad_s *bfad)
 
 	/* FCS INIT */
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod);
+	bfad->bfa_fcs.trcmod = bfad->trcmod;
 	bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
-	bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable);
+	bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
 	bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
@@ -913,7 +912,7 @@ bfad_drv_uninit(struct bfad_s *bfad)
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	init_completion(&bfad->comp);
-	bfa_stop(&bfad->bfa);
+	bfa_iocfc_stop(&bfad->bfa);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	wait_for_completion(&bfad->comp);
 
@@ -932,8 +931,8 @@ bfad_drv_start(struct bfad_s *bfad)
 	unsigned long	flags;
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	bfa_start(&bfad->bfa);
-	bfa_fcs_start(&bfad->bfa_fcs);
+	bfa_iocfc_start(&bfad->bfa);
+	bfa_fcs_fabric_modstart(&bfad->bfa_fcs);
 	bfad->bfad_flags |= BFAD_HAL_START_DONE;
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
@@ -963,7 +962,7 @@ bfad_stop(struct bfad_s *bfad)
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	init_completion(&bfad->comp);
-	bfa_stop(&bfad->bfa);
+	bfa_iocfc_stop(&bfad->bfa);
 	bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	wait_for_completion(&bfad->comp);
@@ -1102,15 +1101,15 @@ bfad_start_ops(struct bfad_s *bfad) {
 
 	/*
 	 * If bfa_linkup_delay is set to -1 default; try to retrieve the
-	 * value using the bfad_os_get_linkup_delay(); else use the
+	 * value using the bfad_get_linkup_delay(); else use the
 	 * passed in module param value as the bfa_linkup_delay.
 	 */
 	if (bfa_linkup_delay < 0) {
-		bfa_linkup_delay = bfad_os_get_linkup_delay(bfad);
-		bfad_os_rport_online_wait(bfad);
+		bfa_linkup_delay = bfad_get_linkup_delay(bfad);
+		bfad_rport_online_wait(bfad);
 		bfa_linkup_delay = -1;
 	} else
-		bfad_os_rport_online_wait(bfad);
+		bfad_rport_online_wait(bfad);
 
 	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n");
 
@@ -1167,7 +1166,6 @@ bfad_intx(int irq, void *dev_id)
 		spin_lock_irqsave(&bfad->bfad_lock, flags);
 		bfa_comp_free(&bfad->bfa, &doneq);
 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-		bfa_trc_fp(bfad, irq);
 	}
 
 	return IRQ_HANDLED;
@@ -1524,7 +1522,7 @@ bfad_init(void)
 	if (strcmp(FCPI_NAME, " fcpim") == 0)
 		supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;
 
-	bfa_ioc_auto_recover(ioc_auto_recover);
+	bfa_auto_recover = ioc_auto_recover;
 	bfa_fcs_rport_set_del_timeout(rport_del_timeout);
 
 	error = pci_register_driver(&bfad_pci_driver);

+ 9 - 9
drivers/scsi/bfa/bfad_attr.c

@@ -25,7 +25,7 @@
 /*
  * FC transport template entry, get SCSI target port ID.
  */
-void
+static void
 bfad_im_get_starget_port_id(struct scsi_target *starget)
 {
 	struct Scsi_Host *shost;
@@ -40,7 +40,7 @@ bfad_im_get_starget_port_id(struct scsi_target *starget)
 	bfad = im_port->bfad;
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 
-	itnim = bfad_os_get_itnim(im_port, starget->id);
+	itnim = bfad_get_itnim(im_port, starget->id);
 	if (itnim)
 		fc_id = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
 
@@ -51,7 +51,7 @@ bfad_im_get_starget_port_id(struct scsi_target *starget)
 /*
  * FC transport template entry, get SCSI target nwwn.
  */
-void
+static void
 bfad_im_get_starget_node_name(struct scsi_target *starget)
 {
 	struct Scsi_Host *shost;
@@ -66,7 +66,7 @@ bfad_im_get_starget_node_name(struct scsi_target *starget)
 	bfad = im_port->bfad;
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 
-	itnim = bfad_os_get_itnim(im_port, starget->id);
+	itnim = bfad_get_itnim(im_port, starget->id);
 	if (itnim)
 		node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim);
 
@@ -77,7 +77,7 @@ bfad_im_get_starget_node_name(struct scsi_target *starget)
 /*
  * FC transport template entry, get SCSI target pwwn.
  */
-void
+static void
 bfad_im_get_starget_port_name(struct scsi_target *starget)
 {
 	struct Scsi_Host *shost;
@@ -92,7 +92,7 @@ bfad_im_get_starget_port_name(struct scsi_target *starget)
 	bfad = im_port->bfad;
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 
-	itnim = bfad_os_get_itnim(im_port, starget->id);
+	itnim = bfad_get_itnim(im_port, starget->id);
 	if (itnim)
 		port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
 
@@ -103,7 +103,7 @@ bfad_im_get_starget_port_name(struct scsi_target *starget)
 /*
  * FC transport template entry, get SCSI host port ID.
  */
-void
+static void
 bfad_im_get_host_port_id(struct Scsi_Host *shost)
 {
 	struct bfad_im_port_s *im_port =
@@ -111,7 +111,7 @@ bfad_im_get_host_port_id(struct Scsi_Host *shost)
 	struct bfad_port_s    *port = im_port->port;
 
 	fc_host_port_id(shost) =
-			bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
+			bfa_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
 }
 
 /*
@@ -487,7 +487,7 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
 	wait_for_completion(vport->comp_del);
 
 free_scsi_host:
-	bfad_os_scsi_host_free(bfad, im_port);
+	bfad_scsi_host_free(bfad, im_port);
 
 	kfree(vport);
 

+ 39 - 7
drivers/scsi/bfa/bfad_debugfs.c

@@ -90,7 +90,7 @@ bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
 	memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	rc = bfa_debug_fwtrc(&bfad->bfa,
+	rc = bfa_ioc_debug_fwtrc(&bfad->bfa.ioc,
 			fw_debug->debug_buffer,
 			&fw_debug->buffer_len);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -134,7 +134,7 @@ bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
 	memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	rc = bfa_debug_fwsave(&bfad->bfa,
+	rc = bfa_ioc_debug_fwsave(&bfad->bfa.ioc,
 			fw_debug->debug_buffer,
 			&fw_debug->buffer_len);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -208,7 +208,7 @@ bfad_debugfs_read(struct file *file, char __user *buf,
 	if (!debug || !debug->debug_buffer)
 		return 0;
 
-	return memory_read_from_buffer(buf, nbytes, pos,
+	return simple_read_from_buffer(buf, nbytes, pos,
 				debug->debug_buffer, debug->buffer_len);
 }
 
@@ -254,7 +254,7 @@ bfad_debugfs_read_regrd(struct file *file, char __user *buf,
 	if (!bfad->regdata)
 		return 0;
 
-	rc = memory_read_from_buffer(buf, nbytes, pos,
+	rc = simple_read_from_buffer(buf, nbytes, pos,
 			bfad->regdata, bfad->reglen);
 
 	if ((*pos + nbytes) >= bfad->reglen) {
@@ -279,15 +279,31 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
 	u32 *regbuf;
 	void __iomem *rb, *reg_addr;
 	unsigned long flags;
+	void *kern_buf;
 
-	rc = sscanf(buf, "%x:%x", &addr, &len);
+	kern_buf = kzalloc(nbytes, GFP_KERNEL);
+
+	if (!kern_buf) {
+		printk(KERN_INFO "bfad[%d]: Failed to allocate buffer\n",
+				bfad->inst_no);
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(kern_buf, (void  __user *)buf, nbytes)) {
+		kfree(kern_buf);
+		return -ENOMEM;
+	}
+
+	rc = sscanf(kern_buf, "%x:%x", &addr, &len);
 	if (rc < 2) {
 		printk(KERN_INFO
 			"bfad[%d]: %s failed to read user buf\n",
 			bfad->inst_no, __func__);
+		kfree(kern_buf);
 		return -EINVAL;
 	}
 
+	kfree(kern_buf);
 	kfree(bfad->regdata);
 	bfad->regdata = NULL;
 	bfad->reglen = 0;
@@ -339,14 +355,30 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
 	int addr, val, rc;
 	void __iomem *reg_addr;
 	unsigned long flags;
+	void *kern_buf;
+
+	kern_buf = kzalloc(nbytes, GFP_KERNEL);
+
+	if (!kern_buf) {
+		printk(KERN_INFO "bfad[%d]: Failed to allocate buffer\n",
+				bfad->inst_no);
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(kern_buf, (void  __user *)buf, nbytes)) {
+		kfree(kern_buf);
+		return -ENOMEM;
+	}
 
-	rc = sscanf(buf, "%x:%x", &addr, &val);
+	rc = sscanf(kern_buf, "%x:%x", &addr, &val);
 	if (rc < 2) {
 		printk(KERN_INFO
 			"bfad[%d]: %s failed to read user buf\n",
 			bfad->inst_no, __func__);
+		kfree(kern_buf);
 		return -EINVAL;
 	}
+	kfree(kern_buf);
 
 	addr &= BFA_REG_ADDRMSK(bfa); /* offset only 17 bit and word align */
 
@@ -359,7 +391,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
 		return -EINVAL;
 	}
 
-	reg_addr = (u32 *) ((u8 *) bfa_ioc_bar0(ioc) + addr);
+	reg_addr = (bfa_ioc_bar0(ioc)) + addr;
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	writel(val, reg_addr);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
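
Both regrd/regwr handlers above now bounce the user buffer through a kernel allocation before parsing it, since sscanf() cannot be handed a __user pointer directly. The accepted text format itself is simply "<hex>:<hex>"; a trivial userspace stand-in for that parsing step (the buffer contents are invented):

#include <stdio.h>

int main(void)
{
	const char kern_buf[] = "1a0:4";	/* "<offset>:<length or value>" */
	unsigned int addr, len;

	if (sscanf(kern_buf, "%x:%x", &addr, &len) < 2) {
		fprintf(stderr, "failed to parse user buffer\n");
		return 1;
	}
	printf("addr=%#x len=%#x\n", addr, len);
	return 0;
}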

+ 27 - 18
drivers/scsi/bfa/bfad_drv.h

@@ -26,7 +26,23 @@
 #ifndef __BFAD_DRV_H__
 #define __BFAD_DRV_H__
 
-#include "bfa_os_inc.h"
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_transport.h>
 
 #include "bfa_modules.h"
 #include "bfa_fcs.h"
@@ -39,7 +55,7 @@
 #ifdef BFA_DRIVER_VERSION
 #define BFAD_DRIVER_VERSION    BFA_DRIVER_VERSION
 #else
-#define BFAD_DRIVER_VERSION    "2.3.2.0"
+#define BFAD_DRIVER_VERSION    "2.3.2.3"
 #endif
 
 #define BFAD_PROTO_NAME FCPI_NAME
@@ -263,28 +279,21 @@ struct bfad_hal_comp {
  */
 #define nextLowerInt(x)                         \
 do {                                            \
-	int i;                                  \
+	int __i;                                  \
 	(*x)--;					\
-	for (i = 1; i < (sizeof(int)*8); i <<= 1) \
-		(*x) = (*x) | (*x) >> i;	\
+	for (__i = 1; __i < (sizeof(int)*8); __i <<= 1) \
+		(*x) = (*x) | (*x) >> __i;	\
 	(*x)++;					\
 	(*x) = (*x) >> 1;			\
 } while (0)
 
 
-#define list_remove_head(list, entry, type, member)		\
-do {								\
-	entry = NULL;                                           \
-	if (!list_empty(list)) {                                \
-		entry = list_entry((list)->next, type, member);	\
-		list_del_init(&entry->member);			\
-	}							\
+#define BFA_LOG(level, bfad, mask, fmt, arg...)				\
+do {									\
+	if (((mask) == 4) || (level[1] <= '4'))				\
+		dev_printk(level, &((bfad)->pcidev)->dev, fmt, ##arg);	\
 } while (0)
 
-#define list_get_first(list, type, member)				\
-((list_empty(list)) ? NULL :						\
-	list_entry((list)->next, type, member))
-
 bfa_status_t	bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
 				  struct bfa_lport_cfg_s *port_cfg,
 				  struct device *dev);
@@ -316,8 +325,8 @@ void		bfad_debugfs_exit(struct bfad_port_s *port);
 
 void bfad_pci_remove(struct pci_dev *pdev);
 int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid);
-void bfad_os_rport_online_wait(struct bfad_s *bfad);
-int bfad_os_get_linkup_delay(struct bfad_s *bfad);
+void bfad_rport_online_wait(struct bfad_s *bfad);
+int bfad_get_linkup_delay(struct bfad_s *bfad);
 int bfad_install_msix_handler(struct bfad_s *bfad);
 
 extern struct idr bfad_im_port_index;
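
The nextLowerInt() macro carried over above rounds its argument down to the largest power of two strictly below the original value: subtract one, smear the top set bit into every lower bit position, add one to reach the next power of two, then halve. A direct userspace copy of the bit trick:

#include <stdio.h>

static void next_lower_pow2(int *x)
{
	int i;

	(*x)--;
	for (i = 1; i < (int)(sizeof(int) * 8); i <<= 1)
		*x |= *x >> i;
	(*x)++;
	*x >>= 1;
}

int main(void)
{
	int vals[] = { 100, 64, 65, 3 };
	unsigned int n;

	for (n = 0; n < sizeof(vals) / sizeof(vals[0]); n++) {
		int v = vals[n];

		next_lower_pow2(&v);
		printf("%d -> %d\n", vals[n], v);	/* 64, 32, 64, 2 */
	}
	return 0;
}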

+ 30 - 29
drivers/scsi/bfa/bfad_im.c

@@ -21,7 +21,6 @@
 
 #include "bfad_drv.h"
 #include "bfad_im.h"
-#include "bfa_cb_ioim.h"
 #include "bfa_fcs.h"
 
 BFA_TRC_FILE(LDRV, IM);
@@ -93,10 +92,10 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
 		if (!cmnd->result && itnim &&
 			 (bfa_lun_queue_depth > cmnd->device->queue_depth)) {
 			/* Queue depth adjustment for good status completion */
-			bfad_os_ramp_up_qdepth(itnim, cmnd->device);
+			bfad_ramp_up_qdepth(itnim, cmnd->device);
 		} else if (cmnd->result == SAM_STAT_TASK_SET_FULL && itnim) {
 			/* qfull handling */
-			bfad_os_handle_qfull(itnim, cmnd->device);
+			bfad_handle_qfull(itnim, cmnd->device);
 		}
 	}
 
@@ -124,7 +123,7 @@ bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
 		if (itnim_data) {
 			itnim = itnim_data->itnim;
 			if (itnim)
-				bfad_os_ramp_up_qdepth(itnim, cmnd->device);
+				bfad_ramp_up_qdepth(itnim, cmnd->device);
 		}
 	}
 
@@ -183,7 +182,7 @@ bfad_im_info(struct Scsi_Host *shost)
 	bfa_get_adapter_model(bfa, model);
 
 	memset(bfa_buf, 0, sizeof(bfa_buf));
-	if (ioc->ctdev)
+	if (ioc->ctdev && !ioc->fcmode)
 		snprintf(bfa_buf, sizeof(bfa_buf),
 		"Brocade FCOE Adapter, " "model: %s hwpath: %s driver: %s",
 		 model, bfad->pci_name, BFAD_DRIVER_VERSION);
@@ -258,6 +257,7 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
 	struct bfa_tskim_s *tskim;
 	struct bfa_itnim_s *bfa_itnim;
 	bfa_status_t    rc = BFA_STATUS_OK;
+	struct scsi_lun scsilun;
 
 	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
 	if (!tskim) {
@@ -274,7 +274,8 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
 	cmnd->host_scribble = NULL;
 	cmnd->SCp.Status = 0;
 	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
-	bfa_tskim_start(tskim, bfa_itnim, (lun_t)0,
+	memset(&scsilun, 0, sizeof(scsilun));
+	bfa_tskim_start(tskim, bfa_itnim, scsilun,
 			    FCP_TM_TARGET_RESET, BFAD_TARGET_RESET_TMO);
 out:
 	return rc;
@@ -301,6 +302,7 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
 	int             rc = SUCCESS;
 	unsigned long   flags;
 	enum bfi_tskim_status task_status;
+	struct scsi_lun scsilun;
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	itnim = itnim_data->itnim;
@@ -327,8 +329,8 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
 	cmnd->SCp.ptr = (char *)&wq;
 	cmnd->SCp.Status = 0;
 	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
-	bfa_tskim_start(tskim, bfa_itnim,
-			    bfad_int_to_lun(cmnd->device->lun),
+	int_to_scsilun(cmnd->device->lun, &scsilun);
+	bfa_tskim_start(tskim, bfa_itnim, scsilun,
 			    FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
@@ -364,7 +366,7 @@ bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	for (i = 0; i < MAX_FCP_TARGET; i++) {
-		itnim = bfad_os_get_itnim(im_port, i);
+		itnim = bfad_get_itnim(im_port, i);
 		if (itnim) {
 			cmnd->SCp.ptr = (char *)&wq;
 			rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
@@ -447,7 +449,7 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
 	struct bfad_im_s	*im = itnim_drv->im;
 
 	/* online to free state transition should not happen */
-	bfa_assert(itnim_drv->state != ITNIM_STATE_ONLINE);
+	WARN_ON(itnim_drv->state == ITNIM_STATE_ONLINE);
 
 	itnim_drv->queue_work = 1;
 	/* offline request is not yet done, use the same request to free */
@@ -545,7 +547,7 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
 
 	mutex_unlock(&bfad_mutex);
 
-	im_port->shost = bfad_os_scsi_host_alloc(im_port, bfad);
+	im_port->shost = bfad_scsi_host_alloc(im_port, bfad);
 	if (!im_port->shost) {
 		error = 1;
 		goto out_free_idr;
@@ -571,7 +573,7 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
 	}
 
 	/* setup host fixed attribute if the lk supports */
-	bfad_os_fc_host_init(im_port);
+	bfad_fc_host_init(im_port);
 
 	return 0;
 
@@ -662,7 +664,7 @@ bfad_im_port_clean(struct bfad_im_port_s *im_port)
 	}
 
 	/* the itnim_mapped_list must be empty at this time */
-	bfa_assert(list_empty(&im_port->itnim_mapped_list));
+	WARN_ON(!list_empty(&im_port->itnim_mapped_list));
 
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 }
@@ -682,7 +684,7 @@ bfad_im_probe(struct bfad_s *bfad)
 	bfad->im = im;
 	im->bfad = bfad;
 
-	if (bfad_os_thread_workq(bfad) != BFA_STATUS_OK) {
+	if (bfad_thread_workq(bfad) != BFA_STATUS_OK) {
 		kfree(im);
 		rc = BFA_STATUS_FAILED;
 	}
@@ -695,14 +697,14 @@ void
 bfad_im_probe_undo(struct bfad_s *bfad)
 {
 	if (bfad->im) {
-		bfad_os_destroy_workq(bfad->im);
+		bfad_destroy_workq(bfad->im);
 		kfree(bfad->im);
 		bfad->im = NULL;
 	}
 }
 
 struct Scsi_Host *
-bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
+bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
 {
 	struct scsi_host_template *sht;
 
@@ -717,7 +719,7 @@ bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
 }
 
 void
-bfad_os_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
+bfad_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
 {
 	if (!(im_port->flags & BFAD_PORT_DELETE))
 		flush_workqueue(bfad->im->drv_workq);
@@ -727,7 +729,7 @@ bfad_os_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
 }
 
 void
-bfad_os_destroy_workq(struct bfad_im_s *im)
+bfad_destroy_workq(struct bfad_im_s *im)
 {
 	if (im && im->drv_workq) {
 		flush_workqueue(im->drv_workq);
@@ -737,7 +739,7 @@ bfad_os_destroy_workq(struct bfad_im_s *im)
 }
 
 bfa_status_t
-bfad_os_thread_workq(struct bfad_s *bfad)
+bfad_thread_workq(struct bfad_s *bfad)
 {
 	struct bfad_im_s      *im = bfad->im;
 
@@ -841,7 +843,7 @@ bfad_im_module_exit(void)
 }
 
 void
-bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
+bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
 {
 	struct scsi_device *tmp_sdev;
 
@@ -869,7 +871,7 @@ bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
 }
 
 void
-bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
+bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
 {
 	struct scsi_device *tmp_sdev;
 
@@ -883,7 +885,7 @@ bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
 }
 
 struct bfad_itnim_s *
-bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id)
+bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
 {
 	struct bfad_itnim_s   *itnim = NULL;
 
@@ -922,7 +924,7 @@ bfad_im_supported_speeds(struct bfa_s *bfa)
 	if (!ioc_attr)
 		return 0;
 
-	bfa_get_attr(bfa, ioc_attr);
+	bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
 	if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
 		if (ioc_attr->adapter_attr.is_mezz) {
 			supported_speed |= FC_PORTSPEED_8GBIT |
@@ -944,7 +946,7 @@ bfad_im_supported_speeds(struct bfa_s *bfa)
 }
 
 void
-bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
+bfad_fc_host_init(struct bfad_im_port_s *im_port)
 {
 	struct Scsi_Host *host = im_port->shost;
 	struct bfad_s         *bfad = im_port->bfad;
@@ -988,7 +990,7 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
 	rport_ids.port_name =
 		cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
 	rport_ids.port_id =
-		bfa_os_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
+		bfa_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
 	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
 
 	itnim->fc_rport = fc_rport =
@@ -1109,7 +1111,7 @@ bfad_im_itnim_work_handler(struct work_struct *work)
 		kfree(itnim);
 		break;
 	default:
-		bfa_assert(0);
+		WARN_ON(1);
 		break;
 	}
 
@@ -1172,7 +1174,6 @@ bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd
 	}
 
 	cmnd->host_scribble = (char *)hal_io;
-	bfa_trc_fp(bfad, hal_io->iotag);
 	bfa_ioim_start(hal_io);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
@@ -1190,7 +1191,7 @@ out_fail_cmd:
 static DEF_SCSI_QCMD(bfad_im_queuecommand)
 
 void
-bfad_os_rport_online_wait(struct bfad_s *bfad)
+bfad_rport_online_wait(struct bfad_s *bfad)
 {
 	int i;
 	int rport_delay = 10;
@@ -1218,7 +1219,7 @@ bfad_os_rport_online_wait(struct bfad_s *bfad)
 }
 
 int
-bfad_os_get_linkup_delay(struct bfad_s *bfad)
+bfad_get_linkup_delay(struct bfad_s *bfad)
 {
 	u8		nwwns = 0;
 	wwn_t		wwns[BFA_PREBOOT_BOOTLUN_MAX];

+ 8 - 8
drivers/scsi/bfa/bfad_im.h

@@ -117,17 +117,17 @@ struct bfad_im_s {
 	char            drv_workq_name[KOBJ_NAME_LEN];
 };
 
-struct Scsi_Host *bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port,
+struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
 				struct bfad_s *);
-bfa_status_t bfad_os_thread_workq(struct bfad_s *bfad);
-void bfad_os_destroy_workq(struct bfad_im_s *im);
-void bfad_os_fc_host_init(struct bfad_im_port_s *im_port);
-void bfad_os_scsi_host_free(struct bfad_s *bfad,
+bfa_status_t bfad_thread_workq(struct bfad_s *bfad);
+void bfad_destroy_workq(struct bfad_im_s *im);
+void bfad_fc_host_init(struct bfad_im_port_s *im_port);
+void bfad_scsi_host_free(struct bfad_s *bfad,
 				 struct bfad_im_port_s *im_port);
-void bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim,
+void bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim,
 				 struct scsi_device *sdev);
-void bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev);
-struct bfad_itnim_s *bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id);
+void bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev);
+struct bfad_itnim_s *bfad_get_itnim(struct bfad_im_port_s *im_port, int id);
 
 extern struct scsi_host_template bfad_im_scsi_host_template;
 extern struct scsi_host_template bfad_im_vport_template;

+ 4 - 4
drivers/scsi/bfa/bfi.h

@@ -95,8 +95,8 @@ enum {
  */
 union bfi_addr_u {
 	struct {
-		u32	addr_lo;
-		u32	addr_hi;
+		__be32	addr_lo;
+		__be32	addr_hi;
 	} a32;
 };
 
@@ -104,7 +104,7 @@ union bfi_addr_u {
  * Scatter Gather Element
  */
 struct bfi_sge_s {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 	u32	flags:2,
 			rsvd:2,
 			sg_len:28;
@@ -399,7 +399,7 @@ union bfi_ioc_i2h_msg_u {
  */
 struct bfi_pbc_blun_s {
 	wwn_t		tgt_pwwn;
-	lun_t		tgt_lun;
+	struct scsi_lun	tgt_lun;
 };
 
 /*

+ 1 - 0
drivers/scsi/bfa/bfi_cbreg.h

@@ -208,6 +208,7 @@
 #define BFA_IOC1_HBEAT_REG               HOST_SEM2_INFO_REG
 #define BFA_IOC1_STATE_REG               HOST_SEM3_INFO_REG
 #define BFA_FW_USE_COUNT                 HOST_SEM4_INFO_REG
+#define BFA_IOC_FAIL_SYNC		 HOST_SEM5_INFO_REG
 
 #define CPE_Q_DEPTH(__n) \
 	(CPE_Q0_DEPTH + (__n) * (CPE_Q1_DEPTH - CPE_Q0_DEPTH))

+ 25 - 16
drivers/scsi/bfa/bfi_ctreg.h

@@ -522,6 +522,7 @@ enum {
 #define BFA_IOC1_HBEAT_REG		HOST_SEM2_INFO_REG
 #define BFA_IOC1_STATE_REG		HOST_SEM3_INFO_REG
 #define BFA_FW_USE_COUNT		 HOST_SEM4_INFO_REG
+#define BFA_IOC_FAIL_SYNC		HOST_SEM5_INFO_REG
 
 #define CPE_DEPTH_Q(__n) \
 	(CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
@@ -539,22 +540,30 @@ enum {
 	(RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
 #define RME_CI_PTR_Q(__n) \
 	(RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
-#define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \
-	* (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
-#define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \
-	* (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
-#define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \
-	* (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
-#define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \
-	* (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
-#define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \
-	* (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
-#define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \
-	* (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
-#define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \
-	* (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
-#define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \
-	* (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
+#define HQM_QSET_RXQ_DRBL_P0(__n) \
+	(HQM_QSET0_RXQ_DRBL_P0 + (__n) *	\
+	(HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
+#define HQM_QSET_TXQ_DRBL_P0(__n) \
+	(HQM_QSET0_TXQ_DRBL_P0 + (__n) *	\
+	(HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
+#define HQM_QSET_IB_DRBL_1_P0(__n) \
+	(HQM_QSET0_IB_DRBL_1_P0 + (__n) *	\
+	(HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
+#define HQM_QSET_IB_DRBL_2_P0(__n) \
+	(HQM_QSET0_IB_DRBL_2_P0 + (__n) *	\
+	(HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
+#define HQM_QSET_RXQ_DRBL_P1(__n) \
+	(HQM_QSET0_RXQ_DRBL_P1 + (__n) *	\
+	(HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
+#define HQM_QSET_TXQ_DRBL_P1(__n) \
+	(HQM_QSET0_TXQ_DRBL_P1 + (__n) *	\
+	(HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
+#define HQM_QSET_IB_DRBL_1_P1(__n) \
+	(HQM_QSET0_IB_DRBL_1_P1 + (__n) *	\
+	(HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
+#define HQM_QSET_IB_DRBL_2_P1(__n) \
+	(HQM_QSET0_IB_DRBL_2_P1 + (__n) *	\
+	(HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
 
 #define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
 #define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
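
The doorbell macros reflowed above all follow the same stride-addressing pattern: REG(n) = REG0 + n * (REG1 - REG0). A tiny illustration of the arithmetic with invented base addresses (the real HQM_QSET* offsets are defined elsewhere in bfi_ctreg.h and are not shown in this diff):

#include <stdio.h>

#define QSET0_RXQ_DRBL	0x00038000u	/* hypothetical */
#define QSET1_RXQ_DRBL	0x00038080u	/* hypothetical */

#define QSET_RXQ_DRBL(__n) \
	(QSET0_RXQ_DRBL + (__n) * (QSET1_RXQ_DRBL - QSET0_RXQ_DRBL))

int main(void)
{
	unsigned int n;

	for (n = 0; n < 4; n++)
		printf("qset %u rxq doorbell at %#x\n", n, QSET_RXQ_DRBL(n));
	return 0;
}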

+ 38 - 28
drivers/scsi/bfa/bfi_ms.h

@@ -47,10 +47,10 @@ struct bfi_iocfc_cfg_s {
 	 */
 	union bfi_addr_u  req_cq_ba[BFI_IOC_MAX_CQS];
 	union bfi_addr_u  req_shadow_ci[BFI_IOC_MAX_CQS];
-	u16    req_cq_elems[BFI_IOC_MAX_CQS];
+	__be16    req_cq_elems[BFI_IOC_MAX_CQS];
 	union bfi_addr_u  rsp_cq_ba[BFI_IOC_MAX_CQS];
 	union bfi_addr_u  rsp_shadow_pi[BFI_IOC_MAX_CQS];
-	u16    rsp_cq_elems[BFI_IOC_MAX_CQS];
+	__be16    rsp_cq_elems[BFI_IOC_MAX_CQS];
 
 	union bfi_addr_u  stats_addr;	/*  DMA-able address for stats	  */
 	union bfi_addr_u  cfgrsp_addr;	/*  config response dma address  */
@@ -102,8 +102,8 @@ struct bfi_iocfc_set_intr_req_s {
 	struct bfi_mhdr_s mh;		/*  common msg header		*/
 	u8		coalesce;	/*  enable intr coalescing	*/
 	u8		rsvd[3];
-	u16	delay;		/*  delay timer 0..1125us	*/
-	u16	latency;	/*  latency timer 0..225us	*/
+	__be16	delay;		/*  delay timer 0..1125us	*/
+	__be16	latency;	/*  latency timer 0..225us	*/
 };
 
 
@@ -188,7 +188,8 @@ struct bfi_fcport_rsp_s {
 	struct bfi_mhdr_s  mh;		/*  common msg header		    */
 	u8		   status;	/*  port enable status		    */
 	u8		   rsvd[3];
-	u32	   msgtag;	/*  msgtag for reply		    */
+	struct	bfa_port_cfg_s port_cfg;/* port configuration	*/
+	u32	msgtag;			/* msgtag for reply	*/
 };
 
 /*
@@ -202,7 +203,8 @@ struct bfi_fcport_enable_req_s {
 	struct bfa_port_cfg_s port_cfg; /*  port configuration	    */
 	union bfi_addr_u   stats_dma_addr; /*  DMA address for stats	    */
 	u32	   msgtag;	/*  msgtag for reply		    */
-	u32	   rsvd2;
+	u8	use_flash_cfg;	/* get port cfg from flash */
+	u8	rsvd2[3];
 };
 
 /*
@@ -210,7 +212,7 @@ struct bfi_fcport_enable_req_s {
  */
 struct bfi_fcport_set_svc_params_req_s {
 	struct bfi_mhdr_s  mh;		/*  msg header */
-	u16	   tx_bbcredit;	/*  Tx credits */
+	__be16	   tx_bbcredit;	/*  Tx credits */
 	u16	   rsvd;
 };
 
@@ -231,7 +233,7 @@ struct bfi_fcport_trunk_link_s {
 	u8			state;		/* bfa_trunk_link_state_t */
 	u8			speed;		/* bfa_port_speed_t */
 	u8			rsvd;
-	u32		deskew;
+	__be32		deskew;
 };
 
 #define BFI_FCPORT_MAX_LINKS	2
@@ -284,17 +286,17 @@ enum bfi_fcxp_i2h {
  */
 struct bfi_fcxp_send_req_s {
 	struct bfi_mhdr_s  mh;		/*  Common msg header		    */
-	u16	fcxp_tag;	/*  driver request tag		    */
-	u16	max_frmsz;	/*  max send frame size	    */
-	u16	vf_id;		/*  vsan tag if applicable	    */
+	__be16	fcxp_tag;	/*  driver request tag		    */
+	__be16	max_frmsz;	/*  max send frame size	    */
+	__be16	vf_id;		/*  vsan tag if applicable	    */
 	u16	rport_fw_hndl;	/*  FW Handle for the remote port  */
 	u8	 class;		/*  FC class used for req/rsp	    */
 	u8	 rsp_timeout;	/*  timeout in secs, 0-no response */
 	u8	 cts;		/*  continue sequence		    */
 	u8	 lp_tag;	/*  lport tag			    */
 	struct fchs_s	fchs;	/*  request FC header structure    */
-	u32	req_len;	/*  request payload length	    */
-	u32	rsp_maxlen;	/*  max response length expected   */
+	__be32	req_len;	/*  request payload length	    */
+	__be32	rsp_maxlen;	/*  max response length expected   */
 	struct bfi_sge_s   req_sge[BFA_FCXP_MAX_SGES];	/*  request buf    */
 	struct bfi_sge_s   rsp_sge[BFA_FCXP_MAX_SGES];	/*  response buf   */
 };
@@ -304,11 +306,11 @@ struct bfi_fcxp_send_req_s {
  */
 struct bfi_fcxp_send_rsp_s {
 	struct bfi_mhdr_s  mh;		/*  Common msg header		    */
-	u16	fcxp_tag;	/*  send request tag		    */
+	__be16	fcxp_tag;	/*  send request tag		    */
 	u8	 req_status;	/*  request status		    */
 	u8	 rsvd;
-	u32	rsp_len;	/*  actual response length	    */
-	u32	residue_len;	/*  residual response length	    */
+	__be32	rsp_len;	/*  actual response length	    */
+	__be32	residue_len;	/*  residual response length	    */
 	struct fchs_s	fchs;	/*  response FC header structure   */
 };
 
@@ -325,7 +327,7 @@ enum bfi_uf_i2h {
 struct bfi_uf_buf_post_s {
 	struct bfi_mhdr_s  mh;		/*  Common msg header		*/
 	u16	buf_tag;	/*  buffer tag			*/
-	u16	buf_len;	/*  total buffer length	*/
+	__be16	buf_len;	/*  total buffer length	*/
 	struct bfi_sge_s   sge[BFA_UF_MAX_SGES]; /*  buffer DMA SGEs	*/
 };
 
@@ -340,6 +342,7 @@ struct bfi_uf_frm_rcvd_s {
 enum bfi_lps_h2i_msgs {
 	BFI_LPS_H2I_LOGIN_REQ	= 1,
 	BFI_LPS_H2I_LOGOUT_REQ	= 2,
+	BFI_LPS_H2I_N2N_PID_REQ = 3,
 };
 
 enum bfi_lps_i2h_msgs {
@@ -352,7 +355,7 @@ struct bfi_lps_login_req_s {
 	struct bfi_mhdr_s  mh;		/*  common msg header		*/
 	u8		lp_tag;
 	u8		alpa;
-	u16	pdu_size;
+	__be16		pdu_size;
 	wwn_t		pwwn;
 	wwn_t		nwwn;
 	u8		fdisc;
@@ -368,7 +371,7 @@ struct bfi_lps_login_rsp_s {
 	u8		lsrjt_expl;
 	wwn_t		port_name;
 	wwn_t		node_name;
-	u16	bb_credit;
+	__be16		bb_credit;
 	u8		f_port;
 	u8		npiv_en;
 	u32	lp_pid:24;
@@ -399,10 +402,17 @@ struct bfi_lps_cvl_event_s {
 	u8		rsvd[3];
 };
 
+struct bfi_lps_n2n_pid_req_s {
+	struct bfi_mhdr_s	mh;	/*  common msg header		*/
+	u8	lp_tag;
+	u32	lp_pid:24;
+};
+
 union bfi_lps_h2i_msg_u {
 	struct bfi_mhdr_s		*msg;
 	struct bfi_lps_login_req_s	*login_req;
 	struct bfi_lps_logout_req_s	*logout_req;
+	struct bfi_lps_n2n_pid_req_s	*n2n_pid_req;
 };
 
 union bfi_lps_i2h_msg_u {
@@ -427,7 +437,7 @@ enum bfi_rport_i2h_msgs {
 struct bfi_rport_create_req_s {
 	struct bfi_mhdr_s  mh;		/*  common msg header		*/
 	u16	bfa_handle;	/*  host rport handle		*/
-	u16	max_frmsz;	/*  max rcv pdu size		*/
+	__be16	max_frmsz;	/*  max rcv pdu size		*/
 	u32	pid:24,	/*  remote port ID		*/
 		lp_tag:8;	/*  local port tag		*/
 	u32	local_pid:24,	/*  local port ID		*/
@@ -583,7 +593,7 @@ struct bfi_ioim_dif_s {
  */
 struct bfi_ioim_req_s {
 	struct bfi_mhdr_s  mh;		/*  Common msg header		 */
-	u16	io_tag;		/*  I/O tag			 */
+	__be16	io_tag;		/*  I/O tag			 */
 	u16	rport_hdl;	/*  itnim/rport firmware handle */
 	struct fcp_cmnd_s	cmnd;	/*  IO request info	*/
 
@@ -689,7 +699,7 @@ enum bfi_ioim_status {
  */
 struct bfi_ioim_rsp_s {
 	struct bfi_mhdr_s	mh;	/*  common msg header		*/
-	u16	io_tag;		/*  completed IO tag		 */
+	__be16	io_tag;		/*  completed IO tag		 */
 	u16	bfa_rport_hndl;	/*  releated rport handle	 */
 	u8	io_status;	/*  IO completion status	 */
 	u8	reuse_io_tag;	/*  IO tag can be reused	*/
@@ -698,13 +708,13 @@ struct bfi_ioim_rsp_s {
 	u8		sns_len;	/*  scsi sense length		 */
 	u8		resid_flags;	/*  IO residue flags		 */
 	u8		rsvd_a;
-	u32	residue;	/*  IO residual length in bytes */
+	__be32	residue;	/*  IO residual length in bytes */
 	u32	rsvd_b[3];
 };
 
 struct bfi_ioim_abort_req_s {
 	struct bfi_mhdr_s  mh;	/*  Common msg header  */
-	u16	io_tag;	/*  I/O tag	*/
+	__be16	io_tag;	/*  I/O tag	*/
 	u16	abort_tag;	/*  unique request tag */
 };
 
@@ -723,9 +733,9 @@ enum bfi_tskim_i2h {
 
 struct bfi_tskim_req_s {
 	struct bfi_mhdr_s  mh;	/*  Common msg header	*/
-	u16	tsk_tag;	/*  task management tag	*/
+	__be16	tsk_tag;	/*  task management tag	*/
 	u16	itn_fhdl;	/*  itn firmware handle	*/
-	lun_t	lun;	/*  LU number	*/
+	struct 	scsi_lun lun;	/*  LU number	*/
 	u8	tm_flags;	/*  see enum fcp_tm_cmnd	*/
 	u8	t_secs;	/*  Timeout value in seconds	*/
 	u8	rsvd[2];
@@ -733,7 +743,7 @@ struct bfi_tskim_req_s {
 
 struct bfi_tskim_abortreq_s {
 	struct bfi_mhdr_s  mh;	/*  Common msg header	*/
-	u16	tsk_tag;	/*  task management tag	*/
+	__be16	tsk_tag;	/*  task management tag	*/
 	u16	rsvd;
 };
 
@@ -755,7 +765,7 @@ enum bfi_tskim_status {
 
 struct bfi_tskim_rsp_s {
 	struct bfi_mhdr_s  mh;		/*  Common msg header		 */
-	u16	tsk_tag;	/*  task mgmt cmnd tag		 */
+	__be16	tsk_tag;	/*  task mgmt cmnd tag		 */
 	u8	tsk_status;	/*  @ref bfi_tskim_status */
 	u8	rsvd;
 };
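
The bfi_ms.h hunks above are all of one kind: fields that cross the host/firmware boundary change from plain u16/u32 to __be16/__be32 so the byte order is part of the type. As a rough sketch of the pattern those annotations enforce (the struct and function names below are invented for illustration, not taken from the bfa code), the host side fills such a structure with cpu_to_be16()/cpu_to_be32() and reads it back with the be*_to_cpu() counterparts:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* hypothetical wire message: multi-byte fields are big-endian */
	struct example_fw_req {
		u8	opcode;
		u8	rsvd;
		__be16	io_tag;		/* firmware expects network byte order */
		__be32	xfer_len;
	};

	static void example_fill_req(struct example_fw_req *req,
				     u16 io_tag, u32 xfer_len)
	{
		req->opcode   = 0x01;
		req->rsvd     = 0;
		/* assigning a plain u16 here would be an endianness bug */
		req->io_tag   = cpu_to_be16(io_tag);
		req->xfer_len = cpu_to_be32(xfer_len);
	}

A sparse build (make C=1) then warns whenever a native-endian value is assigned to one of the __be fields without a conversion, which is the practical payoff of this kind of annotation-only change.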

+ 2 - 1
drivers/scsi/bnx2i/57xx_iscsi_constants.h

@@ -1,12 +1,13 @@
 /* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
  *
- * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2006 - 2010 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
  * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
  */
 #ifndef __57XX_ISCSI_CONSTANTS_H_
 #define __57XX_ISCSI_CONSTANTS_H_

+ 2 - 1
drivers/scsi/bnx2i/57xx_iscsi_hsi.h

@@ -1,12 +1,13 @@
 /* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
  *
- * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2006 - 2010 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
  * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
  */
 #ifndef __57XX_ISCSI_HSI_LINUX_LE__
 #define __57XX_ISCSI_HSI_LINUX_LE__

+ 7 - 8
drivers/scsi/bnx2i/bnx2i.h

@@ -1,6 +1,6 @@
 /* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2006 - 2010 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
@@ -9,6 +9,7 @@
  * the Free Software Foundation.
  *
  * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
  */
 
 #ifndef _BNX2I_H_
@@ -649,6 +650,7 @@ enum {
 	EP_STATE_OFLD_FAILED            = 0x8000000,
 	EP_STATE_CONNECT_FAILED         = 0x10000000,
 	EP_STATE_DISCONN_TIMEDOUT       = 0x20000000,
+	EP_STATE_OFLD_FAILED_CID_BUSY   = 0x80000000,
 };
 
 /**
@@ -717,14 +719,11 @@ extern struct device_attribute *bnx2i_dev_attributes[];
  * Function Prototypes
  */
 extern void bnx2i_identify_device(struct bnx2i_hba *hba);
-extern void bnx2i_register_device(struct bnx2i_hba *hba);
 
 extern void bnx2i_ulp_init(struct cnic_dev *dev);
 extern void bnx2i_ulp_exit(struct cnic_dev *dev);
 extern void bnx2i_start(void *handle);
 extern void bnx2i_stop(void *handle);
-extern void bnx2i_reg_dev_all(void);
-extern void bnx2i_unreg_dev_all(void);
 extern struct bnx2i_hba *get_adapter_list_head(void);
 
 struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
@@ -761,11 +760,11 @@ extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn,
 				   struct iscsi_task *mtask);
 extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba,
 				       struct bnx2i_cmd *cmd);
-extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba,
-				     struct bnx2i_endpoint *ep);
-extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn);
-extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba,
+extern int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba,
 				    struct bnx2i_endpoint *ep);
+extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn);
+extern int bnx2i_send_conn_destroy(struct bnx2i_hba *hba,
+				   struct bnx2i_endpoint *ep);
 
 extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba,
 			       struct bnx2i_endpoint *ep);

+ 69 - 27
drivers/scsi/bnx2i/bnx2i_hwi.c

@@ -1,6 +1,6 @@
 /* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2006 - 2010 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
@@ -9,6 +9,7 @@
  * the Free Software Foundation.
  *
  * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
  */
 
 #include <linux/gfp.h>
@@ -385,6 +386,7 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
 	struct bnx2i_cmd *bnx2i_cmd;
 	struct bnx2i_tmf_request *tmfabort_wqe;
 	u32 dword;
+	u32 scsi_lun[2];
 
 	bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
 	tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
@@ -426,7 +428,10 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
 	default:
 		tmfabort_wqe->ref_itt = RESERVED_ITT;
 	}
-	memcpy(tmfabort_wqe->lun, tmfabort_hdr->lun, sizeof(struct scsi_lun));
+	memcpy(scsi_lun, tmfabort_hdr->lun, sizeof(struct scsi_lun));
+	tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]);
+	tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]);
+
 	tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
 
 	tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
@@ -697,10 +702,11 @@ void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
  * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE to initiate
  * 	iscsi connection context clean-up process
  */
-void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+int bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
 {
 	struct kwqe *kwqe_arr[2];
 	struct iscsi_kwqe_conn_destroy conn_cleanup;
+	int rc = -EINVAL;
 
 	memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy));
 
@@ -717,7 +723,9 @@ void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
 
 	kwqe_arr[0] = (struct kwqe *) &conn_cleanup;
 	if (hba->cnic && hba->cnic->submit_kwqes)
-		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
+		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
+
+	return rc;
 }
 
 
@@ -728,8 +736,8 @@ void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
  *
  * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
  */
-static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
-					  struct bnx2i_endpoint *ep)
+static int bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
+					 struct bnx2i_endpoint *ep)
 {
 	struct kwqe *kwqe_arr[2];
 	struct iscsi_kwqe_conn_offload1 ofld_req1;
@@ -737,6 +745,7 @@ static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
 	dma_addr_t dma_addr;
 	int num_kwqes = 2;
 	u32 *ptbl;
+	int rc = -EINVAL;
 
 	ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
 	ofld_req1.hdr.flags =
@@ -774,7 +783,9 @@ static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
 	ofld_req2.num_additional_wqes = 0;
 
 	if (hba->cnic && hba->cnic->submit_kwqes)
-		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+	return rc;
 }
 
 
@@ -785,8 +796,8 @@ static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
  *
  * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
  */
-static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
-					   struct bnx2i_endpoint *ep)
+static int bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
+					  struct bnx2i_endpoint *ep)
 {
 	struct kwqe *kwqe_arr[5];
 	struct iscsi_kwqe_conn_offload1 ofld_req1;
@@ -795,6 +806,7 @@ static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
 	dma_addr_t dma_addr;
 	int num_kwqes = 2;
 	u32 *ptbl;
+	int rc = -EINVAL;
 
 	ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
 	ofld_req1.hdr.flags =
@@ -840,7 +852,9 @@ static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
 	num_kwqes += 1;
 
 	if (hba->cnic && hba->cnic->submit_kwqes)
-		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+	return rc;
 }
 
 /**
@@ -851,12 +865,16 @@ static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
  *
  * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
  */
-void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
 {
+	int rc;
+
 	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
-		bnx2i_5771x_send_conn_ofld_req(hba, ep);
+		rc = bnx2i_5771x_send_conn_ofld_req(hba, ep);
 	else
-		bnx2i_570x_send_conn_ofld_req(hba, ep);
+		rc = bnx2i_570x_send_conn_ofld_req(hba, ep);
+
+	return rc;
 }
 
 
@@ -1513,7 +1531,7 @@ static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
 	task = iscsi_itt_to_task(conn,
 				 nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
 	if (task)
-		iscsi_put_task(task);
+		__iscsi_put_task(task);
 	spin_unlock(&session->lock);
 }
 
@@ -1549,11 +1567,9 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
 	struct iscsi_task *task;
 	struct bnx2i_nop_in_msg *nop_in;
 	struct iscsi_nopin *hdr;
-	u32 itt;
 	int tgt_async_nop = 0;
 
 	nop_in = (struct bnx2i_nop_in_msg *)cqe;
-	itt = nop_in->itt & ISCSI_NOP_IN_MSG_INDEX;
 
 	spin_lock(&session->lock);
 	hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
@@ -1563,7 +1579,7 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
 	hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn);
 	hdr->ttt = cpu_to_be32(nop_in->ttt);
 
-	if (itt == (u16) RESERVED_ITT) {
+	if (nop_in->itt == (u16) RESERVED_ITT) {
 		bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
 		hdr->itt = RESERVED_ITT;
 		tgt_async_nop = 1;
@@ -1571,7 +1587,8 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
 	}
 
 	/* this is a response to one of our nop-outs */
-	task = iscsi_itt_to_task(conn, itt);
+	task = iscsi_itt_to_task(conn,
+			 (itt_t) (nop_in->itt & ISCSI_NOP_IN_MSG_INDEX));
 	if (task) {
 		hdr->flags = ISCSI_FLAG_CMD_FINAL;
 		hdr->itt = task->hdr->itt;
@@ -1721,9 +1738,18 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
 		if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
 			break;
 
-		if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx)))
+		if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) {
+			if (nopin->op_code == ISCSI_OP_NOOP_IN &&
+			    nopin->itt == (u16) RESERVED_ITT) {
+				printk(KERN_ALERT "bnx2i: Unsolicited "
+					"NOP-In detected for suspended "
+					"connection dev=%s!\n",
+					bnx2i_conn->hba->netdev->name);
+				bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+				goto cqe_out;
+			}
 			break;
-
+		}
 		tgt_async_msg = 0;
 
 		switch (nopin->op_code) {
@@ -1770,10 +1796,9 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
 			printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
 					  nopin->op_code);
 		}
-
 		if (!tgt_async_msg)
 			bnx2i_conn->ep->num_active_cmds--;
-
+cqe_out:
 		/* clear out in production version only, till beta keep opcode
 		 * field intact, will be helpful in debugging (context dump)
 		 * nopin->op_code = 0;
@@ -2154,11 +2179,24 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
 	}
 
 	if (ofld_kcqe->completion_status) {
+		ep->state = EP_STATE_OFLD_FAILED;
 		if (ofld_kcqe->completion_status ==
 		    ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
-			printk(KERN_ALERT "bnx2i: unable to allocate"
-					  " iSCSI context resources\n");
-		ep->state = EP_STATE_OFLD_FAILED;
+			printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - unable "
+				"to allocate iSCSI context resources\n",
+				hba->netdev->name);
+		else if (ofld_kcqe->completion_status ==
+			 ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE)
+			printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid "
+				"opcode\n", hba->netdev->name);
+		else if (ofld_kcqe->completion_status ==
+			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY)
+			/* error status code valid only for 5771x chipset */
+			ep->state = EP_STATE_OFLD_FAILED_CID_BUSY;
+		else
+			printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid "
+				"error code %d\n", hba->netdev->name,
+				ofld_kcqe->completion_status);
 	} else {
 		ep->state = EP_STATE_OFLD_COMPL;
 		cid_addr = ofld_kcqe->iscsi_conn_context_id;
@@ -2339,10 +2377,14 @@ static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk)
 static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
 {
 	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+	u32 old_state = ep->state;
 
 	ep->state = EP_STATE_TCP_RST_RCVD;
-	if (ep->conn)
-		bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
+	if (old_state == EP_STATE_DISCONN_START)
+		wake_up_interruptible(&ep->ofld_wait);
+	else
+		if (ep->conn)
+			bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
 }
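
Several bnx2i_hwi.c routines above change from void to int purely so that a failure from hba->cnic->submit_kwqes() reaches the caller instead of being dropped. A minimal sketch of that shape, with invented example_* names standing in for the driver types:

	#include <linux/errno.h>

	struct example_dev;

	struct example_wqe {
		unsigned int opcode;
	};

	struct example_ops {
		int (*submit)(struct example_dev *dev, struct example_wqe *wqe);
	};

	struct example_dev {
		const struct example_ops *ops;
	};

	static int example_send_request(struct example_dev *dev,
					struct example_wqe *wqe)
	{
		int rc = -EINVAL;	/* default if no submit hook is present */

		if (dev->ops && dev->ops->submit)
			rc = dev->ops->submit(dev, wqe);

		return rc;		/* caller can now clean up on failure */
	}

The bnx2i_iscsi.c hunks further down are the consumers: bnx2i_ep_connect() and bnx2i_tear_down_conn() now check these return codes and unwind instead of waiting on an offload that was never posted.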
 
 

+ 20 - 88
drivers/scsi/bnx2i/bnx2i_init.c

@@ -1,6 +1,6 @@
 /* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2006 - 2010 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
@@ -9,6 +9,7 @@
  * the Free Software Foundation.
  *
  * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
  */
 
 #include "bnx2i.h"
@@ -17,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
 static u32 adapter_count;
 
 #define DRV_MODULE_NAME		"bnx2i"
-#define DRV_MODULE_VERSION	"2.1.3"
-#define DRV_MODULE_RELDATE	"Aug 10, 2010"
+#define DRV_MODULE_VERSION	"2.6.2.2"
+#define DRV_MODULE_RELDATE	"Nov 23, 2010"
 
 static char version[] __devinitdata =
 		"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -65,8 +66,6 @@ MODULE_PARM_DESC(rq_size, "Configure RQ size");
 
 u64 iscsi_error_mask = 0x00;
 
-static void bnx2i_unreg_one_device(struct bnx2i_hba *hba) ;
-
 
 /**
  * bnx2i_identify_device - identifies NetXtreme II device type
@@ -211,13 +210,24 @@ void bnx2i_stop(void *handle)
 {
 	struct bnx2i_hba *hba = handle;
 	int conns_active;
+	int wait_delay = 1 * HZ;
 
 	/* check if cleanup happened in GOING_DOWN context */
-	if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
-				&hba->adapter_state))
+	if (!test_and_set_bit(ADAPTER_STATE_GOING_DOWN,
+			      &hba->adapter_state)) {
 		iscsi_host_for_each_session(hba->shost,
 					    bnx2i_drop_session);
-
+		wait_delay = hba->hba_shutdown_tmo;
+	}
+	/* Wait for inflight offload connection tasks to complete before
+	 * proceeding. Forcefully terminate all connection recovery in
+	 * progress at the earliest, either in bind(), send_pdu(LOGIN),
+	 * or conn_start()
+	 */
+	wait_event_interruptible_timeout(hba->eh_wait,
+					 (list_empty(&hba->ep_ofld_list) &&
+					 list_empty(&hba->ep_destroy_list)),
+					 10 * HZ);
 	/* Wait for all endpoints to be torn down, Chip will be reset once
 	 *  control returns to network driver. So it is required to cleanup and
 	 * release all connection resources before returning from this routine.
@@ -226,7 +236,7 @@ void bnx2i_stop(void *handle)
 		conns_active = hba->ofld_conns_active;
 		wait_event_interruptible_timeout(hba->eh_wait,
 				(hba->ofld_conns_active != conns_active),
-				hba->hba_shutdown_tmo);
+				wait_delay);
 		if (hba->ofld_conns_active == conns_active)
 			break;
 	}
@@ -235,88 +245,10 @@ void bnx2i_stop(void *handle)
 	/* This flag should be cleared last so that ep_disconnect() gracefully
 	 * cleans up connection context
 	 */
+	clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
 	clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
 }
 
-/**
- * bnx2i_register_device - register bnx2i adapter instance with the cnic driver
- * @hba:	Adapter instance to register
- *
- * registers bnx2i adapter instance with the cnic driver while holding the
- *	adapter structure lock
- */
-void bnx2i_register_device(struct bnx2i_hba *hba)
-{
-	int rc;
-
-	if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
-	    test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
-		return;
-	}
-
-	rc = hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);
-
-	if (!rc)
-		set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
-}
-
-
-/**
- * bnx2i_reg_dev_all - registers all adapter instances with the cnic driver
- *
- * registers all bnx2i adapter instances with the cnic driver while holding
- *	the global resource lock
- */
-void bnx2i_reg_dev_all(void)
-{
-	struct bnx2i_hba *hba, *temp;
-
-	mutex_lock(&bnx2i_dev_lock);
-	list_for_each_entry_safe(hba, temp, &adapter_list, link)
-		bnx2i_register_device(hba);
-	mutex_unlock(&bnx2i_dev_lock);
-}
-
-
-/**
- * bnx2i_unreg_one_device - unregister adapter instance with the cnic driver
- * @hba:	Adapter instance to unregister
- *
- * registers bnx2i adapter instance with the cnic driver while holding
- *	the adapter structure lock
- */
-static void bnx2i_unreg_one_device(struct bnx2i_hba *hba)
-{
-	if (hba->ofld_conns_active ||
-	    !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) ||
-	    test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state))
-		return;
-
-	hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
-
-	/* ep_disconnect could come before NETDEV_DOWN, driver won't
-	 * see NETDEV_DOWN as it already unregistered itself.
-	 */
-	hba->adapter_state = 0;
-	clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
-}
-
-/**
- * bnx2i_unreg_dev_all - unregisters all bnx2i instances with the cnic driver
- *
- * unregisters all bnx2i adapter instances with the cnic driver while holding
- *	the global resource lock
- */
-void bnx2i_unreg_dev_all(void)
-{
-	struct bnx2i_hba *hba, *temp;
-
-	mutex_lock(&bnx2i_dev_lock);
-	list_for_each_entry_safe(hba, temp, &adapter_list, link)
-		bnx2i_unreg_one_device(hba);
-	mutex_unlock(&bnx2i_dev_lock);
-}
-
 
 /**
  * bnx2i_init_one - initialize an adapter instance and allocate memory resources

+ 93 - 55
drivers/scsi/bnx2i/bnx2i_iscsi.c

@@ -1,7 +1,7 @@
 /*
  * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2006 - 2010 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
@@ -10,6 +10,7 @@
  * the Free Software Foundation.
  *
  * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
  */
 
 #include <linux/slab.h>
@@ -411,7 +412,9 @@ static void bnx2i_free_ep(struct iscsi_endpoint *ep)
 	bnx2i_ep->state = EP_STATE_IDLE;
 	bnx2i_ep->hba->ofld_conns_active--;
 
-	bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
+	if (bnx2i_ep->ep_iscsi_cid != (u16) -1)
+		bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
+
 	if (bnx2i_ep->conn) {
 		bnx2i_ep->conn->ep = NULL;
 		bnx2i_ep->conn = NULL;
@@ -1383,6 +1386,12 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
 	ep = iscsi_lookup_endpoint(transport_fd);
 	if (!ep)
 		return -EINVAL;
+	/*
+	 * Forcefully terminate all in progress connection recovery at the
+	 * earliest, either in bind(), send_pdu(LOGIN), or conn_start()
+	 */
+	if (bnx2i_adapter_ready(hba))
+		return -EIO;
 
 	bnx2i_ep = ep->dd_data;
 	if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
@@ -1404,7 +1413,6 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
 				  hba->netdev->name);
 		return -EEXIST;
 	}
-
 	bnx2i_ep->conn = bnx2i_conn;
 	bnx2i_conn->ep = bnx2i_ep;
 	bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
@@ -1461,21 +1469,28 @@ static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
 	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
 	int len = 0;
 
+	if (!(bnx2i_conn && bnx2i_conn->ep && bnx2i_conn->ep->hba))
+		goto out;
+
 	switch (param) {
 	case ISCSI_PARAM_CONN_PORT:
-		if (bnx2i_conn->ep)
+		mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock);
+		if (bnx2i_conn->ep->cm_sk)
 			len = sprintf(buf, "%hu\n",
 				      bnx2i_conn->ep->cm_sk->dst_port);
+		mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock);
 		break;
 	case ISCSI_PARAM_CONN_ADDRESS:
-		if (bnx2i_conn->ep)
+		mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock);
+		if (bnx2i_conn->ep->cm_sk)
 			len = sprintf(buf, "%pI4\n",
 				      &bnx2i_conn->ep->cm_sk->dst_ip);
+		mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock);
 		break;
 	default:
 		return iscsi_conn_get_param(cls_conn, param, buf);
 	}
-
+out:
 	return len;
 }
 
@@ -1599,8 +1614,6 @@ static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
 	struct bnx2i_hba *hba;
 	struct cnic_dev *cnic = NULL;
 
-	bnx2i_reg_dev_all();
-
 	hba = get_adapter_list_head();
 	if (hba && hba->cnic)
 		cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
@@ -1640,18 +1653,26 @@ no_nx2_route:
 static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
 				 struct bnx2i_endpoint *ep)
 {
-	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
+	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) && ep->cm_sk)
 		hba->cnic->cm_destroy(ep->cm_sk);
 
-	if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
-		ep->state = EP_STATE_DISCONN_COMPL;
-
 	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
 	    ep->state == EP_STATE_DISCONN_TIMEDOUT) {
-		printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump,"
-				  " NW/PCIe trace, driver msgs to developers"
-				  " for analysis\n");
-		return 1;
+		if (ep->conn && ep->conn->cls_conn &&
+		    ep->conn->cls_conn->dd_data) {
+			struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;
+
+			/* Must suspend all rx queue activity for this ep */
+			set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+		}
+		/* CONN_DISCONNECT timeout may or may not be an issue depending
+		 * on what transcribed in TCP layer, different targets behave
+		 * differently
+		 */
+		printk(KERN_ALERT "bnx2i (%s): - WARN - CONN_DISCON timed out, "
+				  "please submit GRC Dump, NW/PCIe trace, "
+				  "driver msgs to developers for analysis\n",
+				  hba->netdev->name);
 	}
 
 	ep->state = EP_STATE_CLEANUP_START;
@@ -1664,7 +1685,9 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
 	bnx2i_ep_destroy_list_add(hba, ep);
 
 	/* destroy iSCSI context, wait for it to complete */
-	bnx2i_send_conn_destroy(hba, ep);
+	if (bnx2i_send_conn_destroy(hba, ep))
+		ep->state = EP_STATE_CLEANUP_CMPL;
+
 	wait_event_interruptible(ep->ofld_wait,
 				 (ep->state != EP_STATE_CLEANUP_START));
 
@@ -1711,8 +1734,6 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
 	if (shost) {
 		/* driver is given scsi host to work with */
 		hba = iscsi_host_priv(shost);
-		/* Register the device with cnic if not already done so */
-		bnx2i_register_device(hba);
 	} else
 		/*
 		 * check if the given destination can be reached through
@@ -1720,13 +1741,17 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
 		 */
 		hba = bnx2i_check_route(dst_addr);
 
-	if (!hba || test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) {
+	if (!hba) {
 		rc = -EINVAL;
 		goto nohba;
 	}
+	mutex_lock(&hba->net_dev_lock);
 
+	if (bnx2i_adapter_ready(hba) || !hba->cid_que.cid_free_cnt) {
+		rc = -EPERM;
+		goto check_busy;
+	}
 	cnic = hba->cnic;
-	mutex_lock(&hba->net_dev_lock);
 	ep = bnx2i_alloc_ep(hba);
 	if (!ep) {
 		rc = -ENOMEM;
@@ -1734,23 +1759,21 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
 	}
 	bnx2i_ep = ep->dd_data;
 
-	if (bnx2i_adapter_ready(hba)) {
-		rc = -EPERM;
-		goto net_if_down;
-	}
-
 	bnx2i_ep->num_active_cmds = 0;
 	iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
 	if (iscsi_cid == -1) {
-		printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n");
+		printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate "
+			"iscsi cid\n", hba->netdev->name);
 		rc = -ENOMEM;
-		goto iscsi_cid_err;
+		bnx2i_free_ep(ep);
+		goto check_busy;
 	}
 	bnx2i_ep->hba_age = hba->age;
 
 	rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
 	if (rc != 0) {
-		printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n");
+		printk(KERN_ALERT "bnx2i (%s): ep_conn - alloc QP resc error"
+			"\n", hba->netdev->name);
 		rc = -ENOMEM;
 		goto qp_resc_err;
 	}
@@ -1765,7 +1788,18 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
 	bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
 	add_timer(&bnx2i_ep->ofld_timer);
 
-	bnx2i_send_conn_ofld_req(hba, bnx2i_ep);
+	if (bnx2i_send_conn_ofld_req(hba, bnx2i_ep)) {
+		if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
+			printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
+				hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
+			rc = -EBUSY;
+		} else
+			rc = -ENOSPC;
+		printk(KERN_ALERT "bnx2i (%s): unable to send conn offld kwqe"
+			"\n", hba->netdev->name);
+		bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
+		goto conn_failed;
+	}
 
 	/* Wait for CNIC hardware to setup conn context and return 'cid' */
 	wait_event_interruptible(bnx2i_ep->ofld_wait,
@@ -1778,7 +1812,12 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
 	bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
 
 	if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
-		rc = -ENOSPC;
+		if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
+			printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
+				hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
+			rc = -EBUSY;
+		} else
+			rc = -ENOSPC;
 		goto conn_failed;
 	}
 
@@ -1786,7 +1825,8 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
 			     iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
 	if (rc) {
 		rc = -EINVAL;
-		goto conn_failed;
+		/* Need to terminate and cleanup the connection */
+		goto release_ep;
 	}
 
 	bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
@@ -1830,15 +1870,12 @@ release_ep:
 		return ERR_PTR(rc);
 	}
 conn_failed:
-net_if_down:
-iscsi_cid_err:
 	bnx2i_free_qp_resc(hba, bnx2i_ep);
 qp_resc_err:
 	bnx2i_free_ep(ep);
 check_busy:
 	mutex_unlock(&hba->net_dev_lock);
 nohba:
-	bnx2i_unreg_dev_all();
 	return ERR_PTR(rc);
 }
 
@@ -1898,12 +1935,13 @@ static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
 		cnic_dev_10g = 1;
 
 	switch (bnx2i_ep->state) {
-	case EP_STATE_CONNECT_START:
+	case EP_STATE_CONNECT_FAILED:
 	case EP_STATE_CLEANUP_FAILED:
 	case EP_STATE_OFLD_FAILED:
 	case EP_STATE_DISCONN_TIMEDOUT:
 		ret = 0;
 		break;
+	case EP_STATE_CONNECT_START:
 	case EP_STATE_CONNECT_COMPL:
 	case EP_STATE_ULP_UPDATE_START:
 	case EP_STATE_ULP_UPDATE_COMPL:
@@ -1914,13 +1952,10 @@ static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
 		ret = 1;
 		break;
 	case EP_STATE_TCP_RST_RCVD:
-		ret = 0;
-		break;
-	case EP_STATE_CONNECT_FAILED:
 		if (cnic_dev_10g)
-			ret = 1;
-		else
 			ret = 0;
+		else
+			ret = 1;
 		break;
 	default:
 		ret = 0;
@@ -1953,7 +1988,8 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
 	if (!cnic)
 		return 0;
 
-	if (bnx2i_ep->state == EP_STATE_IDLE)
+	if (bnx2i_ep->state == EP_STATE_IDLE ||
+	    bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
 		return 0;
 
 	if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
@@ -1979,9 +2015,10 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
 			if (session->state == ISCSI_STATE_LOGGING_OUT) {
 				if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) {
 					/* Logout sent, but no resp */
-					printk(KERN_ALERT "bnx2i - WARNING "
-						"logout response was not "
-						"received!\n");
+					printk(KERN_ALERT "bnx2i (%s): WARNING"
+						" logout response was not "
+						"received!\n",
+						bnx2i_ep->hba->netdev->name);
 				} else if (bnx2i_ep->state ==
 					   EP_STATE_LOGOUT_RESP_RCVD)
 					close = 1;
@@ -1999,9 +2036,8 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
 	else
 		close_ret = cnic->cm_abort(bnx2i_ep->cm_sk);
 
-	/* No longer allow CFC delete if cm_close/abort fails the request */
 	if (close_ret)
-		printk(KERN_ALERT "bnx2i: %s close/abort(%d) returned %d\n",
+		printk(KERN_ALERT "bnx2i (%s): close/abort(%d) returned %d\n",
 			bnx2i_ep->hba->netdev->name, close, close_ret);
 	else
 		/* wait for option-2 conn teardown */
@@ -2015,7 +2051,7 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
 destroy_conn:
 	bnx2i_ep_active_list_del(hba, bnx2i_ep);
 	if (bnx2i_tear_down_conn(hba, bnx2i_ep))
-		ret = -EINVAL;
+		return -EINVAL;
 out:
 	bnx2i_ep->state = EP_STATE_IDLE;
 	return ret;
@@ -2054,14 +2090,17 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
 
 	mutex_lock(&hba->net_dev_lock);
 
-	if (bnx2i_ep->state == EP_STATE_IDLE)
-		goto return_bnx2i_ep;
+	if (bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
+		goto out;
 
-	if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+	if (bnx2i_ep->state == EP_STATE_IDLE)
 		goto free_resc;
 
-	if (bnx2i_ep->hba_age != hba->age)
+	if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
+	    (bnx2i_ep->hba_age != hba->age)) {
+		bnx2i_ep_active_list_del(hba, bnx2i_ep);
 		goto free_resc;
+	}
 
 	/* Do all chip cleanup here */
 	if (bnx2i_hw_ep_disconnect(bnx2i_ep)) {
@@ -2070,14 +2109,13 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
 	}
 free_resc:
 	bnx2i_free_qp_resc(hba, bnx2i_ep);
-return_bnx2i_ep:
+
 	if (bnx2i_conn)
 		bnx2i_conn->ep = NULL;
 
 	bnx2i_free_ep(ep);
+out:
 	mutex_unlock(&hba->net_dev_lock);
-	if (!hba->ofld_conns_active)
-		bnx2i_unreg_dev_all();
 
 	wake_up_interruptible(&hba->eh_wait);
 }
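
The bnx2i_conn_get_param() hunk above takes hba->net_dev_lock and re-checks ep->cm_sk before dereferencing it, since the socket can be torn down concurrently by ep_disconnect(). A generic sketch of that check-under-lock idiom (the example_* types are assumptions made up for this note, not bnx2i structures):

	#include <linux/kernel.h>
	#include <linux/mutex.h>

	struct example_sock {
		unsigned short dst_port;
	};

	struct example_ep {
		struct mutex		 lock;	/* protects ->sk lifetime */
		struct example_sock	*sk;	/* may be freed under lock */
	};

	/* Returns bytes written to buf, or 0 if the socket is already gone. */
	static int example_get_port(struct example_ep *ep, char *buf)
	{
		int len = 0;

		mutex_lock(&ep->lock);
		if (ep->sk)			/* re-check under the lock */
			len = sprintf(buf, "%hu\n", ep->sk->dst_port);
		mutex_unlock(&ep->lock);

		return len;
	}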

+ 2 - 1
drivers/scsi/bnx2i/bnx2i_sysfs.c

@@ -1,12 +1,13 @@
 /* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2004 - 2009 Broadcom Corporation
+ * Copyright (c) 2004 - 2010 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
  * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
  */
 
 #include "bnx2i.h"

+ 9 - 2
drivers/scsi/device_handler/scsi_dh.c

@@ -442,12 +442,19 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
 	sdev = q->queuedata;
 	if (sdev && sdev->scsi_dh_data)
 		scsi_dh = sdev->scsi_dh_data->scsi_dh;
-	if (!scsi_dh || !get_device(&sdev->sdev_gendev))
+	if (!scsi_dh || !get_device(&sdev->sdev_gendev) ||
+	    sdev->sdev_state == SDEV_CANCEL ||
+	    sdev->sdev_state == SDEV_DEL)
 		err = SCSI_DH_NOSYS;
+	if (sdev->sdev_state == SDEV_OFFLINE)
+		err = SCSI_DH_DEV_OFFLINED;
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	if (err)
+	if (err) {
+		if (fn)
+			fn(data, err);
 		return err;
+	}
 
 	if (scsi_dh->activate)
 		err = scsi_dh->activate(sdev, fn, data);
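
The scsi_dh_activate() hunk does two things: it refuses activation when the device is in SDEV_CANCEL, SDEV_DEL or SDEV_OFFLINE, and it now invokes the completion callback fn(data, err) on that early-error path so the caller is not left waiting for a completion that will never arrive. A small sketch of that "fail early, but still complete" shape, with hypothetical names and stand-in error codes:

	#include <linux/errno.h>

	typedef void (*activate_complete_fn)(void *data, int err);

	enum example_state { EX_RUNNING, EX_OFFLINE, EX_DELETED };

	struct example_dev {
		enum example_state state;
	};

	static int example_activate(struct example_dev *dev,
				    activate_complete_fn fn, void *data)
	{
		int err = 0;

		if (dev->state == EX_DELETED)
			err = -ENODEV;		/* stand-in for SCSI_DH_NOSYS */
		else if (dev->state == EX_OFFLINE)
			err = -EAGAIN;		/* stand-in for DEV_OFFLINED */

		if (err) {
			/* early exit: still notify, or the waiter blocks forever */
			if (fn)
				fn(data, err);
			return err;
		}

		/* ... kick off the real, possibly asynchronous, activation ... */
		if (fn)
			fn(data, 0);
		return 0;
	}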

+ 3 - 1
drivers/scsi/fcoe/fcoe.c

@@ -854,7 +854,6 @@ static void fcoe_if_destroy(struct fc_lport *lport)
 
 	/* Cleanup the fc_lport */
 	fc_lport_destroy(lport);
-	fc_fcp_destroy(lport);
 
 	/* Stop the transmit retry timer */
 	del_timer_sync(&port->timer);
@@ -876,6 +875,9 @@ static void fcoe_if_destroy(struct fc_lport *lport)
 	fc_remove_host(lport->host);
 	scsi_remove_host(lport->host);
 
+	/* Destroy lport scsi_priv */
+	fc_fcp_destroy(lport);
+
 	/* There are no more rports or I/O, free the EM */
 	fc_exch_mgr_free(lport);
 

+ 241 - 59
drivers/scsi/fcoe/libfcoe.c

@@ -54,6 +54,7 @@ MODULE_LICENSE("GPL v2");
 static void fcoe_ctlr_timeout(unsigned long);
 static void fcoe_ctlr_timer_work(struct work_struct *);
 static void fcoe_ctlr_recv_work(struct work_struct *);
+static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *);
 
 static void fcoe_ctlr_vn_start(struct fcoe_ctlr *);
 static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *, struct sk_buff *);
@@ -176,6 +177,7 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
 	fip->mode = mode;
 	INIT_LIST_HEAD(&fip->fcfs);
 	mutex_init(&fip->ctlr_mutex);
+	spin_lock_init(&fip->ctlr_lock);
 	fip->flogi_oxid = FC_XID_UNKNOWN;
 	setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip);
 	INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work);
@@ -230,6 +232,49 @@ void fcoe_ctlr_destroy(struct fcoe_ctlr *fip)
 }
 EXPORT_SYMBOL(fcoe_ctlr_destroy);
 
+/**
+ * fcoe_ctlr_announce() - announce new FCF selection
+ * @fip: The FCoE controller
+ *
+ * Also sets the destination MAC for FCoE and control packets
+ *
+ * Called with neither ctlr_mutex nor ctlr_lock held.
+ */
+static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
+{
+	struct fcoe_fcf *sel;
+	struct fcoe_fcf *fcf;
+
+	mutex_lock(&fip->ctlr_mutex);
+	spin_lock_bh(&fip->ctlr_lock);
+
+	kfree_skb(fip->flogi_req);
+	fip->flogi_req = NULL;
+	list_for_each_entry(fcf, &fip->fcfs, list)
+		fcf->flogi_sent = 0;
+
+	spin_unlock_bh(&fip->ctlr_lock);
+	sel = fip->sel_fcf;
+
+	if (sel && !compare_ether_addr(sel->fcf_mac, fip->dest_addr))
+		goto unlock;
+	if (!is_zero_ether_addr(fip->dest_addr)) {
+		printk(KERN_NOTICE "libfcoe: host%d: "
+		       "FIP Fibre-Channel Forwarder MAC %pM deselected\n",
+		       fip->lp->host->host_no, fip->dest_addr);
+		memset(fip->dest_addr, 0, ETH_ALEN);
+	}
+	if (sel) {
+		printk(KERN_INFO "libfcoe: host%d: FIP selected "
+		       "Fibre-Channel Forwarder MAC %pM\n",
+		       fip->lp->host->host_no, sel->fcf_mac);
+		memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN);
+		fip->map_dest = 0;
+	}
+unlock:
+	mutex_unlock(&fip->ctlr_mutex);
+}
+
 /**
  * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port
  * @fip: The FCoE controller to get the maximum FCoE size from
@@ -564,6 +609,9 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
  * The caller must check that the length is a multiple of 4.
  * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes).
  * The the skb must also be an fc_frame.
+ *
+ * This is called from the lower-level driver with spinlocks held,
+ * so we must not take a mutex here.
  */
 int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
 		       struct sk_buff *skb)
@@ -601,7 +649,15 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
 	switch (op) {
 	case ELS_FLOGI:
 		op = FIP_DT_FLOGI;
-		break;
+		if (fip->mode == FIP_MODE_VN2VN)
+			break;
+		spin_lock_bh(&fip->ctlr_lock);
+		kfree_skb(fip->flogi_req);
+		fip->flogi_req = skb;
+		fip->flogi_req_send = 1;
+		spin_unlock_bh(&fip->ctlr_lock);
+		schedule_work(&fip->timer_work);
+		return -EINPROGRESS;
 	case ELS_FDISC:
 		if (ntoh24(fh->fh_s_id))
 			return 0;
@@ -922,11 +978,9 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	}
 	mtu_valid = fcoe_ctlr_mtu_valid(fcf);
 	fcf->time = jiffies;
-	if (!found) {
-		LIBFCOE_FIP_DBG(fip, "New FCF for fab %16.16llx "
-				"map %x val %d\n",
-				fcf->fabric_name, fcf->fc_map, mtu_valid);
-	}
+	if (!found)
+		LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
+				fcf->fabric_name, fcf->fcf_mac);
 
 	/*
 	 * If this advertisement is not solicited and our max receive size
@@ -944,6 +998,17 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	if (first && time_after(jiffies, fip->sol_time + sol_tov))
 		fcoe_ctlr_solicit(fip, NULL);
 
+	/*
+	 * Put this FCF at the head of the list for priority among equals.
+	 * This helps in the case of an NPV switch which insists we use
+	 * the FCF that answers multicast solicitations, not the others that
+	 * are sending periodic multicast advertisements.
+	 */
+	if (mtu_valid) {
+		list_del(&fcf->list);
+		list_add(&fcf->list, &fip->fcfs);
+	}
+
 	/*
 	 * If this is the first validated FCF, note the time and
 	 * set a timer to trigger selection.
@@ -1061,18 +1126,24 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	els_op = *(u8 *)(fh + 1);
 
 	if ((els_dtype == FIP_DT_FLOGI || els_dtype == FIP_DT_FDISC) &&
-	    sub == FIP_SC_REP && els_op == ELS_LS_ACC &&
-	    fip->mode != FIP_MODE_VN2VN) {
-		if (!is_valid_ether_addr(granted_mac)) {
-			LIBFCOE_FIP_DBG(fip,
-				"Invalid MAC address %pM in FIP ELS\n",
-				granted_mac);
-			goto drop;
-		}
-		memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN);
+	    sub == FIP_SC_REP && fip->mode != FIP_MODE_VN2VN) {
+		if (els_op == ELS_LS_ACC) {
+			if (!is_valid_ether_addr(granted_mac)) {
+				LIBFCOE_FIP_DBG(fip,
+					"Invalid MAC address %pM in FIP ELS\n",
+					granted_mac);
+				goto drop;
+			}
+			memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN);
 
-		if (fip->flogi_oxid == ntohs(fh->fh_ox_id))
-			fip->flogi_oxid = FC_XID_UNKNOWN;
+			if (fip->flogi_oxid == ntohs(fh->fh_ox_id)) {
+				fip->flogi_oxid = FC_XID_UNKNOWN;
+				if (els_dtype == FIP_DT_FLOGI)
+					fcoe_ctlr_announce(fip);
+			}
+		} else if (els_dtype == FIP_DT_FLOGI &&
+			   !fcoe_ctlr_flogi_retry(fip))
+			goto drop;	/* retrying FLOGI so drop reject */
 	}
 
 	if ((desc_cnt == 0) || ((els_op != ELS_LS_RJT) &&
@@ -1326,20 +1397,39 @@ drop:
  * fcoe_ctlr_select() - Select the best FCF (if possible)
  * @fip: The FCoE controller
  *
+ * Returns the selected FCF, or NULL if none are usable.
+ *
  * If there are conflicting advertisements, no FCF can be chosen.
  *
+ * If there is already a selected FCF, this will choose a better one or
+ * an equivalent one that hasn't already been sent a FLOGI.
+ *
  * Called with lock held.
  */
-static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
+static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
 {
 	struct fcoe_fcf *fcf;
-	struct fcoe_fcf *best = NULL;
+	struct fcoe_fcf *best = fip->sel_fcf;
+	struct fcoe_fcf *first;
+
+	first = list_first_entry(&fip->fcfs, struct fcoe_fcf, list);
 
 	list_for_each_entry(fcf, &fip->fcfs, list) {
-		LIBFCOE_FIP_DBG(fip, "consider FCF for fab %16.16llx "
-				"VFID %d map %x val %d\n",
-				fcf->fabric_name, fcf->vfid,
-				fcf->fc_map, fcoe_ctlr_mtu_valid(fcf));
+		LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx "
+				"VFID %d mac %pM map %x val %d "
+				"sent %u pri %u\n",
+				fcf->fabric_name, fcf->vfid, fcf->fcf_mac,
+				fcf->fc_map, fcoe_ctlr_mtu_valid(fcf),
+				fcf->flogi_sent, fcf->pri);
+		if (fcf->fabric_name != first->fabric_name ||
+		    fcf->vfid != first->vfid ||
+		    fcf->fc_map != first->fc_map) {
+			LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
+					"or FC-MAP\n");
+			return NULL;
+		}
+		if (fcf->flogi_sent)
+			continue;
 		if (!fcoe_ctlr_fcf_usable(fcf)) {
 			LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx "
 					"map %x %svalid %savailable\n",
@@ -1349,21 +1439,131 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
 					"" : "un");
 			continue;
 		}
-		if (!best) {
-			best = fcf;
-			continue;
-		}
-		if (fcf->fabric_name != best->fabric_name ||
-		    fcf->vfid != best->vfid ||
-		    fcf->fc_map != best->fc_map) {
-			LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
-					"or FC-MAP\n");
-			return;
-		}
-		if (fcf->pri < best->pri)
+		if (!best || fcf->pri < best->pri || best->flogi_sent)
 			best = fcf;
 	}
 	fip->sel_fcf = best;
+	if (best) {
+		LIBFCOE_FIP_DBG(fip, "using FCF mac %pM\n", best->fcf_mac);
+		fip->port_ka_time = jiffies +
+			msecs_to_jiffies(FIP_VN_KA_PERIOD);
+		fip->ctlr_ka_time = jiffies + best->fka_period;
+		if (time_before(fip->ctlr_ka_time, fip->timer.expires))
+			mod_timer(&fip->timer, fip->ctlr_ka_time);
+	}
+	return best;
+}
+
+/**
+ * fcoe_ctlr_flogi_send_locked() - send FIP-encapsulated FLOGI to current FCF
+ * @fip: The FCoE controller
+ *
+ * Returns non-zero error if it could not be sent.
+ *
+ * Called with ctlr_mutex and ctlr_lock held.
+ * Caller must verify that fip->sel_fcf is not NULL.
+ */
+static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip)
+{
+	struct sk_buff *skb;
+	struct sk_buff *skb_orig;
+	struct fc_frame_header *fh;
+	int error;
+
+	skb_orig = fip->flogi_req;
+	if (!skb_orig)
+		return -EINVAL;
+
+	/*
+	 * Clone and send the FLOGI request.  If clone fails, use original.
+	 */
+	skb = skb_clone(skb_orig, GFP_ATOMIC);
+	if (!skb) {
+		skb = skb_orig;
+		fip->flogi_req = NULL;
+	}
+	fh = (struct fc_frame_header *)skb->data;
+	error = fcoe_ctlr_encaps(fip, fip->lp, FIP_DT_FLOGI, skb,
+				 ntoh24(fh->fh_d_id));
+	if (error) {
+		kfree_skb(skb);
+		return error;
+	}
+	fip->send(fip, skb);
+	fip->sel_fcf->flogi_sent = 1;
+	return 0;
+}
+
+/**
+ * fcoe_ctlr_flogi_retry() - resend FLOGI request to a new FCF if possible
+ * @fip: The FCoE controller
+ *
+ * Returns non-zero error code if there's no FLOGI request to retry or
+ * no alternate FCF available.
+ */
+static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
+{
+	struct fcoe_fcf *fcf;
+	int error;
+
+	mutex_lock(&fip->ctlr_mutex);
+	spin_lock_bh(&fip->ctlr_lock);
+	LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n");
+	fcf = fcoe_ctlr_select(fip);
+	if (!fcf || fcf->flogi_sent) {
+		kfree_skb(fip->flogi_req);
+		fip->flogi_req = NULL;
+		error = -ENOENT;
+	} else {
+		fcoe_ctlr_solicit(fip, NULL);
+		error = fcoe_ctlr_flogi_send_locked(fip);
+	}
+	spin_unlock_bh(&fip->ctlr_lock);
+	mutex_unlock(&fip->ctlr_mutex);
+	return error;
+}
+
+
+/**
+ * fcoe_ctlr_flogi_send() - Handle sending of FIP FLOGI.
+ * @fip: The FCoE controller that timed out
+ *
+ * Done here because fcoe_ctlr_els_send() can't get mutex.
+ *
+ * Called with ctlr_mutex held.  The caller must not hold ctlr_lock.
+ */
+static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
+{
+	struct fcoe_fcf *fcf;
+
+	spin_lock_bh(&fip->ctlr_lock);
+	fcf = fip->sel_fcf;
+	if (!fcf || !fip->flogi_req_send)
+		goto unlock;
+
+	LIBFCOE_FIP_DBG(fip, "sending FLOGI\n");
+
+	/*
+	 * If this FLOGI is being sent due to a timeout retry
+	 * to the same FCF as before, select a different FCF if possible.
+	 */
+	if (fcf->flogi_sent) {
+		LIBFCOE_FIP_DBG(fip, "sending FLOGI - reselect\n");
+		fcf = fcoe_ctlr_select(fip);
+		if (!fcf || fcf->flogi_sent) {
+			LIBFCOE_FIP_DBG(fip, "sending FLOGI - clearing\n");
+			list_for_each_entry(fcf, &fip->fcfs, list)
+				fcf->flogi_sent = 0;
+			fcf = fcoe_ctlr_select(fip);
+		}
+	}
+	if (fcf) {
+		fcoe_ctlr_flogi_send_locked(fip);
+		fip->flogi_req_send = 0;
+	} else /* XXX */
+		LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
+unlock:
+	spin_unlock_bh(&fip->ctlr_lock);
 }
 
 /**
@@ -1411,34 +1611,16 @@ static void fcoe_ctlr_timer_work(struct work_struct *work)
 	sel = fip->sel_fcf;
 	if (!sel && fip->sel_time) {
 		if (time_after_eq(jiffies, fip->sel_time)) {
-			fcoe_ctlr_select(fip);
-			sel = fip->sel_fcf;
+			sel = fcoe_ctlr_select(fip);
 			fip->sel_time = 0;
 		} else if (time_after(next_timer, fip->sel_time))
 			next_timer = fip->sel_time;
 	}
 
-	if (sel != fcf) {
-		fcf = sel;		/* the old FCF may have been freed */
-		if (sel) {
-			printk(KERN_INFO "libfcoe: host%d: FIP selected "
-			       "Fibre-Channel Forwarder MAC %pM\n",
-			       fip->lp->host->host_no, sel->fcf_mac);
-			memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN);
-			fip->map_dest = 0;
-			fip->port_ka_time = jiffies +
-				msecs_to_jiffies(FIP_VN_KA_PERIOD);
-			fip->ctlr_ka_time = jiffies + sel->fka_period;
-			if (time_after(next_timer, fip->ctlr_ka_time))
-				next_timer = fip->ctlr_ka_time;
-		} else {
-			printk(KERN_NOTICE "libfcoe: host%d: "
-			       "FIP Fibre-Channel Forwarder timed out.	"
-			       "Starting FCF discovery.\n",
-			       fip->lp->host->host_no);
-			reset = 1;
-		}
-	}
+	if (sel && fip->flogi_req_send)
+		fcoe_ctlr_flogi_send(fip);
+	else if (!sel && fcf)
+		reset = 1;
 
 	if (sel && !sel->fd_flags) {
 		if (time_after_eq(jiffies, fip->ctlr_ka_time)) {
@@ -2475,7 +2657,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
 	case FIP_ST_LINK_WAIT:
 		goto unlock;
 	default:
-		WARN(1, "unexpected state %d", fip->state);
+		WARN(1, "unexpected state %d\n", fip->state);
 		goto unlock;
 	}
 	mod_timer(&fip->timer, next_time);
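
fcoe_ctlr_els_send() can be entered with the lower-level driver's locks held, so the new code only stashes the FLOGI frame under ctlr_lock and schedules timer_work; the mutex-taking send happens later in process context. The general shape looks roughly like the sketch below (example_* names are invented, and setup such as INIT_WORK() and lock initialization is elided; the work handler here just frees the frame where the real code would transmit it):

	#include <linux/kernel.h>
	#include <linux/mutex.h>
	#include <linux/skbuff.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct example_ctlr {
		spinlock_t		lock;		/* safe in atomic context */
		struct mutex		big_lock;	/* must not be taken here */
		struct sk_buff		*pending;	/* deferred request       */
		struct work_struct	work;
	};

	/* May run with the caller's locks held: only spinlock + schedule. */
	static void example_queue_send(struct example_ctlr *ctlr,
				       struct sk_buff *skb)
	{
		spin_lock_bh(&ctlr->lock);
		kfree_skb(ctlr->pending);	/* drop any stale request */
		ctlr->pending = skb;
		spin_unlock_bh(&ctlr->lock);
		schedule_work(&ctlr->work);
	}

	/* Runs in process context: safe to sleep and take the mutex. */
	static void example_work_fn(struct work_struct *work)
	{
		struct example_ctlr *ctlr =
			container_of(work, struct example_ctlr, work);
		struct sk_buff *skb;

		mutex_lock(&ctlr->big_lock);
		spin_lock_bh(&ctlr->lock);
		skb = ctlr->pending;
		ctlr->pending = NULL;
		spin_unlock_bh(&ctlr->lock);

		if (skb)
			kfree_skb(skb);	/* real code would transmit it here */
		mutex_unlock(&ctlr->big_lock);
	}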

+ 3 - 1
drivers/scsi/gdth.c

@@ -4273,8 +4273,10 @@ static int ioc_general(void __user *arg, char *cmnd)
     }
 
     rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout, &gen.info);
-    if (rval < 0)
+    if (rval < 0) {
+	gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
         return rval;
+    }
     gen.status = rval;
 
     if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf, 

+ 12 - 4
drivers/scsi/gdth_proc.c

@@ -365,8 +365,10 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
                     len = 0;
                     begin = pos;
                 }
-                if (pos > offset + length)
+		if (pos > offset + length) {
+		    gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
                     goto stop_output;
+		}
             }
         }
         gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
@@ -450,8 +452,10 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
                     len = 0;
                     begin = pos;
                 }
-                if (pos > offset + length)
+		if (pos > offset + length) {
+		    gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
                     goto stop_output;
+		}
             } while (drv_no != -1);
              
             if (is_mirr) {
@@ -472,8 +476,10 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
                 len = 0;
                 begin = pos;
             }
-            if (pos > offset + length)
+	    if (pos > offset + length) {
+		gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
                 goto stop_output;
+	    }
         }       
         gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
         
@@ -542,8 +548,10 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
                     len = 0;
                     begin = pos;
                 }
-                if (pos > offset + length)
+		if (pos > offset + length) {
+		    gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
                     goto stop_output;
+		}
             }
         }
         gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
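
Both gdth hunks fix the same leak: the scratch buffer from gdth_ioctl_alloc() was not released on the early-exit paths. One common way to keep such leaks from reappearing, shown only as a generic sketch with stand-in helpers rather than the gdth API, is to funnel every exit through a single cleanup label:

	#include <stdlib.h>

	/* stand-ins for a paired allocate/free of a scratch buffer */
	static void *scratch_alloc(size_t len)	{ return malloc(len); }
	static void  scratch_free(void *buf)	{ free(buf); }

	static int example_ioctl(size_t len, int fail_early)
	{
		void *buf;
		int ret = 0;

		buf = scratch_alloc(len);
		if (!buf)
			return -1;

		if (fail_early) {	/* any error path ...          */
			ret = -1;
			goto out;	/* ... funnels through 'out'   */
		}

		/* ... do the real work with buf ... */

	out:
		scratch_free(buf);	/* freed exactly once, always  */
		return ret;
	}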

+ 0 - 11
drivers/scsi/hpsa.c

@@ -641,11 +641,6 @@ static void fixup_botched_add(struct ctlr_info *h,
 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
 	struct hpsa_scsi_dev_t *dev2)
 {
-	if ((is_logical_dev_addr_mode(dev1->scsi3addr) ||
-		(dev1->lun != -1 && dev2->lun != -1)) &&
-		dev1->devtype != 0x0C)
-		return (memcmp(dev1, dev2, sizeof(*dev1)) == 0);
-
 	/* we compare everything except lun and target as these
 	 * are not yet assigned.  Compare parts likely
 	 * to differ first
@@ -660,12 +655,8 @@ static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
 		return 0;
 	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
 		return 0;
-	if (memcmp(dev1->revision, dev2->revision, sizeof(dev1->revision)) != 0)
-		return 0;
 	if (dev1->devtype != dev2->devtype)
 		return 0;
-	if (dev1->raid_level != dev2->raid_level)
-		return 0;
 	if (dev1->bus != dev2->bus)
 		return 0;
 	return 1;
@@ -1477,8 +1468,6 @@ static int hpsa_update_device_info(struct ctlr_info *h,
 		sizeof(this_device->vendor));
 	memcpy(this_device->model, &inq_buff[16],
 		sizeof(this_device->model));
-	memcpy(this_device->revision, &inq_buff[32],
-		sizeof(this_device->revision));
 	memset(this_device->device_id, 0,
 		sizeof(this_device->device_id));
 	hpsa_get_device_id(h, scsi3addr, this_device->device_id,

+ 0 - 1
drivers/scsi/hpsa.h

@@ -45,7 +45,6 @@ struct hpsa_scsi_dev_t {
 	unsigned char device_id[16];    /* from inquiry pg. 0x83 */
 	unsigned char vendor[8];        /* bytes 8-15 of inquiry data */
 	unsigned char model[16];        /* bytes 16-31 of inquiry data */
-	unsigned char revision[4];      /* bytes 32-35 of inquiry data */
 	unsigned char raid_level;	/* from inquiry page 0xC1 */
 };
 

+ 14 - 14
drivers/scsi/ibmvscsi/ibmvfc.c

@@ -2493,23 +2493,23 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
 }
 
 static const struct ibmvfc_async_desc ae_desc [] = {
-	{ IBMVFC_AE_ELS_PLOGI,		"PLOGI",		IBMVFC_DEFAULT_LOG_LEVEL + 1 },
-	{ IBMVFC_AE_ELS_LOGO,		"LOGO",		IBMVFC_DEFAULT_LOG_LEVEL + 1 },
-	{ IBMVFC_AE_ELS_PRLO,		"PRLO",		IBMVFC_DEFAULT_LOG_LEVEL + 1 },
-	{ IBMVFC_AE_SCN_NPORT,		"N-Port SCN",	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
-	{ IBMVFC_AE_SCN_GROUP,		"Group SCN",	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
-	{ IBMVFC_AE_SCN_DOMAIN,		"Domain SCN",	IBMVFC_DEFAULT_LOG_LEVEL },
-	{ IBMVFC_AE_SCN_FABRIC,		"Fabric SCN",	IBMVFC_DEFAULT_LOG_LEVEL },
-	{ IBMVFC_AE_LINK_UP,		"Link Up",		IBMVFC_DEFAULT_LOG_LEVEL },
-	{ IBMVFC_AE_LINK_DOWN,		"Link Down",	IBMVFC_DEFAULT_LOG_LEVEL },
-	{ IBMVFC_AE_LINK_DEAD,		"Link Dead",	IBMVFC_DEFAULT_LOG_LEVEL },
-	{ IBMVFC_AE_HALT,			"Halt",		IBMVFC_DEFAULT_LOG_LEVEL },
-	{ IBMVFC_AE_RESUME,		"Resume",		IBMVFC_DEFAULT_LOG_LEVEL },
-	{ IBMVFC_AE_ADAPTER_FAILED,	"Adapter Failed",	IBMVFC_DEFAULT_LOG_LEVEL },
+	{ "PLOGI",	IBMVFC_AE_ELS_PLOGI,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
+	{ "LOGO",	IBMVFC_AE_ELS_LOGO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
+	{ "PRLO",	IBMVFC_AE_ELS_PRLO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
+	{ "N-Port SCN",	IBMVFC_AE_SCN_NPORT,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
+	{ "Group SCN",	IBMVFC_AE_SCN_GROUP,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
+	{ "Domain SCN",	IBMVFC_AE_SCN_DOMAIN,	IBMVFC_DEFAULT_LOG_LEVEL },
+	{ "Fabric SCN",	IBMVFC_AE_SCN_FABRIC,	IBMVFC_DEFAULT_LOG_LEVEL },
+	{ "Link Up",	IBMVFC_AE_LINK_UP,	IBMVFC_DEFAULT_LOG_LEVEL },
+	{ "Link Down",	IBMVFC_AE_LINK_DOWN,	IBMVFC_DEFAULT_LOG_LEVEL },
+	{ "Link Dead",	IBMVFC_AE_LINK_DEAD,	IBMVFC_DEFAULT_LOG_LEVEL },
+	{ "Halt",	IBMVFC_AE_HALT,		IBMVFC_DEFAULT_LOG_LEVEL },
+	{ "Resume",	IBMVFC_AE_RESUME,	IBMVFC_DEFAULT_LOG_LEVEL },
+	{ "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
 };
 
 static const struct ibmvfc_async_desc unknown_ae = {
-	0, "Unknown async", IBMVFC_DEFAULT_LOG_LEVEL
+	"Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
 };
 
 /**

+ 1 - 1
drivers/scsi/ibmvscsi/ibmvfc.h

@@ -542,8 +542,8 @@ enum ibmvfc_async_event {
 };
 
 struct ibmvfc_async_desc {
-	enum ibmvfc_async_event ae;
 	const char *desc;
+	enum ibmvfc_async_event ae;
 	int log_level;
 };
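
The ibmvfc change reorders both the struct and the ae_desc table so the positional initializers still line up with the new field order. As a side note rather than a description of this patch, C99 designated initializers make such tables independent of field order; a tiny sketch with invented names:

	struct async_desc {
		const char	*desc;
		int		event;
		int		log_level;
	};

	/* Field order in the struct can change without touching this table. */
	static const struct async_desc example_ae_desc[] = {
		{ .desc = "Link Up",	.event = 1, .log_level = 2 },
		{ .desc = "Link Down",	.event = 2, .log_level = 2 },
	};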
 

+ 44 - 10
drivers/scsi/ipr.c

@@ -146,7 +146,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
 		}
 	},
 	{ /* CRoC */
-		.mailbox = 0x00040,
+		.mailbox = 0x00044,
 		.cache_line_size = 0x20,
 		{
 			.set_interrupt_mask_reg = 0x00010,
@@ -1048,6 +1048,8 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
 			sizeof(res->res_path));
 
 		res->bus = 0;
+		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
+			sizeof(res->dev_lun.scsi_lun));
 		res->lun = scsilun_to_int(&res->dev_lun);
 
 		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
@@ -1063,9 +1065,6 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
 								  ioa_cfg->max_devs_supported);
 				set_bit(res->target, ioa_cfg->target_ids);
 			}
-
-			memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
-				sizeof(res->dev_lun.scsi_lun));
 		} else if (res->type == IPR_RES_TYPE_IOAFP) {
 			res->bus = IPR_IOAFP_VIRTUAL_BUS;
 			res->target = 0;
@@ -1116,7 +1115,7 @@ static int ipr_is_same_device(struct ipr_resource_entry *res,
 	if (res->ioa_cfg->sis64) {
 		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
 					sizeof(cfgtew->u.cfgte64->dev_id)) &&
-			!memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
+			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
 					sizeof(cfgtew->u.cfgte64->lun))) {
 			return 1;
 		}
@@ -2901,6 +2900,12 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
 		return;
 	}
 
+	if (ioa_cfg->sis64) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		ssleep(IPR_DUMP_DELAY_SECONDS);
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	}
+
 	start_addr = readl(ioa_cfg->ioa_mailbox);
 
 	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
@@ -7472,6 +7477,29 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
 	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
 }
 
+/**
+ * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
+ * @ipr_cmd:	ipr command struct
+ *
+ * Description: This function will call to get the unit check buffer.
+ *
+ * Return value:
+ *	IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
+{
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+	ENTER;
+	ioa_cfg->ioa_unit_checked = 0;
+	ipr_get_unit_check_buffer(ioa_cfg);
+	ipr_cmd->job_step = ipr_reset_alert;
+	ipr_reset_start_timer(ipr_cmd, 0);
+
+	LEAVE;
+	return IPR_RC_JOB_RETURN;
+}
+
 /**
  * ipr_reset_restore_cfg_space - Restore PCI config space.
  * @ipr_cmd:	ipr command struct
@@ -7512,11 +7540,17 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
 	}
 
 	if (ioa_cfg->ioa_unit_checked) {
-		ioa_cfg->ioa_unit_checked = 0;
-		ipr_get_unit_check_buffer(ioa_cfg);
-		ipr_cmd->job_step = ipr_reset_alert;
-		ipr_reset_start_timer(ipr_cmd, 0);
-		return IPR_RC_JOB_RETURN;
+		if (ioa_cfg->sis64) {
+			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
+			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
+			return IPR_RC_JOB_RETURN;
+		} else {
+			ioa_cfg->ioa_unit_checked = 0;
+			ipr_get_unit_check_buffer(ioa_cfg);
+			ipr_cmd->job_step = ipr_reset_alert;
+			ipr_reset_start_timer(ipr_cmd, 0);
+			return IPR_RC_JOB_RETURN;
+		}
 	}
 
 	if (ioa_cfg->in_ioa_bringdown) {

+ 2 - 0
drivers/scsi/ipr.h

@@ -218,6 +218,8 @@
 #define IPR_WAIT_FOR_BIST_TIMEOUT		(2 * HZ)
 #define IPR_PCI_RESET_TIMEOUT			(HZ / 2)
 #define IPR_DUMP_TIMEOUT			(15 * HZ)
+#define IPR_DUMP_DELAY_SECONDS			4
+#define IPR_DUMP_DELAY_TIMEOUT			(IPR_DUMP_DELAY_SECONDS * HZ)
 
 /*
  * SCSI Literals

+ 40 - 8
drivers/scsi/libfc/fc_exch.c

@@ -67,6 +67,11 @@ struct workqueue_struct *fc_exch_workqueue;
 struct fc_exch_pool {
 	u16		 next_index;
 	u16		 total_exches;
+
+	/* two cache of free slot in exch array */
+	u16		 left;
+	u16		 right;
+
 	spinlock_t	 lock;
 	struct list_head ex_list;
 };
@@ -108,7 +113,6 @@ struct fc_exch_mgr {
 		atomic_t non_bls_resp;
 	} stats;
 };
-#define	fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
 
 /**
  * struct fc_exch_mgr_anchor - primary structure for list of EMs
@@ -397,13 +401,23 @@ static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
 static void fc_exch_delete(struct fc_exch *ep)
 {
 	struct fc_exch_pool *pool;
+	u16 index;
 
 	pool = ep->pool;
 	spin_lock_bh(&pool->lock);
 	WARN_ON(pool->total_exches <= 0);
 	pool->total_exches--;
-	fc_exch_ptr_set(pool, (ep->xid - ep->em->min_xid) >> fc_cpu_order,
-			NULL);
+
+	/* update the cache of free slots */
+	index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
+	if (pool->left == FC_XID_UNKNOWN)
+		pool->left = index;
+	else if (pool->right == FC_XID_UNKNOWN)
+		pool->right = index;
+	else
+		pool->next_index = index;
+
+	fc_exch_ptr_set(pool, index, NULL);
 	list_del(&ep->ex_list);
 	spin_unlock_bh(&pool->lock);
 	fc_exch_release(ep);	/* drop hold for exch in mp */
@@ -636,10 +650,13 @@ static void fc_exch_timeout(struct work_struct *work)
 		if (e_stat & ESB_ST_ABNORMAL)
 			rc = fc_exch_done_locked(ep);
 		spin_unlock_bh(&ep->ex_lock);
-		if (!rc)
-			fc_exch_delete(ep);
 		if (resp)
 			resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
+		if (!rc) {
+			/* delete the exchange if it's already being aborted */
+			fc_exch_delete(ep);
+			return;
+		}
 		fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
 		goto done;
 	}
@@ -679,6 +696,19 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
 	pool = per_cpu_ptr(mp->pool, cpu);
 	spin_lock_bh(&pool->lock);
 	put_cpu();
+
+	/* check the cache of free slots first */
+	if (pool->left != FC_XID_UNKNOWN) {
+		index = pool->left;
+		pool->left = FC_XID_UNKNOWN;
+		goto hit;
+	}
+	if (pool->right != FC_XID_UNKNOWN) {
+		index = pool->right;
+		pool->right = FC_XID_UNKNOWN;
+		goto hit;
+	}
+
 	index = pool->next_index;
 	/* allocate new exch from pool */
 	while (fc_exch_ptr_get(pool, index)) {
@@ -687,7 +717,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
 			goto err;
 	}
 	pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
-
+hit:
 	fc_exch_hold(ep);	/* hold for exch in mp */
 	spin_lock_init(&ep->ex_lock);
 	/*
@@ -1247,7 +1277,7 @@ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
 
 	list_for_each_entry(ema, &lport->ema_list, ema_list)
 		if ((!ema->match || ema->match(fp)) &&
-		    fc_seq_lookup_recip(lport, ema->mp, fp) != FC_RJT_NONE)
+		    fc_seq_lookup_recip(lport, ema->mp, fp) == FC_RJT_NONE)
 			break;
 	return fr_seq(fp);
 }
@@ -1343,7 +1373,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 	}
 	if (ep->esb_stat & ESB_ST_COMPLETE) {
 		atomic_inc(&mp->stats.xid_not_found);
-		goto out;
+		goto rel;
 	}
 	if (ep->rxid == FC_XID_UNKNOWN)
 		ep->rxid = ntohs(fh->fh_rx_id);
@@ -2181,6 +2211,8 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
 		goto free_mempool;
 	for_each_possible_cpu(cpu) {
 		pool = per_cpu_ptr(mp->pool, cpu);
+		pool->left = FC_XID_UNKNOWN;
+		pool->right = FC_XID_UNKNOWN;
 		spin_lock_init(&pool->lock);
 		INIT_LIST_HEAD(&pool->ex_list);
 	}

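Note on the fc_exch.c changes above: fc_exch_delete() now parks the freed index in a two-entry cache (left/right, falling back to resetting next_index), and fc_exch_em_alloc() consumes those cached indexes before doing the linear scan. The standalone sketch below shows the same idea with illustrative names; SLOT_EMPTY plays the role of FC_XID_UNKNOWN and the pool holds plain pointers instead of exchanges.

#include <stdio.h>

#define POOL_SIZE	8
#define SLOT_EMPTY	0xFFFFu			/* stands in for FC_XID_UNKNOWN */

struct pool {
	void		*exches[POOL_SIZE];
	unsigned short	next_index;
	unsigned short	left, right;		/* cached indexes of freed slots */
};

static int pool_alloc(struct pool *p)
{
	unsigned short index;

	/* fast path: reuse a recently freed slot */
	if (p->left != SLOT_EMPTY) {
		index = p->left;
		p->left = SLOT_EMPTY;
		return index;
	}
	if (p->right != SLOT_EMPTY) {
		index = p->right;
		p->right = SLOT_EMPTY;
		return index;
	}
	/* slow path: linear scan starting at next_index */
	index = p->next_index;
	while (p->exches[index]) {
		index = (index + 1) % POOL_SIZE;
		if (index == p->next_index)
			return -1;		/* pool exhausted */
	}
	p->next_index = (index + 1) % POOL_SIZE;
	return index;
}

static void pool_free(struct pool *p, unsigned short index)
{
	if (p->left == SLOT_EMPTY)
		p->left = index;
	else if (p->right == SLOT_EMPTY)
		p->right = index;
	else
		p->next_index = index;
	p->exches[index] = NULL;
}

int main(void)
{
	struct pool p = { .left = SLOT_EMPTY, .right = SLOT_EMPTY };
	int marker = 1;

	int a = pool_alloc(&p);
	p.exches[a] = &marker;
	int b = pool_alloc(&p);
	p.exches[b] = &marker;

	pool_free(&p, a);
	printf("slot %d freed, next allocation returns %d\n", a, pool_alloc(&p));
	(void)b;
	return 0;
}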
drivers/scsi/libfc/fc_fcp.c  (+98 -49)

@@ -57,6 +57,9 @@ struct kmem_cache *scsi_pkt_cachep;
 #define FC_SRB_READ		(1 << 1)
 #define FC_SRB_WRITE		(1 << 0)
 
+/* constant added to e_d_tov timeout to get rec_tov value */
+#define REC_TOV_CONST		1
+
 /*
  * The SCp.ptr should be tested and set under the scsi_pkt_queue lock
  */
@@ -96,7 +99,7 @@ static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
 static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
 static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
 static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *);
-static void fc_fcp_recovery(struct fc_fcp_pkt *);
+static void fc_fcp_recovery(struct fc_fcp_pkt *, u8 code);
 static void fc_fcp_timeout(unsigned long);
 static void fc_fcp_rec(struct fc_fcp_pkt *);
 static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
@@ -120,14 +123,13 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
 #define FC_DATA_UNDRUN		7
 #define FC_ERROR		8
 #define FC_HRD_ERROR		9
-#define FC_CMD_RECOVERY		10
+#define FC_CRC_ERROR		10
+#define FC_TIMED_OUT		11
 
 /*
  * Error recovery timeout values.
  */
-#define FC_SCSI_ER_TIMEOUT	(10 * HZ)
 #define FC_SCSI_TM_TOV		(10 * HZ)
-#define FC_SCSI_REC_TOV		(2 * HZ)
 #define FC_HOST_RESET_TIMEOUT	(30 * HZ)
 #define FC_CAN_QUEUE_PERIOD	(60 * HZ)
 
@@ -438,6 +440,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 	void *buf;
 	struct scatterlist *sg;
 	u32 nents;
+	u8 host_bcode = FC_COMPLETE;
 
 	fh = fc_frame_header_get(fp);
 	offset = ntohl(fh->fh_parm_offset);
@@ -446,13 +449,16 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 	buf = fc_frame_payload_get(fp, 0);
 
 	/*
-	 * if this I/O is ddped then clear it
-	 * and initiate recovery since data
-	 * frames are expected to be placed
-	 * directly in that case.
+	 * if this I/O is ddped then clear it and initiate recovery since data
+	 * frames are expected to be placed directly in that case.
+	 *
+	 * Indicate error to scsi-ml because something went wrong with the
+	 * ddp handling to get us here.
 	 */
 	if (fsp->xfer_ddp != FC_XID_UNKNOWN) {
 		fc_fcp_ddp_done(fsp);
+		FC_FCP_DBG(fsp, "DDP I/O in fc_fcp_recv_data set ERROR\n");
+		host_bcode = FC_ERROR;
 		goto err;
 	}
 	if (offset + len > fsp->data_len) {
@@ -462,6 +468,9 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 			goto crc_err;
 		FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx "
 			   "data_len %x\n", len, offset, fsp->data_len);
+
+		/* Data is corrupted; indicate that scsi-ml should retry */
+		host_bcode = FC_DATA_OVRRUN;
 		goto err;
 	}
 	if (offset != fsp->xfer_len)
@@ -498,8 +507,10 @@ crc_err:
 			 * If so, we need to retry the entire operation.
 			 * Otherwise, ignore it.
 			 */
-			if (fsp->state & FC_SRB_DISCONTIG)
+			if (fsp->state & FC_SRB_DISCONTIG) {
+				host_bcode = FC_CRC_ERROR;
 				goto err;
+			}
 			return;
 		}
 	}
@@ -517,7 +528,7 @@ crc_err:
 		fc_fcp_complete_locked(fsp);
 	return;
 err:
-	fc_fcp_recovery(fsp);
+	fc_fcp_recovery(fsp, host_bcode);
 }
 
 /**
@@ -962,7 +973,13 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
 		}
 		lport->tt.exch_done(seq);
 	}
-	fc_io_compl(fsp);
+	/*
+	 * Some resets driven by SCSI are not I/Os and do not have
+	 * SCSI commands associated with the requests. We should not
+	 * call I/O completion if we do not have a SCSI command.
+	 */
+	if (fsp->cmd)
+		fc_io_compl(fsp);
 }
 
 /**
@@ -1072,6 +1089,21 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
 	return rc;
 }
 
+/**
+ * get_fsp_rec_tov() - Helper function to get REC_TOV
+ * @fsp: the FCP packet
+ */
+static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
+{
+	struct fc_rport *rport;
+	struct fc_rport_libfc_priv *rpriv;
+
+	rport = fsp->rport;
+	rpriv = rport->dd_data;
+
+	return rpriv->e_d_tov + REC_TOV_CONST;
+}
+
 /**
  * fc_fcp_cmd_send() - Send a FCP command
  * @lport: The local port to send the command on
@@ -1089,6 +1121,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
 	struct fc_rport_libfc_priv *rpriv;
 	const size_t len = sizeof(fsp->cdb_cmd);
 	int rc = 0;
+	unsigned int rec_tov;
 
 	if (fc_fcp_lock_pkt(fsp))
 		return 0;
@@ -1119,10 +1152,13 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
 	fsp->seq_ptr = seq;
 	fc_fcp_pkt_hold(fsp);	/* hold for fc_fcp_pkt_destroy */
 
+	rec_tov = get_fsp_rec_tov(fsp);
+
 	setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
-	fc_fcp_timer_set(fsp,
-			 (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
-			 FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
+
+	if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
+		fc_fcp_timer_set(fsp, rec_tov);
+
 unlock:
 	fc_fcp_unlock_pkt(fsp);
 	return rc;
@@ -1197,13 +1233,16 @@ static void fc_lun_reset_send(unsigned long data)
 {
 	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
 	struct fc_lport *lport = fsp->lp;
+	unsigned int rec_tov;
+
 	if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) {
 		if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
 			return;
 		if (fc_fcp_lock_pkt(fsp))
 			return;
+		rec_tov = get_fsp_rec_tov(fsp);
 		setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
-		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+		fc_fcp_timer_set(fsp, rec_tov);
 		fc_fcp_unlock_pkt(fsp);
 	}
 }
@@ -1282,27 +1321,27 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 		 *
 		 * scsi-eh will escalate for when either happens.
 		 */
-		return;
+		goto out;
 	}
 
 	if (fc_fcp_lock_pkt(fsp))
-		return;
+		goto out;
 
 	/*
 	 * raced with eh timeout handler.
 	 */
-	if (!fsp->seq_ptr || !fsp->wait_for_comp) {
-		spin_unlock_bh(&fsp->scsi_pkt_lock);
-		return;
-	}
+	if (!fsp->seq_ptr || !fsp->wait_for_comp)
+		goto out_unlock;
 
 	fh = fc_frame_header_get(fp);
 	if (fh->fh_type != FC_TYPE_BLS)
 		fc_fcp_resp(fsp, fp);
 	fsp->seq_ptr = NULL;
 	fsp->lp->tt.exch_done(seq);
-	fc_frame_free(fp);
+out_unlock:
 	fc_fcp_unlock_pkt(fsp);
+out:
+	fc_frame_free(fp);
 }
 
 /**
@@ -1341,13 +1380,10 @@ static void fc_fcp_timeout(unsigned long data)
 
 	if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
 		fc_fcp_rec(fsp);
-	else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2),
-			       jiffies))
-		fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
 	else if (fsp->state & FC_SRB_RCV_STATUS)
 		fc_fcp_complete_locked(fsp);
 	else
-		fc_fcp_recovery(fsp);
+		fc_fcp_recovery(fsp, FC_TIMED_OUT);
 	fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
 unlock:
 	fc_fcp_unlock_pkt(fsp);
@@ -1373,6 +1409,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
 		fc_fcp_complete_locked(fsp);
 		return;
 	}
+
 	fp = fc_fcp_frame_alloc(lport, sizeof(struct fc_els_rec));
 	if (!fp)
 		goto retry;
@@ -1383,15 +1420,15 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
 		       FC_FCTL_REQ, 0);
 	if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC,
 				 fc_fcp_rec_resp, fsp,
-				 jiffies_to_msecs(FC_SCSI_REC_TOV))) {
+				 2 * lport->r_a_tov)) {
 		fc_fcp_pkt_hold(fsp);		/* hold while REC outstanding */
 		return;
 	}
 retry:
 	if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
-		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+		fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
 	else
-		fc_fcp_recovery(fsp);
+		fc_fcp_recovery(fsp, FC_TIMED_OUT);
 }
 
 /**
@@ -1445,7 +1482,6 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 			 * making progress.
 			 */
 			rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
-			fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
 			break;
 		case ELS_RJT_LOGIC:
 		case ELS_RJT_UNAB:
@@ -1460,7 +1496,7 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 				fc_fcp_retry_cmd(fsp);
 				break;
 			}
-			fc_fcp_recovery(fsp);
+			fc_fcp_recovery(fsp, FC_ERROR);
 			break;
 		}
 	} else if (opcode == ELS_LS_ACC) {
@@ -1498,12 +1534,12 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 			}
 			fc_fcp_srr(fsp, r_ctl, offset);
 		} else if (e_stat & ESB_ST_SEQ_INIT) {
-
+			unsigned int rec_tov = get_fsp_rec_tov(fsp);
 			/*
 			 * The remote port has the initiative, so just
 			 * keep waiting for it to complete.
 			 */
-			fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+			fc_fcp_timer_set(fsp, rec_tov);
 		} else {
 
 			/*
@@ -1575,7 +1611,7 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
 			fc_fcp_rec(fsp);
 		else
-			fc_fcp_recovery(fsp);
+			fc_fcp_recovery(fsp, FC_ERROR);
 		break;
 	}
 	fc_fcp_unlock_pkt(fsp);
@@ -1587,9 +1623,9 @@ out:
  * fc_fcp_recovery() - Handler for fcp_pkt recovery
  * @fsp: The FCP pkt that needs to be aborted
  */
-static void fc_fcp_recovery(struct fc_fcp_pkt *fsp)
+static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
 {
-	fsp->status_code = FC_CMD_RECOVERY;
+	fsp->status_code = code;
 	fsp->cdb_status = 0;
 	fsp->io_status = 0;
 	/*
@@ -1616,6 +1652,7 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
 	struct fcp_srr *srr;
 	struct fc_frame *fp;
 	u8 cdb_op;
+	unsigned int rec_tov;
 
 	rport = fsp->rport;
 	rpriv = rport->dd_data;
@@ -1640,8 +1677,9 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
 		       rpriv->local_port->port_id, FC_TYPE_FCP,
 		       FC_FCTL_REQ, 0);
 
+	rec_tov = get_fsp_rec_tov(fsp);
 	seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL,
-				      fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
+				      fsp, jiffies_to_msecs(rec_tov));
 	if (!seq)
 		goto retry;
 
@@ -1665,6 +1703,7 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 {
 	struct fc_fcp_pkt *fsp = arg;
 	struct fc_frame_header *fh;
+	unsigned int rec_tov;
 
 	if (IS_ERR(fp)) {
 		fc_fcp_srr_error(fsp, fp);
@@ -1691,11 +1730,12 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 	switch (fc_frame_payload_op(fp)) {
 	case ELS_LS_ACC:
 		fsp->recov_retry = 0;
-		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+		rec_tov = get_fsp_rec_tov(fsp);
+		fc_fcp_timer_set(fsp, rec_tov);
 		break;
 	case ELS_LS_RJT:
 	default:
-		fc_fcp_recovery(fsp);
+		fc_fcp_recovery(fsp, FC_ERROR);
 		break;
 	}
 	fc_fcp_unlock_pkt(fsp);
@@ -1721,7 +1761,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
 			fc_fcp_rec(fsp);
 		else
-			fc_fcp_recovery(fsp);
+			fc_fcp_recovery(fsp, FC_TIMED_OUT);
 		break;
 	case -FC_EX_CLOSED:			/* e.g., link failure */
 		/* fall through */
@@ -1820,19 +1860,17 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
 	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		fsp->req_flags = FC_SRB_READ;
 		stats->InputRequests++;
-		stats->InputMegabytes = fsp->data_len;
+		stats->InputBytes += fsp->data_len;
 	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
 		fsp->req_flags = FC_SRB_WRITE;
 		stats->OutputRequests++;
-		stats->OutputMegabytes = fsp->data_len;
+		stats->OutputBytes += fsp->data_len;
 	} else {
 		fsp->req_flags = 0;
 		stats->ControlRequests++;
 	}
 	put_cpu();
 
-	fsp->tgt_flags = rpriv->flags;
-
 	init_timer(&fsp->timer);
 	fsp->timer.data = (unsigned long)fsp;
 
@@ -1946,18 +1984,29 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
 		break;
 	case FC_CMD_ABORTED:
 		FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
-			   "due to FC_CMD_ABORTED\n");
+			  "due to FC_CMD_ABORTED\n");
 		sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
 		break;
-	case FC_CMD_RECOVERY:
-		sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
-		break;
 	case FC_CMD_RESET:
+		FC_FCP_DBG(fsp, "Returning DID_RESET to scsi-ml "
+			   "due to FC_CMD_RESET\n");
 		sc_cmd->result = (DID_RESET << 16);
 		break;
 	case FC_HRD_ERROR:
+		FC_FCP_DBG(fsp, "Returning DID_NO_CONNECT to scsi-ml "
+			   "due to FC_HRD_ERROR\n");
 		sc_cmd->result = (DID_NO_CONNECT << 16);
 		break;
+	case FC_CRC_ERROR:
+		FC_FCP_DBG(fsp, "Returning DID_PARITY to scsi-ml "
+			   "due to FC_CRC_ERROR\n");
+		sc_cmd->result = (DID_PARITY << 16);
+		break;
+	case FC_TIMED_OUT:
+		FC_FCP_DBG(fsp, "Returning DID_BUS_BUSY to scsi-ml "
+			   "due to FC_TIMED_OUT\n");
+		sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
+		break;
 	default:
 		FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
 			   "due to unknown error\n");
@@ -2004,7 +2053,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
 	fsp = CMD_SP(sc_cmd);
 	if (!fsp) {
 		/* command completed while scsi eh was setting up */
-		spin_unlock_irqrestore(lport->host->host_lock, flags);
+		spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 		return SUCCESS;
 	}
 	/* grab a ref so the fsp and sc_cmd cannot be released from under us */

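Note on the fc_fcp.c changes above: the single FC_CMD_RECOVERY status is replaced by per-cause codes (FC_CRC_ERROR, FC_TIMED_OUT, plus the existing FC_ERROR and FC_HRD_ERROR) so fc_io_compl() can hand scsi-ml a more specific host byte. A compact sketch of that mapping follows; the enum values are illustrative placeholders rather than the kernel definitions.

#include <stdio.h>

enum fc_status { FC_ERROR, FC_CMD_RESET, FC_HRD_ERROR, FC_CRC_ERROR, FC_TIMED_OUT };
enum host_byte { DID_ERROR, DID_RESET, DID_NO_CONNECT, DID_PARITY, DID_BUS_BUSY };

static enum host_byte fc_status_to_host_byte(enum fc_status code)
{
	switch (code) {
	case FC_CMD_RESET:	return DID_RESET;	/* reset driven by SCSI eh */
	case FC_HRD_ERROR:	return DID_NO_CONNECT;	/* hard error, give up */
	case FC_CRC_ERROR:	return DID_PARITY;	/* corrupted data, retry */
	case FC_TIMED_OUT:	return DID_BUS_BUSY;	/* timed out, retry */
	default:		return DID_ERROR;
	}
}

int main(void)
{
	printf("%d\n", fc_status_to_host_byte(FC_CRC_ERROR) == DID_PARITY);
	return 0;
}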
drivers/scsi/libfc/fc_libfc.h  (+14 -2)

@@ -66,9 +66,21 @@ extern unsigned int fc_debug_logging;
 
 #define FC_FCP_DBG(pkt, fmt, args...)					\
 	FC_CHECK_LOGGING(FC_FCP_LOGGING,				\
-			 printk(KERN_INFO "host%u: fcp: %6.6x: " fmt,	\
+	{								\
+		if ((pkt)->seq_ptr) {					\
+			struct fc_exch *_ep = NULL;			\
+			_ep = fc_seq_exch((pkt)->seq_ptr);		\
+			printk(KERN_INFO "host%u: fcp: %6.6x: "		\
+				"xid %04x-%04x: " fmt,			\
 				(pkt)->lp->host->host_no,		\
-				pkt->rport->port_id, ##args))
+				(pkt)->rport->port_id,			\
+				(_ep)->oxid, (_ep)->rxid, ##args);	\
+		} else {						\
+			printk(KERN_INFO "host%u: fcp: %6.6x: " fmt,	\
+				(pkt)->lp->host->host_no,		\
+				(pkt)->rport->port_id, ##args);		\
+		}							\
+	})
 
 #define FC_EXCH_DBG(exch, fmt, args...)					\
 	FC_CHECK_LOGGING(FC_EXCH_LOGGING,				\

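Note on the FC_FCP_DBG change above: the macro now emits the OX_ID/RX_ID pair when the packet has an exchange attached and falls back to the shorter prefix otherwise. The sketch below shows the same two-form macro in plain userspace C; the struct and field names are illustrative, the GCC-style named variadic arguments mirror the kernel's "##args", and a do { } while (0) wrapper is used here where the kernel nests a brace block inside FC_CHECK_LOGGING.

#include <stdio.h>

struct pkt {
	unsigned int	host;
	unsigned int	oxid, rxid;
	int		have_exch;		/* stands in for pkt->seq_ptr */
};

#define PKT_DBG(pkt, fmt, args...)					\
	do {								\
		if ((pkt)->have_exch)					\
			printf("host%u: xid %04x-%04x: " fmt,		\
			       (pkt)->host, (pkt)->oxid, (pkt)->rxid,	\
			       ##args);					\
		else							\
			printf("host%u: " fmt, (pkt)->host, ##args);	\
	} while (0)

int main(void)
{
	struct pkt p = { .host = 0, .oxid = 0x10, .rxid = 0xffff, .have_exch = 1 };

	PKT_DBG(&p, "sending %d bytes\n", 512);
	p.have_exch = 0;
	PKT_DBG(&p, "no exchange assigned yet\n");
	return 0;
}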
drivers/scsi/libfc/fc_lport.c  (+12 -4)

@@ -288,6 +288,8 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
 	struct fc_lport *lport = shost_priv(shost);
 	struct timespec v0, v1;
 	unsigned int cpu;
+	u64 fcp_in_bytes = 0;
+	u64 fcp_out_bytes = 0;
 
 	fcoe_stats = &lport->host_stats;
 	memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
@@ -310,10 +312,12 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
 		fcoe_stats->fcp_input_requests += stats->InputRequests;
 		fcoe_stats->fcp_output_requests += stats->OutputRequests;
 		fcoe_stats->fcp_control_requests += stats->ControlRequests;
-		fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
-		fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
+		fcp_in_bytes += stats->InputBytes;
+		fcp_out_bytes += stats->OutputBytes;
 		fcoe_stats->link_failure_count += stats->LinkFailureCount;
 	}
+	fcoe_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
+	fcoe_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
 	fcoe_stats->lip_count = -1;
 	fcoe_stats->nos_count = -1;
 	fcoe_stats->loss_of_sync_count = -1;
@@ -1703,8 +1707,10 @@ static int fc_lport_els_request(struct fc_bsg_job *job,
 	info->sg = job->reply_payload.sg_list;
 
 	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
-				     NULL, info, tov))
+				     NULL, info, tov)) {
+		kfree(info);
 		return -ECOMM;
+	}
 	return 0;
 }
 
@@ -1762,8 +1768,10 @@ static int fc_lport_ct_request(struct fc_bsg_job *job,
 	info->sg = job->reply_payload.sg_list;
 
 	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
-				     NULL, info, tov))
+				     NULL, info, tov)) {
+		kfree(info);
 		return -ECOMM;
+	}
 	return 0;
 }
 

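Note on the statistics changes above and in fc_fcp.c: the per-CPU counters now accumulate raw bytes (InputBytes/OutputBytes) instead of assigning the per-command length to a megabyte field, and fc_get_host_stats() sums them and divides once with div_u64() when reporting. A small sketch of that aggregation; NR_CPUS and the sample values are illustrative, and plain 64-bit division replaces div_u64() in userspace.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4				/* illustrative */

struct cpu_stats { uint64_t in_bytes, out_bytes; };

int main(void)
{
	struct cpu_stats stats[NR_CPUS] = {
		{ 1500000000ULL, 200000000ULL },
		{  750000000ULL, 100000000ULL },
	};
	uint64_t in = 0, out = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		in  += stats[cpu].in_bytes;
		out += stats[cpu].out_bytes;
	}
	/* divide once at reporting time, so sub-megabyte I/O is not lost */
	printf("input: %llu MB, output: %llu MB\n",
	       (unsigned long long)(in / 1000000),
	       (unsigned long long)(out / 1000000));
	return 0;
}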
drivers/scsi/libfc/fc_rport.c  (+2 -1)

@@ -652,7 +652,7 @@ void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 	FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", fc_els_resp_type(fp));
 
 	if (fp == ERR_PTR(-FC_EX_CLOSED))
-		return;
+		goto put;
 
 	mutex_lock(&rdata->rp_mutex);
 
@@ -689,6 +689,7 @@ out:
 	fc_frame_free(fp);
 err:
 	mutex_unlock(&rdata->rp_mutex);
+put:
 	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
 	return;
 bad:

drivers/scsi/libiscsi.c  (+27 -37)

@@ -505,6 +505,7 @@ static void iscsi_free_task(struct iscsi_task *task)
 	struct iscsi_conn *conn = task->conn;
 	struct iscsi_session *session = conn->session;
 	struct scsi_cmnd *sc = task->sc;
+	int oldstate = task->state;
 
 	ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n",
 			  task->itt, task->state, task->sc);
@@ -525,10 +526,10 @@ static void iscsi_free_task(struct iscsi_task *task)
 		/* SCSI eh reuses commands to verify us */
 		sc->SCp.ptr = NULL;
 		/*
-		 * queue command may call this to free the task, but
-		 * not have setup the sc callback
+		 * queuecommand may call this to free the task, in which
+		 * case it decides itself how to return sc to scsi-ml.
 		 */
-		if (sc->scsi_done)
+		if (oldstate != ISCSI_TASK_REQUEUE_SCSIQ)
 			sc->scsi_done(sc);
 	}
 }
@@ -539,11 +540,12 @@ void __iscsi_get_task(struct iscsi_task *task)
 }
 EXPORT_SYMBOL_GPL(__iscsi_get_task);
 
-static void __iscsi_put_task(struct iscsi_task *task)
+void __iscsi_put_task(struct iscsi_task *task)
 {
 	if (atomic_dec_and_test(&task->refcount))
 		iscsi_free_task(task);
 }
+EXPORT_SYMBOL_GPL(__iscsi_put_task);
 
 void iscsi_put_task(struct iscsi_task *task)
 {
@@ -571,7 +573,8 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
 			  task->itt, task->state, task->sc);
 	if (task->state == ISCSI_TASK_COMPLETED ||
 	    task->state == ISCSI_TASK_ABRT_TMF ||
-	    task->state == ISCSI_TASK_ABRT_SESS_RECOV)
+	    task->state == ISCSI_TASK_ABRT_SESS_RECOV ||
+	    task->state == ISCSI_TASK_REQUEUE_SCSIQ)
 		return;
 	WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
 	task->state = state;
@@ -1335,17 +1338,16 @@ void iscsi_session_failure(struct iscsi_session *session,
 {
 	struct iscsi_conn *conn;
 	struct device *dev;
-	unsigned long flags;
 
-	spin_lock_irqsave(&session->lock, flags);
+	spin_lock_bh(&session->lock);
 	conn = session->leadconn;
 	if (session->state == ISCSI_STATE_TERMINATE || !conn) {
-		spin_unlock_irqrestore(&session->lock, flags);
+		spin_unlock_bh(&session->lock);
 		return;
 	}
 
 	dev = get_device(&conn->cls_conn->dev);
-	spin_unlock_irqrestore(&session->lock, flags);
+	spin_unlock_bh(&session->lock);
 	if (!dev)
 	        return;
 	/*
@@ -1364,17 +1366,16 @@ EXPORT_SYMBOL_GPL(iscsi_session_failure);
 void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
 {
 	struct iscsi_session *session = conn->session;
-	unsigned long flags;
 
-	spin_lock_irqsave(&session->lock, flags);
+	spin_lock_bh(&session->lock);
 	if (session->state == ISCSI_STATE_FAILED) {
-		spin_unlock_irqrestore(&session->lock, flags);
+		spin_unlock_bh(&session->lock);
 		return;
 	}
 
 	if (conn->stop_stage == 0)
 		session->state = ISCSI_STATE_FAILED;
-	spin_unlock_irqrestore(&session->lock, flags);
+	spin_unlock_bh(&session->lock);
 
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
@@ -1599,27 +1600,23 @@ enum {
 	FAILURE_SESSION_NOT_READY,
 };
 
-static int iscsi_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
 {
 	struct iscsi_cls_session *cls_session;
-	struct Scsi_Host *host;
 	struct iscsi_host *ihost;
 	int reason = 0;
 	struct iscsi_session *session;
 	struct iscsi_conn *conn;
 	struct iscsi_task *task = NULL;
 
-	sc->scsi_done = done;
 	sc->result = 0;
 	sc->SCp.ptr = NULL;
 
-	host = sc->device->host;
 	ihost = shost_priv(host);
-	spin_unlock(host->host_lock);
 
 	cls_session = starget_to_session(scsi_target(sc->device));
 	session = cls_session->dd_data;
-	spin_lock(&session->lock);
+	spin_lock_bh(&session->lock);
 
 	reason = iscsi_session_chkready(cls_session);
 	if (reason) {
@@ -1705,25 +1702,21 @@ static int iscsi_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi
 	}
 
 	session->queued_cmdsn++;
-	spin_unlock(&session->lock);
-	spin_lock(host->host_lock);
+	spin_unlock_bh(&session->lock);
 	return 0;
 
 prepd_reject:
-	sc->scsi_done = NULL;
-	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
+	iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
 reject:
-	spin_unlock(&session->lock);
+	spin_unlock_bh(&session->lock);
 	ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
 			  sc->cmnd[0], reason);
-	spin_lock(host->host_lock);
 	return SCSI_MLQUEUE_TARGET_BUSY;
 
 prepd_fault:
-	sc->scsi_done = NULL;
-	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
+	iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
 fault:
-	spin_unlock(&session->lock);
+	spin_unlock_bh(&session->lock);
 	ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
 			  sc->cmnd[0], reason);
 	if (!scsi_bidi_cmnd(sc))
@@ -1732,12 +1725,9 @@ fault:
 		scsi_out(sc)->resid = scsi_out(sc)->length;
 		scsi_in(sc)->resid = scsi_in(sc)->length;
 	}
-	done(sc);
-	spin_lock(host->host_lock);
+	sc->scsi_done(sc);
 	return 0;
 }
-
-DEF_SCSI_QCMD(iscsi_queuecommand)
 EXPORT_SYMBOL_GPL(iscsi_queuecommand);
 
 int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
@@ -1795,9 +1785,9 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
 				      NULL, 0);
 	if (!task) {
 		spin_unlock_bh(&session->lock);
+		iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
 		spin_lock_bh(&session->lock);
-		ISCSI_DBG_EH(session, "tmf exec failure\n");
 		return -EPERM;
 	}
 	conn->tmfcmd_pdus_cnt++;
@@ -2202,7 +2192,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 		goto success_unlocked;
 	case TMF_TIMEDOUT:
 		spin_unlock_bh(&session->lock);
-		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+		iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
 		goto failed_unlocked;
 	case TMF_NOT_FOUND:
 		if (!sc->SCp.ptr) {
@@ -2289,7 +2279,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
 		break;
 	case TMF_TIMEDOUT:
 		spin_unlock_bh(&session->lock);
-		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+		iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
 		goto done;
 	default:
 		conn->tmf_state = TMF_INITIAL;
@@ -2370,7 +2360,7 @@ failed:
 	 * we drop the lock here but the leadconn cannot be destroyed while
 	 * we are in the scsi eh
 	 */
-	iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+	iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
 
 	ISCSI_DBG_EH(session, "wait for relogin\n");
 	wait_event_interruptible(conn->ehwait,
@@ -2452,7 +2442,7 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc)
 		break;
 	case TMF_TIMEDOUT:
 		spin_unlock_bh(&session->lock);
-		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+		iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
 		goto done;
 	default:
 		conn->tmf_state = TMF_INITIAL;

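Note on the libiscsi changes above: queuecommand now runs under the session lock with bottom halves disabled instead of the host lock, and tasks it hands back to the SCSI midlayer are completed with the new ISCSI_TASK_REQUEUE_SCSIQ state so iscsi_free_task() skips sc->scsi_done() for them. The sketch below models only that last decision; the types and names are illustrative, not the libiscsi API.

#include <stdio.h>
#include <stdlib.h>

enum task_state { TASK_PENDING, TASK_COMPLETED, TASK_REQUEUE_SCSIQ };

struct cmd  { void (*done)(struct cmd *); };
struct task { int refcount; enum task_state state; struct cmd *cmd; };

static void cmd_done(struct cmd *c)
{
	(void)c;
	printf("completed to the midlayer\n");
}

static void task_free(struct task *t)
{
	/* requeued commands go back via the queuecommand return value,
	 * so their completion callback must not run here */
	if (t->cmd && t->state != TASK_REQUEUE_SCSIQ)
		t->cmd->done(t->cmd);
	free(t);
}

static void task_put(struct task *t)
{
	if (--t->refcount == 0)
		task_free(t);
}

int main(void)
{
	struct cmd c = { .done = cmd_done };
	struct task *t = malloc(sizeof(*t));

	*t = (struct task){ .refcount = 1, .state = TASK_COMPLETED, .cmd = &c };
	task_put(t);				/* prints the completion message */

	t = malloc(sizeof(*t));
	*t = (struct task){ .refcount = 1, .state = TASK_REQUEUE_SCSIQ, .cmd = &c };
	task_put(t);				/* freed silently */
	return 0;
}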
drivers/scsi/libsas/sas_port.c  (+13 -5)

@@ -28,6 +28,17 @@
 #include <scsi/scsi_transport_sas.h>
 #include "../scsi_sas_internal.h"
 
+static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy *phy)
+{
+	struct sas_ha_struct *sas_ha = phy->ha;
+
+	if (memcmp(port->attached_sas_addr, phy->attached_sas_addr,
+		   SAS_ADDR_SIZE) != 0 || (sas_ha->strict_wide_ports &&
+	     memcmp(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE) != 0))
+		return false;
+	return true;
+}
+
 /**
  * sas_form_port -- add this phy to a port
  * @phy: the phy of interest
@@ -45,8 +56,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
 	unsigned long flags;
 
 	if (port) {
-		if (memcmp(port->attached_sas_addr, phy->attached_sas_addr,
-			   SAS_ADDR_SIZE) != 0)
+		if (!phy_is_wideport_member(port, phy))
 			sas_deform_port(phy);
 		else {
 			SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
@@ -62,9 +72,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
 		port = sas_ha->sas_port[i];
 		spin_lock(&port->phy_list_lock);
 		if (*(u64 *) port->sas_addr &&
-		    memcmp(port->attached_sas_addr,
-			   phy->attached_sas_addr, SAS_ADDR_SIZE) == 0 &&
-		    port->num_phys > 0) {
+		    phy_is_wideport_member(port, phy) && port->num_phys > 0) {
 			/* wide port */
 			SAS_DPRINTK("phy%d matched wide port%d\n", phy->id,
 				    port->id);

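Note on the sas_port.c change above: the open-coded address comparison is folded into phy_is_wideport_member(), which requires matching attached SAS addresses and, when strict_wide_ports is set, a matching local phy address as well. The same predicate as a standalone function (struct layouts here are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define SAS_ADDR_SIZE 8

struct phy  { unsigned char sas_addr[SAS_ADDR_SIZE], attached_sas_addr[SAS_ADDR_SIZE]; };
struct port { unsigned char sas_addr[SAS_ADDR_SIZE], attached_sas_addr[SAS_ADDR_SIZE]; };

static bool phy_is_wideport_member(const struct port *port, const struct phy *phy,
				   bool strict_wide_ports)
{
	if (memcmp(port->attached_sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE))
		return false;
	if (strict_wide_ports &&
	    memcmp(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE))
		return false;
	return true;
}

int main(void)
{
	struct port p = { .attached_sas_addr = { 1, 2, 3, 4, 5, 6, 7, 8 } };
	struct phy  y = { .attached_sas_addr = { 1, 2, 3, 4, 5, 6, 7, 8 } };

	printf("member: %d\n", phy_is_wideport_member(&p, &y, false));	/* 1 */
	return 0;
}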
drivers/scsi/lpfc/lpfc.h  (+24 -2)

@@ -464,12 +464,29 @@ struct unsol_rcv_ct_ctx {
 #define UNSOL_VALID	0x00000001
 };
 
+#define LPFC_USER_LINK_SPEED_AUTO	0	/* auto select (default)*/
+#define LPFC_USER_LINK_SPEED_1G		1	/* 1 Gigabaud */
+#define LPFC_USER_LINK_SPEED_2G		2	/* 2 Gigabaud */
+#define LPFC_USER_LINK_SPEED_4G		4	/* 4 Gigabaud */
+#define LPFC_USER_LINK_SPEED_8G		8	/* 8 Gigabaud */
+#define LPFC_USER_LINK_SPEED_10G	10	/* 10 Gigabaud */
+#define LPFC_USER_LINK_SPEED_16G	16	/* 16 Gigabaud */
+#define LPFC_USER_LINK_SPEED_MAX	LPFC_USER_LINK_SPEED_16G
+#define LPFC_USER_LINK_SPEED_BITMAP ((1 << LPFC_USER_LINK_SPEED_16G) | \
+				     (1 << LPFC_USER_LINK_SPEED_10G) | \
+				     (1 << LPFC_USER_LINK_SPEED_8G) | \
+				     (1 << LPFC_USER_LINK_SPEED_4G) | \
+				     (1 << LPFC_USER_LINK_SPEED_2G) | \
+				     (1 << LPFC_USER_LINK_SPEED_1G) | \
+				     (1 << LPFC_USER_LINK_SPEED_AUTO))
+#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16"
+
 struct lpfc_hba {
 	/* SCSI interface function jump table entries */
 	int (*lpfc_new_scsi_buf)
 		(struct lpfc_vport *, int);
 	struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
-		(struct lpfc_hba *);
+		(struct lpfc_hba *, struct lpfc_nodelist *);
 	int (*lpfc_scsi_prep_dma_buf)
 		(struct lpfc_hba *, struct lpfc_scsi_buf *);
 	void (*lpfc_scsi_unprep_dma_buf)
@@ -545,7 +562,7 @@ struct lpfc_hba {
 	uint32_t hba_flag;	/* hba generic flags */
 #define HBA_ERATT_HANDLED	0x1 /* This flag is set when eratt handled */
 #define DEFER_ERATT		0x2 /* Deferred error attention in progress */
-#define HBA_FCOE_SUPPORT	0x4 /* HBA function supports FCOE */
+#define HBA_FCOE_MODE		0x4 /* HBA function in FCoE Mode */
 #define HBA_SP_QUEUE_EVT	0x8 /* Slow-path qevt posted to worker thread*/
 #define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
 #define FCP_XRI_ABORT_EVENT	0x20
@@ -557,6 +574,7 @@ struct lpfc_hba {
 #define HBA_FIP_SUPPORT		0x800 /* FIP support in HBA */
 #define HBA_AER_ENABLED		0x1000 /* AER enabled with HBA */
 #define HBA_DEVLOSS_TMO         0x2000 /* HBA in devloss timeout */
+#define HBA_RRQ_ACTIVE		0x4000 /* process the rrq active list */
 	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
 	struct lpfc_dmabuf slim2p;
 
@@ -606,6 +624,7 @@ struct lpfc_hba {
 	/* HBA Config Parameters */
 	uint32_t cfg_ack0;
 	uint32_t cfg_enable_npiv;
+	uint32_t cfg_enable_rrq;
 	uint32_t cfg_topology;
 	uint32_t cfg_link_speed;
 	uint32_t cfg_cr_delay;
@@ -716,6 +735,7 @@ struct lpfc_hba {
 	uint32_t total_scsi_bufs;
 	struct list_head lpfc_iocb_list;
 	uint32_t total_iocbq_bufs;
+	struct list_head active_rrq_list;
 	spinlock_t hbalock;
 
 	/* pci_mem_pools */
@@ -728,6 +748,7 @@ struct lpfc_hba {
 
 	mempool_t *mbox_mem_pool;
 	mempool_t *nlp_mem_pool;
+	mempool_t *rrq_pool;
 
 	struct fc_host_statistics link_stats;
 	enum intr_type_t intr_type;
@@ -784,6 +805,7 @@ struct lpfc_hba {
 	unsigned long skipped_hb;
 	struct timer_list hb_tmofunc;
 	uint8_t hb_outstanding;
+	struct timer_list rrq_tmr;
 	enum hba_temp_state over_temp_state;
 	/* ndlp reference management */
 	spinlock_t ndlp_lock;

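Note on the lpfc.h additions above: the user link speed is now validated against LPFC_USER_LINK_SPEED_BITMAP, so only the discrete values 0, 1, 2, 4, 8, 10 and 16 are accepted by the checks in lpfc_attr.c further down. A standalone sketch of that bit-test validation; the macro names are illustrative stand-ins for the LPFC_USER_LINK_SPEED_* definitions.

#include <stdio.h>

#define SPEED_AUTO	0
#define SPEED_MAX	16
#define SPEED_BITMAP	((1 << 16) | (1 << 10) | (1 << 8) | (1 << 4) | \
			 (1 << 2)  | (1 << 1)  | (1 << SPEED_AUTO))

static int speed_is_valid(int val)
{
	return val >= 0 && val <= SPEED_MAX && (SPEED_BITMAP & (1 << val));
}

int main(void)
{
	for (int v = 0; v <= SPEED_MAX; v++)
		if (speed_is_valid(v))
			printf("%d ", v);	/* prints: 0 1 2 4 8 10 16 */
	printf("\n");
	return 0;
}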
drivers/scsi/lpfc/lpfc_attr.c  (+54 -50)

@@ -52,10 +52,6 @@
 #define LPFC_MIN_DEVLOSS_TMO 1
 #define LPFC_MAX_DEVLOSS_TMO 255
 
-#define LPFC_MAX_LINK_SPEED 8
-#define LPFC_LINK_SPEED_BITMAP 0x00000117
-#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8"
-
 /**
  * lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules
  * @incr: integer to convert.
@@ -463,7 +459,7 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
 		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
 			len += snprintf(buf + len, PAGE_SIZE-len,
 					"   Menlo Maint Mode\n");
-		else if (phba->fc_topology == TOPOLOGY_LOOP) {
+		else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 			if (vport->fc_flag & FC_PUBLIC_LOOP)
 				len += snprintf(buf + len, PAGE_SIZE-len,
 						"   Public Loop\n");
@@ -1981,6 +1977,13 @@ lpfc_param_show(enable_npiv);
 lpfc_param_init(enable_npiv, 1, 0, 1);
 static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
 
+int lpfc_enable_rrq;
+module_param(lpfc_enable_rrq, int, 0);
+MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
+lpfc_param_show(enable_rrq);
+lpfc_param_init(enable_rrq, 0, 0, 1);
+static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);
+
 /*
 # lpfc_suppress_link_up:  Bring link up at initialization
 #            0x0  = bring link up (issue MBX_INIT_LINK)
@@ -2837,14 +2840,8 @@ static struct bin_attribute sysfs_drvr_stat_data_attr = {
 /*
 # lpfc_link_speed: Link speed selection for initializing the Fibre Channel
 # connection.
-#       0  = auto select (default)
-#       1  = 1 Gigabaud
-#       2  = 2 Gigabaud
-#       4  = 4 Gigabaud
-#       8  = 8 Gigabaud
-# Value range is [0,8]. Default value is 0.
+# Value range is [0,16]. Default value is 0.
 */
-
 /**
  * lpfc_link_speed_set - Set the adapters link speed
  * @phba: lpfc_hba pointer.
@@ -2869,7 +2866,7 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
 	struct Scsi_Host  *shost = class_to_shost(dev);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
-	int val = 0;
+	int val = LPFC_USER_LINK_SPEED_AUTO;
 	int nolip = 0;
 	const char *val_buf = buf;
 	int err;
@@ -2885,15 +2882,20 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
 	if (sscanf(val_buf, "%i", &val) != 1)
 		return -EINVAL;
 
-	if (((val == LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
-		((val == LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
-		((val == LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
-		((val == LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
-		((val == LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)))
+	if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb))) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2879 lpfc_link_speed attribute cannot be set "
+				"to %d. Speed is not supported by this port.\n",
+				val);
 		return -EINVAL;
-
-	if ((val >= 0 && val <= 8)
-		&& (LPFC_LINK_SPEED_BITMAP & (1 << val))) {
+	}
+	if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
+	    (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
 		prev_val = phba->cfg_link_speed;
 		phba->cfg_link_speed = val;
 		if (nolip)
@@ -2906,11 +2908,9 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
 		} else
 			return strlen(buf);
 	}
-
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-		"%d:0469 lpfc_link_speed attribute cannot be set to %d, "
-		"allowed range is [0, 8]\n",
-		phba->brd_no, val);
+		"0469 lpfc_link_speed attribute cannot be set to %d, "
+		"allowed values are ["LPFC_LINK_SPEED_STRING"]\n", val);
 	return -EINVAL;
 }
 
@@ -2938,8 +2938,8 @@ lpfc_param_show(link_speed)
 static int
 lpfc_link_speed_init(struct lpfc_hba *phba, int val)
 {
-	if ((val >= 0 && val <= LPFC_MAX_LINK_SPEED)
-		&& (LPFC_LINK_SPEED_BITMAP & (1 << val))) {
+	if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
+	    (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
 		phba->cfg_link_speed = val;
 		return 0;
 	}
@@ -2947,12 +2947,12 @@ lpfc_link_speed_init(struct lpfc_hba *phba, int val)
 			"0405 lpfc_link_speed attribute cannot "
 			"be set to %d, allowed values are "
 			"["LPFC_LINK_SPEED_STRING"]\n", val);
-	phba->cfg_link_speed = 0;
+	phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
 	return -EINVAL;
 }
 
 static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
-		lpfc_link_speed_show, lpfc_link_speed_store);
+		   lpfc_link_speed_show, lpfc_link_speed_store);
 
 /*
 # lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
@@ -3305,12 +3305,12 @@ LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX,
 LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
 
 /*
-# lpfc_enable_hba_heartbeat: Enable HBA heartbeat timer..
+# lpfc_enable_hba_heartbeat: Disable HBA heartbeat timer..
 #       0  = HBA Heartbeat disabled
 #       1  = HBA Heartbeat enabled (default)
 # Value range is [0,1]. Default value is 1.
 */
-LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
+LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
 
 /*
 # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
@@ -3401,6 +3401,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_fdmi_on,
 	&dev_attr_lpfc_max_luns,
 	&dev_attr_lpfc_enable_npiv,
+	&dev_attr_lpfc_enable_rrq,
 	&dev_attr_nport_evt_cnt,
 	&dev_attr_board_mode,
 	&dev_attr_max_vpi,
@@ -3798,8 +3799,7 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj,
 			}
 			break;
 		case MBX_READ_SPARM64:
-		case MBX_READ_LA:
-		case MBX_READ_LA64:
+		case MBX_READ_TOPOLOGY:
 		case MBX_REG_LOGIN:
 		case MBX_REG_LOGIN64:
 		case MBX_CONFIG_PORT:
@@ -3989,7 +3989,7 @@ lpfc_get_host_port_type(struct Scsi_Host *shost)
 	if (vport->port_type == LPFC_NPIV_PORT) {
 		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
 	} else if (lpfc_is_link_up(phba)) {
-		if (phba->fc_topology == TOPOLOGY_LOOP) {
+		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 			if (vport->fc_flag & FC_PUBLIC_LOOP)
 				fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
 			else
@@ -4058,23 +4058,26 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
 
 	if (lpfc_is_link_up(phba)) {
 		switch(phba->fc_linkspeed) {
-			case LA_1GHZ_LINK:
-				fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+		case LPFC_LINK_SPEED_1GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
 			break;
-			case LA_2GHZ_LINK:
-				fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
+		case LPFC_LINK_SPEED_2GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
 			break;
-			case LA_4GHZ_LINK:
-				fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
+		case LPFC_LINK_SPEED_4GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
 			break;
-			case LA_8GHZ_LINK:
-				fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
+		case LPFC_LINK_SPEED_8GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
 			break;
-			case LA_10GHZ_LINK:
-				fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+		case LPFC_LINK_SPEED_10GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
 			break;
-			default:
-				fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+		case LPFC_LINK_SPEED_16GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
+			break;
+		default:
+			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
 			break;
 		}
 	} else
@@ -4097,7 +4100,7 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost)
 	spin_lock_irq(shost->host_lock);
 
 	if ((vport->fc_flag & FC_FABRIC) ||
-	    ((phba->fc_topology == TOPOLOGY_LOOP) &&
+	    ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
 	     (vport->fc_flag & FC_PUBLIC_LOOP)))
 		node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
 	else
@@ -4208,11 +4211,11 @@ lpfc_get_stats(struct Scsi_Host *shost)
 	hs->invalid_crc_count -= lso->invalid_crc_count;
 	hs->error_frames -= lso->error_frames;
 
-	if (phba->hba_flag & HBA_FCOE_SUPPORT) {
+	if (phba->hba_flag & HBA_FCOE_MODE) {
 		hs->lip_count = -1;
 		hs->nos_count = (phba->link_events >> 1);
 		hs->nos_count -= lso->link_events;
-	} else if (phba->fc_topology == TOPOLOGY_LOOP) {
+	} else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 		hs->lip_count = (phba->fc_eventTag >> 1);
 		hs->lip_count -= lso->link_events;
 		hs->nos_count = -1;
@@ -4303,7 +4306,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
 	lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
 	lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
 	lso->error_frames = pmb->un.varRdLnk.crcCnt;
-	if (phba->hba_flag & HBA_FCOE_SUPPORT)
+	if (phba->hba_flag & HBA_FCOE_MODE)
 		lso->link_events = (phba->link_events >> 1);
 	else
 		lso->link_events = (phba->fc_eventTag >> 1);
@@ -4615,6 +4618,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_link_speed_init(phba, lpfc_link_speed);
 	lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
 	lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
+	lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
 	lpfc_use_msi_init(phba, lpfc_use_msi);
 	lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
 	lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);

drivers/scsi/lpfc/lpfc_bsg.c  (+24 -41)

@@ -162,7 +162,6 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
 			struct lpfc_iocbq *cmdiocbq,
 			struct lpfc_iocbq *rspiocbq)
 {
-	unsigned long iflags;
 	struct bsg_job_data *dd_data;
 	struct fc_bsg_job *job;
 	IOCB_t *rsp;
@@ -173,9 +172,10 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
 	int rc = 0;
 
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
-	dd_data = cmdiocbq->context1;
+	dd_data = cmdiocbq->context2;
 	if (!dd_data) {
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		lpfc_sli_release_iocbq(phba, cmdiocbq);
 		return;
 	}
 
@@ -183,17 +183,9 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
 	job = iocb->set_job;
 	job->dd_data = NULL; /* so timeout handler does not reply */
 
-	spin_lock_irqsave(&phba->hbalock, iflags);
-	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
-	if (cmdiocbq->context2 && rspiocbq)
-		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
-		       &rspiocbq->iocb, sizeof(IOCB_t));
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
-
 	bmp = iocb->bmp;
-	rspiocbq = iocb->rspiocbq;
 	rsp = &rspiocbq->iocb;
-	ndlp = iocb->ndlp;
+	ndlp = cmdiocbq->context1;
 
 	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
@@ -220,7 +212,6 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
 			rsp->un.genreq64.bdl.bdeSize;
 
 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-	lpfc_sli_release_iocbq(phba, rspiocbq);
 	lpfc_sli_release_iocbq(phba, cmdiocbq);
 	lpfc_nlp_put(ndlp);
 	kfree(bmp);
@@ -247,9 +238,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 	struct ulp_bde64 *bpl = NULL;
 	uint32_t timeout;
 	struct lpfc_iocbq *cmdiocbq = NULL;
-	struct lpfc_iocbq *rspiocbq = NULL;
 	IOCB_t *cmd;
-	IOCB_t *rsp;
 	struct lpfc_dmabuf *bmp = NULL;
 	int request_nseg;
 	int reply_nseg;
@@ -296,17 +285,10 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 	}
 
 	cmd = &cmdiocbq->iocb;
-	rspiocbq = lpfc_sli_get_iocbq(phba);
-	if (!rspiocbq) {
-		rc = -ENOMEM;
-		goto free_cmdiocbq;
-	}
-
-	rsp = &rspiocbq->iocb;
 	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
 	if (!bmp->virt) {
 		rc = -ENOMEM;
-		goto free_rspiocbq;
+		goto free_cmdiocbq;
 	}
 
 	INIT_LIST_HEAD(&bmp->list);
@@ -358,14 +340,12 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 	cmd->ulpTimeout = timeout;
 
 	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
-	cmdiocbq->context1 = dd_data;
-	cmdiocbq->context2 = rspiocbq;
+	cmdiocbq->context1 = ndlp;
+	cmdiocbq->context2 = dd_data;
 	dd_data->type = TYPE_IOCB;
 	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
-	dd_data->context_un.iocb.rspiocbq = rspiocbq;
 	dd_data->context_un.iocb.set_job = job;
 	dd_data->context_un.iocb.bmp = bmp;
-	dd_data->context_un.iocb.ndlp = ndlp;
 
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
 		creg_val = readl(phba->HCregaddr);
@@ -391,8 +371,6 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 
 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
 
-free_rspiocbq:
-	lpfc_sli_release_iocbq(phba, rspiocbq);
 free_cmdiocbq:
 	lpfc_sli_release_iocbq(phba, cmdiocbq);
 free_bmp:
@@ -1220,7 +1198,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
 	int rc = 0;
 
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
-	dd_data = cmdiocbq->context1;
+	dd_data = cmdiocbq->context2;
 	/* normal completion and timeout crossed paths, already done */
 	if (!dd_data) {
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
@@ -1369,8 +1347,8 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
 	ctiocb->context3 = bmp;
 
 	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
-	ctiocb->context1 = dd_data;
-	ctiocb->context2 = NULL;
+	ctiocb->context2 = dd_data;
+	ctiocb->context1 = ndlp;
 	dd_data->type = TYPE_IOCB;
 	dd_data->context_un.iocb.cmdiocbq = ctiocb;
 	dd_data->context_un.iocb.rspiocbq = NULL;
@@ -1641,7 +1619,7 @@ job_error:
  * This function obtains a remote port login id so the diag loopback test
  * can send and receive its own unsolicited CT command.
  **/
-static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi)
+static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
 {
 	LPFC_MBOXQ_t *mbox;
 	struct lpfc_dmabuf *dmabuff;
@@ -1651,10 +1629,14 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi)
 	if (!mbox)
 		return -ENOMEM;
 
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		*rpi = lpfc_sli4_alloc_rpi(phba);
 	status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
-				(uint8_t *)&phba->pport->fc_sparam, mbox, 0);
+			      (uint8_t *)&phba->pport->fc_sparam, mbox, *rpi);
 	if (status) {
 		mempool_free(mbox, phba->mbox_mem_pool);
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_free_rpi(phba, *rpi);
 		return -ENOMEM;
 	}
 
@@ -1668,6 +1650,8 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi)
 		kfree(dmabuff);
 		if (status != MBX_TIMEOUT)
 			mempool_free(mbox, phba->mbox_mem_pool);
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_free_rpi(phba, *rpi);
 		return -ENODEV;
 	}
 
@@ -1704,8 +1688,9 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
 			mempool_free(mbox, phba->mbox_mem_pool);
 		return -EIO;
 	}
-
 	mempool_free(mbox, phba->mbox_mem_pool);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_sli4_free_rpi(phba, rpi);
 	return 0;
 }
 
@@ -2102,7 +2087,7 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job)
 	uint32_t size;
 	uint32_t full_size;
 	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
-	uint16_t rpi;
+	uint16_t rpi = 0;
 	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
 	IOCB_t *cmd, *rsp;
 	struct lpfc_sli_ct_request *ctreq;
@@ -2162,7 +2147,7 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job)
 		goto loopback_test_exit;
 	}
 
-	if (size >= BUF_SZ_4K) {
+	if (full_size >= BUF_SZ_4K) {
 		/*
 		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
 		 * then we allocate 64k and re-use that buffer over and over to
@@ -2171,7 +2156,7 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job)
 		 * problem with GET_FCPTARGETMAPPING...
 		 */
 		if (size <= (64 * 1024))
-			total_mem = size;
+			total_mem = full_size;
 		else
 			total_mem = 64 * 1024;
 	} else
@@ -2189,7 +2174,6 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job)
 	sg_copy_to_buffer(job->request_payload.sg_list,
 				job->request_payload.sg_cnt,
 				ptr, size);
-
 	rc = lpfcdiag_loop_self_reg(phba, &rpi);
 	if (rc)
 		goto loopback_test_exit;
@@ -2601,12 +2585,11 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
 			phba->wait_4_mlo_maint_flg = 1;
 		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
 			phba->link_flag &= ~LS_LOOPBACK_MODE;
-			phba->fc_topology = TOPOLOGY_PT_PT;
+			phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
 		}
 		break;
 	case MBX_READ_SPARM64:
-	case MBX_READ_LA:
-	case MBX_READ_LA64:
+	case MBX_READ_TOPOLOGY:
 	case MBX_REG_LOGIN:
 	case MBX_REG_LOGIN64:
 	case MBX_CONFIG_PORT:

drivers/scsi/lpfc/lpfc_crtn.h  (+14 -5)

@@ -31,7 +31,7 @@ void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 
 void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *);
+int lpfc_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *);
 void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_issue_clear_la(struct lpfc_hba *, struct lpfc_vport *);
 void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -40,7 +40,7 @@ int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
 void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
 int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
-		 LPFC_MBOXQ_t *, uint32_t);
+		 LPFC_MBOXQ_t *, uint16_t);
 void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
 void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
 void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
@@ -64,7 +64,7 @@ void lpfc_cleanup_pending_mbox(struct lpfc_vport *);
 int lpfc_linkdown(struct lpfc_hba *);
 void lpfc_linkdown_port(struct lpfc_vport *);
 void lpfc_port_link_failure(struct lpfc_vport *);
-void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
 void lpfc_retry_pport_discovery(struct lpfc_hba *);
@@ -121,6 +121,7 @@ void lpfc_end_rscn(struct lpfc_vport *);
 int lpfc_els_chk_latt(struct lpfc_vport *);
 int lpfc_els_abort_flogi(struct lpfc_hba *);
 int lpfc_initial_flogi(struct lpfc_vport *);
+void lpfc_issue_init_vfi(struct lpfc_vport *);
 int lpfc_initial_fdisc(struct lpfc_vport *);
 int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t);
 int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
@@ -415,5 +416,13 @@ struct lpfc_iocbq *lpfc_sli_ringtx_get(struct lpfc_hba *,
 int __lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
 	struct lpfc_iocbq *, uint32_t);
 uint32_t lpfc_drain_txq(struct lpfc_hba *);
-
-
+void lpfc_clr_rrq_active(struct lpfc_hba *, uint16_t, struct lpfc_node_rrq *);
+int lpfc_test_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *, uint16_t);
+void lpfc_handle_rrq_active(struct lpfc_hba *);
+int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
+int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
+	uint16_t, uint16_t, uint16_t);
+void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
+void lpfc_cleanup_vports_rrqs(struct lpfc_vport *);
+struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
+	uint32_t);

drivers/scsi/lpfc/lpfc_ct.c  (+26 -22)

@@ -48,14 +48,14 @@
 #include "lpfc_vport.h"
 #include "lpfc_debugfs.h"
 
-#define HBA_PORTSPEED_UNKNOWN               0	/* Unknown - transceiver
-						 * incapable of reporting */
-#define HBA_PORTSPEED_1GBIT                 1	/* 1 GBit/sec */
-#define HBA_PORTSPEED_2GBIT                 2	/* 2 GBit/sec */
-#define HBA_PORTSPEED_4GBIT                 8   /* 4 GBit/sec */
-#define HBA_PORTSPEED_8GBIT                16   /* 8 GBit/sec */
-#define HBA_PORTSPEED_10GBIT                4	/* 10 GBit/sec */
-#define HBA_PORTSPEED_NOT_NEGOTIATED        5	/* Speed not established */
+/* FDMI Port Speed definitions */
+#define HBA_PORTSPEED_1GBIT		0x0001	/* 1 GBit/sec */
+#define HBA_PORTSPEED_2GBIT		0x0002	/* 2 GBit/sec */
+#define HBA_PORTSPEED_4GBIT		0x0008	/* 4 GBit/sec */
+#define HBA_PORTSPEED_10GBIT		0x0004	/* 10 GBit/sec */
+#define HBA_PORTSPEED_8GBIT		0x0010	/* 8 GBit/sec */
+#define HBA_PORTSPEED_16GBIT		0x0020	/* 16 GBit/sec */
+#define HBA_PORTSPEED_UNKNOWN		0x0800	/* Unknown */
 
 #define FOURBYTES	4
 
@@ -1593,8 +1593,10 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
 			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
 
 			ae->un.SupportSpeed = 0;
+			if (phba->lmt & LMT_16Gb)
+				ae->un.SupportSpeed |= HBA_PORTSPEED_16GBIT;
 			if (phba->lmt & LMT_10Gb)
-				ae->un.SupportSpeed = HBA_PORTSPEED_10GBIT;
+				ae->un.SupportSpeed |= HBA_PORTSPEED_10GBIT;
 			if (phba->lmt & LMT_8Gb)
 				ae->un.SupportSpeed |= HBA_PORTSPEED_8GBIT;
 			if (phba->lmt & LMT_4Gb)
@@ -1612,24 +1614,26 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
 			ae->ad.bits.AttrType = be16_to_cpu(PORT_SPEED);
 			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
 			switch(phba->fc_linkspeed) {
-				case LA_1GHZ_LINK:
-					ae->un.PortSpeed = HBA_PORTSPEED_1GBIT;
+			case LPFC_LINK_SPEED_1GHZ:
+				ae->un.PortSpeed = HBA_PORTSPEED_1GBIT;
 				break;
-				case LA_2GHZ_LINK:
-					ae->un.PortSpeed = HBA_PORTSPEED_2GBIT;
+			case LPFC_LINK_SPEED_2GHZ:
+				ae->un.PortSpeed = HBA_PORTSPEED_2GBIT;
 				break;
-				case LA_4GHZ_LINK:
-					ae->un.PortSpeed = HBA_PORTSPEED_4GBIT;
+			case LPFC_LINK_SPEED_4GHZ:
+				ae->un.PortSpeed = HBA_PORTSPEED_4GBIT;
 				break;
-				case LA_8GHZ_LINK:
-					ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
+			case LPFC_LINK_SPEED_8GHZ:
+				ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
 				break;
-				case LA_10GHZ_LINK:
-					ae->un.PortSpeed = HBA_PORTSPEED_10GBIT;
+			case LPFC_LINK_SPEED_10GHZ:
+				ae->un.PortSpeed = HBA_PORTSPEED_10GBIT;
 				break;
-				default:
-					ae->un.PortSpeed =
-						HBA_PORTSPEED_UNKNOWN;
+			case LPFC_LINK_SPEED_16GHZ:
+				ae->un.PortSpeed = HBA_PORTSPEED_16GBIT;
+				break;
+			default:
+				ae->un.PortSpeed = HBA_PORTSPEED_UNKNOWN;
 				break;
 			}
 			pab->ab.EntryCnt++;

drivers/scsi/lpfc/lpfc_disc.h  (+18 -1)

@@ -68,6 +68,12 @@ struct lpfc_fast_path_event {
 	} un;
 };
 
+#define LPFC_SLI4_MAX_XRI	1024	/* Used to make the ndlp's xri_bitmap */
+#define XRI_BITMAP_ULONGS (LPFC_SLI4_MAX_XRI / BITS_PER_LONG)
+struct lpfc_node_rrqs {
+	unsigned long xri_bitmap[XRI_BITMAP_ULONGS];
+};
+
 struct lpfc_nodelist {
 	struct list_head nlp_listp;
 	struct lpfc_name nlp_portname;
@@ -110,8 +116,19 @@ struct lpfc_nodelist {
 	atomic_t cmd_pending;
 	uint32_t cmd_qdepth;
 	unsigned long last_change_time;
+	struct lpfc_node_rrqs active_rrqs;
 	struct lpfc_scsicmd_bkt *lat_data;	/* Latency data */
 };
+struct lpfc_node_rrq {
+	struct list_head list;
+	uint16_t xritag;
+	uint16_t send_rrq;
+	uint16_t rxid;
+	uint32_t         nlp_DID;		/* FC D_ID of entry */
+	struct lpfc_vport *vport;
+	struct lpfc_nodelist *ndlp;
+	unsigned long rrq_stop_time;
+};
 
 /* Defines for nlp_flag (uint32) */
 #define NLP_IGNR_REG_CMPL  0x00000001 /* Rcvd rscn before we cmpl reg login */
@@ -136,7 +153,7 @@ struct lpfc_nodelist {
 #define NLP_NODEV_REMOVE   0x08000000	/* Defer removal till discovery ends */
 #define NLP_TARGET_REMOVE  0x10000000   /* Target remove in process */
 #define NLP_SC_REQ         0x20000000	/* Target requires authentication */
-#define NLP_RPI_VALID      0x80000000	/* nlp_rpi is valid */
+#define NLP_RPI_REGISTERED 0x80000000	/* nlp_rpi is valid */
 
 /* ndlp usage management macros */
 #define NLP_CHK_NODE_ACT(ndlp)		(((ndlp)->nlp_usg_map \

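Note on the lpfc_disc.h additions above: each node now tracks active RRQs in a 1024-bit XRI bitmap sized as LPFC_SLI4_MAX_XRI / BITS_PER_LONG unsigned longs. The userspace sketch below shows that packing with illustrative helpers in place of the kernel's set_bit()/test_bit()/clear_bit().

#include <limits.h>
#include <stdio.h>

#define MAX_XRI		1024			/* mirrors LPFC_SLI4_MAX_XRI */
#define BITS_PER_ULONG	(sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_ULONGS	(MAX_XRI / BITS_PER_ULONG)

static unsigned long xri_bitmap[BITMAP_ULONGS];

static void xri_set(unsigned int xri)
{
	xri_bitmap[xri / BITS_PER_ULONG] |= 1UL << (xri % BITS_PER_ULONG);
}

static int xri_test(unsigned int xri)
{
	return !!(xri_bitmap[xri / BITS_PER_ULONG] & (1UL << (xri % BITS_PER_ULONG)));
}

static void xri_clear(unsigned int xri)
{
	xri_bitmap[xri / BITS_PER_ULONG] &= ~(1UL << (xri % BITS_PER_ULONG));
}

int main(void)
{
	xri_set(100);
	printf("%d %d\n", xri_test(100), xri_test(101));	/* 1 0 */
	xri_clear(100);
	printf("%d\n", xri_test(100));				/* 0 */
	return 0;
}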
drivers/scsi/lpfc/lpfc_els.c  (+288 -39)

@@ -375,7 +375,8 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
 		err = 4;
 		goto fail;
 	}
-	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0);
+	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
+			  ndlp->nlp_rpi);
 	if (rc) {
 		err = 5;
 		goto fail_free_mbox;
@@ -523,7 +524,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	phba->fc_edtovResol = sp->cmn.edtovResolution;
 	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
 
-	if (phba->fc_topology == TOPOLOGY_LOOP) {
+	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag |= FC_PUBLIC_LOOP;
 		spin_unlock_irq(shost->host_lock);
@@ -832,6 +833,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 			goto out;
 
+		/* FLOGI failure */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
+				 irsp->ulpStatus, irsp->un.ulpWord[4],
+				 irsp->ulpTimeout);
+
 		/* FLOGI failed, so there is no fabric */
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -843,13 +850,16 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		 */
 		if (phba->alpa_map[0] == 0) {
 			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
+			if ((phba->sli_rev == LPFC_SLI_REV4) &&
+			    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
+			     (vport->fc_prevDID != vport->fc_myDID))) {
+				if (vport->fc_flag & FC_VFI_REGISTERED)
+					lpfc_sli4_unreg_all_rpis(vport);
+				lpfc_issue_reg_vfi(vport);
+				lpfc_nlp_put(ndlp);
+				goto out;
+			}
 		}
-
-		/* FLOGI failure */
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
-				 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
-				 irsp->ulpStatus, irsp->un.ulpWord[4],
-				 irsp->ulpTimeout);
 		goto flogifail;
 	}
 	spin_lock_irq(shost->host_lock);
@@ -879,7 +889,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		 */
 		if (sp->cmn.fPort)
 			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
-		else if (!(phba->hba_flag & HBA_FCOE_SUPPORT))
+		else if (!(phba->hba_flag & HBA_FCOE_MODE))
 			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
 		else {
 			lpfc_printf_vlog(vport, KERN_ERR,
@@ -1014,7 +1024,9 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	if (sp->cmn.fcphHigh < FC_PH3)
 		sp->cmn.fcphHigh = FC_PH3;
 
-	if  (phba->sli_rev == LPFC_SLI_REV4) {
+	if  ((phba->sli_rev == LPFC_SLI_REV4) &&
+	     (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+	      LPFC_SLI_INTF_IF_TYPE_0)) {
 		elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
 		elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
 		/* FLOGI needs to be 3 for WQE FCFI */
@@ -1027,7 +1039,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		icmd->ulpCt_l = 0;
 	}
 
-	if (phba->fc_topology != TOPOLOGY_LOOP) {
+	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
 		icmd->un.elsreq64.myID = 0;
 		icmd->un.elsreq64.fl = 1;
 	}
@@ -1281,6 +1293,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 	uint32_t rc, keepDID = 0;
 	int  put_node;
 	int  put_rport;
+	struct lpfc_node_rrqs rrq;
 
 	/* Fabric nodes can have the same WWPN so we don't bother searching
 	 * by WWPN.  Just return the ndlp that was given to us.
@@ -1298,6 +1311,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 
 	if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
 		return ndlp;
+	memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));
 
 	if (!new_ndlp) {
 		rc = memcmp(&ndlp->nlp_portname, name,
@@ -1318,12 +1332,25 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 		if (!new_ndlp)
 			return ndlp;
 		keepDID = new_ndlp->nlp_DID;
-	} else
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			memcpy(&rrq.xri_bitmap,
+				&new_ndlp->active_rrqs.xri_bitmap,
+				sizeof(new_ndlp->active_rrqs.xri_bitmap));
+	} else {
 		keepDID = new_ndlp->nlp_DID;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			memcpy(&rrq.xri_bitmap,
+				&new_ndlp->active_rrqs.xri_bitmap,
+				sizeof(new_ndlp->active_rrqs.xri_bitmap));
+	}
 
 	lpfc_unreg_rpi(vport, new_ndlp);
 	new_ndlp->nlp_DID = ndlp->nlp_DID;
 	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		memcpy(new_ndlp->active_rrqs.xri_bitmap,
+			&ndlp->active_rrqs.xri_bitmap,
+			sizeof(ndlp->active_rrqs.xri_bitmap));
 
 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
 		new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
@@ -1362,12 +1389,20 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 
 		/* Two ndlps cannot have the same did on the nodelist */
 		ndlp->nlp_DID = keepDID;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			memcpy(&ndlp->active_rrqs.xri_bitmap,
+				&rrq.xri_bitmap,
+				sizeof(ndlp->active_rrqs.xri_bitmap));
 		lpfc_drop_node(vport, ndlp);
 	}
 	else {
 		lpfc_unreg_rpi(vport, ndlp);
 		/* Two ndlps cannot have the same did */
 		ndlp->nlp_DID = keepDID;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			memcpy(&ndlp->active_rrqs.xri_bitmap,
+				&rrq.xri_bitmap,
+				sizeof(ndlp->active_rrqs.xri_bitmap));
 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 		/* Since we are swapping the ndlp passed in with the new one
 		 * and the did has already been swapped, copy over the
@@ -1427,6 +1462,73 @@ lpfc_end_rscn(struct lpfc_vport *vport)
 	}
 }
 
+/**
+ * lpfc_cmpl_els_rrq - Completion handler for ELS RRQs.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine will call the clear rrq function to free the rrq and
+ * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
+ * exist then the clear_rrq is still called because the rrq needs to
+ * be freed.
+ **/
+
+static void
+lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		    struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	IOCB_t *irsp;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_node_rrq *rrq;
+
+	/* we pass cmdiocb to state machine which needs rspiocb as well */
+	rrq = cmdiocb->context_un.rrq;
+	cmdiocb->context_un.rsp_iocb = rspiocb;
+
+	irsp = &rspiocb->iocb;
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"RRQ cmpl:      status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		irsp->un.elsreq64.remoteID);
+
+	ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2882 RRQ completes to NPort x%x "
+				 "with no ndlp. Data: x%x x%x x%x\n",
+				 irsp->un.elsreq64.remoteID,
+				 irsp->ulpStatus, irsp->un.ulpWord[4],
+				 irsp->ulpIoTag);
+		goto out;
+	}
+
+	/* rrq completes to NPort <nlp_DID> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "2880 RRQ completes to NPort x%x "
+			 "Data: x%x x%x x%x x%x x%x\n",
+			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+			 irsp->ulpTimeout, rrq->xritag, rrq->rxid);
+
+	if (irsp->ulpStatus) {
+		/* Check for retry */
+		/* RRQ failed. Don't print the vport to vport rjts */
+		if (irsp->ulpStatus != IOSTAT_LS_RJT ||
+			(((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
+			((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
+			(phba)->pport->cfg_log_verbose & LOG_ELS)
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2881 RRQ failure DID:%06X Status:x%x/x%x\n",
+				 ndlp->nlp_DID, irsp->ulpStatus,
+				 irsp->un.ulpWord[4]);
+	}
+out:
+	if (rrq)
+		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+	lpfc_els_free_iocb(phba, cmdiocb);
+	return;
+}
 /**
  * lpfc_cmpl_els_plogi - Completion callback function for plogi
  * @phba: pointer to lpfc hba data structure.
@@ -2722,7 +2824,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			if (cmd == ELS_CMD_FLOGI) {
 				if (PCI_DEVICE_ID_HORNET ==
 					phba->pcidev->device) {
-					phba->fc_topology = TOPOLOGY_LOOP;
+					phba->fc_topology = LPFC_TOPOLOGY_LOOP;
 					phba->pport->fc_myDID = 0;
 					phba->alpa_map[0] = 0;
 					phba->alpa_map[1] = 0;
@@ -2877,7 +2979,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		retry = 1;
 
 	if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
-	    (phba->fc_topology != TOPOLOGY_LOOP) &&
+	    (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
 	    !lpfc_error_lost_link(irsp)) {
 		/* FLOGI retry policy */
 		retry = 1;
@@ -3219,14 +3321,6 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
 
-	/*
-	 * This routine is used to register and unregister in previous SLI
-	 * modes.
-	 */
-	if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
-	    (phba->sli_rev == LPFC_SLI_REV4))
-		lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
-
 	pmb->context1 = NULL;
 	pmb->context2 = NULL;
 
@@ -3903,6 +3997,47 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
 	return 0;
 }
 
+/**
+ * lpfc_els_clear_rrq - Clear the rrq that this RRQ describes.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @iocb: pointer to the lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return: None
+ **/
+static void
+lpfc_els_clear_rrq(struct lpfc_vport *vport,
+      struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	uint8_t *pcmd;
+	struct RRQ *rrq;
+	uint16_t rxid;
+	struct lpfc_node_rrq *prrq;
+
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
+	pcmd += sizeof(uint32_t);
+	rrq = (struct RRQ *)pcmd;
+	rxid = bf_get(rrq_oxid, rrq);
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			"2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
+			" x%x x%x\n",
+			bf_get(rrq_did, rrq),
+			bf_get(rrq_oxid, rrq),
+			rxid,
+			iocb->iotag, iocb->iocb.ulpContext);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+		"Clear RRQ:  did:x%x flg:x%x exchg:x%.08x",
+		ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
+	prrq = lpfc_get_active_rrq(vport, rxid, ndlp->nlp_DID);
+	if (prrq)
+		lpfc_clr_rrq_active(phba, rxid, prrq);
+	return;
+}
+
 /**
  * lpfc_els_rsp_echo_acc - Issue echo acc response
  * @vport: pointer to a virtual N_Port data structure.
@@ -4597,7 +4732,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 
 	lpfc_set_disctmo(vport);
 
-	if (phba->fc_topology == TOPOLOGY_LOOP) {
+	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 		/* We should never receive a FLOGI in loop mode, ignore it */
 		did = icmd->un.elsreq64.remoteID;
 
@@ -4792,6 +4927,8 @@ lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 		 struct lpfc_nodelist *ndlp)
 {
 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+	if (vport->phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
 }
 
 /**
@@ -4940,7 +5077,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	pcmd += sizeof(uint32_t); /* Skip past command */
 	rps_rsp = (RPS_RSP *)pcmd;
 
-	if (phba->fc_topology != TOPOLOGY_LOOP)
+	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
 		status = 0x10;
 	else
 		status = 0x8;
@@ -5194,6 +5331,97 @@ reject_out:
 	return 0;
 }
 
+/**
+ * lpfc_issue_els_rrq - Issue an ELS RRQ command to a remote NPort
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @did: DID of the target.
+ * @rrq: Pointer to the rrq struct.
+ *
+ * Build an ELS RRQ command and send it to the target. If the issue_iocb is
+ * successful, the completion handler will clear the RRQ.
+ *
+ * Return codes
+ *   0 - Successfully sent rrq els iocb.
+ *   1 - Failed to send rrq els iocb.
+ **/
+static int
+lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			uint32_t did, struct lpfc_node_rrq *rrq)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct RRQ *els_rrq;
+	IOCB_t *icmd;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int ret;
+
+
+	if (ndlp != rrq->ndlp)
+		ndlp = rrq->ndlp;
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+		return 1;
+
+	/* If ndlp is not NULL, we will bump the reference count on it */
+	cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
+				     ELS_CMD_RRQ);
+	if (!elsiocb)
+		return 1;
+
+	icmd = &elsiocb->iocb;
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	/* For RRQ request, remainder of payload is Exchange IDs */
+	*((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
+	pcmd += sizeof(uint32_t);
+	els_rrq = (struct RRQ *) pcmd;
+
+	bf_set(rrq_oxid, els_rrq, rrq->xritag);
+	bf_set(rrq_rxid, els_rrq, rrq->rxid);
+	bf_set(rrq_did, els_rrq, vport->fc_myDID);
+	els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
+	els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
+
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue RRQ:     did:x%x",
+		did, rrq->xritag, rrq->rxid);
+	elsiocb->context_un.rrq = rrq;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
+	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+
+	if (ret == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_send_rrq - Sends ELS RRQ if needed.
+ * @phba: pointer to lpfc hba data structure.
+ * @rrq: pointer to the active rrq.
+ *
+ * This routine will call lpfc_issue_els_rrq() if the rrq is
+ * still active for the xri. If this function returns a failure then
+ * the caller needs to clean up the RRQ by calling lpfc_clr_rrq_active().
+ *
+ * Returns 0 Success.
+ *         1 Failure.
+ **/
+int
+lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
+{
+	struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
+							rrq->nlp_DID);
+	if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
+		return lpfc_issue_els_rrq(rrq->vport, ndlp,
+					 rrq->nlp_DID, rrq);
+	else
+		return 1;
+}
+
 /**
  * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
  * @vport: pointer to a host virtual N_Port data structure.
@@ -5482,7 +5710,7 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 		    (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
 			    sizeof(struct lpfc_name)))) {
 			/* This port has switched fabrics. FLOGI is required */
-			lpfc_initial_flogi(vport);
+			lpfc_issue_init_vfi(vport);
 		} else {
 			/* FAN verified - skip FLOGI */
 			vport->fc_myDID = vport->fc_prevDID;
@@ -6201,7 +6429,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			cmd, did, vport->port_state);
 
 		/* Unsupported ELS command, reject */
-		rjt_err = LSRJT_INVALID_CMD;
+		rjt_err = LSRJT_CMD_UNSUPPORTED;
 
 		/* Unknown ELS command <elsCmd> received from NPORT <did> */
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -6373,7 +6601,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
 	if (!ndlp) {
 		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
 		if (!ndlp) {
-			if (phba->fc_topology == TOPOLOGY_LOOP) {
+			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 				lpfc_disc_start(vport);
 				return;
 			}
@@ -6386,7 +6614,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
 		if (!ndlp) {
-			if (phba->fc_topology == TOPOLOGY_LOOP) {
+			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 				lpfc_disc_start(vport);
 				return;
 			}
@@ -6408,18 +6636,31 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
 	}
 
 	if (vport->cfg_fdmi_on) {
-		ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
-					  GFP_KERNEL);
+		/* If this is the first time, allocate an ndlp and initialize
+		 * it. Otherwise, make sure the node is enabled and then do the
+		 * login.
+		 */
+		ndlp_fdmi = lpfc_findnode_did(vport, FDMI_DID);
+		if (!ndlp_fdmi) {
+			ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
+						  GFP_KERNEL);
+			if (ndlp_fdmi) {
+				lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
+				ndlp_fdmi->nlp_type |= NLP_FABRIC;
+			} else
+				return;
+		}
+		if (!NLP_CHK_NODE_ACT(ndlp_fdmi))
+			ndlp_fdmi = lpfc_enable_node(vport,
+						     ndlp_fdmi,
+						     NLP_STE_NPR_NODE);
+
 		if (ndlp_fdmi) {
-			lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
-			ndlp_fdmi->nlp_type |= NLP_FABRIC;
 			lpfc_nlp_set_state(vport, ndlp_fdmi,
-				NLP_STE_PLOGI_ISSUE);
-			lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID,
-					     0);
+					   NLP_STE_PLOGI_ISSUE);
+			lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, 0);
 		}
 	}
-	return;
 }
 
 /**
@@ -6497,7 +6738,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 			spin_unlock_irq(shost->host_lock);
 			if (vport->port_type == LPFC_PHYSICAL_PORT
 				&& !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
-				lpfc_initial_flogi(vport);
+				lpfc_issue_init_vfi(vport);
 			else
 				lpfc_initial_fdisc(vport);
 			break;
@@ -6734,7 +6975,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
 	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
 	vport->fc_flag |= FC_FABRIC;
-	if (vport->phba->fc_topology == TOPOLOGY_LOOP)
+	if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
 		vport->fc_flag |=  FC_PUBLIC_LOOP;
 	spin_unlock_irq(shost->host_lock);
 
@@ -6844,7 +7085,9 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	icmd->un.elsreq64.myID = 0;
 	icmd->un.elsreq64.fl = 1;
 
-	if  (phba->sli_rev == LPFC_SLI_REV4) {
+	if  ((phba->sli_rev == LPFC_SLI_REV4) &&
+	     (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+	      LPFC_SLI_INTF_IF_TYPE_0)) {
 		/* FDISC needs to be 1 for WQE VPI */
 		elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
 		elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
@@ -7351,8 +7594,11 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
 			  struct sli4_wcqe_xri_aborted *axri)
 {
 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+
 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
 	unsigned long iflag = 0;
+	struct lpfc_nodelist *ndlp;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 
 	spin_lock_irqsave(&phba->hbalock, iflag);
@@ -7361,11 +7607,14 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
 			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
 		if (sglq_entry->sli4_xritag == xri) {
 			list_del(&sglq_entry->list);
+			ndlp = sglq_entry->ndlp;
+			sglq_entry->ndlp = NULL;
 			list_add_tail(&sglq_entry->list,
 				&phba->sli4_hba.lpfc_sgl_list);
 			sglq_entry->state = SGL_FREED;
 			spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
 
 			/* Check if TXQ queue needs to be serviced */
 			if (pring->txq_cnt)

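For reference (not part of the commit), the RRQ plumbing added above works as follows: lpfc_sli4_els_xri_aborted() marks the aborted exchange with lpfc_set_rrq_active(), the worker thread picks it up through lpfc_handle_rrq_active() (see the lpfc_work_done hunk in lpfc_hbadisc.c below), which presumably issues the ELS via lpfc_send_rrq(), and lpfc_cmpl_els_rrq() releases the tracked exchange on completion. Per the lpfc_send_rrq() kernel-doc, a failure return leaves cleanup to the caller; a minimal caller sketch, with example_handle_rrq as a hypothetical name and the usual lpfc headers assumed, could look like this:

/* Hypothetical caller sketch; lpfc_send_rrq() and lpfc_clr_rrq_active()
 * are the functions used in the hunks above.
 */
static void example_handle_rrq(struct lpfc_hba *phba,
			       struct lpfc_node_rrq *rrq)
{
	if (lpfc_send_rrq(phba, rrq))
		/* RRQ was not issued; release the tracked exchange now. */
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
}
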
+ 161 - 78
drivers/scsi/lpfc/lpfc_hbadisc.c

@@ -607,6 +607,8 @@ lpfc_work_done(struct lpfc_hba *phba)
 
 	/* Process SLI4 events */
 	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
+		if (phba->hba_flag & HBA_RRQ_ACTIVE)
+			lpfc_handle_rrq_active(phba);
 		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
 			lpfc_sli4_fcp_xri_abort_event_proc(phba);
 		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
@@ -966,6 +968,7 @@ lpfc_linkup(struct lpfc_hba *phba)
 	struct lpfc_vport **vports;
 	int i;
 
+	lpfc_cleanup_wt_rrqs(phba);
 	phba->link_state = LPFC_LINK_UP;
 
 	/* Unblock fabric iocbs if they are blocked */
@@ -1064,7 +1067,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
 	mempool_free(pmb, phba->mbox_mem_pool);
 
-	if (phba->fc_topology == TOPOLOGY_LOOP &&
+	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
 	    vport->fc_flag & FC_PUBLIC_LOOP &&
 	    !(vport->fc_flag & FC_LBIT)) {
 			/* Need to wait for FAN - use discovery timer
@@ -1078,9 +1081,8 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	/* Start discovery by sending a FLOGI. port_state is identically
 	 * LPFC_FLOGI while waiting for FLOGI cmpl
 	 */
-	if (vport->port_state != LPFC_FLOGI) {
+	if (vport->port_state != LPFC_FLOGI)
 		lpfc_initial_flogi(vport);
-	}
 	return;
 
 out:
@@ -1131,7 +1133,7 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	if (vport->port_state != LPFC_FLOGI) {
 		phba->hba_flag |= FCF_RR_INPROG;
 		spin_unlock_irq(&phba->hbalock);
-		lpfc_initial_flogi(vport);
+		lpfc_issue_init_vfi(vport);
 		goto out;
 	}
 	spin_unlock_irq(&phba->hbalock);
@@ -1353,7 +1355,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
 		if (phba->pport->port_state != LPFC_FLOGI) {
 			phba->hba_flag |= FCF_RR_INPROG;
 			spin_unlock_irq(&phba->hbalock);
-			lpfc_initial_flogi(phba->pport);
+			lpfc_issue_init_vfi(phba->pport);
 			return;
 		}
 		spin_unlock_irq(&phba->hbalock);
@@ -2331,7 +2333,7 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 				phba->fcf.current_rec.fcf_indx, fcf_index);
 		/* Wait 500 ms before retrying FLOGI to current FCF */
 		msleep(500);
-		lpfc_initial_flogi(phba->pport);
+		lpfc_issue_init_vfi(phba->pport);
 		goto out;
 	}
 
@@ -2421,6 +2423,63 @@ out:
 	lpfc_sli4_mbox_cmd_free(phba, mboxq);
 }
 
+/**
+ * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox data structure.
+ *
+ * This function handles completion of the init_vfi mailbox command.
+ */
+void
+lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_vport *vport = mboxq->vport;
+
+	if (mboxq->u.mb.mbxStatus && (mboxq->u.mb.mbxStatus != 0x4002)) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+				LOG_MBOX,
+				"2891 Init VFI mailbox failed 0x%x\n",
+				mboxq->u.mb.mbxStatus);
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		return;
+	}
+	lpfc_initial_flogi(vport);
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return;
+}
+
+/**
+ * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
+ * @vport: pointer to lpfc_vport data structure.
+ *
+ * This function issues an init_vfi mailbox command to initialize the VFI and
+ * VPI for the physical port.
+ */
+void
+lpfc_issue_init_vfi(struct lpfc_vport *vport)
+{
+	LPFC_MBOXQ_t *mboxq;
+	int rc;
+	struct lpfc_hba *phba = vport->phba;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+			LOG_MBOX, "2892 Failed to allocate "
+			"init_vfi mailbox\n");
+		return;
+	}
+	lpfc_init_vfi(mboxq, vport);
+	mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+			LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
+		mempool_free(mboxq, vport->phba->mbox_mem_pool);
+	}
+}
+
 /**
  * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
  * @phba: pointer to lpfc hba data structure.
@@ -2528,7 +2587,7 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
 						     FC_VPORT_FAILED);
 				continue;
 			}
-			if (phba->fc_topology == TOPOLOGY_LOOP) {
+			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 				lpfc_vport_set_state(vports[i],
 						     FC_VPORT_LINKDOWN);
 				continue;
@@ -2564,7 +2623,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 			 "2018 REG_VFI mbxStatus error x%x "
 			 "HBA state x%x\n",
 			 mboxq->u.mb.mbxStatus, vport->port_state);
-		if (phba->fc_topology == TOPOLOGY_LOOP) {
+		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 			/* FLOGI failed, use loop map to make discovery list */
 			lpfc_disc_list_loopmap(vport);
 			/* Start discovery */
@@ -2582,8 +2641,18 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	spin_unlock_irq(shost->host_lock);
 
 	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
-		lpfc_start_fdiscs(phba);
-		lpfc_do_scr_ns_plogi(phba, vport);
+		/* For private loop just start discovery and we are done. */
+		if ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
+		    (phba->alpa_map[0] == 0) &&
+		    !(vport->fc_flag & FC_PUBLIC_LOOP)) {
+			/* Use loop map to make discovery list */
+			lpfc_disc_list_loopmap(vport);
+			/* Start discovery */
+			lpfc_disc_start(vport);
+		} else {
+			lpfc_start_fdiscs(phba);
+			lpfc_do_scr_ns_plogi(phba, vport);
+		}
 	}
 
 fail_free_mem:
@@ -2644,7 +2713,7 @@ out:
 }
 
 static void
-lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
+lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
 {
 	struct lpfc_vport *vport = phba->pport;
 	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
@@ -2654,31 +2723,24 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
 	struct fcf_record *fcf_record;
 
 	spin_lock_irq(&phba->hbalock);
-	switch (la->UlnkSpeed) {
-	case LA_1GHZ_LINK:
-		phba->fc_linkspeed = LA_1GHZ_LINK;
-		break;
-	case LA_2GHZ_LINK:
-		phba->fc_linkspeed = LA_2GHZ_LINK;
-		break;
-	case LA_4GHZ_LINK:
-		phba->fc_linkspeed = LA_4GHZ_LINK;
-		break;
-	case LA_8GHZ_LINK:
-		phba->fc_linkspeed = LA_8GHZ_LINK;
-		break;
-	case LA_10GHZ_LINK:
-		phba->fc_linkspeed = LA_10GHZ_LINK;
+	switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
+	case LPFC_LINK_SPEED_1GHZ:
+	case LPFC_LINK_SPEED_2GHZ:
+	case LPFC_LINK_SPEED_4GHZ:
+	case LPFC_LINK_SPEED_8GHZ:
+	case LPFC_LINK_SPEED_10GHZ:
+	case LPFC_LINK_SPEED_16GHZ:
+		phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
 		break;
 	default:
-		phba->fc_linkspeed = LA_UNKNW_LINK;
+		phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
 		break;
 	}
 
-	phba->fc_topology = la->topology;
+	phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
 	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
 
-	if (phba->fc_topology == TOPOLOGY_LOOP) {
+	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
 
 		/* if npiv is enabled and this adapter supports npiv log
@@ -2689,11 +2751,11 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
 				"1309 Link Up Event npiv not supported in loop "
 				"topology\n");
 				/* Get Loop Map information */
-		if (la->il)
+		if (bf_get(lpfc_mbx_read_top_il, la))
 			vport->fc_flag |= FC_LBIT;
 
-		vport->fc_myDID = la->granted_AL_PA;
-		i = la->un.lilpBde64.tus.f.bdeSize;
+		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
+		i = la->lilpBde64.tus.f.bdeSize;
 
 		if (i == 0) {
 			phba->alpa_map[0] = 0;
@@ -2764,7 +2826,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
 		goto out;
 	}
 
-	if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
+	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
 		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 		if (!cfglink_mbox)
 			goto out;
@@ -2874,17 +2936,17 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
 
 
 /*
- * This routine handles processing a READ_LA mailbox
+ * This routine handles processing a READ_TOPOLOGY mailbox
  * command upon completion. It is setup in the LPFC_MBOXQ
  * as the completion routine when the command is
  * handed off to the SLI layer.
  */
 void
-lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	struct lpfc_vport *vport = pmb->vport;
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
-	READ_LA_VAR *la;
+	struct lpfc_mbx_read_top *la;
 	MAILBOX_t *mb = &pmb->u.mb;
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
 
@@ -2897,15 +2959,15 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 				mb->mbxStatus, vport->port_state);
 		lpfc_mbx_issue_link_down(phba);
 		phba->link_state = LPFC_HBA_ERROR;
-		goto lpfc_mbx_cmpl_read_la_free_mbuf;
+		goto lpfc_mbx_cmpl_read_topology_free_mbuf;
 	}
 
-	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
+	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
 
 	memcpy(&phba->alpa_map[0], mp->virt, 128);
 
 	spin_lock_irq(shost->host_lock);
-	if (la->pb)
+	if (bf_get(lpfc_mbx_read_top_pb, la))
 		vport->fc_flag |= FC_BYPASSED_MODE;
 	else
 		vport->fc_flag &= ~FC_BYPASSED_MODE;
@@ -2914,41 +2976,48 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	if ((phba->fc_eventTag  < la->eventTag) ||
 	    (phba->fc_eventTag == la->eventTag)) {
 		phba->fc_stat.LinkMultiEvent++;
-		if (la->attType == AT_LINK_UP)
+		if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
 			if (phba->fc_eventTag != 0)
 				lpfc_linkdown(phba);
 	}
 
 	phba->fc_eventTag = la->eventTag;
 	spin_lock_irq(&phba->hbalock);
-	if (la->mm)
+	if (bf_get(lpfc_mbx_read_top_mm, la))
 		phba->sli.sli_flag |= LPFC_MENLO_MAINT;
 	else
 		phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
 	spin_unlock_irq(&phba->hbalock);
 
 	phba->link_events++;
-	if (la->attType == AT_LINK_UP && (!la->mm)) {
+	if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
+	    (!bf_get(lpfc_mbx_read_top_mm, la))) {
 		phba->fc_stat.LinkUp++;
 		if (phba->link_flag & LS_LOOPBACK_MODE) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
 					"1306 Link Up Event in loop back mode "
 					"x%x received Data: x%x x%x x%x x%x\n",
 					la->eventTag, phba->fc_eventTag,
-					la->granted_AL_PA, la->UlnkSpeed,
+					bf_get(lpfc_mbx_read_top_alpa_granted,
+					       la),
+					bf_get(lpfc_mbx_read_top_link_spd, la),
 					phba->alpa_map[0]);
 		} else {
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
 					"1303 Link Up Event x%x received "
 					"Data: x%x x%x x%x x%x x%x x%x %d\n",
 					la->eventTag, phba->fc_eventTag,
-					la->granted_AL_PA, la->UlnkSpeed,
+					bf_get(lpfc_mbx_read_top_alpa_granted,
+					       la),
+					bf_get(lpfc_mbx_read_top_link_spd, la),
 					phba->alpa_map[0],
-					la->mm, la->fa,
+					bf_get(lpfc_mbx_read_top_mm, la),
+					bf_get(lpfc_mbx_read_top_fa, la),
 					phba->wait_4_mlo_maint_flg);
 		}
 		lpfc_mbx_process_link_up(phba, la);
-	} else if (la->attType == AT_LINK_DOWN) {
+	} else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
+		   LPFC_ATT_LINK_DOWN) {
 		phba->fc_stat.LinkDown++;
 		if (phba->link_flag & LS_LOOPBACK_MODE) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -2964,11 +3033,13 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 				"Data: x%x x%x x%x x%x x%x\n",
 				la->eventTag, phba->fc_eventTag,
 				phba->pport->port_state, vport->fc_flag,
-				la->mm, la->fa);
+				bf_get(lpfc_mbx_read_top_mm, la),
+				bf_get(lpfc_mbx_read_top_fa, la));
 		}
 		lpfc_mbx_issue_link_down(phba);
 	}
-	if (la->mm && la->attType == AT_LINK_UP) {
+	if ((bf_get(lpfc_mbx_read_top_mm, la)) &&
+	    (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) {
 		if (phba->link_state != LPFC_LINK_DOWN) {
 			phba->fc_stat.LinkDown++;
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -2996,14 +3067,15 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		}
 	}
 
-	if (la->fa) {
-		if (la->mm)
+	if (bf_get(lpfc_mbx_read_top_fa, la)) {
+		if (bf_get(lpfc_mbx_read_top_mm, la))
 			lpfc_issue_clear_la(phba, vport);
 		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
-				"1311 fa %d\n", la->fa);
+				"1311 fa %d\n",
+				bf_get(lpfc_mbx_read_top_fa, la));
 	}
 
-lpfc_mbx_cmpl_read_la_free_mbuf:
+lpfc_mbx_cmpl_read_topology_free_mbuf:
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
 	mempool_free(pmb, phba->mbox_mem_pool);
@@ -3030,8 +3102,8 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
 		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
 
-	if (ndlp->nlp_flag &  NLP_IGNR_REG_CMPL ||
-		ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
+	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
+	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
 		/* We rcvd a rscn after issuing this
 		 * mbox reg login, we may have cycled
 		 * back through the state and be
@@ -3043,10 +3115,6 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
 		spin_unlock_irq(shost->host_lock);
-		if (phba->sli_rev == LPFC_SLI_REV4)
-			lpfc_sli4_free_rpi(phba,
-				pmb->u.mb.un.varRegLogin.rpi);
-
 	} else
 		/* Good status, call state machine */
 		lpfc_disc_state_machine(vport, ndlp, pmb,
@@ -3092,6 +3160,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	spin_unlock_irq(shost->host_lock);
 	vport->unreg_vpi_cmpl = VPORT_OK;
 	mempool_free(pmb, phba->mbox_mem_pool);
+	lpfc_cleanup_vports_rrqs(vport);
 	/*
 	 * This shost reference might have been taken at the beginning of
 	 * lpfc_vport_delete()
@@ -3333,7 +3402,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		kfree(mp);
 		mempool_free(pmb, phba->mbox_mem_pool);
 
-		if (phba->fc_topology == TOPOLOGY_LOOP) {
+		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 			/* FLOGI failed, use loop map to make discovery list */
 			lpfc_disc_list_loopmap(vport);
 
@@ -3355,7 +3424,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	}
 
 	ndlp->nlp_rpi = mb->un.varWords[0];
-	ndlp->nlp_flag |= NLP_RPI_VALID;
+	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
 	ndlp->nlp_type |= NLP_FABRIC;
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 
@@ -3413,7 +3482,7 @@ out:
 		/* If no other thread is using the ndlp, free it */
 		lpfc_nlp_not_used(ndlp);
 
-		if (phba->fc_topology == TOPOLOGY_LOOP) {
+		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 			/*
 			 * RegLogin failed, use loop map to make discovery
 			 * list
@@ -3429,7 +3498,7 @@ out:
 	}
 
 	ndlp->nlp_rpi = mb->un.varWords[0];
-	ndlp->nlp_flag |= NLP_RPI_VALID;
+	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
 	ndlp->nlp_type |= NLP_FABRIC;
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 
@@ -3762,6 +3831,8 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	NLP_INT_NODE_ACT(ndlp);
 	atomic_set(&ndlp->cmd_pending, 0);
 	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
+	if (vport->phba->sli_rev == LPFC_SLI_REV4)
+		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
 }
 
 struct lpfc_nodelist *
@@ -3975,7 +4046,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 	 * by firmware with a no rpi error.
 	 */
 	psli = &phba->sli;
-	if (ndlp->nlp_flag & NLP_RPI_VALID) {
+	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
 		/* Now process each ring */
 		for (i = 0; i < psli->num_rings; i++) {
 			pring = &psli->ring[i];
@@ -4023,7 +4094,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	LPFC_MBOXQ_t    *mbox;
 	int rc;
 
-	if (ndlp->nlp_flag & NLP_RPI_VALID) {
+	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 		if (mbox) {
 			lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
@@ -4035,8 +4106,9 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 		}
 		lpfc_no_rpi(phba, ndlp);
 
-		ndlp->nlp_rpi = 0;
-		ndlp->nlp_flag &= ~NLP_RPI_VALID;
+		if (phba->sli_rev != LPFC_SLI_REV4)
+			ndlp->nlp_rpi = 0;
+		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
 		return 1;
 	}
@@ -4059,11 +4131,16 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
 	int i;
 
 	vports = lpfc_create_vport_work_array(phba);
+	if (!vports) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+			"2884 Vport array allocation failed\n");
+		return;
+	}
 	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 		shost = lpfc_shost_from_vport(vports[i]);
 		spin_lock_irq(shost->host_lock);
 		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
-			if (ndlp->nlp_flag & NLP_RPI_VALID) {
+			if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
 				/* The mempool_alloc might sleep */
 				spin_unlock_irq(shost->host_lock);
 				lpfc_unreg_rpi(vports[i], ndlp);
@@ -4192,9 +4269,6 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 				kfree(mp);
 			}
 			list_del(&mb->list);
-			if (phba->sli_rev == LPFC_SLI_REV4)
-				lpfc_sli4_free_rpi(phba,
-					 mb->u.mb.un.varRegLogin.rpi);
 			mempool_free(mb, phba->mbox_mem_pool);
 			/* We shall not invoke the lpfc_nlp_put to decrement
 			 * the ndlp reference count as we are in the process
@@ -4236,15 +4310,15 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 
 	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
-		!(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
-	    !(ndlp->nlp_flag & NLP_RPI_VALID)) {
+	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
+	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
 		/* For this case we need to cleanup the default rpi
 		 * allocated by the firmware.
 		 */
 		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
 			!= NULL) {
 			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
-			    (uint8_t *) &vport->fc_sparam, mbox, 0);
+			    (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
 			if (rc) {
 				mempool_free(mbox, phba->mbox_mem_pool);
 			}
@@ -4436,7 +4510,7 @@ lpfc_disc_list_loopmap(struct lpfc_vport *vport)
 	if (!lpfc_is_link_up(phba))
 		return;
 
-	if (phba->fc_topology != TOPOLOGY_LOOP)
+	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
 		return;
 
 	/* Check for loop map present or not */
@@ -4788,7 +4862,10 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
 			}
 		}
 		if (vport->port_state != LPFC_FLOGI) {
-			lpfc_initial_flogi(vport);
+			if (phba->sli_rev <= LPFC_SLI_REV3)
+				lpfc_initial_flogi(vport);
+			else
+				lpfc_issue_init_vfi(vport);
 			return;
 		}
 		break;
@@ -4979,7 +5056,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	pmb->context2 = NULL;
 
 	ndlp->nlp_rpi = mb->un.varWords[0];
-	ndlp->nlp_flag |= NLP_RPI_VALID;
+	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
 	ndlp->nlp_type |= NLP_FABRIC;
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 
@@ -5103,6 +5180,8 @@ lpfc_nlp_release(struct kref *kref)
 	spin_lock_irqsave(&phba->ndlp_lock, flags);
 	NLP_CLR_NODE_ACT(ndlp);
 	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
 
 	/* free ndlp memory for final ndlp release */
 	if (NLP_CHK_FREE_REQ(ndlp)) {
@@ -5254,6 +5333,10 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
 
 	vports = lpfc_create_vport_work_array(phba);
 
+	/* If driver cannot allocate memory, indicate fcf is in use */
+	if (!vports)
+		return 1;
+
 	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 		shost = lpfc_shost_from_vport(vports[i]);
 		spin_lock_irq(shost->host_lock);
@@ -5269,7 +5352,7 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
 					"logged in\n",
 					ndlp->nlp_rpi, ndlp->nlp_DID,
 					ndlp->nlp_flag);
-				if (ndlp->nlp_flag & NLP_RPI_VALID)
+				if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
 					ret = 1;
 			}
 		}
@@ -5550,7 +5633,7 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
 	 * registered, do nothing.
 	 */
 	spin_lock_irq(&phba->hbalock);
-	if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
+	if (!(phba->hba_flag & HBA_FCOE_MODE) ||
 	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
 	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
 	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||

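The READ_TOPOLOGY rework above replaces the old READ_LA bitfield struct with plain 32-bit words plus _SHIFT/_MASK/_WORD macro triplets, read and written through the driver's bf_get()/bf_set() helpers. Those helpers are defined elsewhere in lpfc_hw4.h and are not part of this diff; the sketch below is only a rough, illustrative expansion so the bf_get(lpfc_mbx_read_top_att_type, la) style calls read naturally.

/* Approximate expansion of the lpfc bit-field accessors (illustrative only;
 * the authoritative definitions live in lpfc_hw4.h).
 */
#define example_bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

#define example_bf_set(name, ptr, value) \
	((ptr)->name##_WORD = (((value) & name##_MASK) << name##_SHIFT) | \
			      ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))

With the lpfc_mbx_read_top defines from lpfc_hw.h below, bf_get(lpfc_mbx_read_top_att_type, la) therefore extracts bits 7:0 of word2, which is what lpfc_mbx_cmpl_read_topology() compares against LPFC_ATT_LINK_UP and LPFC_ATT_LINK_DOWN.
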
+ 118 - 104
drivers/scsi/lpfc/lpfc_hw.h

@@ -880,6 +880,24 @@ struct  RLS_RSP {		/* Structure is in Big Endian format */
 	uint32_t crcCnt;
 };
 
+struct RRQ {			/* Structure is in Big Endian format */
+	uint32_t rrq;
+#define rrq_rsvd_SHIFT		24
+#define rrq_rsvd_MASK		0x000000ff
+#define rrq_rsvd_WORD		rrq
+#define rrq_did_SHIFT		0
+#define rrq_did_MASK		0x00ffffff
+#define rrq_did_WORD		rrq
+	uint32_t rrq_exchg;
+#define rrq_oxid_SHIFT		16
+#define rrq_oxid_MASK		0xffff
+#define rrq_oxid_WORD		rrq_exchg
+#define rrq_rxid_SHIFT		0
+#define rrq_rxid_MASK		0xffff
+#define rrq_rxid_WORD		rrq_exchg
+};
+
+
 struct RTV_RSP {		/* Structure is in Big Endian format */
 	uint32_t ratov;
 	uint32_t edtov;
@@ -1172,7 +1190,10 @@ typedef struct {
 #define PCI_VENDOR_ID_EMULEX        0x10df
 #define PCI_DEVICE_ID_FIREFLY       0x1ae5
 #define PCI_DEVICE_ID_PROTEUS_VF    0xe100
+#define PCI_DEVICE_ID_BALIUS        0xe131
 #define PCI_DEVICE_ID_PROTEUS_PF    0xe180
+#define PCI_DEVICE_ID_LANCER_FC     0xe200
+#define PCI_DEVICE_ID_LANCER_FCOE   0xe260
 #define PCI_DEVICE_ID_SAT_SMB       0xf011
 #define PCI_DEVICE_ID_SAT_MID       0xf015
 #define PCI_DEVICE_ID_RFLY          0xf095
@@ -1189,6 +1210,7 @@ typedef struct {
 #define PCI_DEVICE_ID_SAT           0xf100
 #define PCI_DEVICE_ID_SAT_SCSP      0xf111
 #define PCI_DEVICE_ID_SAT_DCSP      0xf112
+#define PCI_DEVICE_ID_FALCON        0xf180
 #define PCI_DEVICE_ID_SUPERFLY      0xf700
 #define PCI_DEVICE_ID_DRAGONFLY     0xf800
 #define PCI_DEVICE_ID_CENTAUR       0xf900
@@ -1210,8 +1232,6 @@ typedef struct {
 #define PCI_VENDOR_ID_SERVERENGINE  0x19a2
 #define PCI_DEVICE_ID_TIGERSHARK    0x0704
 #define PCI_DEVICE_ID_TOMCAT        0x0714
-#define PCI_DEVICE_ID_FALCON        0xf180
-#define PCI_DEVICE_ID_BALIUS        0xe131
 
 #define JEDEC_ID_ADDRESS            0x0080001c
 #define FIREFLY_JEDEC_ID            0x1ACC
@@ -1368,7 +1388,6 @@ typedef struct {		/* FireFly BIU registers */
 #define MBX_READ_LNK_STAT   0x12
 #define MBX_REG_LOGIN       0x13
 #define MBX_UNREG_LOGIN     0x14
-#define MBX_READ_LA         0x15
 #define MBX_CLEAR_LA        0x16
 #define MBX_DUMP_MEMORY     0x17
 #define MBX_DUMP_CONTEXT    0x18
@@ -1402,7 +1421,7 @@ typedef struct {		/* FireFly BIU registers */
 #define MBX_READ_SPARM64    0x8D
 #define MBX_READ_RPI64      0x8F
 #define MBX_REG_LOGIN64     0x93
-#define MBX_READ_LA64       0x95
+#define MBX_READ_TOPOLOGY   0x95
 #define MBX_REG_VPI	    0x96
 #define MBX_UNREG_VPI	    0x97
 
@@ -1823,12 +1842,13 @@ typedef struct {
 #define FLAGS_IMED_ABORT             0x04000	/* Bit 14 */
 
 	uint32_t link_speed;
-#define LINK_SPEED_AUTO 0       /* Auto selection */
-#define LINK_SPEED_1G   1       /* 1 Gigabaud */
-#define LINK_SPEED_2G   2       /* 2 Gigabaud */
-#define LINK_SPEED_4G   4       /* 4 Gigabaud */
-#define LINK_SPEED_8G   8       /* 8 Gigabaud */
-#define LINK_SPEED_10G   16      /* 10 Gigabaud */
+#define LINK_SPEED_AUTO 0x0     /* Auto selection */
+#define LINK_SPEED_1G   0x1     /* 1 Gigabaud */
+#define LINK_SPEED_2G   0x2     /* 2 Gigabaud */
+#define LINK_SPEED_4G   0x4     /* 4 Gigabaud */
+#define LINK_SPEED_8G   0x8     /* 8 Gigabaud */
+#define LINK_SPEED_10G  0x10    /* 10 Gigabaud */
+#define LINK_SPEED_16G  0x11    /* 16 Gigabaud */
 
 } INIT_LINK_VAR;
 
@@ -1999,6 +2019,7 @@ typedef struct {
 #define LMT_4Gb       0x040
 #define LMT_8Gb       0x080
 #define LMT_10Gb      0x100
+#define LMT_16Gb      0x200
 	uint32_t rsvd2;
 	uint32_t rsvd3;
 	uint32_t max_xri;
@@ -2394,100 +2415,93 @@ typedef struct {
 #endif
 } UNREG_D_ID_VAR;
 
-/* Structure for MB Command READ_LA (21) */
-/* Structure for MB Command READ_LA64 (0x95) */
-
-typedef struct {
+/* Structure for MB Command READ_TOPOLOGY (0x95) */
+struct lpfc_mbx_read_top {
 	uint32_t eventTag;	/* Event tag */
-#ifdef __BIG_ENDIAN_BITFIELD
-	uint32_t rsvd1:19;
-	uint32_t fa:1;
-	uint32_t mm:1;		/* Menlo Maintenance mode enabled */
-	uint32_t rx:1;
-	uint32_t pb:1;
-	uint32_t il:1;
-	uint32_t attType:8;
-#else	/*  __LITTLE_ENDIAN_BITFIELD */
-	uint32_t attType:8;
-	uint32_t il:1;
-	uint32_t pb:1;
-	uint32_t rx:1;
-	uint32_t mm:1;
-	uint32_t fa:1;
-	uint32_t rsvd1:19;
-#endif
-
-#define AT_RESERVED    0x00	/* Reserved - attType */
-#define AT_LINK_UP     0x01	/* Link is up */
-#define AT_LINK_DOWN   0x02	/* Link is down */
-
-#ifdef __BIG_ENDIAN_BITFIELD
-	uint8_t granted_AL_PA;
-	uint8_t lipAlPs;
-	uint8_t lipType;
-	uint8_t topology;
-#else	/*  __LITTLE_ENDIAN_BITFIELD */
-	uint8_t topology;
-	uint8_t lipType;
-	uint8_t lipAlPs;
-	uint8_t granted_AL_PA;
-#endif
-
-#define TOPOLOGY_PT_PT 0x01	/* Topology is pt-pt / pt-fabric */
-#define TOPOLOGY_LOOP  0x02	/* Topology is FC-AL */
-#define TOPOLOGY_LNK_MENLO_MAINTENANCE 0x05 /* maint mode zephtr to menlo */
-
-	union {
-		struct ulp_bde lilpBde; /* This BDE points to a 128 byte buffer
-					   to */
-		/* store the LILP AL_PA position map into */
-		struct ulp_bde64 lilpBde64;
-	} un;
-
-#ifdef __BIG_ENDIAN_BITFIELD
-	uint32_t Dlu:1;
-	uint32_t Dtf:1;
-	uint32_t Drsvd2:14;
-	uint32_t DlnkSpeed:8;
-	uint32_t DnlPort:4;
-	uint32_t Dtx:2;
-	uint32_t Drx:2;
-#else	/*  __LITTLE_ENDIAN_BITFIELD */
-	uint32_t Drx:2;
-	uint32_t Dtx:2;
-	uint32_t DnlPort:4;
-	uint32_t DlnkSpeed:8;
-	uint32_t Drsvd2:14;
-	uint32_t Dtf:1;
-	uint32_t Dlu:1;
-#endif
-
-#ifdef __BIG_ENDIAN_BITFIELD
-	uint32_t Ulu:1;
-	uint32_t Utf:1;
-	uint32_t Ursvd2:14;
-	uint32_t UlnkSpeed:8;
-	uint32_t UnlPort:4;
-	uint32_t Utx:2;
-	uint32_t Urx:2;
-#else	/*  __LITTLE_ENDIAN_BITFIELD */
-	uint32_t Urx:2;
-	uint32_t Utx:2;
-	uint32_t UnlPort:4;
-	uint32_t UlnkSpeed:8;
-	uint32_t Ursvd2:14;
-	uint32_t Utf:1;
-	uint32_t Ulu:1;
-#endif
-
-#define LA_UNKNW_LINK  0x0    /* lnkSpeed */
-#define LA_1GHZ_LINK   0x04   /* lnkSpeed */
-#define LA_2GHZ_LINK   0x08   /* lnkSpeed */
-#define LA_4GHZ_LINK   0x10   /* lnkSpeed */
-#define LA_8GHZ_LINK   0x20   /* lnkSpeed */
-#define LA_10GHZ_LINK  0x40   /* lnkSpeed */
-
-} READ_LA_VAR;
+	uint32_t word2;
+#define lpfc_mbx_read_top_fa_SHIFT		12
+#define lpfc_mbx_read_top_fa_MASK		0x00000001
+#define lpfc_mbx_read_top_fa_WORD		word2
+#define lpfc_mbx_read_top_mm_SHIFT		11
+#define lpfc_mbx_read_top_mm_MASK		0x00000001
+#define lpfc_mbx_read_top_mm_WORD		word2
+#define lpfc_mbx_read_top_pb_SHIFT		9
+#define lpfc_mbx_read_top_pb_MASK		0x00000001
+#define lpfc_mbx_read_top_pb_WORD		word2
+#define lpfc_mbx_read_top_il_SHIFT		8
+#define lpfc_mbx_read_top_il_MASK		0x00000001
+#define lpfc_mbx_read_top_il_WORD		word2
+#define lpfc_mbx_read_top_att_type_SHIFT	0
+#define lpfc_mbx_read_top_att_type_MASK		0x000000FF
+#define lpfc_mbx_read_top_att_type_WORD		word2
+#define LPFC_ATT_RESERVED    0x00	/* Reserved - attType */
+#define LPFC_ATT_LINK_UP     0x01	/* Link is up */
+#define LPFC_ATT_LINK_DOWN   0x02	/* Link is down */
+	uint32_t word3;
+#define lpfc_mbx_read_top_alpa_granted_SHIFT	24
+#define lpfc_mbx_read_top_alpa_granted_MASK	0x000000FF
+#define lpfc_mbx_read_top_alpa_granted_WORD	word3
+#define lpfc_mbx_read_top_lip_alps_SHIFT	16
+#define lpfc_mbx_read_top_lip_alps_MASK		0x000000FF
+#define lpfc_mbx_read_top_lip_alps_WORD		word3
+#define lpfc_mbx_read_top_lip_type_SHIFT	8
+#define lpfc_mbx_read_top_lip_type_MASK		0x000000FF
+#define lpfc_mbx_read_top_lip_type_WORD		word3
+#define lpfc_mbx_read_top_topology_SHIFT	0
+#define lpfc_mbx_read_top_topology_MASK		0x000000FF
+#define lpfc_mbx_read_top_topology_WORD		word3
+#define LPFC_TOPOLOGY_PT_PT 0x01	/* Topology is pt-pt / pt-fabric */
+#define LPFC_TOPOLOGY_LOOP  0x02	/* Topology is FC-AL */
+#define LPFC_TOPOLOGY_MM    0x05	/* maint mode zephyr to menlo */
+	/* store the LILP AL_PA position map into */
+	struct ulp_bde64 lilpBde64;
+#define LPFC_ALPA_MAP_SIZE	128
+	uint32_t word7;
+#define lpfc_mbx_read_top_ld_lu_SHIFT		31
+#define lpfc_mbx_read_top_ld_lu_MASK		0x00000001
+#define lpfc_mbx_read_top_ld_lu_WORD		word7
+#define lpfc_mbx_read_top_ld_tf_SHIFT		30
+#define lpfc_mbx_read_top_ld_tf_MASK		0x00000001
+#define lpfc_mbx_read_top_ld_tf_WORD		word7
+#define lpfc_mbx_read_top_ld_link_spd_SHIFT	8
+#define lpfc_mbx_read_top_ld_link_spd_MASK	0x000000FF
+#define lpfc_mbx_read_top_ld_link_spd_WORD	word7
+#define lpfc_mbx_read_top_ld_nl_port_SHIFT	4
+#define lpfc_mbx_read_top_ld_nl_port_MASK	0x0000000F
+#define lpfc_mbx_read_top_ld_nl_port_WORD	word7
+#define lpfc_mbx_read_top_ld_tx_SHIFT		2
+#define lpfc_mbx_read_top_ld_tx_MASK		0x00000003
+#define lpfc_mbx_read_top_ld_tx_WORD		word7
+#define lpfc_mbx_read_top_ld_rx_SHIFT		0
+#define lpfc_mbx_read_top_ld_rx_MASK		0x00000003
+#define lpfc_mbx_read_top_ld_rx_WORD		word7
+	uint32_t word8;
+#define lpfc_mbx_read_top_lu_SHIFT		31
+#define lpfc_mbx_read_top_lu_MASK		0x00000001
+#define lpfc_mbx_read_top_lu_WORD		word8
+#define lpfc_mbx_read_top_tf_SHIFT		30
+#define lpfc_mbx_read_top_tf_MASK		0x00000001
+#define lpfc_mbx_read_top_tf_WORD		word8
+#define lpfc_mbx_read_top_link_spd_SHIFT	8
+#define lpfc_mbx_read_top_link_spd_MASK		0x000000FF
+#define lpfc_mbx_read_top_link_spd_WORD		word8
+#define lpfc_mbx_read_top_nl_port_SHIFT		4
+#define lpfc_mbx_read_top_nl_port_MASK		0x0000000F
+#define lpfc_mbx_read_top_nl_port_WORD		word8
+#define lpfc_mbx_read_top_tx_SHIFT		2
+#define lpfc_mbx_read_top_tx_MASK		0x00000003
+#define lpfc_mbx_read_top_tx_WORD		word8
+#define lpfc_mbx_read_top_rx_SHIFT		0
+#define lpfc_mbx_read_top_rx_MASK		0x00000003
+#define lpfc_mbx_read_top_rx_WORD		word8
+#define LPFC_LINK_SPEED_UNKNOWN	0x0
+#define LPFC_LINK_SPEED_1GHZ	0x04
+#define LPFC_LINK_SPEED_2GHZ	0x08
+#define LPFC_LINK_SPEED_4GHZ	0x10
+#define LPFC_LINK_SPEED_8GHZ	0x20
+#define LPFC_LINK_SPEED_10GHZ	0x40
+#define LPFC_LINK_SPEED_16GHZ	0x80
+};
 
 /* Structure for MB Command CLEAR_LA (22) */
 
@@ -3016,7 +3030,6 @@ typedef union {
 	READ_LNK_VAR varRdLnk;		/* cmd = 18 (READ_LNK_STAT)  */
 	REG_LOGIN_VAR varRegLogin;	/* cmd = 19 (REG_LOGIN(64))  */
 	UNREG_LOGIN_VAR varUnregLogin;	/* cmd = 20 (UNREG_LOGIN)    */
-	READ_LA_VAR varReadLA;		/* cmd = 21 (READ_LA(64))    */
 	CLEAR_LA_VAR varClearLA;	/* cmd = 22 (CLEAR_LA)       */
 	DUMP_VAR varDmp;		/* Warm Start DUMP mbx cmd   */
 	UNREG_D_ID_VAR varUnregDID;	/* cmd = 0x23 (UNREG_D_ID)   */
@@ -3026,6 +3039,7 @@ typedef union {
 	struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ)  */
 	struct update_cfg_var varUpdateCfg; /* cmd = 0x1B (UPDATE_CFG)*/
 	CONFIG_PORT_VAR varCfgPort;	/* cmd = 0x88 (CONFIG_PORT)  */
+	struct lpfc_mbx_read_top varReadTop; /* cmd = 0x95 (READ_TOPOLOGY) */
 	REG_VPI_VAR varRegVpi;		/* cmd = 0x96 (REG_VPI) */
 	UNREG_VPI_VAR varUnregVpi;	/* cmd = 0x97 (UNREG_VPI) */
 	ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */

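The new struct RRQ above is declared big endian on the wire; in lpfc_els.c earlier in this merge, lpfc_issue_els_rrq() fills the two words in host order with bf_set() and then byte-swaps them. The condensed sketch below is for reference only and is not part of the commit: example_pack_rrq is a hypothetical name, and bf_set() is assumed to behave as sketched after the lpfc_hbadisc.c section.

#include <linux/string.h>	/* memset */
#include <asm/byteorder.h>	/* cpu_to_be32 */

/* Hypothetical helper mirroring the packing done in lpfc_issue_els_rrq(). */
static void example_pack_rrq(struct RRQ *els_rrq, uint32_t sid,
			     uint16_t oxid, uint16_t rxid)
{
	memset(els_rrq, 0, sizeof(*els_rrq));
	bf_set(rrq_did, els_rrq, sid);		/* word 0, bits 23:0  */
	bf_set(rrq_oxid, els_rrq, oxid);	/* word 1, bits 31:16 */
	bf_set(rrq_rxid, els_rrq, rxid);	/* word 1, bits 15:0  */
	/* Convert to the on-wire big-endian layout after packing. */
	els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
	els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
}
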
+ 218 - 74
drivers/scsi/lpfc/lpfc_hw4.h

@@ -64,29 +64,39 @@ struct lpfc_sli_intf {
 #define lpfc_sli_intf_valid_MASK		0x00000007
 #define lpfc_sli_intf_valid_WORD		word0
 #define LPFC_SLI_INTF_VALID		6
-#define lpfc_sli_intf_featurelevel2_SHIFT	24
-#define lpfc_sli_intf_featurelevel2_MASK	0x0000001F
-#define lpfc_sli_intf_featurelevel2_WORD	word0
-#define lpfc_sli_intf_featurelevel1_SHIFT	16
-#define lpfc_sli_intf_featurelevel1_MASK	0x000000FF
-#define lpfc_sli_intf_featurelevel1_WORD	word0
-#define LPFC_SLI_INTF_FEATURELEVEL1_1	1
-#define LPFC_SLI_INTF_FEATURELEVEL1_2	2
+#define lpfc_sli_intf_sli_hint2_SHIFT		24
+#define lpfc_sli_intf_sli_hint2_MASK		0x0000001F
+#define lpfc_sli_intf_sli_hint2_WORD		word0
+#define LPFC_SLI_INTF_SLI_HINT2_NONE	0
+#define lpfc_sli_intf_sli_hint1_SHIFT		16
+#define lpfc_sli_intf_sli_hint1_MASK		0x000000FF
+#define lpfc_sli_intf_sli_hint1_WORD		word0
+#define LPFC_SLI_INTF_SLI_HINT1_NONE	0
+#define LPFC_SLI_INTF_SLI_HINT1_1	1
+#define LPFC_SLI_INTF_SLI_HINT1_2	2
+#define lpfc_sli_intf_if_type_SHIFT		12
+#define lpfc_sli_intf_if_type_MASK		0x0000000F
+#define lpfc_sli_intf_if_type_WORD		word0
+#define LPFC_SLI_INTF_IF_TYPE_0		0
+#define LPFC_SLI_INTF_IF_TYPE_1		1
+#define LPFC_SLI_INTF_IF_TYPE_2		2
 #define lpfc_sli_intf_sli_family_SHIFT		8
-#define lpfc_sli_intf_sli_family_MASK		0x000000FF
+#define lpfc_sli_intf_sli_family_MASK		0x0000000F
 #define lpfc_sli_intf_sli_family_WORD		word0
-#define LPFC_SLI_INTF_FAMILY_BE2	0
-#define LPFC_SLI_INTF_FAMILY_BE3	1
+#define LPFC_SLI_INTF_FAMILY_BE2	0x0
+#define LPFC_SLI_INTF_FAMILY_BE3	0x1
+#define LPFC_SLI_INTF_FAMILY_LNCR_A0	0xa
+#define LPFC_SLI_INTF_FAMILY_LNCR_B0	0xb
 #define lpfc_sli_intf_slirev_SHIFT		4
 #define lpfc_sli_intf_slirev_MASK		0x0000000F
 #define lpfc_sli_intf_slirev_WORD		word0
 #define LPFC_SLI_INTF_REV_SLI3		3
 #define LPFC_SLI_INTF_REV_SLI4		4
-#define lpfc_sli_intf_if_type_SHIFT		0
-#define lpfc_sli_intf_if_type_MASK		0x00000007
-#define lpfc_sli_intf_if_type_WORD		word0
-#define LPFC_SLI_INTF_IF_TYPE_0		0
-#define LPFC_SLI_INTF_IF_TYPE_1		1
+#define lpfc_sli_intf_func_type_SHIFT		0
+#define lpfc_sli_intf_func_type_MASK		0x00000001
+#define lpfc_sli_intf_func_type_WORD		word0
+#define LPFC_SLI_INTF_IF_TYPE_PHYS	0
+#define LPFC_SLI_INTF_IF_TYPE_VIRT	1
 };
 
 #define LPFC_SLI4_MBX_EMBED	true
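
The relocated if_type field in the hunk above is what the SLI4 ELS transmit paths earlier in this merge key off: the FLOGI and FDISC hunks in lpfc_els.c only set the SLI4 CT bits when the interface reports LPFC_SLI_INTF_IF_TYPE_0. An equivalent helper is sketched below purely for illustration; the name is hypothetical and the lpfc structure layout is assumed from the driver headers.

/* Hypothetical wrapper around the test used in lpfc_issue_els_flogi()/fdisc(). */
static inline bool example_sli4_is_if_type0(struct lpfc_hba *phba)
{
	return (phba->sli_rev == LPFC_SLI_REV4) &&
	       (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		LPFC_SLI_INTF_IF_TYPE_0);
}
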
@@ -450,35 +460,40 @@ struct lpfc_register {
 	uint32_t word0;
 };
 
+/* The following BAR0 Registers apply to SLI4 if_type 0 UCNAs. */
 #define LPFC_UERR_STATUS_HI		0x00A4
 #define LPFC_UERR_STATUS_LO		0x00A0
 #define LPFC_UE_MASK_HI			0x00AC
 #define LPFC_UE_MASK_LO			0x00A8
+
+/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */
 #define LPFC_SLI_INTF			0x0058
 
-/* BAR0 Registers */
-#define LPFC_HST_STATE			0x00AC
-#define lpfc_hst_state_perr_SHIFT	31
-#define lpfc_hst_state_perr_MASK	0x1
-#define lpfc_hst_state_perr_WORD	word0
-#define lpfc_hst_state_sfi_SHIFT	30
-#define lpfc_hst_state_sfi_MASK		0x1
-#define lpfc_hst_state_sfi_WORD		word0
-#define lpfc_hst_state_nip_SHIFT	29
-#define lpfc_hst_state_nip_MASK		0x1
-#define lpfc_hst_state_nip_WORD		word0
-#define lpfc_hst_state_ipc_SHIFT	28
-#define lpfc_hst_state_ipc_MASK		0x1
-#define lpfc_hst_state_ipc_WORD		word0
-#define lpfc_hst_state_xrom_SHIFT	27
-#define lpfc_hst_state_xrom_MASK	0x1
-#define lpfc_hst_state_xrom_WORD	word0
-#define lpfc_hst_state_dl_SHIFT		26
-#define lpfc_hst_state_dl_MASK		0x1
-#define lpfc_hst_state_dl_WORD		word0
-#define lpfc_hst_state_port_status_SHIFT	0
-#define lpfc_hst_state_port_status_MASK		0xFFFF
-#define lpfc_hst_state_port_status_WORD		word0
+#define LPFC_SLIPORT_IF2_SMPHR		0x0400
+#define lpfc_port_smphr_perr_SHIFT	31
+#define lpfc_port_smphr_perr_MASK	0x1
+#define lpfc_port_smphr_perr_WORD	word0
+#define lpfc_port_smphr_sfi_SHIFT	30
+#define lpfc_port_smphr_sfi_MASK	0x1
+#define lpfc_port_smphr_sfi_WORD	word0
+#define lpfc_port_smphr_nip_SHIFT	29
+#define lpfc_port_smphr_nip_MASK	0x1
+#define lpfc_port_smphr_nip_WORD	word0
+#define lpfc_port_smphr_ipc_SHIFT	28
+#define lpfc_port_smphr_ipc_MASK	0x1
+#define lpfc_port_smphr_ipc_WORD	word0
+#define lpfc_port_smphr_scr1_SHIFT	27
+#define lpfc_port_smphr_scr1_MASK	0x1
+#define lpfc_port_smphr_scr1_WORD	word0
+#define lpfc_port_smphr_scr2_SHIFT	26
+#define lpfc_port_smphr_scr2_MASK	0x1
+#define lpfc_port_smphr_scr2_WORD	word0
+#define lpfc_port_smphr_host_scratch_SHIFT	16
+#define lpfc_port_smphr_host_scratch_MASK	0xFF
+#define lpfc_port_smphr_host_scratch_WORD	word0
+#define lpfc_port_smphr_port_status_SHIFT	0
+#define lpfc_port_smphr_port_status_MASK	0xFFFF
+#define lpfc_port_smphr_port_status_WORD	word0
 
 #define LPFC_POST_STAGE_POWER_ON_RESET			0x0000
 #define LPFC_POST_STAGE_AWAITING_HOST_RDY		0x0001
@@ -511,10 +526,46 @@ struct lpfc_register {
 #define LPFC_POST_STAGE_RC_DONE				0x0B07
 #define LPFC_POST_STAGE_REBOOT_SYSTEM			0x0B08
 #define LPFC_POST_STAGE_MAC_ADDRESS			0x0C00
-#define LPFC_POST_STAGE_ARMFW_READY			0xC000
-#define LPFC_POST_STAGE_ARMFW_UE 			0xF000
+#define LPFC_POST_STAGE_PORT_READY			0xC000
+#define LPFC_POST_STAGE_PORT_UE 			0xF000
+
+#define LPFC_SLIPORT_STATUS		0x0404
+#define lpfc_sliport_status_err_SHIFT	31
+#define lpfc_sliport_status_err_MASK	0x1
+#define lpfc_sliport_status_err_WORD	word0
+#define lpfc_sliport_status_end_SHIFT	30
+#define lpfc_sliport_status_end_MASK	0x1
+#define lpfc_sliport_status_end_WORD	word0
+#define lpfc_sliport_status_oti_SHIFT	29
+#define lpfc_sliport_status_oti_MASK	0x1
+#define lpfc_sliport_status_oti_WORD	word0
+#define lpfc_sliport_status_rn_SHIFT	24
+#define lpfc_sliport_status_rn_MASK	0x1
+#define lpfc_sliport_status_rn_WORD	word0
+#define lpfc_sliport_status_rdy_SHIFT	23
+#define lpfc_sliport_status_rdy_MASK	0x1
+#define lpfc_sliport_status_rdy_WORD	word0
+#define MAX_IF_TYPE_2_RESETS	1000
+
+#define LPFC_SLIPORT_CNTRL		0x0408
+#define lpfc_sliport_ctrl_end_SHIFT	30
+#define lpfc_sliport_ctrl_end_MASK	0x1
+#define lpfc_sliport_ctrl_end_WORD	word0
+#define LPFC_SLIPORT_LITTLE_ENDIAN 0
+#define LPFC_SLIPORT_BIG_ENDIAN	   1
+#define lpfc_sliport_ctrl_ip_SHIFT	27
+#define lpfc_sliport_ctrl_ip_MASK	0x1
+#define lpfc_sliport_ctrl_ip_WORD	word0
+#define LPFC_SLIPORT_INIT_PORT	1
+
+#define LPFC_SLIPORT_ERR_1		0x040C
+#define LPFC_SLIPORT_ERR_2		0x0410
+
+/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
+ * reside in BAR 2.
+ */
+#define LPFC_SLIPORT_IF0_SMPHR	0x00AC
 
-/* BAR1 Registers */
 #define LPFC_IMR_MASK_ALL	0xFFFFFFFF
 #define LPFC_ISCR_CLEAR_ALL	0xFFFFFFFF
 
@@ -569,14 +620,21 @@ struct lpfc_register {
 #define LPFC_SLI4_INTR30		BIT30
 #define LPFC_SLI4_INTR31		BIT31
 
-/* BAR2 Registers */
+/*
+ * The Doorbell registers defined here exist in different BAR
+ * register sets depending on the UCNA Port's reported if_type
+ * value.  For UCNA ports running SLI4 and if_type 0, they reside in
+ * BAR4.  For UCNA ports running SLI4 and if_type 2, they reside in
+ * BAR0.  The offsets are the same so the driver must account for
+ * any base address difference.
+ */
 #define LPFC_RQ_DOORBELL		0x00A0
 #define lpfc_rq_doorbell_num_posted_SHIFT	16
 #define lpfc_rq_doorbell_num_posted_MASK	0x3FFF
 #define lpfc_rq_doorbell_num_posted_WORD	word0
 #define LPFC_RQ_POST_BATCH		8	/* RQEs to post at one time */
 #define lpfc_rq_doorbell_id_SHIFT		0
-#define lpfc_rq_doorbell_id_MASK		0x03FF
+#define lpfc_rq_doorbell_id_MASK		0xFFFF
 #define lpfc_rq_doorbell_id_WORD		word0
 
 #define LPFC_WQ_DOORBELL		0x0040
@@ -591,6 +649,11 @@ struct lpfc_register {
 #define lpfc_wq_doorbell_id_WORD		word0
 
 #define LPFC_EQCQ_DOORBELL		0x0120
+#define lpfc_eqcq_doorbell_se_SHIFT		31
+#define lpfc_eqcq_doorbell_se_MASK		0x0001
+#define lpfc_eqcq_doorbell_se_WORD		word0
+#define LPFC_EQCQ_SOLICIT_ENABLE_OFF	0
+#define LPFC_EQCQ_SOLICIT_ENABLE_ON	1
 #define lpfc_eqcq_doorbell_arm_SHIFT		29
 #define lpfc_eqcq_doorbell_arm_MASK		0x0001
 #define lpfc_eqcq_doorbell_arm_WORD		word0
@@ -628,7 +691,7 @@ struct lpfc_register {
 #define lpfc_mq_doorbell_num_posted_MASK	0x3FFF
 #define lpfc_mq_doorbell_num_posted_WORD	word0
 #define lpfc_mq_doorbell_id_SHIFT		0
-#define lpfc_mq_doorbell_id_MASK		0x03FF
+#define lpfc_mq_doorbell_id_MASK		0xFFFF
 #define lpfc_mq_doorbell_id_WORD		word0
 
 struct lpfc_sli4_cfg_mhdr {
@@ -1048,12 +1111,18 @@ struct lpfc_mbx_mq_create_ext {
 #define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT	LPFC_TRAILER_CODE_LINK
 #define lpfc_mbx_mq_create_ext_async_evt_link_MASK	0x00000001
 #define lpfc_mbx_mq_create_ext_async_evt_link_WORD	async_evt_bmap
-#define lpfc_mbx_mq_create_ext_async_evt_fcfste_SHIFT	LPFC_TRAILER_CODE_FCOE
-#define lpfc_mbx_mq_create_ext_async_evt_fcfste_MASK	0x00000001
-#define lpfc_mbx_mq_create_ext_async_evt_fcfste_WORD	async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_fip_SHIFT	LPFC_TRAILER_CODE_FCOE
+#define lpfc_mbx_mq_create_ext_async_evt_fip_MASK	0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_fip_WORD	async_evt_bmap
 #define lpfc_mbx_mq_create_ext_async_evt_group5_SHIFT	LPFC_TRAILER_CODE_GRP5
 #define lpfc_mbx_mq_create_ext_async_evt_group5_MASK	0x00000001
 #define lpfc_mbx_mq_create_ext_async_evt_group5_WORD	async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_fc_SHIFT	LPFC_TRAILER_CODE_FC
+#define lpfc_mbx_mq_create_ext_async_evt_fc_MASK	0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_fc_WORD	async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_sli_SHIFT	LPFC_TRAILER_CODE_SLI
+#define lpfc_mbx_mq_create_ext_async_evt_sli_MASK	0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_sli_WORD	async_evt_bmap
 			struct mq_context context;
 			struct dma_address page[LPFC_MAX_MQ_PAGE];
 		} request;
@@ -1307,7 +1376,7 @@ struct lpfc_mbx_query_fw_cfg {
 #define lpfc_function_mode_dal_WORD		function_mode
 #define lpfc_function_mode_lro_SHIFT		9
 #define lpfc_function_mode_lro_MASK		0x00000001
-#define lpfc_function_mode_lro_WORD		function_mode9
+#define lpfc_function_mode_lro_WORD		function_mode
 #define lpfc_function_mode_flex10_SHIFT		10
 #define lpfc_function_mode_flex10_MASK		0x00000001
 #define lpfc_function_mode_flex10_WORD		function_mode
@@ -1358,10 +1427,16 @@ struct lpfc_mbx_init_vfi {
 #define lpfc_init_vfi_vf_SHIFT		29
 #define lpfc_init_vfi_vf_MASK		0x00000001
 #define lpfc_init_vfi_vf_WORD		word1
+#define lpfc_init_vfi_vp_SHIFT		28
+#define lpfc_init_vfi_vp_MASK		0x00000001
+#define lpfc_init_vfi_vp_WORD		word1
 #define lpfc_init_vfi_vfi_SHIFT		0
 #define lpfc_init_vfi_vfi_MASK		0x0000FFFF
 #define lpfc_init_vfi_vfi_WORD		word1
 	uint32_t word2;
+#define lpfc_init_vfi_vpi_SHIFT		16
+#define lpfc_init_vfi_vpi_MASK		0x0000FFFF
+#define lpfc_init_vfi_vpi_WORD		word2
 #define lpfc_init_vfi_fcfi_SHIFT	0
 #define lpfc_init_vfi_fcfi_MASK		0x0000FFFF
 #define lpfc_init_vfi_fcfi_WORD		word2
@@ -2069,6 +2144,8 @@ struct lpfc_mcqe {
 #define LPFC_TRAILER_CODE_FCOE	0x2
 #define LPFC_TRAILER_CODE_DCBX	0x3
 #define LPFC_TRAILER_CODE_GRP5	0x5
+#define LPFC_TRAILER_CODE_FC	0x10
+#define LPFC_TRAILER_CODE_SLI	0x11
 };
 
 struct lpfc_acqe_link {
@@ -2094,11 +2171,12 @@ struct lpfc_acqe_link {
 #define LPFC_ASYNC_LINK_STATUS_UP		0x1
 #define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN	0x2
 #define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP	0x3
-#define lpfc_acqe_link_physical_SHIFT		0
-#define lpfc_acqe_link_physical_MASK		0x000000FF
-#define lpfc_acqe_link_physical_WORD		word0
-#define LPFC_ASYNC_LINK_PORT_A			0x0
-#define LPFC_ASYNC_LINK_PORT_B			0x1
+#define lpfc_acqe_link_type_SHIFT		6
+#define lpfc_acqe_link_type_MASK		0x00000003
+#define lpfc_acqe_link_type_WORD		word0
+#define lpfc_acqe_link_number_SHIFT		0
+#define lpfc_acqe_link_number_MASK		0x0000003F
+#define lpfc_acqe_link_number_WORD		word0
 	uint32_t word1;
 #define lpfc_acqe_link_fault_SHIFT	0
 #define lpfc_acqe_link_fault_MASK	0x000000FF
@@ -2106,29 +2184,31 @@ struct lpfc_acqe_link {
 #define LPFC_ASYNC_LINK_FAULT_NONE	0x0
 #define LPFC_ASYNC_LINK_FAULT_LOCAL	0x1
 #define LPFC_ASYNC_LINK_FAULT_REMOTE	0x2
-#define lpfc_acqe_qos_link_speed_SHIFT	16
-#define lpfc_acqe_qos_link_speed_MASK	0x0000FFFF
-#define lpfc_acqe_qos_link_speed_WORD	word1
+#define lpfc_acqe_logical_link_speed_SHIFT	16
+#define lpfc_acqe_logical_link_speed_MASK	0x0000FFFF
+#define lpfc_acqe_logical_link_speed_WORD	word1
 	uint32_t event_tag;
 	uint32_t trailer;
+#define LPFC_LINK_EVENT_TYPE_PHYSICAL	0x0
+#define LPFC_LINK_EVENT_TYPE_VIRTUAL	0x1
 };
 
-struct lpfc_acqe_fcoe {
+struct lpfc_acqe_fip {
 	uint32_t index;
 	uint32_t word1;
-#define lpfc_acqe_fcoe_fcf_count_SHIFT		0
-#define lpfc_acqe_fcoe_fcf_count_MASK		0x0000FFFF
-#define lpfc_acqe_fcoe_fcf_count_WORD		word1
-#define lpfc_acqe_fcoe_event_type_SHIFT		16
-#define lpfc_acqe_fcoe_event_type_MASK		0x0000FFFF
-#define lpfc_acqe_fcoe_event_type_WORD		word1
-#define LPFC_FCOE_EVENT_TYPE_NEW_FCF		0x1
-#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL	0x2
-#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD		0x3
-#define LPFC_FCOE_EVENT_TYPE_CVL		0x4
-#define LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD	0x5
+#define lpfc_acqe_fip_fcf_count_SHIFT		0
+#define lpfc_acqe_fip_fcf_count_MASK		0x0000FFFF
+#define lpfc_acqe_fip_fcf_count_WORD		word1
+#define lpfc_acqe_fip_event_type_SHIFT		16
+#define lpfc_acqe_fip_event_type_MASK		0x0000FFFF
+#define lpfc_acqe_fip_event_type_WORD		word1
 	uint32_t event_tag;
 	uint32_t trailer;
+#define LPFC_FIP_EVENT_TYPE_NEW_FCF		0x1
+#define LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL	0x2
+#define LPFC_FIP_EVENT_TYPE_FCF_DEAD		0x3
+#define LPFC_FIP_EVENT_TYPE_CVL			0x4
+#define LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD	0x5
 };
 
 struct lpfc_acqe_dcbx {
@@ -2140,9 +2220,12 @@ struct lpfc_acqe_dcbx {
 
 struct lpfc_acqe_grp5 {
 	uint32_t word0;
-#define lpfc_acqe_grp5_pport_SHIFT	0
-#define lpfc_acqe_grp5_pport_MASK	0x000000FF
-#define lpfc_acqe_grp5_pport_WORD	word0
+#define lpfc_acqe_grp5_type_SHIFT		6
+#define lpfc_acqe_grp5_type_MASK		0x00000003
+#define lpfc_acqe_grp5_type_WORD		word0
+#define lpfc_acqe_grp5_number_SHIFT		0
+#define lpfc_acqe_grp5_number_MASK		0x0000003F
+#define lpfc_acqe_grp5_number_WORD		word0
 	uint32_t word1;
 #define lpfc_acqe_grp5_llink_spd_SHIFT	16
 #define lpfc_acqe_grp5_llink_spd_MASK	0x0000FFFF
@@ -2151,6 +2234,68 @@ struct lpfc_acqe_grp5 {
 	uint32_t trailer;
 };
 
+struct lpfc_acqe_fc_la {
+	uint32_t word0;
+#define lpfc_acqe_fc_la_speed_SHIFT		24
+#define lpfc_acqe_fc_la_speed_MASK		0x000000FF
+#define lpfc_acqe_fc_la_speed_WORD		word0
+#define LPFC_FC_LA_SPEED_UNKOWN		0x0
+#define LPFC_FC_LA_SPEED_1G		0x1
+#define LPFC_FC_LA_SPEED_2G		0x2
+#define LPFC_FC_LA_SPEED_4G		0x4
+#define LPFC_FC_LA_SPEED_8G		0x8
+#define LPFC_FC_LA_SPEED_10G		0xA
+#define LPFC_FC_LA_SPEED_16G		0x10
+#define lpfc_acqe_fc_la_topology_SHIFT		16
+#define lpfc_acqe_fc_la_topology_MASK		0x000000FF
+#define lpfc_acqe_fc_la_topology_WORD		word0
+#define LPFC_FC_LA_TOP_UNKOWN		0x0
+#define LPFC_FC_LA_TOP_P2P		0x1
+#define LPFC_FC_LA_TOP_FCAL		0x2
+#define LPFC_FC_LA_TOP_INTERNAL_LOOP	0x3
+#define LPFC_FC_LA_TOP_SERDES_LOOP	0x4
+#define lpfc_acqe_fc_la_att_type_SHIFT		8
+#define lpfc_acqe_fc_la_att_type_MASK		0x000000FF
+#define lpfc_acqe_fc_la_att_type_WORD		word0
+#define LPFC_FC_LA_TYPE_LINK_UP		0x1
+#define LPFC_FC_LA_TYPE_LINK_DOWN	0x2
+#define LPFC_FC_LA_TYPE_NO_HARD_ALPA	0x3
+#define lpfc_acqe_fc_la_port_type_SHIFT		6
+#define lpfc_acqe_fc_la_port_type_MASK		0x00000003
+#define lpfc_acqe_fc_la_port_type_WORD		word0
+#define LPFC_LINK_TYPE_ETHERNET		0x0
+#define LPFC_LINK_TYPE_FC		0x1
+#define lpfc_acqe_fc_la_port_number_SHIFT	0
+#define lpfc_acqe_fc_la_port_number_MASK	0x0000003F
+#define lpfc_acqe_fc_la_port_number_WORD	word0
+	uint32_t word1;
+#define lpfc_acqe_fc_la_llink_spd_SHIFT		16
+#define lpfc_acqe_fc_la_llink_spd_MASK		0x0000FFFF
+#define lpfc_acqe_fc_la_llink_spd_WORD		word1
+#define lpfc_acqe_fc_la_fault_SHIFT		0
+#define lpfc_acqe_fc_la_fault_MASK		0x000000FF
+#define lpfc_acqe_fc_la_fault_WORD		word1
+#define LPFC_FC_LA_FAULT_NONE		0x0
+#define LPFC_FC_LA_FAULT_LOCAL		0x1
+#define LPFC_FC_LA_FAULT_REMOTE		0x2
+	uint32_t event_tag;
+	uint32_t trailer;
+#define LPFC_FC_LA_EVENT_TYPE_FC_LINK		0x1
+#define LPFC_FC_LA_EVENT_TYPE_SHARED_LINK	0x2
+};
+
+struct lpfc_acqe_sli {
+	uint32_t event_data1;
+	uint32_t event_data2;
+	uint32_t reserved;
+	uint32_t trailer;
+#define LPFC_SLI_EVENT_TYPE_PORT_ERROR		0x1
+#define LPFC_SLI_EVENT_TYPE_OVER_TEMP		0x2
+#define LPFC_SLI_EVENT_TYPE_NORM_TEMP		0x3
+#define LPFC_SLI_EVENT_TYPE_NVLOG_POST		0x4
+#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP		0x5
+};
+
 /*
  * Define the bootstrap mailbox (bmbx) region used to communicate
  * mailbox command between the host and port. The mailbox consists
@@ -2210,7 +2355,7 @@ struct wqe_common {
 #define wqe_rcvoxid_WORD      word9
 	uint32_t word10;
 #define wqe_ebde_cnt_SHIFT    0
-#define wqe_ebde_cnt_MASK     0x00000007
+#define wqe_ebde_cnt_MASK     0x0000000f
 #define wqe_ebde_cnt_WORD     word10
 #define wqe_lenloc_SHIFT      7
 #define wqe_lenloc_MASK       0x00000003
@@ -2402,7 +2547,6 @@ struct xmit_seq64_wqe {
 	uint32_t relative_offset;
 	struct wqe_rctl_dfctl wge_ctl;
 	struct wqe_common wqe_com; /* words 6-11 */
-	/* Note: word10 different REVISIT */
 	uint32_t xmit_len;
 	uint32_t rsvd_12_15[3];
 };
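
The new LPFC_TRAILER_CODE_FC and LPFC_TRAILER_CODE_SLI values, together with the lpfc_acqe_fc_la and lpfc_acqe_sli layouts above, mean the asynchronous-event path now has two more trailer codes to branch on. The sketch below only illustrates that kind of dispatch; the demo_* handler names and the generic void pointer are invented for the example and are not the driver's actual routines.

    /* Illustrative dispatch on the async completion trailer code. */
    static void demo_dispatch_acqe(struct lpfc_hba *phba, void *acqe,
                                   uint8_t trailer_code)
    {
            switch (trailer_code) {
            case LPFC_TRAILER_CODE_LINK:
                    demo_handle_link(phba, acqe);   /* struct lpfc_acqe_link  */
                    break;
            case LPFC_TRAILER_CODE_FCOE:
                    demo_handle_fip(phba, acqe);    /* struct lpfc_acqe_fip   */
                    break;
            case LPFC_TRAILER_CODE_FC:              /* new: FC link attention */
                    demo_handle_fc_la(phba, acqe);  /* struct lpfc_acqe_fc_la */
                    break;
            case LPFC_TRAILER_CODE_SLI:             /* new: SLI port events   */
                    demo_handle_sli(phba, acqe);    /* struct lpfc_acqe_sli   */
                    break;
            default:
                    break;
            }
    }
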

File diff suppressed because it is too large
+ 493 - 225
drivers/scsi/lpfc/lpfc_init.c


+ 1 - 1
drivers/scsi/lpfc/lpfc_logmsg.h

@@ -33,7 +33,7 @@
 #define LOG_FCP_ERROR	0x00001000	/* log errors, not underruns */
 #define LOG_LIBDFC	0x00002000	/* Libdfc events */
 #define LOG_VPORT	0x00004000	/* NPIV events */
-#define LOF_SECURITY	0x00008000	/* Security events */
+#define LOG_SECURITY	0x00008000	/* Security events */
 #define LOG_EVENT	0x00010000	/* CT,TEMP,DUMP, logging */
 #define LOG_FIP		0x00020000	/* FIP events */
 #define LOG_ALL_MSG	0xffffffff	/* LOG all messages */
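
These LOG_* values form a verbosity bitmask: a message is emitted only if its class bit is set in the configured mask, which is why the misspelled constant mattered to callers that OR these flags together. A small, hypothetical illustration of that masking (not the driver's lpfc_printf_log implementation):

    #include <stdint.h>
    #include <stdio.h>

    #define LOG_VPORT       0x00004000
    #define LOG_SECURITY    0x00008000
    #define LOG_FIP         0x00020000

    /* Hypothetical helper: print only when the message's class is enabled. */
    static void demo_log(uint32_t cfg_log_verbose, uint32_t msg_class,
                         const char *msg)
    {
            if (cfg_log_verbose & msg_class)
                    printf("lpfc: %s\n", msg);
    }

    int main(void)
    {
            uint32_t verbose = LOG_SECURITY | LOG_FIP;  /* enabled classes */

            demo_log(verbose, LOG_SECURITY, "security event logged");
            demo_log(verbose, LOG_VPORT, "vport event suppressed");
            return 0;
    }
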

+ 46 - 30
drivers/scsi/lpfc/lpfc_mbox.c

@@ -263,18 +263,19 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 }
 
 /**
- * lpfc_read_la - Prepare a mailbox command for reading HBA link attention
+ * lpfc_read_topology - Prepare a mailbox command for reading HBA topology
  * @phba: pointer to lpfc hba data structure.
  * @pmb: pointer to the driver internal queue element for mailbox command.
  * @mp: DMA buffer memory for reading the link attention information into.
  *
- * The read link attention mailbox command is issued to read the Link Event
- * Attention information indicated by the HBA port when the Link Event bit
- * of the Host Attention (HSTATT) register is set to 1. A Link Event
+ * The read topology mailbox command is issued to read the link topology
+ * information indicated by the HBA port when the Link Event bit of the Host
+ * Attention (HSTATT) register is set to 1 (For SLI-3) or when an FC Link
+ * Attention ACQE is received from the port (For SLI-4). A Link Event
  * Attention occurs based on an exception detected at the Fibre Channel link
  * interface.
  *
- * This routine prepares the mailbox command for reading HBA link attention
+ * This routine prepares the mailbox command for reading HBA link topology
  * information. A DMA memory has been set aside and address passed to the
  * HBA through @mp for the HBA to DMA link attention information into the
  * memory as part of the execution of the mailbox command.
@@ -283,7 +284,8 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
  *    0 - Success (currently always return 0)
  **/
 int
-lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
+lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
+		   struct lpfc_dmabuf *mp)
 {
 	MAILBOX_t *mb;
 	struct lpfc_sli *psli;
@@ -293,15 +295,15 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
 	INIT_LIST_HEAD(&mp->list);
-	mb->mbxCommand = MBX_READ_LA64;
-	mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize = 128;
-	mb->un.varReadLA.un.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
-	mb->un.varReadLA.un.lilpBde64.addrLow = putPaddrLow(mp->phys);
+	mb->mbxCommand = MBX_READ_TOPOLOGY;
+	mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE;
+	mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
+	mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys);
 
 	/* Save address for later completion and set the owner to host so that
 	 * the FW knows this mailbox is available for processing.
 	 */
-	pmb->context1 = (uint8_t *) mp;
+	pmb->context1 = (uint8_t *)mp;
 	mb->mbxOwner = OWN_HOST;
 	return (0);
 }
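
lpfc_read_topology hands the port a 64-bit buffer descriptor: the DMA address of the ALPA map buffer is split into high and low 32-bit halves (putPaddrHigh/putPaddrLow in the hunk above) and the size is set to LPFC_ALPA_MAP_SIZE. A minimal standalone sketch of that high/low split follows, using a plain uint64_t stand-in for the DMA handle; the demo_* helper names mirror the convention rather than the driver's exact macros.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for putPaddrHigh/putPaddrLow style helpers. */
    #define demo_paddr_high(addr)   ((uint32_t)((uint64_t)(addr) >> 32))
    #define demo_paddr_low(addr)    ((uint32_t)((uint64_t)(addr) & 0xFFFFFFFFu))

    struct demo_bde64 {
            uint32_t addr_high;     /* upper 32 bits of the DMA address */
            uint32_t addr_low;      /* lower 32 bits of the DMA address */
            uint32_t size;          /* buffer size in bytes */
    };

    int main(void)
    {
            uint64_t dma_handle = 0x0000000123456000ULL;  /* pretend address */
            struct demo_bde64 bde;

            bde.addr_high = demo_paddr_high(dma_handle);
            bde.addr_low  = demo_paddr_low(dma_handle);
            bde.size      = 128;    /* e.g. an ALPA map sized buffer */

            printf("BDE: high=0x%08x low=0x%08x size=%u\n",
                   bde.addr_high, bde.addr_low, bde.size);
            return 0;
    }
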
@@ -516,18 +518,33 @@ lpfc_init_link(struct lpfc_hba * phba,
 	vpd = &phba->vpd;
 	if (vpd->rev.feaLevelHigh >= 0x02){
 		switch(linkspeed){
-			case LINK_SPEED_1G:
-			case LINK_SPEED_2G:
-			case LINK_SPEED_4G:
-			case LINK_SPEED_8G:
-				mb->un.varInitLnk.link_flags |=
-							FLAGS_LINK_SPEED;
-				mb->un.varInitLnk.link_speed = linkspeed;
+		case LPFC_USER_LINK_SPEED_1G:
+			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_1G;
+			break;
+		case LPFC_USER_LINK_SPEED_2G:
+			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_2G;
+			break;
+		case LPFC_USER_LINK_SPEED_4G:
+			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_4G;
+			break;
+		case LPFC_USER_LINK_SPEED_8G:
+			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_8G;
+			break;
+		case LPFC_USER_LINK_SPEED_10G:
+			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_10G;
 			break;
-			case LINK_SPEED_AUTO:
-			default:
-				mb->un.varInitLnk.link_speed =
-							LINK_SPEED_AUTO;
+		case LPFC_USER_LINK_SPEED_16G:
+			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_16G;
+			break;
+		case LPFC_USER_LINK_SPEED_AUTO:
+		default:
+			mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
 			break;
 		}
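
The expanded switch maps the user-facing LPFC_USER_LINK_SPEED_* settings onto the firmware LINK_SPEED_* encodings, setting FLAGS_LINK_SPEED for every fixed speed and falling back to auto-negotiation otherwise. When a mapping like this keeps growing, a lookup table is a common alternative to a switch; the sketch below only illustrates that design option with made-up values and names, and is not a proposed change to the driver.

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical user-speed to firmware-speed map (values illustrative). */
    struct demo_speed_map {
            uint32_t user_speed;    /* LPFC_USER_LINK_SPEED_* style value */
            uint32_t fw_speed;      /* LINK_SPEED_* style value */
    };

    static const struct demo_speed_map demo_map[] = {
            { 1, 1 }, { 2, 2 }, { 4, 4 }, { 8, 8 }, { 10, 10 }, { 16, 16 },
    };

    /* Returns the firmware speed and sets *fixed when a fixed speed matched;
     * anything unrecognized falls back to auto-negotiation (0 here).
     */
    static uint32_t demo_map_speed(uint32_t user_speed, int *fixed)
    {
            size_t i;

            for (i = 0; i < sizeof(demo_map) / sizeof(demo_map[0]); i++) {
                    if (demo_map[i].user_speed == user_speed) {
                            *fixed = 1;
                            return demo_map[i].fw_speed;
                    }
            }
            *fixed = 0;
            return 0;       /* auto */
    }
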
 
@@ -693,7 +710,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
  * @did: remote port identifier.
  * @param: pointer to memory holding the server parameters.
  * @pmb: pointer to the driver internal queue element for mailbox command.
- * @flag: action flag to be passed back for the complete function.
+ * @rpi: the rpi to use in the registration (usually only used for SLI4).
  *
  * The registration login mailbox command is used to register an N_Port or
  * F_Port login. This registration allows the HBA to cache the remote N_Port
@@ -712,7 +729,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
  **/
 int
 lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
-	       uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
+	     uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi)
 {
 	MAILBOX_t *mb = &pmb->u.mb;
 	uint8_t *sparam;
@@ -722,17 +739,13 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
 
 	mb->un.varRegLogin.rpi = 0;
 	if (phba->sli_rev == LPFC_SLI_REV4) {
-		mb->un.varRegLogin.rpi = lpfc_sli4_alloc_rpi(phba);
+		mb->un.varRegLogin.rpi = rpi;
 		if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
 			return 1;
 	}
-
 	mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
 	mb->un.varRegLogin.did = did;
-	mb->un.varWords[30] = flag;	/* Set flag to issue action on cmpl */
-
 	mb->mbxOwner = OWN_HOST;
-
 	/* Get a buffer to hold NPorts Service Parameters */
 	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
 	if (mp)
@@ -743,7 +756,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
 		/* REG_LOGIN: no buffers */
 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
 				"0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
-				"flag x%x\n", vpi, did, flag);
+				"rpi x%x\n", vpi, did, rpi);
 		return (1);
 	}
 	INIT_LIST_HEAD(&mp->list);
@@ -1918,11 +1931,14 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
 	struct lpfc_mbx_init_vfi *init_vfi;
 
 	memset(mbox, 0, sizeof(*mbox));
+	mbox->vport = vport;
 	init_vfi = &mbox->u.mqe.un.init_vfi;
 	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
 	bf_set(lpfc_init_vfi_vr, init_vfi, 1);
 	bf_set(lpfc_init_vfi_vt, init_vfi, 1);
+	bf_set(lpfc_init_vfi_vp, init_vfi, 1);
 	bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
+	bf_set(lpfc_init_vpi_vpi, init_vfi, vport->vpi + vport->phba->vpi_base);
 	bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
 }
 

+ 9 - 1
drivers/scsi/lpfc/lpfc_mem.c

@@ -113,11 +113,16 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 		goto fail_free_mbox_pool;
 
 	if (phba->sli_rev == LPFC_SLI_REV4) {
+		phba->rrq_pool =
+			mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+						sizeof(struct lpfc_node_rrq));
+		if (!phba->rrq_pool)
+			goto fail_free_nlp_mem_pool;
 		phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
 					      phba->pcidev,
 					      LPFC_HDR_BUF_SIZE, align, 0);
 		if (!phba->lpfc_hrb_pool)
-			goto fail_free_nlp_mem_pool;
+			goto fail_free_rrq_mem_pool;
 
 		phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
 					      phba->pcidev,
@@ -147,6 +152,9 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
  fail_free_hrb_pool:
 	pci_pool_destroy(phba->lpfc_hrb_pool);
 	phba->lpfc_hrb_pool = NULL;
+ fail_free_rrq_mem_pool:
+	mempool_destroy(phba->rrq_pool);
+	phba->rrq_pool = NULL;
  fail_free_nlp_mem_pool:
 	mempool_destroy(phba->nlp_mem_pool);
 	phba->nlp_mem_pool = NULL;
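
The new rrq_pool slots into lpfc_mem_alloc's existing goto-based unwind: each successfully created pool gains a matching label so a later failure tears everything down in reverse order. A condensed sketch of that idiom is shown below; it uses the stock mempool_create_kmalloc_pool/mempool_destroy calls but a hypothetical context structure, pool names, and sizes.

    #include <linux/mempool.h>
    #include <linux/slab.h>

    #define DEMO_POOL_SIZE  64

    struct demo_ctx {
            mempool_t *small_pool;
            mempool_t *rrq_pool;
    };

    /* Illustrative allocate-or-unwind pattern; returns 0 on success, -ENOMEM
     * on failure with nothing left allocated.
     */
    static int demo_mem_alloc(struct demo_ctx *ctx)
    {
            ctx->small_pool = mempool_create_kmalloc_pool(DEMO_POOL_SIZE, 64);
            if (!ctx->small_pool)
                    goto fail;

            ctx->rrq_pool = mempool_create_kmalloc_pool(DEMO_POOL_SIZE, 128);
            if (!ctx->rrq_pool)
                    goto fail_free_small_pool;

            return 0;

     fail_free_small_pool:
            mempool_destroy(ctx->small_pool);
            ctx->small_pool = NULL;
     fail:
            return -ENOMEM;
    }
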

+ 5 - 11
drivers/scsi/lpfc/lpfc_nportdisc.c

@@ -386,7 +386,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		goto out;
 
 	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
-			    (uint8_t *) sp, mbox, 0);
+			    (uint8_t *) sp, mbox, ndlp->nlp_rpi);
 	if (rc) {
 		mempool_free(mbox, phba->mbox_mem_pool);
 		goto out;
@@ -632,7 +632,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
-	if (!(ndlp->nlp_flag & NLP_RPI_VALID)) {
+	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
 		return 0;
 	}
@@ -968,7 +968,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
 	lpfc_unreg_rpi(vport, ndlp);
 
 	if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
-			   (uint8_t *) sp, mbox, 0) == 0) {
+			 (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
 		switch (ndlp->nlp_DID) {
 		case NameServer_DID:
 			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
@@ -1338,12 +1338,6 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
-			if (phba->sli_rev == LPFC_SLI_REV4) {
-				spin_unlock_irq(&phba->hbalock);
-				lpfc_sli4_free_rpi(phba,
-					mb->u.mb.un.varRegLogin.rpi);
-				spin_lock_irq(&phba->hbalock);
-			}
 			mp = (struct lpfc_dmabuf *) (mb->context1);
 			if (mp) {
 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -1426,7 +1420,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
 	}
 
 	ndlp->nlp_rpi = mb->un.varWords[0];
-	ndlp->nlp_flag |= NLP_RPI_VALID;
+	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
 
 	/* Only if we are not a fabric nport do we issue PRLI */
 	if (!(ndlp->nlp_type & NLP_FABRIC)) {
@@ -2027,7 +2021,7 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
 
 	if (!mb->mbxStatus) {
 		ndlp->nlp_rpi = mb->un.varWords[0];
-		ndlp->nlp_flag |= NLP_RPI_VALID;
+		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
 	} else {
 		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
 			lpfc_drop_node(vport, ndlp);

+ 76 - 6
drivers/scsi/lpfc/lpfc_scsi.c

@@ -621,10 +621,13 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
 			  struct sli4_wcqe_xri_aborted *axri)
 {
 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
 	struct lpfc_scsi_buf *psb, *next_psb;
 	unsigned long iflag = 0;
 	struct lpfc_iocbq *iocbq;
 	int i;
+	struct lpfc_nodelist *ndlp;
+	int rrq_empty = 0;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 
 	spin_lock_irqsave(&phba->hbalock, iflag);
@@ -637,8 +640,14 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
 			psb->status = IOSTAT_SUCCESS;
 			spin_unlock(
 				&phba->sli4_hba.abts_scsi_buf_list_lock);
+			ndlp = psb->rdata->pnode;
+			rrq_empty = list_empty(&phba->active_rrq_list);
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			if (ndlp)
+				lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
 			lpfc_release_scsi_buf_s4(phba, psb);
+			if (rrq_empty)
+				lpfc_worker_wake_up(phba);
 			return;
 		}
 	}
@@ -914,7 +923,7 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
 }
 
 /**
- * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
+ * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
  * @phba: The HBA for which this call is being executed.
  *
  * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
@@ -925,7 +934,7 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
  *   Pointer to lpfc_scsi_buf - Success
  **/
 static struct lpfc_scsi_buf*
-lpfc_get_scsi_buf(struct lpfc_hba * phba)
+lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
 	struct  lpfc_scsi_buf * lpfc_cmd = NULL;
 	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
@@ -941,6 +950,67 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
 	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
 	return  lpfc_cmd;
 }
+/**
+ * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
+ * @phba: The HBA for which this call is being executed.
+ *
+ * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
+ * and returns to caller.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_scsi_buf - Success
+ **/
+static struct lpfc_scsi_buf*
+lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+	struct  lpfc_scsi_buf *lpfc_cmd = NULL;
+	struct  lpfc_scsi_buf *start_lpfc_cmd = NULL;
+	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
+	unsigned long iflag = 0;
+	int found = 0;
+
+	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+	while (!found && lpfc_cmd) {
+		if (lpfc_test_rrq_active(phba, ndlp,
+					 lpfc_cmd->cur_iocbq.sli4_xritag)) {
+			lpfc_release_scsi_buf_s4(phba, lpfc_cmd);
+			spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+			list_remove_head(scsi_buf_list, lpfc_cmd,
+					 struct lpfc_scsi_buf, list);
+			spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
+						 iflag);
+			if (lpfc_cmd == start_lpfc_cmd) {
+				lpfc_cmd = NULL;
+				break;
+			} else
+				continue;
+		}
+		found = 1;
+		lpfc_cmd->seg_cnt = 0;
+		lpfc_cmd->nonsg_phys = 0;
+		lpfc_cmd->prot_seg_cnt = 0;
+	}
+	return  lpfc_cmd;
+}
+/**
+ * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
+ * @phba: The HBA for which this call is being executed.
+ *
+ * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
+ * and returns to caller.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_scsi_buf - Success
+ **/
+static struct lpfc_scsi_buf*
+lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+	return  phba->lpfc_get_scsi_buf(phba, ndlp);
+}
 
 /**
  * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
@@ -2744,18 +2814,19 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
 
 	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
 	phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
-	phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
 
 	switch (dev_grp) {
 	case LPFC_PCI_DEV_LP:
 		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
+		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
 		break;
 	case LPFC_PCI_DEV_OC:
 		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
+		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
 		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -2764,7 +2835,6 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
 		return -ENODEV;
 		break;
 	}
-	phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
 	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
 	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
 	return 0;
@@ -2940,7 +3010,7 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
 		goto out_host_busy;
 
-	lpfc_cmd = lpfc_get_scsi_buf(phba);
+	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
 	if (lpfc_cmd == NULL) {
 		lpfc_rampdown_queue_depth(phba);
 
@@ -3239,7 +3309,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
 	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
 		return FAILED;
 
-	lpfc_cmd = lpfc_get_scsi_buf(phba);
+	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
 	if (lpfc_cmd == NULL)
 		return FAILED;
 	lpfc_cmd->timeout = 60;
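
lpfc_get_scsi_buf now takes the ndlp and simply forwards to an SLI-rev specific routine installed in lpfc_scsi_api_table_setup, the same per-device-group indirection already used for buffer prep and release. A stripped-down sketch of that dispatch pattern follows; the types and demo_* names are invented for illustration and the bodies are deliberately stubbed.

    #include <stddef.h>

    struct demo_buf;
    struct demo_node;

    /* Per-hardware-generation operations installed once at setup time. */
    struct demo_hba {
            int dev_grp;
            struct demo_buf *(*get_buf)(struct demo_hba *, struct demo_node *);
    };

    static struct demo_buf *demo_get_buf_s3(struct demo_hba *hba,
                                            struct demo_node *node)
    {
            /* SLI-3 style: no RRQ bookkeeping, just pop the free list (omitted). */
            return NULL;
    }

    static struct demo_buf *demo_get_buf_s4(struct demo_hba *hba,
                                            struct demo_node *node)
    {
            /* SLI-4 style: skip buffers whose XRI has an active RRQ (omitted). */
            return NULL;
    }

    static int demo_api_setup(struct demo_hba *hba)
    {
            switch (hba->dev_grp) {
            case 0:         /* e.g. an SLI-3 device group */
                    hba->get_buf = demo_get_buf_s3;
                    break;
            case 1:         /* e.g. an SLI-4 device group */
                    hba->get_buf = demo_get_buf_s4;
                    break;
            default:
                    return -1;
            }
            return 0;
    }

    /* Callers go through one wrapper and never care about the SLI rev. */
    static struct demo_buf *demo_get_buf(struct demo_hba *hba,
                                         struct demo_node *node)
    {
            return hba->get_buf(hba, node);
    }
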

+ 568 - 95
drivers/scsi/lpfc/lpfc_sli.c

@@ -512,9 +512,345 @@ __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
 	return sglq;
 }
 
+/**
+ * __lpfc_set_rrq_active - set RRQ active bit in the ndlp's xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @ndlp: nodelist pointer for this target.
+ * @xritag: xri used in this exchange.
+ * @rxid: Remote Exchange ID.
+ * @send_rrq: Flag used to determine if we should send rrq els cmd.
+ *
+ * This function is called with hbalock held.
+ * The active bit is set in the ndlp's active rrq xri_bitmap. Allocates an
+ * rrq struct and adds it to the active_rrq_list.
+ *
+ * returns  0 if an rrq slot was reserved for this xri
+ *         < 0 if unable to get rrq memory or an invalid parameter was passed.
+ **/
+static int
+__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+		uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
+{
+	uint16_t adj_xri;
+	struct lpfc_node_rrq *rrq;
+	int empty;
+
+	/*
+	 * set the active bit even if there is no mem available.
+	 */
+	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
+	if (!ndlp)
+		return -EINVAL;
+	if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
+		return -EINVAL;
+	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
+	if (rrq) {
+		rrq->send_rrq = send_rrq;
+		rrq->xritag = xritag;
+		rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
+		rrq->ndlp = ndlp;
+		rrq->nlp_DID = ndlp->nlp_DID;
+		rrq->vport = ndlp->vport;
+		rrq->rxid = rxid;
+		empty = list_empty(&phba->active_rrq_list);
+		if (phba->cfg_enable_rrq && send_rrq)
+			/*
+			 * We need the xri before we can add this to the
+			 * phba active rrq list.
+			 */
+			rrq->send_rrq = send_rrq;
+		else
+			rrq->send_rrq = 0;
+		list_add_tail(&rrq->list, &phba->active_rrq_list);
+		if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
+			phba->hba_flag |= HBA_RRQ_ACTIVE;
+			if (empty)
+				lpfc_worker_wake_up(phba);
+		}
+		return 0;
+	}
+	return -ENOMEM;
+}
+
+/**
+ * __lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @xritag: xri used in this exchange.
+ * @rrq: The RRQ to be cleared.
+ *
+ * This function is called with hbalock held. It clears the RRQ active
+ * bit for the xri in the ndlp's xri_bitmap and frees the rrq structure.
+ **/
+static void
+__lpfc_clr_rrq_active(struct lpfc_hba *phba,
+			uint16_t xritag,
+			struct lpfc_node_rrq *rrq)
+{
+	uint16_t adj_xri;
+	struct lpfc_nodelist *ndlp;
+
+	ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
+
+	/* The target DID could have been swapped (cable swap),
+	 * so we should use the ndlp from the findnode if it is
+	 * available.
+	 */
+	if (!ndlp)
+		ndlp = rrq->ndlp;
+
+	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
+	if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
+		rrq->send_rrq = 0;
+		rrq->xritag = 0;
+		rrq->rrq_stop_time = 0;
+	}
+	mempool_free(rrq, phba->rrq_pool);
+}
+
+/**
+ * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with hbalock held. It checks whether
+ * stop_time (ratov from setting the rrq active) has been reached;
+ * if it has and the send_rrq flag is set, it calls lpfc_send_rrq.
+ * If the send_rrq flag is not set, it just calls the routine to
+ * clear the rrq and free the rrq resource.
+ * Before leaving, the timer is set to the next rrq that is going
+ * to expire.
+ *
+ **/
+void
+lpfc_handle_rrq_active(struct lpfc_hba *phba)
+{
+	struct lpfc_node_rrq *rrq;
+	struct lpfc_node_rrq *nextrrq;
+	unsigned long next_time;
+	unsigned long iflags;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+	next_time = jiffies + HZ * (phba->fc_ratov + 1);
+	list_for_each_entry_safe(rrq, nextrrq,
+			&phba->active_rrq_list, list) {
+		if (time_after(jiffies, rrq->rrq_stop_time)) {
+			list_del(&rrq->list);
+			if (!rrq->send_rrq)
+				/* this call will free the rrq */
+				__lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+			else {
+			/* if we send the rrq then the completion handler
+			 *  will clear the bit in the xribitmap.
+			 */
+				spin_unlock_irqrestore(&phba->hbalock, iflags);
+				if (lpfc_send_rrq(phba, rrq)) {
+					lpfc_clr_rrq_active(phba, rrq->xritag,
+								 rrq);
+				}
+				spin_lock_irqsave(&phba->hbalock, iflags);
+			}
+		} else if  (time_before(rrq->rrq_stop_time, next_time))
+			next_time = rrq->rrq_stop_time;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	if (!list_empty(&phba->active_rrq_list))
+		mod_timer(&phba->rrq_tmr, next_time);
+}
+
+/**
+ * lpfc_get_active_rrq - Get the active RRQ for this exchange.
+ * @vport: Pointer to vport context object.
+ * @xri: The xri used in the exchange.
+ * @did: The targets DID for this exchange.
+ *
+ * returns NULL = rrq not found in the phba->active_rrq_list.
+ *         rrq = rrq for this xri and target.
+ **/
+struct lpfc_node_rrq *
+lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_node_rrq *rrq;
+	struct lpfc_node_rrq *nextrrq;
+	unsigned long iflags;
+
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return NULL;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
+		if (rrq->vport == vport && rrq->xritag == xri &&
+				rrq->nlp_DID == did){
+			list_del(&rrq->list);
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			return rrq;
+		}
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return NULL;
+}
+
+/**
+ * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
+ * @vport: Pointer to vport context object.
+ *
+ * Remove all active RRQs for this vport from the phba->active_rrq_list and
+ * clear the rrq.
+ **/
+void
+lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport)
+
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_node_rrq *rrq;
+	struct lpfc_node_rrq *nextrrq;
+	unsigned long iflags;
+
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
+		if (rrq->vport == vport) {
+			list_del(&rrq->list);
+			__lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+		}
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+}
+
+/**
+ * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list.
+ * @phba: Pointer to HBA context object.
+ *
+ * Remove all rrqs from the phba->active_rrq_list and free them by
+ * calling __lpfc_clr_rrq_active.
+ *
+ **/
+void
+lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
+{
+	struct lpfc_node_rrq *rrq;
+	struct lpfc_node_rrq *nextrrq;
+	unsigned long next_time;
+	unsigned long iflags;
+
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+	next_time = jiffies + HZ * (phba->fc_ratov * 2);
+	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
+		list_del(&rrq->list);
+		__lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	if (!list_empty(&phba->active_rrq_list))
+		mod_timer(&phba->rrq_tmr, next_time);
+}
+
+
+/**
+ * __lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @ndlp: Targets nodelist pointer for this exchange.
+ * @xritag: the xri in the bitmap to test.
+ *
+ * This function is called with hbalock held. It returns
+ *         0 = rrq not active for this xri
+ *         1 = rrq is valid for this xri.
+ **/
+static int
+__lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+			uint16_t  xritag)
+{
+	uint16_t adj_xri;
+
+	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
+	if (!ndlp)
+		return 0;
+	if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
+			return 1;
+	else
+		return 0;
+}
+
+/**
+ * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @ndlp: nodelist pointer for this target.
+ * @xritag: xri used in this exchange.
+ * @rxid: Remote Exchange ID.
+ * @send_rrq: Flag used to determine if we should send rrq els cmd.
+ *
+ * This function takes the hbalock.
+ * The active bit is always set in the active rrq xri_bitmap even
+ * if there is no slot available for the other rrq information.
+ *
+ * returns 0 if the rrq was activated for this xri
+ *         < 0 No memory or invalid ndlp.
+ **/
+int
+lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+			uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
+{
+	int ret;
+	unsigned long iflags;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return ret;
+}
+
+/**
+ * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @xritag: xri used in this exchange.
+ * @rrq: The RRQ to be cleared.
+ *
+ * This function takes the hbalock.
+ **/
+void
+lpfc_clr_rrq_active(struct lpfc_hba *phba,
+			uint16_t xritag,
+			struct lpfc_node_rrq *rrq)
+{
+	unsigned long iflags;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	__lpfc_clr_rrq_active(phba, xritag, rrq);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return;
+}
+
+
+
+/**
+ * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @ndlp: Targets nodelist pointer for this exchange.
+ * @xritag: the xri in the bitmap to test.
+ *
+ * This function takes the hbalock.
+ * returns 0 = rrq not active for this xri
+ *         1 = rrq is valid for this xri.
+ **/
+int
+lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+			uint16_t  xritag)
+{
+	int ret;
+	unsigned long iflags;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	ret = __lpfc_test_rrq_active(phba, ndlp, xritag);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return ret;
+}
+
 /**
  * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
  * @phba: Pointer to HBA context object.
+ * @piocb: Pointer to the iocbq.
  *
  * This function is called with hbalock held. This function
  * Gets a new driver sglq object from the sglq list. If the
@@ -522,17 +858,51 @@ __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
  * allocated sglq object else it returns NULL.
  **/
 static struct lpfc_sglq *
-__lpfc_sli_get_sglq(struct lpfc_hba *phba)
+__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
 {
 	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
 	struct lpfc_sglq *sglq = NULL;
+	struct lpfc_sglq *start_sglq = NULL;
 	uint16_t adj_xri;
+	struct lpfc_scsi_buf *lpfc_cmd;
+	struct lpfc_nodelist *ndlp;
+	int found = 0;
+
+	if (piocbq->iocb_flag &  LPFC_IO_FCP) {
+		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
+		ndlp = lpfc_cmd->rdata->pnode;
+	} else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
+			!(piocbq->iocb_flag & LPFC_IO_LIBDFC))
+		ndlp = piocbq->context_un.ndlp;
+	else
+		ndlp = piocbq->context1;
+
 	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
-	if (!sglq)
-		return NULL;
-	adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
-	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
-	sglq->state = SGL_ALLOCATED;
+	start_sglq = sglq;
+	while (!found) {
+		if (!sglq)
+			return NULL;
+		adj_xri = sglq->sli4_xritag -
+				phba->sli4_hba.max_cfg_param.xri_base;
+		if (__lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
+			/* This xri has an rrq outstanding for this DID.
+			 * put it back in the list and get another xri.
+			 */
+			list_add_tail(&sglq->list, lpfc_sgl_list);
+			sglq = NULL;
+			list_remove_head(lpfc_sgl_list, sglq,
+						struct lpfc_sglq, list);
+			if (sglq == start_sglq) {
+				sglq = NULL;
+				break;
+			} else
+				continue;
+		}
+		sglq->ndlp = ndlp;
+		found = 1;
+		phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
+		sglq->state = SGL_ALLOCATED;
+	}
 	return sglq;
 }
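
The RRQ machinery above keeps one bit per XRI in the node's xri_bitmap: setting an RRQ active is a test_and_set_bit on the adjusted XRI, clearing it is the matching clear, and __lpfc_sli_get_sglq / lpfc_get_scsi_buf_s4 skip any resource whose bit is still set, recycling candidates until they return to the starting entry. The fragment below illustrates only the bitmap side of that scheme with the real test_and_set_bit/test_bit/clear_bit helpers but an invented context structure and sizes.

    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define DEMO_MAX_XRI    256

    struct demo_node {
            unsigned long xri_bitmap[BITS_TO_LONGS(DEMO_MAX_XRI)];
    };

    /* Mark an exchange as waiting out its RRQ window; returns -EINVAL if the
     * bit was already set (an RRQ is already outstanding for this xri).
     */
    static int demo_set_rrq_active(struct demo_node *node, unsigned int xri)
    {
            if (test_and_set_bit(xri, node->xri_bitmap))
                    return -EINVAL;
            return 0;
    }

    /* Resource selection: reject any xri that still has an RRQ pending. */
    static bool demo_xri_usable(struct demo_node *node, unsigned int xri)
    {
            return !test_bit(xri, node->xri_bitmap);
    }

    /* Called once the RRQ window expires or the RRQ ELS completes. */
    static void demo_clr_rrq_active(struct demo_node *node, unsigned int xri)
    {
            clear_bit(xri, node->xri_bitmap);
    }
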
 
@@ -598,6 +968,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 				&phba->sli4_hba.abts_sgl_list_lock, iflag);
 		} else {
 			sglq->state = SGL_FREED;
+			sglq->ndlp = NULL;
 			list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
 
 			/* Check if TXQ queue needs to be serviced */
@@ -1634,7 +2005,6 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
 	case MBX_READ_LNK_STAT:
 	case MBX_REG_LOGIN:
 	case MBX_UNREG_LOGIN:
-	case MBX_READ_LA:
 	case MBX_CLEAR_LA:
 	case MBX_DUMP_MEMORY:
 	case MBX_DUMP_CONTEXT:
@@ -1656,7 +2026,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
 	case MBX_READ_SPARM64:
 	case MBX_READ_RPI64:
 	case MBX_REG_LOGIN64:
-	case MBX_READ_LA64:
+	case MBX_READ_TOPOLOGY:
 	case MBX_WRITE_WWN:
 	case MBX_SET_DEBUG:
 	case MBX_LOAD_EXP_ROM:
@@ -1746,11 +2116,6 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		kfree(mp);
 	}
 
-	if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
-	    (phba->sli_rev == LPFC_SLI_REV4) &&
-	    (pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0))
-		lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
-
 	/*
 	 * If a REG_LOGIN succeeded  after node is destroyed or node
 	 * is in re-discovery driver need to cleanup the RPI.
@@ -3483,12 +3848,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
 	phba->pport->fc_myDID = 0;
 	phba->pport->fc_prevDID = 0;
 
-	/* Turn off parity checking and serr during the physical reset */
-	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
-	pci_write_config_word(phba->pcidev, PCI_COMMAND,
-			      (cfg_value &
-			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
-
 	spin_lock_irq(&phba->hbalock);
 	psli->sli_flag &= ~(LPFC_PROCESS_LA);
 	phba->fcf.fcf_flag = 0;
@@ -3508,9 +3867,18 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
 	/* Now physically reset the device */
 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 			"0389 Performing PCI function reset!\n");
+
+	/* Turn off parity checking and serr during the physical reset */
+	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
+	pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
+			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
+
 	/* Perform FCoE PCI function reset */
 	lpfc_pci_function_reset(phba);
 
+	/* Restore PCI cmd register */
+	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
+
 	return 0;
 }
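
The reset path now reads PCI_COMMAND, masks off parity and SERR reporting only for the duration of the physical function reset, and then restores the saved value, rather than leaving those bits cleared. That save/mask/restore shape, using the stock pci_read_config_word/pci_write_config_word accessors, looks roughly like the sketch below; demo_do_reset stands in for the actual reset step.

    #include <linux/pci.h>

    /* Illustrative save/mask/restore of PCI_COMMAND around a disruptive reset. */
    static void demo_reset_with_quiesced_errors(struct pci_dev *pdev,
                                                void (*demo_do_reset)(struct pci_dev *))
    {
            u16 cfg_value;

            /* Turn off parity checking and SERR while the device resets. */
            pci_read_config_word(pdev, PCI_COMMAND, &cfg_value);
            pci_write_config_word(pdev, PCI_COMMAND,
                                  cfg_value & ~(PCI_COMMAND_PARITY |
                                                PCI_COMMAND_SERR));

            demo_do_reset(pdev);

            /* Restore the original command register contents. */
            pci_write_config_word(pdev, PCI_COMMAND, cfg_value);
    }
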
 
@@ -4317,6 +4685,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	struct lpfc_vport *vport = phba->pport;
 	struct lpfc_dmabuf *mp;
 
+	/*
+	 * TODO:  Why does this routine execute these tasks in a different
+	 * order from probe?
+	 */
 	/* Perform a PCI function reset to start from clean */
 	rc = lpfc_pci_function_reset(phba);
 	if (unlikely(rc))
@@ -4357,13 +4729,16 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	}
 
 	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
-	if (unlikely(rc))
-		goto out_free_vpd;
-
+	if (unlikely(rc)) {
+		kfree(vpd);
+		goto out_free_mbox;
+	}
 	mqe = &mboxq->u.mqe;
 	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
 	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
-		phba->hba_flag |= HBA_FCOE_SUPPORT;
+		phba->hba_flag |= HBA_FCOE_MODE;
+	else
+		phba->hba_flag &= ~HBA_FCOE_MODE;
 
 	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
 		LPFC_DCBX_CEE_MODE)
@@ -4372,13 +4747,14 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 		phba->hba_flag &= ~HBA_FIP_SUPPORT;
 
 	if (phba->sli_rev != LPFC_SLI_REV4 ||
-	    !(phba->hba_flag & HBA_FCOE_SUPPORT)) {
+	    !(phba->hba_flag & HBA_FCOE_MODE)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
 			"0376 READ_REV Error. SLI Level %d "
 			"FCoE enabled %d\n",
-			phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT);
+			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
 		rc = -EIO;
-		goto out_free_vpd;
+		kfree(vpd);
+		goto out_free_mbox;
 	}
 	/*
 	 * Evaluate the read rev and vpd data. Populate the driver
@@ -4392,6 +4768,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 				"Using defaults.\n", rc);
 		rc = 0;
 	}
+	kfree(vpd);
 
 	/* Save information as VPD data */
 	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
@@ -4428,7 +4805,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 	if (unlikely(rc)) {
 		rc = -EIO;
-		goto out_free_vpd;
+		goto out_free_mbox;
 	}
 
 	/*
@@ -4476,7 +4853,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	if (rc) {
 		phba->link_state = LPFC_HBA_ERROR;
 		rc = -ENOMEM;
-		goto out_free_vpd;
+		goto out_free_mbox;
 	}
 
 	mboxq->vport = vport;
@@ -4501,7 +4878,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 				rc, bf_get(lpfc_mqe_status, mqe));
 		phba->link_state = LPFC_HBA_ERROR;
 		rc = -EIO;
-		goto out_free_vpd;
+		goto out_free_mbox;
 	}
 
 	if (phba->cfg_soft_wwnn)
@@ -4526,7 +4903,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 				"0582 Error %d during sgl post operation\n",
 					rc);
 		rc = -ENODEV;
-		goto out_free_vpd;
+		goto out_free_mbox;
 	}
 
 	/* Register SCSI SGL pool to the device */
@@ -4538,7 +4915,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 		/* Some Scsi buffers were moved to the abort scsi list */
 		/* A pci function reset will repost them */
 		rc = -ENODEV;
-		goto out_free_vpd;
+		goto out_free_mbox;
 	}
 
 	/* Post the rpi header region to the device. */
@@ -4548,7 +4925,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 				"0393 Error %d during rpi post operation\n",
 				rc);
 		rc = -ENODEV;
-		goto out_free_vpd;
+		goto out_free_mbox;
 	}
 
 	/* Set up all the queues to the device */
@@ -4608,33 +4985,33 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 		}
 	}
 
+	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+		/*
+		 * The FC Port needs to register FCFI (index 0)
+		 */
+		lpfc_reg_fcfi(phba, mboxq);
+		mboxq->vport = phba->pport;
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+		if (rc == MBX_SUCCESS)
+			rc = 0;
+		else
+			goto out_unset_queue;
+	}
 	/*
 	 * The port is ready, set the host's link state to LINK_DOWN
 	 * in preparation for link interrupts.
 	 */
-	lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
-	mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-	lpfc_set_loopback_flag(phba);
-	/* Change driver state to LPFC_LINK_DOWN right before init link */
 	spin_lock_irq(&phba->hbalock);
 	phba->link_state = LPFC_LINK_DOWN;
 	spin_unlock_irq(&phba->hbalock);
-	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
-	if (unlikely(rc != MBX_NOT_FINISHED)) {
-		kfree(vpd);
-		return 0;
-	} else
-		rc = -EIO;
-
+	rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+out_unset_queue:
 	/* Unset all the queues set up in this routine when error out */
 	if (rc)
 		lpfc_sli4_queue_unset(phba);
-
 out_stop_timers:
 	if (rc)
 		lpfc_stop_hba_timers(phba);
-out_free_vpd:
-	kfree(vpd);
 out_free_mbox:
 	mempool_free(mboxq, phba->mbox_mem_pool);
 	return rc;
@@ -5863,6 +6240,8 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
 	IOCB_t *icmd;
 	int numBdes = 0;
 	int i = 0;
+	uint32_t offset = 0; /* accumulated offset in the sg request list */
+	int inbound = 0; /* number of sg reply entries inbound from firmware */
 
 	if (!piocbq || !sglq)
 		return xritag;
@@ -5897,6 +6276,20 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
 			 */
 			bde.tus.w = le32_to_cpu(bpl->tus.w);
 			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
+			/* The offsets in the sgl need to be accumulated
+			 * separately for the request and reply lists.
+			 * The request is always first, the reply follows.
+			 */
+			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
+				/* add up the reply sg entries */
+				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
+					inbound++;
+				/* first inbound? reset the offset */
+				if (inbound == 1)
+					offset = 0;
+				bf_set(lpfc_sli4_sge_offset, sgl, offset);
+				offset += bde.tus.f.bdeSize;
+			}
 			bpl++;
 			sgl++;
 		}
@@ -6028,11 +6421,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		bf_set(els_req64_vf, &wqe->els_req, 0);
 		/* And a VFID for word 12 */
 		bf_set(els_req64_vfid, &wqe->els_req, 0);
-		/*
-		 * Set ct field to 3, indicates that the context_tag field
-		 * contains the FCFI and remote N_Port_ID is
-		 * in word 5.
-		 */
 		ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
 		bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
 		       iocbq->iocb.ulpContext);
@@ -6140,6 +6528,18 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
 	break;
 	case CMD_GEN_REQUEST64_CR:
+		/* For this command calculate the xmit length of the
+		 * request bde.
+		 */
+		xmit_len = 0;
+		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
+			sizeof(struct ulp_bde64);
+		for (i = 0; i < numBdes; i++) {
+			if (bpl[i].tus.f.bdeFlags != BUFF_TYPE_BDE_64)
+				break;
+			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
+			xmit_len += bde.tus.f.bdeSize;
+		}
 		/* word3 iocb=IO_TAG wqe=request_payload_len */
 		wqe->gen_req.request_payload_len = xmit_len;
 		/* word4 iocb=parameter wqe=relative_offset memcpy */
@@ -6320,7 +6720,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 					return IOCB_BUSY;
 				}
 			} else {
-			sglq = __lpfc_sli_get_sglq(phba);
+			sglq = __lpfc_sli_get_sglq(phba, piocb);
 				if (!sglq) {
 					if (!(flag & SLI_IOCB_RET_IOCB)) {
 						__lpfc_sli_ringtx_put(phba,
@@ -8033,29 +8433,66 @@ static int
 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
 {
 	uint32_t uerr_sta_hi, uerr_sta_lo;
+	uint32_t if_type, portsmphr;
+	struct lpfc_register portstat_reg;
 
-	/* For now, use the SLI4 device internal unrecoverable error
+	/*
+	 * For now, use the SLI4 device internal unrecoverable error
 	 * registers for error attention. This can be changed later.
 	 */
-	uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
-	uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
-	if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
-	    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		uerr_sta_lo = readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
+		uerr_sta_hi = readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
+		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
+		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"1423 HBA Unrecoverable error: "
+					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
+					"ue_mask_lo_reg=0x%x, "
+					"ue_mask_hi_reg=0x%x\n",
+					uerr_sta_lo, uerr_sta_hi,
+					phba->sli4_hba.ue_mask_lo,
+					phba->sli4_hba.ue_mask_hi);
+			phba->work_status[0] = uerr_sta_lo;
+			phba->work_status[1] = uerr_sta_hi;
+			phba->work_ha |= HA_ERATT;
+			phba->hba_flag |= HBA_ERATT_HANDLED;
+			return 1;
+		}
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_2:
+		portstat_reg.word0 =
+			readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
+		portsmphr = readl(phba->sli4_hba.PSMPHRregaddr);
+		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
+			phba->work_status[0] =
+				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
+			phba->work_status[1] =
+				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2885 Port Error Detected: "
+					"port status reg 0x%x, "
+					"port smphr reg 0x%x, "
+					"error 1=0x%x, error 2=0x%x\n",
+					portstat_reg.word0,
+					portsmphr,
+					phba->work_status[0],
+					phba->work_status[1]);
+			phba->work_ha |= HA_ERATT;
+			phba->hba_flag |= HBA_ERATT_HANDLED;
+			return 1;
+		}
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1423 HBA Unrecoverable error: "
-				"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
-				"ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
-				uerr_sta_lo, uerr_sta_hi,
-				phba->sli4_hba.ue_mask_lo,
-				phba->sli4_hba.ue_mask_hi);
-		phba->work_status[0] = uerr_sta_lo;
-		phba->work_status[1] = uerr_sta_hi;
-		/* Set the driver HA work bitmap */
-		phba->work_ha |= HA_ERATT;
-		/* Indicate polling handles this ERATT */
-		phba->hba_flag |= HBA_ERATT_HANDLED;
+				"2886 HBA Error Attention on unsupported "
+				"if type %d.", if_type);
 		return 1;
 	}
+
 	return 0;
 }
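
Error attention polling is now routed by the port's reported interface type: if_type 0 keeps the old unrecoverable-error register pair, if_type 2 reads the new port status register, and anything else is reported as unsupported. A schematic version of that branch is shown below, using readl on pre-mapped register addresses; the demo_* structure, the status bit, and the if_type enum values are stand-ins, not the hardware's real layout.

    #include <linux/io.h>
    #include <linux/types.h>

    enum demo_if_type { DEMO_IF_TYPE_0 = 0, DEMO_IF_TYPE_2 = 2 };

    struct demo_regs {
            enum demo_if_type if_type;
            void __iomem *uerr_lo;          /* if_type 0 */
            void __iomem *uerr_hi;          /* if_type 0 */
            void __iomem *port_status;      /* if_type 2 */
    };

    /* Made-up stand-in for the real port status error bit. */
    #define DEMO_PORT_STATUS_ERR    0x80000000u

    /* Returns true when an error attention condition is latched. */
    static bool demo_eratt_read(const struct demo_regs *regs)
    {
            u32 lo, hi, status;

            switch (regs->if_type) {
            case DEMO_IF_TYPE_0:
                    lo = readl(regs->uerr_lo);
                    hi = readl(regs->uerr_hi);
                    return lo || hi;
            case DEMO_IF_TYPE_2:
                    status = readl(regs->port_status);
                    return !!(status & DEMO_PORT_STATUS_ERR);
            default:
                    return true;    /* unsupported interface type: treat as error */
            }
    }
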
 
@@ -8110,7 +8547,7 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
 		ha_copy = lpfc_sli_eratt_read(phba);
 		break;
 	case LPFC_SLI_REV4:
-		/* Read devcie Uncoverable Error (UERR) registers */
+		/* Read device Unrecoverable Error (UERR) registers */
 		ha_copy = lpfc_sli4_eratt_read(phba);
 		break;
 	default:
@@ -10155,16 +10592,20 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
 			 length, LPFC_SLI4_MBX_EMBED);
 
 	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
-	bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request,
-		    mq->page_count);
-	bf_set(lpfc_mbx_mq_create_ext_async_evt_link, &mq_create_ext->u.request,
-	       1);
-	bf_set(lpfc_mbx_mq_create_ext_async_evt_fcfste,
+	bf_set(lpfc_mbx_mq_create_ext_num_pages,
+	       &mq_create_ext->u.request, mq->page_count);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
+	       &mq_create_ext->u.request, 1);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
 	       &mq_create_ext->u.request, 1);
 	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
 	       &mq_create_ext->u.request, 1);
-	bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
-	       cq->queue_id);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
+	       &mq_create_ext->u.request, 1);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
+	       &mq_create_ext->u.request, 1);
+	bf_set(lpfc_mq_context_cq_id,
+	       &mq_create_ext->u.request.context, cq->queue_id);
 	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
 	switch (mq->entry_count) {
 	default:
@@ -11137,7 +11578,8 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
 static int
 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
 {
-	char *rctl_names[] = FC_RCTL_NAMES_INIT;
+	/*  make rctl_names static to save stack space */
+	static char *rctl_names[] = FC_RCTL_NAMES_INIT;
 	char *type_names[] = FC_TYPE_NAMES_INIT;
 	struct fc_vft_header *fc_vft_hdr;
 
@@ -11538,6 +11980,10 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
 				"SID:x%x\n", oxid, sid);
 		return;
 	}
+	if (rxid >= phba->sli4_hba.max_cfg_param.xri_base
+		&& rxid <= (phba->sli4_hba.max_cfg_param.max_xri
+		+ phba->sli4_hba.max_cfg_param.xri_base))
+		lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
 
 	/* Allocate buffer for acc iocb */
 	ctiocb = lpfc_sli_get_iocbq(phba);
@@ -11560,6 +12006,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
 	icmd->ulpLe = 1;
 	icmd->ulpClass = CLASS3;
 	icmd->ulpContext = ndlp->nlp_rpi;
+	ctiocb->context1 = ndlp;
 
 	ctiocb->iocb_cmpl = NULL;
 	ctiocb->vport = phba->pport;
@@ -12129,42 +12576,37 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
 
 /**
  * lpfc_sli4_init_vpi - Initialize a vpi with the port
- * @phba: pointer to lpfc hba data structure.
- * @vpi: vpi value to activate with the port.
+ * @vport: Pointer to the vport for which the vpi is being initialized
  *
- * This routine is invoked to activate a vpi with the
- * port when the host intends to use vports with a
- * nonzero vpi.
+ * This routine is invoked to activate a vpi with the port.
  *
  * Returns:
  *    0 success
  *    -Evalue otherwise
  **/
 int
-lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
+lpfc_sli4_init_vpi(struct lpfc_vport *vport)
 {
 	LPFC_MBOXQ_t *mboxq;
 	int rc = 0;
 	int retval = MBX_SUCCESS;
 	uint32_t mbox_tmo;
-
-	if (vpi == 0)
-		return -EINVAL;
+	struct lpfc_hba *phba = vport->phba;
 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mboxq)
 		return -ENOMEM;
-	lpfc_init_vpi(phba, mboxq, vpi);
+	lpfc_init_vpi(phba, mboxq, vport->vpi);
 	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
 	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
 	if (rc != MBX_SUCCESS) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
 				"2022 INIT VPI Mailbox failed "
 				"status %d, mbxStatus x%x\n", rc,
 				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
 		retval = -EIO;
 	}
 	if (rc != MBX_TIMEOUT)
-		mempool_free(mboxq, phba->mbox_mem_pool);
+		mempool_free(mboxq, vport->phba->mbox_mem_pool);
 
 	return retval;
 }
@@ -12854,6 +13296,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
 	struct lpfc_nodelist *act_mbx_ndlp = NULL;
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 	LIST_HEAD(mbox_cmd_list);
+	uint8_t restart_loop;
 
 	/* Clean up internally queued mailbox commands with the vport */
 	spin_lock_irq(&phba->hbalock);
@@ -12882,15 +13325,44 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
 			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
 		}
 	}
+	/* Cleanup any mailbox completions which are not yet processed */
+	do {
+		restart_loop = 0;
+		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
+			/*
+			 * If this mailbox is already processed or is
+			 * for another vport, ignore it.
+			 */
+			if ((mb->vport != vport) ||
+				(mb->mbox_flag & LPFC_MBX_IMED_UNREG))
+				continue;
+
+			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
+				(mb->u.mb.mbxCommand != MBX_REG_VPI))
+				continue;
+
+			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+				ndlp = (struct lpfc_nodelist *)mb->context2;
+				/* Unregister the RPI when mailbox complete */
+				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
+				restart_loop = 1;
+				spin_unlock_irq(&phba->hbalock);
+				spin_lock(shost->host_lock);
+				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+				spin_unlock(shost->host_lock);
+				spin_lock_irq(&phba->hbalock);
+				break;
+			}
+		}
+	} while (restart_loop);
+
 	spin_unlock_irq(&phba->hbalock);
 
 	/* Release the cleaned-up mailbox commands */
 	while (!list_empty(&mbox_cmd_list)) {
 		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
 		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
-			if (phba->sli_rev == LPFC_SLI_REV4)
-				__lpfc_sli4_free_rpi(phba,
-						mb->u.mb.un.varRegLogin.rpi);
 			mp = (struct lpfc_dmabuf *) (mb->context1);
 			if (mp) {
 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -12948,12 +13420,13 @@ lpfc_drain_txq(struct lpfc_hba *phba)
 	while (pring->txq_cnt) {
 		spin_lock_irqsave(&phba->hbalock, iflags);
 
-		sglq = __lpfc_sli_get_sglq(phba);
+		piocbq = lpfc_sli_ringtx_get(phba, pring);
+		sglq = __lpfc_sli_get_sglq(phba, piocbq);
 		if (!sglq) {
+			__lpfc_sli_ringtx_put(phba, pring, piocbq);
 			spin_unlock_irqrestore(&phba->hbalock, iflags);
 			break;
 		} else {
-			piocbq = lpfc_sli_ringtx_get(phba, pring);
 			if (!piocbq) {
 				/* The txq_cnt out of sync. This should
 				 * never happen

+ 4 - 1
drivers/scsi/lpfc/lpfc_sli.h

@@ -34,9 +34,11 @@ struct lpfc_cq_event {
 	union {
 		struct lpfc_mcqe		mcqe_cmpl;
 		struct lpfc_acqe_link		acqe_link;
-		struct lpfc_acqe_fcoe		acqe_fcoe;
+		struct lpfc_acqe_fip		acqe_fip;
 		struct lpfc_acqe_dcbx		acqe_dcbx;
 		struct lpfc_acqe_grp5		acqe_grp5;
+		struct lpfc_acqe_fc_la		acqe_fc;
+		struct lpfc_acqe_sli		acqe_sli;
 		struct lpfc_rcqe		rcqe_cmpl;
 		struct sli4_wcqe_xri_aborted	wcqe_axri;
 		struct lpfc_wcqe_complete	wcqe_cmpl;
@@ -82,6 +84,7 @@ struct lpfc_iocbq {
 		struct lpfc_iocbq    *rsp_iocb;
 		struct lpfcMboxq     *mbox;
 		struct lpfc_nodelist *ndlp;
+		struct lpfc_node_rrq *rrq;
 	} context_un;
 
 	void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,

+ 38 - 20
drivers/scsi/lpfc/lpfc_sli4.h

@@ -137,9 +137,11 @@ struct lpfc_sli4_link {
 	uint8_t speed;
 	uint8_t duplex;
 	uint8_t status;
-	uint8_t physical;
+	uint8_t type;
+	uint8_t number;
 	uint8_t fault;
 	uint16_t logical_speed;
+	uint16_t topology;
 };
 
 struct lpfc_fcf_rec {
@@ -367,23 +369,39 @@ struct lpfc_sli4_hba {
 					     PCI BAR1, control registers */
 	void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
 					     PCI BAR2, doorbell registers */
-	/* BAR0 PCI config space register memory map */
-	void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */
-	void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
-	void __iomem *UEMASKLOregaddr; /* Address to UE_MASK_LO register */
-	void __iomem *UEMASKHIregaddr; /* Address to UE_MASK_HI register */
-	void __iomem *SLIINTFregaddr; /* Address to SLI_INTF register */
-	/* BAR1 FCoE function CSR register memory map */
-	void __iomem *STAregaddr;    /* Address to HST_STATE register */
-	void __iomem *ISRregaddr;    /* Address to HST_ISR register */
-	void __iomem *IMRregaddr;    /* Address to HST_IMR register */
-	void __iomem *ISCRregaddr;   /* Address to HST_ISCR register */
-	/* BAR2 VF-0 doorbell register memory map */
-	void __iomem *RQDBregaddr;   /* Address to RQ_DOORBELL register */
-	void __iomem *WQDBregaddr;   /* Address to WQ_DOORBELL register */
-	void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */
-	void __iomem *MQDBregaddr;   /* Address to MQ_DOORBELL register */
-	void __iomem *BMBXregaddr;   /* Address to BootStrap MBX register */
+	union {
+		struct {
+			/* IF Type 0, BAR 0 PCI cfg space reg mem map */
+			void __iomem *UERRLOregaddr;
+			void __iomem *UERRHIregaddr;
+			void __iomem *UEMASKLOregaddr;
+			void __iomem *UEMASKHIregaddr;
+		} if_type0;
+		struct {
+			/* IF Type 2, BAR 0 PCI cfg space reg mem map. */
+			void __iomem *STATUSregaddr;
+			void __iomem *CTRLregaddr;
+			void __iomem *ERR1regaddr;
+			void __iomem *ERR2regaddr;
+		} if_type2;
+	} u;
+
+	/* IF type 0, BAR1 and if type 2, Bar 0 CSR register memory map */
+	void __iomem *PSMPHRregaddr;
+
+	/* Well-known SLI INTF register memory map. */
+	void __iomem *SLIINTFregaddr;
+
+	/* IF type 0, BAR 1 function CSR register memory map */
+	void __iomem *ISRregaddr;	/* HST_ISR register */
+	void __iomem *IMRregaddr;	/* HST_IMR register */
+	void __iomem *ISCRregaddr;	/* HST_ISCR register */
+	/* IF type 0, BAR 0 and if type 2, BAR 0 doorbell register memory map */
+	void __iomem *RQDBregaddr;	/* RQ_DOORBELL register */
+	void __iomem *WQDBregaddr;	/* WQ_DOORBELL register */
+	void __iomem *EQCQDBregaddr;	/* EQCQ_DOORBELL register */
+	void __iomem *MQDBregaddr;	/* MQ_DOORBELL register */
+	void __iomem *BMBXregaddr;	/* BootStrap MBX register */
 
 	uint32_t ue_mask_lo;
 	uint32_t ue_mask_hi;
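
With the register addresses split into an if_type0/if_type2 union, the mapping step only has to populate the branch that matches the detected interface type, while registers shared by both layouts (doorbells, the semaphore, SLI_INTF) keep single fields. A rough sketch of populating such a union from an ioremap'd BAR base follows; the offsets and the demo_* structure are placeholders, not the hardware's real register map.

    #include <linux/io.h>

    /* Placeholder offsets; the real ones come from the hardware spec. */
    #define DEMO_UERR_LO_OFF        0x00
    #define DEMO_UERR_HI_OFF        0x04
    #define DEMO_STATUS_OFF         0x08
    #define DEMO_ERR1_OFF           0x0C
    #define DEMO_ERR2_OFF           0x10
    #define DEMO_RQ_DB_OFF          0xA0

    struct demo_reg_map {
            int if_type;
            union {
                    struct {
                            void __iomem *uerr_lo;
                            void __iomem *uerr_hi;
                    } if_type0;
                    struct {
                            void __iomem *status;
                            void __iomem *err1;
                            void __iomem *err2;
                    } if_type2;
            } u;
            void __iomem *rq_doorbell;      /* shared across interface types */
    };

    static void demo_map_regs(struct demo_reg_map *map, void __iomem *bar0)
    {
            if (map->if_type == 0) {
                    map->u.if_type0.uerr_lo = bar0 + DEMO_UERR_LO_OFF;
                    map->u.if_type0.uerr_hi = bar0 + DEMO_UERR_HI_OFF;
            } else {
                    map->u.if_type2.status = bar0 + DEMO_STATUS_OFF;
                    map->u.if_type2.err1   = bar0 + DEMO_ERR1_OFF;
                    map->u.if_type2.err2   = bar0 + DEMO_ERR2_OFF;
            }
            map->rq_doorbell = bar0 + DEMO_RQ_DB_OFF;
    }
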
@@ -466,6 +484,7 @@ struct lpfc_sglq {
 	struct list_head clist;
 	enum lpfc_sge_type buff_type; /* is this a scsi sgl */
 	enum lpfc_sgl_state state;
+	struct lpfc_nodelist *ndlp; /* ndlp associated with IO */
 	uint16_t iotag;         /* pre-assigned IO tag */
 	uint16_t sli4_xritag;   /* pre-assigned XRI, (OXID) tag. */
 	struct sli4_sge *sgl;	/* pre-assigned SGL */
@@ -532,7 +551,6 @@ int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
 struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
 void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
 int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
-void __lpfc_sli4_free_rpi(struct lpfc_hba *, int);
 void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
 void lpfc_sli4_remove_rpis(struct lpfc_hba *);
 void lpfc_sli4_async_event_proc(struct lpfc_hba *);
@@ -548,7 +566,7 @@ int lpfc_sli4_brdreset(struct lpfc_hba *);
 int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
 void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
 int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
-int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_init_vpi(struct lpfc_vport *);
 uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
 uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
 void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);

Some files were not shown because too many files changed in this diff