
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

SCSI updates for post 3.2 merge window

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (67 commits)
  [SCSI] lpfc 8.3.28: Update driver version to 8.3.28
  [SCSI] lpfc 8.3.28: Add Loopback support for SLI4 adapters
  [SCSI] lpfc 8.3.28: Critical Miscellaneous fixes
  [SCSI] Lpfc 8.3.28: FC and SCSI Discovery Fixes
  [SCSI] lpfc 8.3.28: Add support for ABTS failure handling
  [SCSI] lpfc 8.3.28: SLI fixes and added SLI4 support
  [SCSI] lpfc 8.3.28: Miscellaneous fixes in sysfs and mgmt interfaces
  [SCSI] mpt2sas: Removed redundant calling of _scsih_probe_devices() from _scsih_probe
  [SCSI] mac_scsi: Remove obsolete IRQ_FLG_* users
  [SCSI] qla4xxx: Update driver version to 5.02.00-k10
  [SCSI] qla4xxx: check for FW alive before calling chip_reset
  [SCSI] qla4xxx: Fix qla4xxx_dump_buffer to dump buffer correctly
  [SCSI] qla4xxx: Fix the IDC locking mechanism
  [SCSI] qla4xxx: Wait for disable_acb before doing set_acb
  [SCSI] qla4xxx: Don't recover adapter if device state is FAILED
  [SCSI] qla4xxx: fix call trace on rmmod with ql4xdontresethba=1
  [SCSI] qla4xxx: Fix CPU lockups when ql4xdontresethba set
  [SCSI] qla4xxx: Perform context resets in case of context failures.
  [SCSI] iscsi class: export pid of process that created
  [SCSI] mpt2sas: Remove unused duplicate diag_buffer_enable param
  ...
Linus Torvalds 13 years ago
parent
commit
d04baa157d
70 changed files with 3511 additions and 2576 deletions
  1. drivers/message/fusion/lsi/mpi_cnfg.h  +1 -0
  2. drivers/message/fusion/mptbase.c  +4 -3
  3. drivers/message/fusion/mptbase.h  +1 -0
  4. drivers/message/fusion/mptsas.c  +2 -0
  5. drivers/scsi/be2iscsi/be_main.c  +3 -2
  6. drivers/scsi/bfa/bfa_defs.h  +2 -2
  7. drivers/scsi/bfa/bfa_defs_svc.h  +220 -217
  8. drivers/scsi/bfa/bfa_ioc.c  +3 -3
  9. drivers/scsi/bfa/bfad_debugfs.c  +1 -2
 10. drivers/scsi/device_handler/scsi_dh.c  +0 -58
 11. drivers/scsi/device_handler/scsi_dh_emc.c  +19 -0
 12. drivers/scsi/device_handler/scsi_dh_hp_sw.c  +19 -0
 13. drivers/scsi/device_handler/scsi_dh_rdac.c  +19 -0
 14. drivers/scsi/hpsa.c  +4 -2
 15. drivers/scsi/lpfc/lpfc.h  +0 -14
 16. drivers/scsi/lpfc/lpfc_attr.c  +151 -285
 17. drivers/scsi/lpfc/lpfc_bsg.c  +333 -99
 18. drivers/scsi/lpfc/lpfc_bsg.h  +2 -1
 19. drivers/scsi/lpfc/lpfc_compat.h  +3 -2
 20. drivers/scsi/lpfc/lpfc_crtn.h  +11 -2
 21. drivers/scsi/lpfc/lpfc_debugfs.c  +113 -59
 22. drivers/scsi/lpfc/lpfc_els.c  +124 -90
 23. drivers/scsi/lpfc/lpfc_hbadisc.c  +112 -45
 24. drivers/scsi/lpfc/lpfc_hw.h  +16 -5
 25. drivers/scsi/lpfc/lpfc_hw4.h  +9 -2
 26. drivers/scsi/lpfc/lpfc_init.c  +228 -110
 27. drivers/scsi/lpfc/lpfc_mbox.c  +18 -7
 28. drivers/scsi/lpfc/lpfc_mem.c  +2 -2
 29. drivers/scsi/lpfc/lpfc_nportdisc.c  +9 -1
 30. drivers/scsi/lpfc/lpfc_scsi.c  +14 -3
 31. drivers/scsi/lpfc/lpfc_scsi.h  +3 -2
 32. drivers/scsi/lpfc/lpfc_sli.c  +390 -177
 33. drivers/scsi/lpfc/lpfc_sli4.h  +10 -1
 34. drivers/scsi/lpfc/lpfc_version.h  +1 -1
 35. drivers/scsi/lpfc/lpfc_vport.c  +3 -3
 36. drivers/scsi/mac_scsi.c  +1 -2
 37. drivers/scsi/mpt2sas/mpi/mpi2.h  +7 -3
 38. drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h  +19 -9
 39. drivers/scsi/mpt2sas/mpi/mpi2_ioc.h  +29 -20
 40. drivers/scsi/mpt2sas/mpi/mpi2_raid.h  +60 -7
 41. drivers/scsi/mpt2sas/mpi/mpi2_tool.h  +7 -2
 42. drivers/scsi/mpt2sas/mpt2sas_base.c  +118 -87
 43. drivers/scsi/mpt2sas/mpt2sas_base.h  +22 -4
 44. drivers/scsi/mpt2sas/mpt2sas_ctl.c  +4 -4
 45. drivers/scsi/mpt2sas/mpt2sas_scsih.c  +103 -65
 46. drivers/scsi/mpt2sas/mpt2sas_transport.c  +5 -4
 47. drivers/scsi/qla2xxx/qla_attr.c  +5 -5
 48. drivers/scsi/qla2xxx/qla_bsg.c  +18 -2
 49. drivers/scsi/qla2xxx/qla_dbg.c  +158 -152
 50. drivers/scsi/qla2xxx/qla_dbg.h  +9 -10
 51. drivers/scsi/qla2xxx/qla_def.h  +2 -4
 52. drivers/scsi/qla2xxx/qla_gbl.h  +1 -1
 53. drivers/scsi/qla2xxx/qla_gs.c  +1 -1
 54. drivers/scsi/qla2xxx/qla_init.c  +54 -41
 55. drivers/scsi/qla2xxx/qla_iocb.c  +537 -104
 56. drivers/scsi/qla2xxx/qla_isr.c  +144 -109
 57. drivers/scsi/qla2xxx/qla_mbx.c  +1 -1
 58. drivers/scsi/qla2xxx/qla_nx.c  +13 -497
 59. drivers/scsi/qla2xxx/qla_os.c  +175 -188
 60. drivers/scsi/qla2xxx/qla_sup.c  +3 -2
 61. drivers/scsi/qla4xxx/ql4_dbg.c  +3 -3
 62. drivers/scsi/qla4xxx/ql4_def.h  +1 -0
 63. drivers/scsi/qla4xxx/ql4_isr.c  +19 -11
 64. drivers/scsi/qla4xxx/ql4_nx.c  +15 -8
 65. drivers/scsi/qla4xxx/ql4_os.c  +102 -25
 66. drivers/scsi/qla4xxx/ql4_version.h  +1 -1
 67. drivers/scsi/scsi_priv.h  +0 -1
 68. drivers/scsi/scsi_transport_iscsi.c  +19 -2
 69. include/scsi/scsi_device.h  +0 -1
 70. include/scsi/scsi_transport_iscsi.h  +5 -0

+ 1 - 0
drivers/message/fusion/lsi/mpi_cnfg.h

@@ -583,6 +583,7 @@ typedef struct _MSG_CONFIG_REPLY
 #define MPI_MANUFACTPAGE_DEVID_SAS1066E             (0x005A)
 #define MPI_MANUFACTPAGE_DEVID_SAS1068              (0x0054)
 #define MPI_MANUFACTPAGE_DEVID_SAS1068E             (0x0058)
+#define MPI_MANUFACTPAGE_DEVID_SAS1068_820XELP      (0x0059)
 #define MPI_MANUFACTPAGE_DEVID_SAS1078              (0x0062)
 
 

+ 4 - 3
drivers/message/fusion/mptbase.c

@@ -115,7 +115,8 @@ module_param(mpt_fwfault_debug, int, 0600);
 MODULE_PARM_DESC(mpt_fwfault_debug,
 		 "Enable detection of Firmware fault and halt Firmware on fault - (default=0)");
 
-static char	MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS][50];
+static char	MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS]
+				[MPT_MAX_CALLBACKNAME_LEN+1];
 
 #ifdef MFCNT
 static int mfcounter = 0;
@@ -717,8 +718,8 @@ mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass, char *func_name)
 			MptDriverClass[cb_idx] = dclass;
 			MptEvHandlers[cb_idx] = NULL;
 			last_drv_idx = cb_idx;
-			memcpy(MptCallbacksName[cb_idx], func_name,
-			    strlen(func_name) > 50 ? 50 : strlen(func_name));
+			strlcpy(MptCallbacksName[cb_idx], func_name,
+				MPT_MAX_CALLBACKNAME_LEN+1);
 			break;
 		}
 	}

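Note on the mptbase.c hunk above: the old bounded memcpy() copied at most 50 bytes into a 50-byte buffer and never wrote a NUL terminator itself, relying on the static array's zero fill; strlcpy() always terminates within the buffer. A minimal sketch of the contrast (illustrative names, not the exact driver code):

#include <linux/string.h>	/* memcpy(), strlcpy() */

#define NAME_LEN 49	/* mirrors the new MPT_MAX_CALLBACKNAME_LEN */

/* Illustrative sketch contrasting the old and new copy styles. */
static void copy_callback_name(char *dst, const char *name)
{
	/*
	 * Old style: clamp the length, but never write a NUL --
	 * a name of 50+ bytes fills the buffer with no terminator:
	 *
	 *   memcpy(dst, name, strlen(name) > 50 ? 50 : strlen(name));
	 *
	 * New style: copy at most NAME_LEN bytes and always
	 * NUL-terminate within the buffer.
	 */
	strlcpy(dst, name, NAME_LEN + 1);
}
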
+ 1 - 0
drivers/message/fusion/mptbase.h

@@ -89,6 +89,7 @@
  */
 #define MPT_MAX_ADAPTERS		18
 #define MPT_MAX_PROTOCOL_DRIVERS	16
+#define MPT_MAX_CALLBACKNAME_LEN	49
 #define MPT_MAX_BUS			1	/* Do not change */
 #define MPT_MAX_FC_DEVICES		255
 #define MPT_MAX_SCSI_DEVICES		16

+ 2 - 0
drivers/message/fusion/mptsas.c

@@ -5376,6 +5376,8 @@ static struct pci_device_id mptsas_pci_table[] = {
 		PCI_ANY_ID, PCI_ANY_ID },
 	{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1078,
 		PCI_ANY_ID, PCI_ANY_ID },
+	{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1068_820XELP,
+		PCI_ANY_ID, PCI_ANY_ID },
 	{0}	/* Terminating entry */
 };
 MODULE_DEVICE_TABLE(pci, mptsas_pci_table);

+ 3 - 2
drivers/scsi/be2iscsi/be_main.c

@@ -1105,7 +1105,6 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
 	struct be_status_bhs *sts_bhs =
 				(struct be_status_bhs *)io_task->cmd_bhs;
 	struct iscsi_conn *conn = beiscsi_conn->conn;
-	unsigned int sense_len;
 	unsigned char *sense;
 	u32 resid = 0, exp_cmdsn, max_cmdsn;
 	u8 rsp, status, flags;
@@ -1153,9 +1152,11 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
 	}
 
 	if (status == SAM_STAT_CHECK_CONDITION) {
+		u16 sense_len;
 		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
+
 		sense = sts_bhs->sense_info + sizeof(unsigned short);
-		sense_len =  cpu_to_be16(*slen);
+		sense_len = be16_to_cpu(*slen);
 		memcpy(task->sc->sense_buffer, sense,
 		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
 	}

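Note on the be_main.c hunk above: the sense length in the status BHS arrives in wire (big-endian) order, so the driver must convert from big-endian to CPU order. be16_to_cpu() states that direction and satisfies sparse's __be16/u16 typing; the old cpu_to_be16() declared the opposite conversion, even though both helpers byte-swap identically on any given host. A hedged sketch of the corrected pattern (read_sense_len is an illustrative name):

#include <linux/types.h>	/* u8, u16, __be16 */
#include <linux/string.h>	/* memcpy() */
#include <asm/byteorder.h>	/* be16_to_cpu() */

/* Sketch: read a 16-bit big-endian wire field into host order. */
static u16 read_sense_len(const u8 *sense_info)
{
	__be16 wire_len;

	memcpy(&wire_len, sense_info, sizeof(wire_len));
	return be16_to_cpu(wire_len);
}
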
+ 2 - 2
drivers/scsi/bfa/bfa_defs.h

@@ -902,7 +902,7 @@ struct sfp_mem_s {
 union sfp_xcvr_e10g_code_u {
 	u8		b;
 	struct {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 		u8	e10g_unall:1;   /* 10G Ethernet compliance */
 		u8	e10g_lrm:1;
 		u8	e10g_lr:1;
@@ -982,7 +982,7 @@ union sfp_xcvr_fc2_code_u {
 union sfp_xcvr_fc3_code_u {
 	u8		b;
 	struct {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 		u8	rsv4:1;
 		u8	mb800:1;    /* 800 Mbytes/sec */
 		u8	mb1600:1;   /* 1600 Mbytes/sec */

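Note on the bfa_defs.h hunks above: in kernel builds, <asm/byteorder.h> defines exactly one of __BIG_ENDIAN or __LITTLE_ENDIAN, while __BIGENDIAN (no underscore) is not a symbol the kernel headers provide, so the big-endian bitfield layout was presumably never selected before this fix. A minimal sketch of the guarded-bitfield pattern, with illustrative field names:

#include <linux/types.h>	/* u8 */
#include <asm/byteorder.h>	/* defines __BIG_ENDIAN on BE kernels */

/* Sketch: byte whose bitfield order depends on host endianness. */
union example_code_u {
	u8 b;			/* raw byte as it appears on the wire */
	struct {
#ifdef __BIG_ENDIAN		/* was "#ifdef __BIGENDIAN": dead branch */
		u8 msb_flag:1;	/* illustrative names */
		u8 rest:7;
#else
		u8 rest:7;
		u8 msb_flag:1;
#endif
	} r;
};
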
+ 220 - 217
drivers/scsi/bfa/bfa_defs_svc.h

@@ -34,22 +34,22 @@
 struct bfa_iocfc_intr_attr_s {
 	u8		coalesce;	/*  enable/disable coalescing */
 	u8		rsvd[3];
-	__be16	latency;	/*  latency in microseconds   */
-	__be16	delay;		/*  delay in microseconds     */
+	__be16		latency;	/*  latency in microseconds   */
+	__be16		delay;		/*  delay in microseconds     */
 };
 
 /*
  * IOC firmware configuraton
  */
 struct bfa_iocfc_fwcfg_s {
-	u16        num_fabrics;	/*  number of fabrics		*/
-	u16        num_lports;	/*  number of local lports	*/
-	u16        num_rports;	/*  number of remote ports	*/
-	u16        num_ioim_reqs;	/*  number of IO reqs		*/
-	u16        num_tskim_reqs;	/*  task management requests	*/
-	u16	   num_fwtio_reqs;	/* number of TM IO reqs in FW */
-	u16        num_fcxp_reqs;	/*  unassisted FC exchanges	*/
-	u16        num_uf_bufs;	/*  unsolicited recv buffers	*/
+	u16		num_fabrics;	/*  number of fabrics		*/
+	u16		num_lports;	/*  number of local lports	*/
+	u16		num_rports;	/*  number of remote ports	*/
+	u16		num_ioim_reqs;	/*  number of IO reqs		*/
+	u16		num_tskim_reqs;	/*  task management requests	*/
+	u16		num_fwtio_reqs;	/* number of TM IO reqs in FW   */
+	u16		num_fcxp_reqs;	/*  unassisted FC exchanges	*/
+	u16		num_uf_bufs;	/*  unsolicited recv buffers	*/
 	u8		num_cqs;
 	u8		fw_tick_res;	/*  FW clock resolution in ms */
 	u8		rsvd[2];
@@ -57,19 +57,19 @@ struct bfa_iocfc_fwcfg_s {
 #pragma pack()
 
 struct bfa_iocfc_drvcfg_s {
-	u16        num_reqq_elems;	/*  number of req queue elements */
-	u16        num_rspq_elems;	/*  number of rsp queue elements */
-	u16        num_sgpgs;	/*  number of total SG pages	  */
-	u16        num_sboot_tgts;	/*  number of SAN boot targets	  */
-	u16        num_sboot_luns;	/*  number of SAN boot luns	  */
-	u16	    ioc_recover;	/*  IOC recovery mode		  */
-	u16	    min_cfg;	/*  minimum configuration	  */
-	u16        path_tov;	/*  device path timeout	  */
-	u16		num_tio_reqs;   /*!< number of TM IO reqs	*/
+	u16		num_reqq_elems;	/*  number of req queue elements */
+	u16		num_rspq_elems;	/*  number of rsp queue elements */
+	u16		num_sgpgs;	/*  number of total SG pages	 */
+	u16		num_sboot_tgts;	/*  number of SAN boot targets	 */
+	u16		num_sboot_luns;	/*  number of SAN boot luns	 */
+	u16		ioc_recover;	/*  IOC recovery mode		 */
+	u16		min_cfg;	/*  minimum configuration	 */
+	u16		path_tov;	/*  device path timeout		*/
+	u16		num_tio_reqs;	/* number of TM IO reqs	*/
 	u8		port_mode;
 	u8		rsvd_a;
-	bfa_boolean_t   delay_comp; /*  delay completion of
-							failed inflight IOs */
+	bfa_boolean_t	delay_comp;	/* delay completion of failed
+					 * inflight IOs */
 	u16		num_ttsk_reqs;	 /* TM task management requests */
 	u32		rsvd;
 };
@@ -101,8 +101,8 @@ struct bfa_fw_ioim_stats_s {
 	u32	fw_frm_drop;		/*  f/w drop the frame */
 
 	u32	rec_timeout;		/*  FW rec timed out */
-	u32	error_rec;			/*  FW sending rec on
-							* an error condition*/
+	u32	error_rec;		/*  FW sending rec on
+					 *  an error condition*/
 	u32	wait_for_si;		/*  FW wait for SI */
 	u32	rec_rsp_inval;		/*  REC rsp invalid */
 	u32	seqr_io_abort;		/*  target does not know cmd so abort */
@@ -124,9 +124,9 @@ struct bfa_fw_ioim_stats_s {
 	u32	unexp_fcp_rsp;		/*  fcp response in wrong state */
 
 	u32	fcp_rsp_under_run;	/*  fcp rsp IO underrun */
-	u32        fcp_rsp_under_run_wr;   /*  fcp rsp IO underrun for write */
+	u32     fcp_rsp_under_run_wr;   /*  fcp rsp IO underrun for write */
 	u32	fcp_rsp_under_run_err;	/*  fcp rsp IO underrun error */
-	u32        fcp_rsp_resid_inval;    /*  invalid residue */
+	u32     fcp_rsp_resid_inval;    /*  invalid residue */
 	u32	fcp_rsp_over_run;	/*  fcp rsp IO overrun */
 	u32	fcp_rsp_over_run_err;	/*  fcp rsp IO overrun error */
 	u32	fcp_rsp_proto_err;	/*  protocol error in fcp rsp */
@@ -142,21 +142,20 @@ struct bfa_fw_ioim_stats_s {
 	u32	ioh_hit_class2_event;	/*  IOH hit class2 */
 	u32	ioh_miss_other_event;	/*  IOH miss other */
 	u32	ioh_seq_cnt_err_event;	/*  IOH seq cnt error */
-	u32	ioh_len_err_event;		/*  IOH len error - fcp_dl !=
-							* bytes xfered */
+	u32	ioh_len_err_event;	/*  IOH len error - fcp_dl !=
+					 *  bytes xfered */
 	u32	ioh_seq_len_err_event;	/*  IOH seq len error */
 	u32	ioh_data_oor_event;	/*  Data out of range */
 	u32	ioh_ro_ooo_event;	/*  Relative offset out of range */
 	u32	ioh_cpu_owned_event;	/*  IOH hit -iost owned by f/w */
 	u32	ioh_unexp_frame_event;	/*  unexpected frame received
-						 *   count */
+					 *  count */
 	u32	ioh_err_int;		/*  IOH error int during data-phase
-						 *   for scsi write
-						 */
+					 *  for scsi write */
 };
 
 struct bfa_fw_tio_stats_s {
-	u32	tio_conf_proc;	/* TIO CONF processed */
+	u32	tio_conf_proc;	    /* TIO CONF processed */
 	u32	tio_conf_drop;      /* TIO CONF dropped */
 	u32	tio_cleanup_req;    /* TIO cleanup requested */
 	u32	tio_cleanup_comp;   /* TIO cleanup completed */
@@ -164,34 +163,36 @@ struct bfa_fw_tio_stats_s {
 	u32	tio_abort_rsp_comp; /* TIO abort rsp completed */
 	u32	tio_abts_req;       /* TIO ABTS requested */
 	u32	tio_abts_ack;       /* TIO ABTS ack-ed */
-	u32	tio_abts_ack_nocomp; /* TIO ABTS ack-ed but not completed */
+	u32	tio_abts_ack_nocomp;/* TIO ABTS ack-ed but not completed */
 	u32	tio_abts_tmo;       /* TIO ABTS timeout */
 	u32	tio_snsdata_dma;    /* TIO sense data DMA */
-	u32	tio_rxwchan_wait; /* TIO waiting for RX wait channel */
-	u32	tio_rxwchan_avail; /* TIO RX wait channel available */
+	u32	tio_rxwchan_wait;   /* TIO waiting for RX wait channel */
+	u32	tio_rxwchan_avail;  /* TIO RX wait channel available */
 	u32	tio_hit_bls;        /* TIO IOH BLS event */
 	u32	tio_uf_recv;        /* TIO received UF */
-	u32	tio_rd_invalid_sm; /* TIO read reqst in wrong state machine */
-	u32	tio_wr_invalid_sm;/* TIO write reqst in wrong state machine */
+	u32	tio_rd_invalid_sm;  /* TIO read reqst in wrong state machine */
+	u32	tio_wr_invalid_sm;  /* TIO write reqst in wrong state machine */
 
-	u32	ds_rxwchan_wait; /* DS waiting for RX wait channel */
-	u32	ds_rxwchan_avail; /* DS RX wait channel available */
+	u32	ds_rxwchan_wait;    /* DS waiting for RX wait channel */
+	u32	ds_rxwchan_avail;   /* DS RX wait channel available */
 	u32	ds_unaligned_rd;    /* DS unaligned read */
-	u32	ds_rdcomp_invalid_sm; /* DS read completed in wrong state machine */
-	u32	ds_wrcomp_invalid_sm; /* DS write completed in wrong state machine */
+	u32	ds_rdcomp_invalid_sm; /* DS read completed in wrong state
+				       * machine */
+	u32	ds_wrcomp_invalid_sm; /* DS write completed in wrong state
+				       * machine */
 	u32	ds_flush_req;       /* DS flush requested */
 	u32	ds_flush_comp;      /* DS flush completed */
 	u32	ds_xfrdy_exp;       /* DS XFER_RDY expired */
 	u32	ds_seq_cnt_err;     /* DS seq cnt error */
 	u32	ds_seq_len_err;     /* DS seq len error */
 	u32	ds_data_oor;        /* DS data out of order */
-	u32	ds_hit_bls;     /* DS hit BLS */
+	u32	ds_hit_bls;	    /* DS hit BLS */
 	u32	ds_edtov_timer_exp; /* DS edtov expired */
 	u32	ds_cpu_owned;       /* DS cpu owned */
 	u32	ds_hit_class2;      /* DS hit class2 */
 	u32	ds_length_err;      /* DS length error */
 	u32	ds_ro_ooo_err;      /* DS relative offset out-of-order error */
-	u32	ds_rectov_timer_exp;    /* DS rectov expired */
+	u32	ds_rectov_timer_exp;/* DS rectov expired */
 	u32	ds_unexp_fr_err;    /* DS unexp frame error */
 };
 
@@ -208,119 +209,119 @@ struct bfa_fw_io_stats_s {
  */
 
 struct bfa_fw_port_fpg_stats_s {
-    u32    intr_evt;
-    u32    intr;
-    u32    intr_excess;
-    u32    intr_cause0;
-    u32    intr_other;
-    u32    intr_other_ign;
-    u32    sig_lost;
-    u32    sig_regained;
-    u32    sync_lost;
-    u32    sync_to;
-    u32    sync_regained;
-    u32    div2_overflow;
-    u32    div2_underflow;
-    u32    efifo_overflow;
-    u32    efifo_underflow;
-    u32    idle_rx;
-    u32    lrr_rx;
-    u32    lr_rx;
-    u32    ols_rx;
-    u32    nos_rx;
-    u32    lip_rx;
-    u32    arbf0_rx;
-    u32    arb_rx;
-    u32    mrk_rx;
-    u32    const_mrk_rx;
-    u32    prim_unknown;
+	u32    intr_evt;
+	u32    intr;
+	u32    intr_excess;
+	u32    intr_cause0;
+	u32    intr_other;
+	u32    intr_other_ign;
+	u32    sig_lost;
+	u32    sig_regained;
+	u32    sync_lost;
+	u32    sync_to;
+	u32    sync_regained;
+	u32    div2_overflow;
+	u32    div2_underflow;
+	u32    efifo_overflow;
+	u32    efifo_underflow;
+	u32    idle_rx;
+	u32    lrr_rx;
+	u32    lr_rx;
+	u32    ols_rx;
+	u32    nos_rx;
+	u32    lip_rx;
+	u32    arbf0_rx;
+	u32    arb_rx;
+	u32    mrk_rx;
+	u32    const_mrk_rx;
+	u32    prim_unknown;
 };
 
 
 struct bfa_fw_port_lksm_stats_s {
-    u32    hwsm_success;       /*  hwsm state machine success          */
-    u32    hwsm_fails;         /*  hwsm fails                          */
-    u32    hwsm_wdtov;         /*  hwsm timed out                      */
-    u32    swsm_success;       /*  swsm success                        */
-    u32    swsm_fails;         /*  swsm fails                          */
-    u32    swsm_wdtov;         /*  swsm timed out                      */
-    u32    busybufs;           /*  link init failed due to busybuf     */
-    u32    buf_waits;          /*  bufwait state entries               */
-    u32    link_fails;         /*  link failures                       */
-    u32    psp_errors;         /*  primitive sequence protocol errors  */
-    u32    lr_unexp;           /*  No. of times LR rx-ed unexpectedly  */
-    u32    lrr_unexp;          /*  No. of times LRR rx-ed unexpectedly */
-    u32    lr_tx;              /*  No. of times LR tx started          */
-    u32    lrr_tx;             /*  No. of times LRR tx started         */
-    u32    ols_tx;             /*  No. of times OLS tx started         */
-    u32    nos_tx;             /*  No. of times NOS tx started         */
-    u32    hwsm_lrr_rx;        /*  No. of times LRR rx-ed by HWSM      */
-    u32    hwsm_lr_rx;         /*  No. of times LR rx-ed by HWSM      */
-	u32	bbsc_lr;	/* LKSM LR tx for credit recovery	*/
+	u32    hwsm_success;       /*  hwsm state machine success          */
+	u32    hwsm_fails;         /*  hwsm fails                          */
+	u32    hwsm_wdtov;         /*  hwsm timed out                      */
+	u32    swsm_success;       /*  swsm success                        */
+	u32    swsm_fails;         /*  swsm fails                          */
+	u32    swsm_wdtov;         /*  swsm timed out                      */
+	u32    busybufs;           /*  link init failed due to busybuf     */
+	u32    buf_waits;          /*  bufwait state entries               */
+	u32    link_fails;         /*  link failures                       */
+	u32    psp_errors;         /*  primitive sequence protocol errors  */
+	u32    lr_unexp;           /*  No. of times LR rx-ed unexpectedly  */
+	u32    lrr_unexp;          /*  No. of times LRR rx-ed unexpectedly */
+	u32    lr_tx;              /*  No. of times LR tx started          */
+	u32    lrr_tx;             /*  No. of times LRR tx started         */
+	u32    ols_tx;             /*  No. of times OLS tx started         */
+	u32    nos_tx;             /*  No. of times NOS tx started         */
+	u32    hwsm_lrr_rx;        /*  No. of times LRR rx-ed by HWSM      */
+	u32    hwsm_lr_rx;         /*  No. of times LR rx-ed by HWSM       */
+	u32    bbsc_lr;		   /* LKSM LR tx for credit recovery       */
 };
 
 struct bfa_fw_port_snsm_stats_s {
-    u32    hwsm_success;       /*  Successful hwsm terminations        */
-    u32    hwsm_fails;         /*  hwsm fail count                     */
-    u32    hwsm_wdtov;         /*  hwsm timed out                      */
-    u32    swsm_success;       /*  swsm success                        */
-    u32    swsm_wdtov;         /*  swsm timed out                      */
-    u32    error_resets;       /*  error resets initiated by upsm      */
-    u32    sync_lost;          /*  Sync loss count                     */
-    u32    sig_lost;           /*  Signal loss count                   */
-	u32	asn8g_attempts;	/* SNSM HWSM at 8Gbps attempts */
+	u32    hwsm_success;       /*  Successful hwsm terminations        */
+	u32    hwsm_fails;         /*  hwsm fail count                     */
+	u32    hwsm_wdtov;         /*  hwsm timed out                      */
+	u32    swsm_success;       /*  swsm success                        */
+	u32    swsm_wdtov;         /*  swsm timed out                      */
+	u32    error_resets;       /*  error resets initiated by upsm      */
+	u32    sync_lost;          /*  Sync loss count                     */
+	u32    sig_lost;           /*  Signal loss count                   */
+	u32    asn8g_attempts;	   /* SNSM HWSM at 8Gbps attempts	   */
 };
 
 struct bfa_fw_port_physm_stats_s {
-    u32    module_inserts;     /*  Module insert count                 */
-    u32    module_xtracts;     /*  Module extracts count               */
-    u32    module_invalids;    /*  Invalid module inserted count       */
-    u32    module_read_ign;    /*  Module validation status ignored    */
-    u32    laser_faults;       /*  Laser fault count                   */
-    u32    rsvd;
+	u32    module_inserts;     /*  Module insert count                 */
+	u32    module_xtracts;     /*  Module extracts count               */
+	u32    module_invalids;    /*  Invalid module inserted count       */
+	u32    module_read_ign;    /*  Module validation status ignored    */
+	u32    laser_faults;       /*  Laser fault count                   */
+	u32    rsvd;
 };
 
 struct bfa_fw_fip_stats_s {
-    u32    vlan_req;           /*  vlan discovery requests             */
-    u32    vlan_notify;        /*  vlan notifications                  */
-    u32    vlan_err;           /*  vlan response error                 */
-    u32    vlan_timeouts;      /*  vlan disvoery timeouts              */
-    u32    vlan_invalids;      /*  invalid vlan in discovery advert.   */
-    u32    disc_req;           /*  Discovery solicit requests          */
-    u32    disc_rsp;           /*  Discovery solicit response          */
-    u32    disc_err;           /*  Discovery advt. parse errors        */
-    u32    disc_unsol;         /*  Discovery unsolicited               */
-    u32    disc_timeouts;      /*  Discovery timeouts                  */
-    u32    disc_fcf_unavail;   /*  Discovery FCF Not Avail.            */
-    u32    linksvc_unsupp;     /*  Unsupported link service req        */
-    u32    linksvc_err;        /*  Parse error in link service req     */
-    u32    logo_req;           /*  FIP logos received                  */
-    u32    clrvlink_req;       /*  Clear virtual link req              */
-    u32    op_unsupp;          /*  Unsupported FIP operation           */
-    u32    untagged;           /*  Untagged frames (ignored)           */
-    u32    invalid_version;    /*  Invalid FIP version                 */
+	u32    vlan_req;           /*  vlan discovery requests             */
+	u32    vlan_notify;        /*  vlan notifications                  */
+	u32    vlan_err;           /*  vlan response error                 */
+	u32    vlan_timeouts;      /*  vlan disvoery timeouts              */
+	u32    vlan_invalids;      /*  invalid vlan in discovery advert.   */
+	u32    disc_req;           /*  Discovery solicit requests          */
+	u32    disc_rsp;           /*  Discovery solicit response          */
+	u32    disc_err;           /*  Discovery advt. parse errors        */
+	u32    disc_unsol;         /*  Discovery unsolicited               */
+	u32    disc_timeouts;      /*  Discovery timeouts                  */
+	u32    disc_fcf_unavail;   /*  Discovery FCF Not Avail.            */
+	u32    linksvc_unsupp;     /*  Unsupported link service req        */
+	u32    linksvc_err;        /*  Parse error in link service req     */
+	u32    logo_req;           /*  FIP logos received                  */
+	u32    clrvlink_req;       /*  Clear virtual link req              */
+	u32    op_unsupp;          /*  Unsupported FIP operation           */
+	u32    untagged;           /*  Untagged frames (ignored)           */
+	u32    invalid_version;    /*  Invalid FIP version                 */
 };
 
 struct bfa_fw_lps_stats_s {
-    u32    mac_invalids;       /*  Invalid mac assigned                */
-    u32    rsvd;
+	u32    mac_invalids;       /*  Invalid mac assigned                */
+	u32    rsvd;
 };
 
 struct bfa_fw_fcoe_stats_s {
-    u32    cee_linkups;        /*  CEE link up count                   */
-    u32    cee_linkdns;        /*  CEE link down count                 */
-    u32    fip_linkups;        /*  FIP link up count                   */
-    u32    fip_linkdns;        /*  FIP link up count                   */
-    u32    fip_fails;          /*  FIP fail count                      */
-    u32    mac_invalids;       /*  Invalid mac assigned                */
+	u32    cee_linkups;        /*  CEE link up count                   */
+	u32    cee_linkdns;        /*  CEE link down count                 */
+	u32    fip_linkups;        /*  FIP link up count                   */
+	u32    fip_linkdns;        /*  FIP link up count                   */
+	u32    fip_fails;          /*  FIP fail count                      */
+	u32    mac_invalids;       /*  Invalid mac assigned                */
 };
 
 /*
  * IOC firmware FCoE port stats
  */
 struct bfa_fw_fcoe_port_stats_s {
-	struct bfa_fw_fcoe_stats_s  fcoe_stats;
-	struct bfa_fw_fip_stats_s   fip_stats;
+	struct bfa_fw_fcoe_stats_s		fcoe_stats;
+	struct bfa_fw_fip_stats_s		fip_stats;
 };
 
 /*
@@ -335,8 +336,8 @@ struct bfa_fw_fc_uport_stats_s {
  * IOC firmware FC port stats
  */
 union bfa_fw_fc_port_stats_s {
-	struct bfa_fw_fc_uport_stats_s	fc_stats;
-	struct bfa_fw_fcoe_port_stats_s	fcoe_stats;
+	struct bfa_fw_fc_uport_stats_s		fc_stats;
+	struct bfa_fw_fcoe_port_stats_s		fcoe_stats;
 };
 
 /*
@@ -366,25 +367,25 @@ struct bfa_fw_lpsm_stats_s {
  */
 struct bfa_fw_trunk_stats_s {
 	u32 emt_recvd;		/*  Trunk EMT received		*/
-	u32 emt_accepted;		/*  Trunk EMT Accepted		*/
-	u32 emt_rejected;		/*  Trunk EMT rejected		*/
+	u32 emt_accepted;	/*  Trunk EMT Accepted		*/
+	u32 emt_rejected;	/*  Trunk EMT rejected		*/
 	u32 etp_recvd;		/*  Trunk ETP received		*/
-	u32 etp_accepted;		/*  Trunk ETP Accepted		*/
-	u32 etp_rejected;		/*  Trunk ETP rejected		*/
+	u32 etp_accepted;	/*  Trunk ETP Accepted		*/
+	u32 etp_rejected;	/*  Trunk ETP rejected		*/
 	u32 lr_recvd;		/*  Trunk LR received		*/
-	u32 rsvd;			/*  padding for 64 bit alignment */
+	u32 rsvd;		/*  padding for 64 bit alignment */
 };
 
 struct bfa_fw_advsm_stats_s {
 	u32 flogi_sent;		/*  Flogi sent			*/
 	u32 flogi_acc_recvd;	/*  Flogi Acc received		*/
 	u32 flogi_rjt_recvd;	/*  Flogi rejects received	*/
-	u32 flogi_retries;		/*  Flogi retries		*/
+	u32 flogi_retries;	/*  Flogi retries		*/
 
 	u32 elp_recvd;		/*  ELP received		*/
-	u32 elp_accepted;		/*  ELP Accepted		*/
-	u32 elp_rejected;		/*  ELP rejected		*/
-	u32 elp_dropped;		/*  ELP dropped		*/
+	u32 elp_accepted;	/*  ELP Accepted		*/
+	u32 elp_rejected;	/*  ELP rejected		*/
+	u32 elp_dropped;	/*  ELP dropped			*/
 };
 
 /*
@@ -521,7 +522,7 @@ struct bfa_qos_vc_attr_s {
 	u16  total_vc_count;                    /*  Total VC Count */
 	u16  shared_credit;
 	u32  elp_opmode_flags;
-	struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC];  /*   as many as
+	struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC];  /* as many as
 							    * total_vc_count */
 };
 
@@ -531,16 +532,16 @@ struct bfa_qos_vc_attr_s {
 struct bfa_qos_stats_s {
 	u32	flogi_sent;		/*  QoS Flogi sent */
 	u32	flogi_acc_recvd;	/*  QoS Flogi Acc received */
-	u32	flogi_rjt_recvd; /*  QoS Flogi rejects received */
+	u32	flogi_rjt_recvd;	/*  QoS Flogi rejects received */
 	u32	flogi_retries;		/*  QoS Flogi retries */
 
 	u32	elp_recvd;		/*  QoS ELP received */
 	u32	elp_accepted;		/*  QoS ELP Accepted */
-	u32	elp_rejected;       /*  QoS ELP rejected */
-	u32	elp_dropped;        /*  QoS ELP dropped  */
+	u32	elp_rejected;		/*  QoS ELP rejected */
+	u32	elp_dropped;		/*  QoS ELP dropped  */
 
-	u32	qos_rscn_recvd;     /*  QoS RSCN received */
-	u32	rsvd;		    /* padding for 64 bit alignment */
+	u32	qos_rscn_recvd;		/*  QoS RSCN received */
+	u32	rsvd;			/* padding for 64 bit alignment */
 };
 
 /*
@@ -548,9 +549,9 @@ struct bfa_qos_stats_s {
  */
 struct bfa_fcoe_stats_s {
 	u64	secs_reset;	/*  Seconds since stats reset	     */
-	u64	cee_linkups;	/*  CEE link up		     */
+	u64	cee_linkups;	/*  CEE link up			     */
 	u64	cee_linkdns;	/*  CEE link down		     */
-	u64	fip_linkups;	/*  FIP link up		     */
+	u64	fip_linkups;	/*  FIP link up			     */
 	u64	fip_linkdns;	/*  FIP link down		     */
 	u64	fip_fails;	/*  FIP failures		     */
 	u64	mac_invalids;	/*  Invalid mac assignments	     */
@@ -560,38 +561,38 @@ struct bfa_fcoe_stats_s {
 	u64	vlan_timeouts;	/*  Vlan request timeouts	     */
 	u64	vlan_invalids;	/*  Vlan invalids		     */
 	u64	disc_req;	/*  Discovery requests		     */
-	u64	disc_rsp;	/*  Discovery responses	     */
+	u64	disc_rsp;	/*  Discovery responses		     */
 	u64	disc_err;	/*  Discovery error frames	     */
 	u64	disc_unsol;	/*  Discovery unsolicited	     */
 	u64	disc_timeouts;	/*  Discovery timeouts		     */
 	u64	disc_fcf_unavail; /*  Discovery FCF not avail	     */
-	u64	linksvc_unsupp;	/*  FIP link service req unsupp.    */
-	u64	linksvc_err;	/*  FIP link service req errors     */
+	u64	linksvc_unsupp;	/*  FIP link service req unsupp	     */
+	u64	linksvc_err;	/*  FIP link service req errors	     */
 	u64	logo_req;	/*  FIP logos received		     */
-	u64	clrvlink_req;	/*  Clear virtual link requests     */
+	u64	clrvlink_req;	/*  Clear virtual link requests	     */
 	u64	op_unsupp;	/*  FIP operation unsupp.	     */
-	u64	untagged;	/*  FIP untagged frames	     */
+	u64	untagged;	/*  FIP untagged frames		     */
 	u64	txf_ucast;	/*  Tx FCoE unicast frames	     */
-	u64	txf_ucast_vlan;	/*  Tx FCoE unicast vlan frames     */
+	u64	txf_ucast_vlan;	/*  Tx FCoE unicast vlan frames      */
 	u64	txf_ucast_octets; /*  Tx FCoE unicast octets	     */
 	u64	txf_mcast;	/*  Tx FCoE multicast frames	     */
-	u64	txf_mcast_vlan;	/*  Tx FCoE multicast vlan frames   */
+	u64	txf_mcast_vlan;	/*  Tx FCoE multicast vlan frames    */
 	u64	txf_mcast_octets; /*  Tx FCoE multicast octets	     */
 	u64	txf_bcast;	/*  Tx FCoE broadcast frames	     */
-	u64	txf_bcast_vlan;	/*  Tx FCoE broadcast vlan frames   */
+	u64	txf_bcast_vlan;	/*  Tx FCoE broadcast vlan frames    */
 	u64	txf_bcast_octets; /*  Tx FCoE broadcast octets	     */
-	u64	txf_timeout;	/*  Tx timeouts		     */
+	u64	txf_timeout;	  /*  Tx timeouts		     */
 	u64	txf_parity_errors; /*  Transmit parity err	     */
-	u64	txf_fid_parity_errors; /*  Transmit FID parity err  */
+	u64	txf_fid_parity_errors; /*  Transmit FID parity err   */
 	u64	rxf_ucast_octets; /*  Rx FCoE unicast octets	     */
 	u64	rxf_ucast;	/*  Rx FCoE unicast frames	     */
-	u64	rxf_ucast_vlan;	/*  Rx FCoE unicast vlan frames     */
+	u64	rxf_ucast_vlan;	/*  Rx FCoE unicast vlan frames	     */
 	u64	rxf_mcast_octets; /*  Rx FCoE multicast octets	     */
 	u64	rxf_mcast;	/*  Rx FCoE multicast frames	     */
-	u64	rxf_mcast_vlan;	/*  Rx FCoE multicast vlan frames   */
+	u64	rxf_mcast_vlan;	/*  Rx FCoE multicast vlan frames    */
 	u64	rxf_bcast_octets; /*  Rx FCoE broadcast octets	     */
 	u64	rxf_bcast;	/*  Rx FCoE broadcast frames	     */
-	u64	rxf_bcast_vlan;	/*  Rx FCoE broadcast vlan frames   */
+	u64	rxf_bcast_vlan;	/*  Rx FCoE broadcast vlan frames    */
 };
 
 /*
@@ -852,12 +853,12 @@ struct bfa_port_cfg_s {
 	u8	 tx_bbcredit;	/*  transmit buffer credits	*/
 	u8	 ratelimit;	/*  ratelimit enabled or not	*/
 	u8	 trl_def_speed;	/*  ratelimit default speed	*/
-	u8	bb_scn;		/*  BB_SCN value from FLOGI Exchg */
-	u8	bb_scn_state;	/*  Config state of BB_SCN */
-	u8	faa_state;	/*  FAA enabled/disabled        */
-	u8	rsvd[1];
-	u16 path_tov;	/*  device path timeout	*/
-	u16 q_depth;	/*  SCSI Queue depth		*/
+	u8	 bb_scn;	/*  BB_SCN value from FLOGI Exchg */
+	u8	 bb_scn_state;	/*  Config state of BB_SCN */
+	u8	 faa_state;	/*  FAA enabled/disabled        */
+	u8	 rsvd[1];
+	u16	 path_tov;	/*  device path timeout	*/
+	u16	 q_depth;	/*  SCSI Queue depth		*/
 };
 #pragma pack()
 
@@ -868,20 +869,21 @@ struct bfa_port_attr_s {
 	/*
 	 * Static fields
 	 */
-	wwn_t	   nwwn;		/*  node wwn */
-	wwn_t	   pwwn;		/*  port wwn */
-	wwn_t	   factorynwwn;	/*  factory node wwn */
-	wwn_t	   factorypwwn;	/*  factory port wwn */
-	enum fc_cos	cos_supported;	/*  supported class of services */
-	u32	rsvd;
+	wwn_t			nwwn;		/*  node wwn */
+	wwn_t			pwwn;		/*  port wwn */
+	wwn_t			factorynwwn;	/*  factory node wwn */
+	wwn_t			factorypwwn;	/*  factory port wwn */
+	enum fc_cos		cos_supported;	/*  supported class of
+						 *  services */
+	u32			rsvd;
 	struct fc_symname_s	port_symname;	/*  port symbolic name */
-	enum bfa_port_speed speed_supported; /*  supported speeds */
-	bfa_boolean_t   pbind_enabled;
+	enum bfa_port_speed	speed_supported; /* supported speeds */
+	bfa_boolean_t		pbind_enabled;
 
 	/*
 	 * Configured values
 	 */
-	struct bfa_port_cfg_s pport_cfg;	/*  pport cfg */
+	struct bfa_port_cfg_s	pport_cfg;	/*  pport cfg */
 
 	/*
 	 * Dynamic field - info from BFA
@@ -890,19 +892,20 @@ struct bfa_port_attr_s {
 	enum bfa_port_speed	speed;		/*  current speed */
 	enum bfa_port_topology	topology;	/*  current topology */
 	bfa_boolean_t		beacon;		/*  current beacon status */
-	bfa_boolean_t		link_e2e_beacon; /*  link beacon is on */
-	bfa_boolean_t	bbsc_op_status;	/* fc credit recovery oper state */
+	bfa_boolean_t		link_e2e_beacon; /* link beacon is on */
+	bfa_boolean_t		bbsc_op_status;	/* fc credit recovery oper
+						 * state */
 
 	/*
 	 * Dynamic field - info from FCS
 	 */
-	u32		pid;		/*  port ID */
+	u32			pid;		/*  port ID */
 	enum bfa_port_type	port_type;	/*  current topology */
-	u32		loopback;	/*  external loopback */
-	u32		authfail;	/*  auth fail state */
+	u32			loopback;	/*  external loopback */
+	u32			authfail;	/*  auth fail state */
 
 	/* FCoE specific  */
-	u16		fcoe_vlan;
+	u16			fcoe_vlan;
 	u8			rsvd1[2];
 };
 
@@ -910,48 +913,48 @@ struct bfa_port_attr_s {
  *	      Port FCP mappings.
  */
 struct bfa_port_fcpmap_s {
-	char		osdevname[256];
+	char	osdevname[256];
 	u32	bus;
 	u32	target;
 	u32	oslun;
 	u32	fcid;
-	wwn_t	   nwwn;
-	wwn_t	   pwwn;
+	wwn_t	nwwn;
+	wwn_t	pwwn;
 	u64	fcplun;
-	char		luid[256];
+	char	luid[256];
 };
 
 /*
  *	      Port RNID info.
  */
 struct bfa_port_rnid_s {
-	wwn_t	     wwn;
+	wwn_t	  wwn;
 	u32	  unittype;
 	u32	  portid;
 	u32	  attached_nodes_num;
 	u16	  ip_version;
 	u16	  udp_port;
-	u8	   ipaddr[16];
+	u8	  ipaddr[16];
 	u16	  rsvd;
 	u16	  topologydiscoveryflags;
 };
 
 #pragma pack(1)
 struct bfa_fcport_fcf_s {
-	wwn_t	   name;	   /*  FCF name		 */
-	wwn_t	   fabric_name;    /*  Fabric Name	      */
-	u8		fipenabled;	/*  FIP enabled or not */
-	u8		fipfailed;	/*  FIP failed or not	*/
-	u8		resv[2];
-	u8	 pri;	    /*  FCF priority	     */
-	u8	 version;	/*  FIP version used	 */
-	u8	 available;      /*  Available  for  login    */
-	u8	 fka_disabled;   /*  FKA is disabled	  */
-	u8	 maxsz_verified; /*  FCoE max size verified   */
-	u8	 fc_map[3];      /*  FC map		   */
-	__be16	 vlan;	   /*  FCoE vlan tag/priority   */
-	u32	fka_adv_per;    /*  FIP  ka advert. period   */
-	mac_t	   mac;	    /*  FCF mac		  */
+	wwn_t	name;		/*  FCF name		   */
+	wwn_t	fabric_name;    /*  Fabric Name		   */
+	u8	fipenabled;	/*  FIP enabled or not	   */
+	u8	fipfailed;	/*  FIP failed or not	   */
+	u8	resv[2];
+	u8	pri;		/*  FCF priority	   */
+	u8	version;	/*  FIP version used	   */
+	u8	available;      /*  Available for login    */
+	u8	fka_disabled;   /*  FKA is disabled	   */
+	u8	maxsz_verified; /*  FCoE max size verified */
+	u8	fc_map[3];      /*  FC map		   */
+	__be16	vlan;		/*  FCoE vlan tag/priority */
+	u32	fka_adv_per;    /*  FIP  ka advert. period */
+	mac_t	mac;		/*  FCF mac		   */
 };
 
 /*
@@ -981,7 +984,7 @@ struct bfa_port_link_s {
 	u8	 linkstate_rsn;	/*  bfa_port_linkstate_rsn_t */
 	u8	 topology;	/*  P2P/LOOP bfa_port_topology */
 	u8	 speed;		/*  Link speed (1/2/4/8 G) */
-	u32	linkstate_opt;  /*  Linkstate optional data (debug) */
+	u32	 linkstate_opt; /*  Linkstate optional data (debug) */
 	u8	 trunked;	/*  Trunked or not (1 or 0) */
 	u8	 resvd[3];
 	struct bfa_qos_attr_s  qos_attr;   /* QoS Attributes */
@@ -1035,7 +1038,7 @@ struct bfa_rport_hal_stats_s {
 	u32        sm_fwc_del;	    /*  fw create: delete events   */
 	u32        sm_fwc_off;	    /*  fw create: offline events  */
 	u32        sm_fwc_hwf;	    /*  fw create: IOC down        */
-	u32        sm_fwc_unexp;	    /*  fw create: exception events*/
+	u32        sm_fwc_unexp;    /*  fw create: exception events*/
 	u32        sm_on_off;	    /*  online: offline events     */
 	u32        sm_on_del;	    /*  online: delete events      */
 	u32        sm_on_hwf;	    /*  online: IOC down events    */
@@ -1043,25 +1046,25 @@ struct bfa_rport_hal_stats_s {
 	u32        sm_fwd_rsp;	    /*  fw delete: fw responses    */
 	u32        sm_fwd_del;	    /*  fw delete: delete events   */
 	u32        sm_fwd_hwf;	    /*  fw delete: IOC down events */
-	u32        sm_fwd_unexp;	    /*  fw delete: exception events*/
+	u32        sm_fwd_unexp;    /*  fw delete: exception events*/
 	u32        sm_off_del;	    /*  offline: delete events     */
 	u32        sm_off_on;	    /*  offline: online events     */
 	u32        sm_off_hwf;	    /*  offline: IOC down events   */
-	u32        sm_off_unexp;	    /*  offline: exception events  */
-	u32        sm_del_fwrsp;	    /*  delete: fw responses       */
+	u32        sm_off_unexp;    /*  offline: exception events  */
+	u32        sm_del_fwrsp;    /*  delete: fw responses       */
 	u32        sm_del_hwf;	    /*  delete: IOC down events    */
-	u32        sm_del_unexp;	    /*  delete: exception events   */
-	u32        sm_delp_fwrsp;	    /*  delete pend: fw responses  */
+	u32        sm_del_unexp;    /*  delete: exception events   */
+	u32        sm_delp_fwrsp;   /*  delete pend: fw responses  */
 	u32        sm_delp_hwf;	    /*  delete pend: IOC downs     */
-	u32        sm_delp_unexp;	    /*  delete pend: exceptions    */
-	u32        sm_offp_fwrsp;	    /*  off-pending: fw responses  */
+	u32        sm_delp_unexp;   /*  delete pend: exceptions    */
+	u32        sm_offp_fwrsp;   /*  off-pending: fw responses  */
 	u32        sm_offp_del;	    /*  off-pending: deletes       */
 	u32        sm_offp_hwf;	    /*  off-pending: IOC downs     */
-	u32        sm_offp_unexp;	    /*  off-pending: exceptions    */
+	u32        sm_offp_unexp;   /*  off-pending: exceptions    */
 	u32        sm_iocd_off;	    /*  IOC down: offline events   */
 	u32        sm_iocd_del;	    /*  IOC down: delete events    */
 	u32        sm_iocd_on;	    /*  IOC down: online events    */
-	u32        sm_iocd_unexp;	    /*  IOC down: exceptions       */
+	u32        sm_iocd_unexp;   /*  IOC down: exceptions       */
 	u32        rsvd;
 };
 #pragma pack(1)
@@ -1069,9 +1072,9 @@ struct bfa_rport_hal_stats_s {
  *  Rport's QoS attributes
  */
 struct bfa_rport_qos_attr_s {
-	u8			qos_priority;  /*  rport's QoS priority   */
-	u8			rsvd[3];
-	u32	       qos_flow_id;	  /*  QoS flow Id	 */
+	u8		qos_priority;	/*  rport's QoS priority   */
+	u8		rsvd[3];
+	u32		qos_flow_id;	/*  QoS flow Id	 */
 };
 #pragma pack()
 

+ 3 - 3
drivers/scsi/bfa/bfa_ioc.c

@@ -3727,11 +3727,11 @@ bfa_sfp_media_get(struct bfa_sfp_s *sfp)
 			 (xmtr_tech & SFP_XMTR_TECH_SA))
 			*media = BFA_SFP_MEDIA_SW;
 		/* Check 10G Ethernet Compilance code */
-		else if (e10g.b & 0x10)
+		else if (e10g.r.e10g_sr)
 			*media = BFA_SFP_MEDIA_SW;
-		else if (e10g.b & 0x60)
+		else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
 			*media = BFA_SFP_MEDIA_LW;
-		else if (e10g.r.e10g_unall & 0x80)
+		else if (e10g.r.e10g_unall)
 			*media = BFA_SFP_MEDIA_UNKNOWN;
 		else
 			bfa_trc(sfp, 0);

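Note on the bfa_ioc.c hunk above: the raw byte masks are replaced by the named bitfields from sfp_xcvr_e10g_code_u, whose layout the bfa_defs.h fix earlier in this diff makes endian-correct. In particular, the old test `e10g.r.e10g_unall & 0x80` could never be true: e10g_unall is a one-bit field holding only 0 or 1, so masking it with bit 7 always yields zero. A small illustration of that pitfall:

/* Illustrative only: masking a 1-bit bitfield with 0x80 always fails. */
struct codes {
	unsigned char e10g_unall:1;	/* can only hold 0 or 1 */
};

static int is_unallocated(struct codes c)
{
	/* Old test: (c.e10g_unall & 0x80) -- always 0.
	 * New test reads the field directly. */
	return c.e10g_unall;
}
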
+ 1 - 2
drivers/scsi/bfa/bfad_debugfs.c

@@ -557,8 +557,7 @@ bfad_debugfs_exit(struct bfad_port_s *port)
 		}
 	}
 
-	/*
-	 * Remove the pci_dev debugfs directory for the port */
+	/* Remove the pci_dev debugfs directory for the port */
 	if (port->port_debugfs_root) {
 		debugfs_remove(port->port_debugfs_root);
 		port->port_debugfs_root = NULL;

+ 0 - 58
drivers/scsi/device_handler/scsi_dh.c

@@ -28,7 +28,6 @@
 
 static DEFINE_SPINLOCK(list_lock);
 static LIST_HEAD(scsi_dh_list);
-static int scsi_dh_list_idx = 1;
 
 static struct scsi_device_handler *get_device_handler(const char *name)
 {
@@ -45,21 +44,6 @@ static struct scsi_device_handler *get_device_handler(const char *name)
 	return found;
 }
 
-static struct scsi_device_handler *get_device_handler_by_idx(int idx)
-{
-	struct scsi_device_handler *tmp, *found = NULL;
-
-	spin_lock(&list_lock);
-	list_for_each_entry(tmp, &scsi_dh_list, list) {
-		if (tmp->idx == idx) {
-			found = tmp;
-			break;
-		}
-	}
-	spin_unlock(&list_lock);
-	return found;
-}
-
 /*
  * device_handler_match_function - Match a device handler to a device
  * @sdev - SCSI device to be tested
@@ -83,23 +67,6 @@ device_handler_match_function(struct scsi_device *sdev)
 	return found_dh;
 }
 
-/*
- * device_handler_match_devlist - Match a device handler to a device
- * @sdev - SCSI device to be tested
- *
- * Tests @sdev against all device_handler registered in the devlist.
- * Returns the found device handler or NULL if not found.
- */
-static struct scsi_device_handler *
-device_handler_match_devlist(struct scsi_device *sdev)
-{
-	int idx;
-
-	idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model,
-					  SCSI_DEVINFO_DH);
-	return get_device_handler_by_idx(idx);
-}
-
 /*
  * device_handler_match - Attach a device handler to a device
  * @scsi_dh - The device handler to match against or NULL
@@ -116,8 +83,6 @@ device_handler_match(struct scsi_device_handler *scsi_dh,
 	struct scsi_device_handler *found_dh;
 
 	found_dh = device_handler_match_function(sdev);
-	if (!found_dh)
-		found_dh = device_handler_match_devlist(sdev);
 
 	if (scsi_dh && found_dh != scsi_dh)
 		found_dh = NULL;
@@ -361,25 +326,14 @@ static int scsi_dh_notifier_remove(struct device *dev, void *data)
  */
 int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
 {
-	int i;
 
 	if (get_device_handler(scsi_dh->name))
 		return -EBUSY;
 
 	spin_lock(&list_lock);
-	scsi_dh->idx = scsi_dh_list_idx++;
 	list_add(&scsi_dh->list, &scsi_dh_list);
 	spin_unlock(&list_lock);
 
-	for (i = 0; scsi_dh->devlist && scsi_dh->devlist[i].vendor; i++) {
-		scsi_dev_info_list_add_keyed(0,
-					scsi_dh->devlist[i].vendor,
-					scsi_dh->devlist[i].model,
-					NULL,
-					scsi_dh->idx,
-					SCSI_DEVINFO_DH);
-	}
-
 	bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
 	printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
 
@@ -396,7 +350,6 @@ EXPORT_SYMBOL_GPL(scsi_register_device_handler);
  */
 int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
 {
-	int i;
 
 	if (!get_device_handler(scsi_dh->name))
 		return -ENODEV;
@@ -404,12 +357,6 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
 	bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
 			 scsi_dh_notifier_remove);
 
-	for (i = 0; scsi_dh->devlist && scsi_dh->devlist[i].vendor; i++) {
-		scsi_dev_info_list_del_keyed(scsi_dh->devlist[i].vendor,
-					     scsi_dh->devlist[i].model,
-					     SCSI_DEVINFO_DH);
-	}
-
 	spin_lock(&list_lock);
 	list_del(&scsi_dh->list);
 	spin_unlock(&list_lock);
@@ -588,10 +535,6 @@ static int __init scsi_dh_init(void)
 {
 	int r;
 
-	r = scsi_dev_info_add_list(SCSI_DEVINFO_DH, "SCSI Device Handler");
-	if (r)
-		return r;
-
 	r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
 
 	if (!r)
@@ -606,7 +549,6 @@ static void __exit scsi_dh_exit(void)
 	bus_for_each_dev(&scsi_bus_type, NULL, NULL,
 			 scsi_dh_sysfs_attr_remove);
 	bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
-	scsi_dev_info_remove_list(SCSI_DEVINFO_DH);
 }
 
 module_init(scsi_dh_init);

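Context for this and the next three diffs: the scsi_dh core drops its keyed vendor/model devlist (SCSI_DEVINFO_DH); matching now goes solely through device_handler_match_function(), which invokes each handler's new .match() callback. The three handler diffs below add those callbacks. A hedged sketch of the handler-side contract (example_* names are illustrative; the required attach/detach callbacks are omitted):

#include <scsi/scsi_device.h>

/* Return true if this handler should drive sdev, e.g. after
 * comparing sdev->vendor / sdev->model against a private list. */
static bool example_match(struct scsi_device *sdev)
{
	return false;	/* illustrative stub */
}

static struct scsi_device_handler example_dh = {
	.name	= "example",
	.match	= example_match,	/* replaces the old .devlist lookup */
};
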
+ 19 - 0
drivers/scsi/device_handler/scsi_dh_emc.c

@@ -629,6 +629,24 @@ static const struct scsi_dh_devlist clariion_dev_list[] = {
 	{NULL, NULL},
 };
 
+static bool clariion_match(struct scsi_device *sdev)
+{
+	int i;
+
+	if (scsi_device_tpgs(sdev))
+		return false;
+
+	for (i = 0; clariion_dev_list[i].vendor; i++) {
+		if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor,
+			strlen(clariion_dev_list[i].vendor)) &&
+		    !strncmp(sdev->model, clariion_dev_list[i].model,
+			strlen(clariion_dev_list[i].model))) {
+			return true;
+		}
+	}
+	return false;
+}
+
 static int clariion_bus_attach(struct scsi_device *sdev);
 static void clariion_bus_detach(struct scsi_device *sdev);
 
@@ -642,6 +660,7 @@ static struct scsi_device_handler clariion_dh = {
 	.activate	= clariion_activate,
 	.prep_fn	= clariion_prep_fn,
 	.set_params	= clariion_set_params,
+	.match		= clariion_match,
 };
 
 static int clariion_bus_attach(struct scsi_device *sdev)

+ 19 - 0
drivers/scsi/device_handler/scsi_dh_hp_sw.c

@@ -320,6 +320,24 @@ static const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
 	{NULL, NULL},
 };
 
+static bool hp_sw_match(struct scsi_device *sdev)
+{
+	int i;
+
+	if (scsi_device_tpgs(sdev))
+		return false;
+
+	for (i = 0; hp_sw_dh_data_list[i].vendor; i++) {
+		if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
+			strlen(hp_sw_dh_data_list[i].vendor)) &&
+		    !strncmp(sdev->model, hp_sw_dh_data_list[i].model,
+			strlen(hp_sw_dh_data_list[i].model))) {
+			return true;
+		}
+	}
+	return false;
+}
+
 static int hp_sw_bus_attach(struct scsi_device *sdev);
 static void hp_sw_bus_detach(struct scsi_device *sdev);
 
@@ -331,6 +349,7 @@ static struct scsi_device_handler hp_sw_dh = {
 	.detach		= hp_sw_bus_detach,
 	.activate	= hp_sw_activate,
 	.prep_fn	= hp_sw_prep_fn,
+	.match		= hp_sw_match,
 };
 
 static int hp_sw_bus_attach(struct scsi_device *sdev)

+ 19 - 0
drivers/scsi/device_handler/scsi_dh_rdac.c

@@ -820,6 +820,24 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
 	{NULL, NULL},
 };
 
+static bool rdac_match(struct scsi_device *sdev)
+{
+	int i;
+
+	if (scsi_device_tpgs(sdev))
+		return false;
+
+	for (i = 0; rdac_dev_list[i].vendor; i++) {
+		if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
+			strlen(rdac_dev_list[i].vendor)) &&
+		    !strncmp(sdev->model, rdac_dev_list[i].model,
+			strlen(rdac_dev_list[i].model))) {
+			return true;
+		}
+	}
+	return false;
+}
+
 static int rdac_bus_attach(struct scsi_device *sdev);
 static void rdac_bus_detach(struct scsi_device *sdev);
 
@@ -832,6 +850,7 @@ static struct scsi_device_handler rdac_dh = {
 	.attach = rdac_bus_attach,
 	.detach = rdac_bus_detach,
 	.activate = rdac_activate,
+	.match = rdac_match,
 };
 
 static int rdac_bus_attach(struct scsi_device *sdev)

+ 4 - 2
drivers/scsi/hpsa.c

@@ -293,12 +293,14 @@ static u32 unresettable_controller[] = {
 	0x3215103C, /* Smart Array E200i */
 	0x3237103C, /* Smart Array E500 */
 	0x323D103C, /* Smart Array P700m */
+	0x40800E11, /* Smart Array 5i */
 	0x409C0E11, /* Smart Array 6400 */
 	0x409D0E11, /* Smart Array 6400 EM */
 };
 
 /* List of controllers which cannot even be soft reset */
 static u32 soft_unresettable_controller[] = {
+	0x40800E11, /* Smart Array 5i */
 	/* Exclude 640x boards.  These are two pci devices in one slot
 	 * which share a battery backed cache module.  One controls the
 	 * cache, the other accesses the cache through the one that controls
@@ -4072,10 +4074,10 @@ static int hpsa_request_irq(struct ctlr_info *h,
 
 	if (h->msix_vector || h->msi_vector)
 		rc = request_irq(h->intr[h->intr_mode], msixhandler,
-				IRQF_DISABLED, h->devname, h);
+				0, h->devname, h);
 	else
 		rc = request_irq(h->intr[h->intr_mode], intxhandler,
-				IRQF_DISABLED, h->devname, h);
+				IRQF_SHARED, h->devname, h);
 	if (rc) {
 		dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
 		       h->intr[h->intr_mode], h->devname);

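Note on the hpsa.c request_irq() hunk: IRQF_DISABLED had become a no-op scheduled for removal. MSI/MSI-X vectors are exclusive to the device, so no flags are needed on that path; the legacy INTx pin may be shared with other devices, hence IRQF_SHARED there. A hedged sketch of the resulting flag choice (illustrative wrapper, not the driver code):

#include <linux/types.h>	/* bool */
#include <linux/interrupt.h>	/* request_irq(), IRQF_SHARED */

/* Sketch: pick request_irq() flags based on the interrupt mode. */
static int example_request_irq(unsigned int irq, bool msi_or_msix,
			       irq_handler_t handler, void *dev_id)
{
	/* MSI/MSI-X: per-device vector, nothing to share (flags = 0).
	 * INTx: the line can be shared, so the handler must cope. */
	unsigned long flags = msi_or_msix ? 0 : IRQF_SHARED;

	return request_irq(irq, handler, flags, "example", dev_id);
}
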
+ 0 - 14
drivers/scsi/lpfc/lpfc.h

@@ -247,18 +247,6 @@ struct lpfc_stats {
 	uint32_t fcpLocalErr;
 };
 
-enum sysfs_mbox_state {
-	SMBOX_IDLE,
-	SMBOX_WRITING,
-	SMBOX_READING
-};
-
-struct lpfc_sysfs_mbox {
-	enum sysfs_mbox_state state;
-	size_t                offset;
-	struct lpfcMboxq *    mbox;
-};
-
 struct lpfc_hba;
 
 
@@ -783,8 +771,6 @@ struct lpfc_hba {
 	uint64_t bg_apptag_err_cnt;
 	uint64_t bg_reftag_err_cnt;
 
-	struct lpfc_sysfs_mbox sysfs_mbox;
-
 	/* fastpath list. */
 	spinlock_t scsi_buf_list_lock;
 	struct list_head lpfc_scsi_buf_list;

+ 151 - 285
drivers/scsi/lpfc/lpfc_attr.c

@@ -351,10 +351,23 @@ lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
 	struct Scsi_Host  *shost = class_to_shost(dev);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
+	uint32_t if_type;
+	uint8_t sli_family;
 	char fwrev[32];
+	int len;
 
 	lpfc_decode_firmware_rev(phba, fwrev, 1);
-	return snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev);
+	if_type = phba->sli4_hba.pc_sli4_params.if_type;
+	sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		len = snprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
+			       fwrev, phba->sli_rev);
+	else
+		len = snprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
+			       fwrev, phba->sli_rev, if_type, sli_family);
+
+	return len;
 }
 
 /**
@@ -487,6 +500,34 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
 	return len;
 }
 
+/**
+ * lpfc_sli4_protocol_show - Return the fip mode of the HBA
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		return snprintf(buf, PAGE_SIZE, "fc\n");
+
+	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
+		if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
+			return snprintf(buf, PAGE_SIZE, "fcoe\n");
+		if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
+			return snprintf(buf, PAGE_SIZE, "fc\n");
+	}
+	return snprintf(buf, PAGE_SIZE, "unknown\n");
+}
+
 /**
  * lpfc_link_state_store - Transition the link_state on an HBA port
  * @dev: class device that is converted into a Scsi_host.
@@ -773,7 +814,12 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
  * the readyness after performing a firmware reset.
  *
  * Returns:
- * zero for success
+ * zero for success, -EPERM when port does not have privilage to perform the
+ * reset, -EIO when port timeout from recovering from the reset.
+ *
+ * Note:
+ * As the caller will interpret the return code by value, be careful in making
+ * change or addition to return codes.
  **/
 int
 lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
@@ -826,9 +872,11 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
 {
 	struct completion online_compl;
 	struct pci_dev *pdev = phba->pcidev;
+	uint32_t before_fc_flag;
+	uint32_t sriov_nr_virtfn;
 	uint32_t reg_val;
-	int status = 0;
-	int rc;
+	int status = 0, rc = 0;
+	int job_posted = 1, sriov_err;
 
 	if (!phba->cfg_enable_hba_reset)
 		return -EACCES;
@@ -838,6 +886,10 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
 	     LPFC_SLI_INTF_IF_TYPE_2))
 		return -EPERM;
 
+	/* Keep state if we need to restore back */
+	before_fc_flag = phba->pport->fc_flag;
+	sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
+
 	/* Disable SR-IOV virtual functions if enabled */
 	if (phba->cfg_sriov_nr_virtfn) {
 		pci_disable_sriov(pdev);
@@ -869,21 +921,44 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
 	/* delay driver action following IF_TYPE_2 reset */
 	rc = lpfc_sli4_pdev_status_reg_wait(phba);
 
-	if (rc)
+	if (rc == -EPERM) {
+		/* no privilage for reset, restore if needed */
+		if (before_fc_flag & FC_OFFLINE_MODE)
+			goto out;
+	} else if (rc == -EIO) {
+		/* reset failed, there is nothing more we can do */
 		return rc;
+	}
+
+	/* keep the original port state */
+	if (before_fc_flag & FC_OFFLINE_MODE)
+		goto out;
 
 	init_completion(&online_compl);
-	rc = lpfc_workq_post_event(phba, &status, &online_compl,
-				   LPFC_EVT_ONLINE);
-	if (rc == 0)
-		return -ENOMEM;
+	job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
+					   LPFC_EVT_ONLINE);
+	if (!job_posted)
+		goto out;
 
 	wait_for_completion(&online_compl);
 
-	if (status != 0)
-		return -EIO;
+out:
+	/* in any case, restore the virtual functions enabled as before */
+	if (sriov_nr_virtfn) {
+		sriov_err =
+			lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
+		if (!sriov_err)
+			phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn;
+	}
 
-	return 0;
+	/* return proper error code */
+	if (!rc) {
+		if (!job_posted)
+			rc = -ENOMEM;
+		else if (status)
+			rc = -EIO;
+	}
+	return rc;
 }
 
 /**
@@ -955,33 +1030,38 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 	struct completion online_compl;
-	int status=0;
+	char *board_mode_str = NULL;
+	int status = 0;
 	int rc;
 
-	if (!phba->cfg_enable_hba_reset)
-		return -EACCES;
+	if (!phba->cfg_enable_hba_reset) {
+		status = -EACCES;
+		goto board_mode_out;
+	}
 
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-		"3050 lpfc_board_mode set to %s\n", buf);
+			 "3050 lpfc_board_mode set to %s\n", buf);
 
 	init_completion(&online_compl);
 
 	if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
 		rc = lpfc_workq_post_event(phba, &status, &online_compl,
 				      LPFC_EVT_ONLINE);
-		if (rc == 0)
-			return -ENOMEM;
+		if (rc == 0) {
+			status = -ENOMEM;
+			goto board_mode_out;
+		}
 		wait_for_completion(&online_compl);
 	} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
 		status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
 	else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
 		if (phba->sli_rev == LPFC_SLI_REV4)
-			return -EINVAL;
+			status = -EINVAL;
 		else
 			status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
 	else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
 		if (phba->sli_rev == LPFC_SLI_REV4)
-			return -EINVAL;
+			status = -EINVAL;
 		else
 			status = lpfc_do_offline(phba, LPFC_EVT_KILL);
 	else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
@@ -991,12 +1071,21 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
 	else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
 		status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
 	else
-		return -EINVAL;
+		status = -EINVAL;
 
+board_mode_out:
 	if (!status)
 		return strlen(buf);
-	else
+	else {
+		board_mode_str = strchr(buf, '\n');
+		if (board_mode_str)
+			*board_mode_str = '\0';
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "3097 Failed \"%s\", status(%d), "
+				 "fc_flag(x%x)\n",
+				 buf, status, phba->pport->fc_flag);
 		return status;
+	}
 }
 
 /**
@@ -1942,6 +2031,7 @@ static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
 static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
 static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
 		   lpfc_sriov_hw_max_virtfn_show, NULL);
+static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
 
 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
 
@@ -2687,6 +2777,14 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
 	if (val >= 0 && val <= 6) {
 		prev_val = phba->cfg_topology;
 		phba->cfg_topology = val;
+		if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
+			val == 4) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"3113 Loop mode not supported at speed %d\n",
+				phba->cfg_link_speed);
+			phba->cfg_topology = prev_val;
+			return -EINVAL;
+		}
 		if (nolip)
 			return strlen(buf);
 
@@ -3132,6 +3230,14 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
 				val);
 		return -EINVAL;
 	}
+	if (val == LPFC_USER_LINK_SPEED_16G &&
+		 phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3112 lpfc_link_speed attribute cannot be set "
+				"to %d. Speed is not supported in loop mode.\n",
+				val);
+		return -EINVAL;
+	}
 	if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
 	    (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
 		prev_val = phba->cfg_link_speed;
@@ -3176,6 +3282,13 @@ lpfc_param_show(link_speed)
 static int
 lpfc_link_speed_init(struct lpfc_hba *phba, int val)
 {
+	if (val == LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"3111 lpfc_link_speed of %d cannot "
+			"support loop mode, setting topology to default.\n",
+			 val);
+		phba->cfg_topology = 0;
+	}
 	if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
 	    (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
 		phba->cfg_link_speed = val;
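
Taken together, the three hunks above enforce a single invariant from both sysfs store paths and once more at init time: 16 Gb link speed and arbitrated-loop topology (cfg_topology value 4) are mutually exclusive. A condensed restatement as a hypothetical predicate (not a function in the driver):

	/* Sketch: whichever of the two settings arrives second is rejected
	 * with -EINVAL; at init time the topology instead falls back to 0
	 * (auto-select) rather than failing. */
	static bool lpfc_speed_topology_compatible(int topology, int speed)
	{
		return !(topology == 4 && speed == LPFC_USER_LINK_SPEED_16G);
	}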
@@ -3830,6 +3943,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_fips_rev,
 	&dev_attr_lpfc_dss,
 	&dev_attr_lpfc_sriov_hw_max_virtfn,
+	&dev_attr_protocol,
 	NULL,
 };
 
@@ -3987,23 +4101,6 @@ static struct bin_attribute sysfs_ctlreg_attr = {
 	.write = sysfs_ctlreg_write,
 };
 
-/**
- * sysfs_mbox_idle - frees the sysfs mailbox
- * @phba: lpfc_hba pointer
- **/
-static void
-sysfs_mbox_idle(struct lpfc_hba *phba)
-{
-	phba->sysfs_mbox.state = SMBOX_IDLE;
-	phba->sysfs_mbox.offset = 0;
-
-	if (phba->sysfs_mbox.mbox) {
-		mempool_free(phba->sysfs_mbox.mbox,
-			     phba->mbox_mem_pool);
-		phba->sysfs_mbox.mbox = NULL;
-	}
-}
-
 /**
  * sysfs_mbox_write - Write method for writing information via mbox
  * @filp: open sysfs file
@@ -4014,71 +4111,18 @@ sysfs_mbox_idle(struct lpfc_hba *phba)
  * @count: bytes to transfer.
  *
  * Description:
- * Accessed via /sys/class/scsi_host/hostxxx/mbox.
- * Uses the sysfs mbox to send buf contents to the adapter.
+ * Deprecated function. All mailbox access from user space is performed via the
+ * bsg interface.
  *
  * Returns:
- * -ERANGE off and count combo out of range
- * -EINVAL off, count or buff address invalid
- * zero if count is zero
- * -EPERM adapter is offline
- * -ENOMEM failed to allocate memory for the mail box
- * -EAGAIN offset, state or mbox is NULL
- * count number of bytes transferred
+ * -EPERM operation not permitted
  **/
 static ssize_t
 sysfs_mbox_write(struct file *filp, struct kobject *kobj,
 		 struct bin_attribute *bin_attr,
 		 char *buf, loff_t off, size_t count)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct Scsi_Host  *shost = class_to_shost(dev);
-	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
-	struct lpfc_hba   *phba = vport->phba;
-	struct lpfcMboxq  *mbox = NULL;
-
-	if ((count + off) > MAILBOX_CMD_SIZE)
-		return -ERANGE;
-
-	if (off % 4 ||  count % 4 || (unsigned long)buf % 4)
-		return -EINVAL;
-
-	if (count == 0)
-		return 0;
-
-	if (off == 0) {
-		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-		if (!mbox)
-			return -ENOMEM;
-		memset(mbox, 0, sizeof (LPFC_MBOXQ_t));
-	}
-
-	spin_lock_irq(&phba->hbalock);
-
-	if (off == 0) {
-		if (phba->sysfs_mbox.mbox)
-			mempool_free(mbox, phba->mbox_mem_pool);
-		else
-			phba->sysfs_mbox.mbox = mbox;
-		phba->sysfs_mbox.state = SMBOX_WRITING;
-	} else {
-		if (phba->sysfs_mbox.state  != SMBOX_WRITING ||
-		    phba->sysfs_mbox.offset != off           ||
-		    phba->sysfs_mbox.mbox   == NULL) {
-			sysfs_mbox_idle(phba);
-			spin_unlock_irq(&phba->hbalock);
-			return -EAGAIN;
-		}
-	}
-
-	memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off,
-	       buf, count);
-
-	phba->sysfs_mbox.offset = off + count;
-
-	spin_unlock_irq(&phba->hbalock);
-
-	return count;
+	return -EPERM;
 }
 
 /**
@@ -4091,201 +4135,18 @@ sysfs_mbox_write(struct file *filp, struct kobject *kobj,
  * @count: bytes to transfer.
  *
  * Description:
- * Accessed via /sys/class/scsi_host/hostxxx/mbox.
- * Uses the sysfs mbox to receive data from to the adapter.
+ * Deprecated function. All mailbox access from user space is performed via the
+ * bsg interface.
  *
  * Returns:
- * -ERANGE off greater than mailbox command size
- * -EINVAL off, count or buff address invalid
- * zero if off and count are zero
- * -EACCES adapter over temp
- * -EPERM garbage can value to catch a multitude of errors
- * -EAGAIN management IO not permitted, state or off error
- * -ETIME mailbox timeout
- * -ENODEV mailbox error
- * count number of bytes transferred
+ * -EPERM operation not permitted
  **/
 static ssize_t
 sysfs_mbox_read(struct file *filp, struct kobject *kobj,
 		struct bin_attribute *bin_attr,
 		char *buf, loff_t off, size_t count)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct Scsi_Host  *shost = class_to_shost(dev);
-	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
-	struct lpfc_hba   *phba = vport->phba;
-	LPFC_MBOXQ_t *mboxq;
-	MAILBOX_t *pmb;
-	uint32_t mbox_tmo;
-	int rc;
-
-	if (off > MAILBOX_CMD_SIZE)
-		return -ERANGE;
-
-	if ((count + off) > MAILBOX_CMD_SIZE)
-		count = MAILBOX_CMD_SIZE - off;
-
-	if (off % 4 ||  count % 4 || (unsigned long)buf % 4)
-		return -EINVAL;
-
-	if (off && count == 0)
-		return 0;
-
-	spin_lock_irq(&phba->hbalock);
-
-	if (phba->over_temp_state == HBA_OVER_TEMP) {
-		sysfs_mbox_idle(phba);
-		spin_unlock_irq(&phba->hbalock);
-		return  -EACCES;
-	}
-
-	if (off == 0 &&
-	    phba->sysfs_mbox.state  == SMBOX_WRITING &&
-	    phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
-		mboxq = (LPFC_MBOXQ_t *)&phba->sysfs_mbox.mbox;
-		pmb = &mboxq->u.mb;
-		switch (pmb->mbxCommand) {
-			/* Offline only */
-		case MBX_INIT_LINK:
-		case MBX_DOWN_LINK:
-		case MBX_CONFIG_LINK:
-		case MBX_CONFIG_RING:
-		case MBX_RESET_RING:
-		case MBX_UNREG_LOGIN:
-		case MBX_CLEAR_LA:
-		case MBX_DUMP_CONTEXT:
-		case MBX_RUN_DIAGS:
-		case MBX_RESTART:
-		case MBX_SET_MASK:
-		case MBX_SET_DEBUG:
-			if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
-				printk(KERN_WARNING "mbox_read:Command 0x%x "
-				       "is illegal in on-line state\n",
-				       pmb->mbxCommand);
-				sysfs_mbox_idle(phba);
-				spin_unlock_irq(&phba->hbalock);
-				return -EPERM;
-			}
-		case MBX_WRITE_NV:
-		case MBX_WRITE_VPARMS:
-		case MBX_LOAD_SM:
-		case MBX_READ_NV:
-		case MBX_READ_CONFIG:
-		case MBX_READ_RCONFIG:
-		case MBX_READ_STATUS:
-		case MBX_READ_XRI:
-		case MBX_READ_REV:
-		case MBX_READ_LNK_STAT:
-		case MBX_DUMP_MEMORY:
-		case MBX_DOWN_LOAD:
-		case MBX_UPDATE_CFG:
-		case MBX_KILL_BOARD:
-		case MBX_LOAD_AREA:
-		case MBX_LOAD_EXP_ROM:
-		case MBX_BEACON:
-		case MBX_DEL_LD_ENTRY:
-		case MBX_SET_VARIABLE:
-		case MBX_WRITE_WWN:
-		case MBX_PORT_CAPABILITIES:
-		case MBX_PORT_IOV_CONTROL:
-			break;
-		case MBX_SECURITY_MGMT:
-		case MBX_AUTH_PORT:
-			if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
-				printk(KERN_WARNING "mbox_read:Command 0x%x "
-				       "is not permitted\n", pmb->mbxCommand);
-				sysfs_mbox_idle(phba);
-				spin_unlock_irq(&phba->hbalock);
-				return -EPERM;
-			}
-			break;
-		case MBX_READ_SPARM64:
-		case MBX_READ_TOPOLOGY:
-		case MBX_REG_LOGIN:
-		case MBX_REG_LOGIN64:
-		case MBX_CONFIG_PORT:
-		case MBX_RUN_BIU_DIAG:
-			printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
-			       pmb->mbxCommand);
-			sysfs_mbox_idle(phba);
-			spin_unlock_irq(&phba->hbalock);
-			return -EPERM;
-		default:
-			printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
-			       pmb->mbxCommand);
-			sysfs_mbox_idle(phba);
-			spin_unlock_irq(&phba->hbalock);
-			return -EPERM;
-		}
-
-		/* If HBA encountered an error attention, allow only DUMP
-		 * or RESTART mailbox commands until the HBA is restarted.
-		 */
-		if (phba->pport->stopped &&
-		    pmb->mbxCommand != MBX_DUMP_MEMORY &&
-		    pmb->mbxCommand != MBX_RESTART &&
-		    pmb->mbxCommand != MBX_WRITE_VPARMS &&
-		    pmb->mbxCommand != MBX_WRITE_WWN)
-			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
-					"1259 mbox: Issued mailbox cmd "
-					"0x%x while in stopped state.\n",
-					pmb->mbxCommand);
-
-		phba->sysfs_mbox.mbox->vport = vport;
-
-		/* Don't allow mailbox commands to be sent when blocked
-		 * or when in the middle of discovery
-		 */
-		if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
-			sysfs_mbox_idle(phba);
-			spin_unlock_irq(&phba->hbalock);
-			return  -EAGAIN;
-		}
-
-		if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-		    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
-
-			spin_unlock_irq(&phba->hbalock);
-			rc = lpfc_sli_issue_mbox (phba,
-						  phba->sysfs_mbox.mbox,
-						  MBX_POLL);
-			spin_lock_irq(&phba->hbalock);
-
-		} else {
-			spin_unlock_irq(&phba->hbalock);
-			mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
-			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
-			spin_lock_irq(&phba->hbalock);
-		}
-
-		if (rc != MBX_SUCCESS) {
-			if (rc == MBX_TIMEOUT) {
-				phba->sysfs_mbox.mbox = NULL;
-			}
-			sysfs_mbox_idle(phba);
-			spin_unlock_irq(&phba->hbalock);
-			return  (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
-		}
-		phba->sysfs_mbox.state = SMBOX_READING;
-	}
-	else if (phba->sysfs_mbox.offset != off ||
-		 phba->sysfs_mbox.state  != SMBOX_READING) {
-		printk(KERN_WARNING  "mbox_read: Bad State\n");
-		sysfs_mbox_idle(phba);
-		spin_unlock_irq(&phba->hbalock);
-		return -EAGAIN;
-	}
-
-	memcpy(buf, (uint8_t *) &pmb + off, count);
-
-	phba->sysfs_mbox.offset = off + count;
-
-	if (phba->sysfs_mbox.offset == MAILBOX_CMD_SIZE)
-		sysfs_mbox_idle(phba);
-
-	spin_unlock_irq(&phba->hbalock);
-
-	return count;
+	return -EPERM;
 }
 
 static struct bin_attribute sysfs_mbox_attr = {
@@ -4429,8 +4290,13 @@ lpfc_get_host_port_state(struct Scsi_Host *shost)
 		case LPFC_LINK_UP:
 		case LPFC_CLEAR_LA:
 		case LPFC_HBA_READY:
-			/* Links up, beyond this port_type reports state */
-			fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+			/* Links up, reports port state accordingly */
+			if (vport->port_state < LPFC_VPORT_READY)
+				fc_host_port_state(shost) =
+							FC_PORTSTATE_BYPASSED;
+			else
+				fc_host_port_state(shost) =
+							FC_PORTSTATE_ONLINE;
 			break;
 		case LPFC_HBA_ERROR:
 			fc_host_port_state(shost) = FC_PORTSTATE_ERROR;

+ 333 - 99
drivers/scsi/lpfc/lpfc_bsg.c

@@ -916,9 +916,11 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 				} else {
 					switch (cmd) {
 					case ELX_LOOPBACK_DATA:
-						diag_cmd_data_free(phba,
-						(struct lpfc_dmabufext *)
-							dmabuf);
+						if (phba->sli_rev <
+						    LPFC_SLI_REV4)
+							diag_cmd_data_free(phba,
+							(struct lpfc_dmabufext
+							 *)dmabuf);
 						break;
 					case ELX_LOOPBACK_XRI_SETUP:
 						if ((phba->sli_rev ==
@@ -1000,7 +1002,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 error_ct_unsol_exit:
 	if (!list_empty(&head))
 		list_del(&head);
-	if (evt_req_id == SLI_CT_ELX_LOOPBACK)
+	if ((phba->sli_rev < LPFC_SLI_REV4) &&
+	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
 		return 0;
 	return 1;
 }
@@ -1566,7 +1569,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
 	struct diag_mode_set *loopback_mode;
 	uint32_t link_flags;
 	uint32_t timeout;
-	LPFC_MBOXQ_t *pmboxq;
+	LPFC_MBOXQ_t *pmboxq  = NULL;
 	int mbxstatus = MBX_SUCCESS;
 	int i = 0;
 	int rc = 0;
@@ -1615,7 +1618,6 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
 				rc = -ETIMEDOUT;
 				goto loopback_mode_exit;
 			}
-
 			msleep(10);
 		}
 
@@ -1635,7 +1637,9 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
 		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
 			rc = -ENODEV;
 		else {
+			spin_lock_irq(&phba->hbalock);
 			phba->link_flag |= LS_LOOPBACK_MODE;
+			spin_unlock_irq(&phba->hbalock);
 			/* wait for the link attention interrupt */
 			msleep(100);
 
@@ -1659,7 +1663,7 @@ loopback_mode_exit:
 	/*
 	 * Let SLI layer release mboxq if mbox command completed after timeout.
 	 */
-	if (mbxstatus != MBX_TIMEOUT)
+	if (pmboxq && mbxstatus != MBX_TIMEOUT)
 		mempool_free(pmboxq, phba->mbox_mem_pool);
 
 job_error:
@@ -1700,11 +1704,16 @@ lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
 		rc = -ENOMEM;
 		goto link_diag_state_set_out;
 	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
+			diag, phba->sli4_hba.lnk_info.lnk_tp,
+			phba->sli4_hba.lnk_info.lnk_no);
+
 	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
 	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
-	       phba->sli4_hba.link_state.number);
+	       phba->sli4_hba.lnk_info.lnk_no);
 	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
-	       phba->sli4_hba.link_state.type);
+	       phba->sli4_hba.lnk_info.lnk_tp);
 	if (diag)
 		bf_set(lpfc_mbx_set_diag_state_diag,
 		       &link_diag_state->u.req, 1);
@@ -1726,6 +1735,79 @@ link_diag_state_set_out:
 	return rc;
 }
 
+/**
+ * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is responsible for issuing an sli4 mailbox command for setting
+ * up the internal loopback diagnostic.
+ */
+static int
+lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *pmboxq;
+	uint32_t req_len, alloc_len;
+	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
+	int mbxstatus = MBX_SUCCESS, rc = 0;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq)
+		return -ENOMEM;
+	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
+		   sizeof(struct lpfc_sli4_cfg_mhdr));
+	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
+				req_len, LPFC_SLI4_MBX_EMBED);
+	if (alloc_len != req_len) {
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+		return -ENOMEM;
+	}
+	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
+	bf_set(lpfc_mbx_set_diag_state_link_num,
+	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
+	bf_set(lpfc_mbx_set_diag_state_link_type,
+	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
+	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
+	       LPFC_DIAG_LOOPBACK_TYPE_SERDES);
+
+	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3127 Failed setup loopback mode mailbox "
+				"command, rc:x%x, status:x%x\n", mbxstatus,
+				pmboxq->u.mb.mbxStatus);
+		rc = -ENODEV;
+	}
+	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
+ * @phba: Pointer to HBA context object.
+ *
+ * This function sets up SLI4 FC port registrations for a diagnostic run, which
+ * includes all the rpis, vfi, and also vpi.
+ */
+static int
+lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
+{
+	int rc;
+
+	if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3136 Port still had vfi registered: "
+				"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
+				phba->pport->fc_myDID, phba->fcf.fcfi,
+				phba->sli4_hba.vfi_ids[phba->pport->vfi],
+				phba->vpi_ids[phba->pport->vpi]);
+		return -EINVAL;
+	}
+	rc = lpfc_issue_reg_vfi(phba->pport);
+	return rc;
+}
+
 /**
  * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
  * @phba: Pointer to HBA context object.
@@ -1738,10 +1820,8 @@ static int
 lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
 {
 	struct diag_mode_set *loopback_mode;
-	uint32_t link_flags, timeout, req_len, alloc_len;
-	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
-	LPFC_MBOXQ_t *pmboxq = NULL;
-	int mbxstatus = MBX_SUCCESS, i, rc = 0;
+	uint32_t link_flags, timeout;
+	int i, rc = 0;
 
 	/* no data to return just the return code */
 	job->reply->reply_payload_rcv_len = 0;
@@ -1762,65 +1842,100 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
 	if (rc)
 		goto job_error;
 
+	/* indicate we are in loopback diagnostic mode */
+	spin_lock_irq(&phba->hbalock);
+	phba->link_flag |= LS_LOOPBACK_MODE;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* reset port to start from scratch */
+	rc = lpfc_selective_reset(phba);
+	if (rc)
+		goto job_error;
+
 	/* bring the link to diagnostic mode */
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"3129 Bring link to diagnostic state.\n");
 	loopback_mode = (struct diag_mode_set *)
 		job->request->rqst_data.h_vendor.vendor_cmd;
 	link_flags = loopback_mode->type;
 	timeout = loopback_mode->timeout * 100;
 
 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
-	if (rc)
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3130 Failed to bring link to diagnostic "
+				"state, rc:x%x\n", rc);
 		goto loopback_mode_exit;
+	}
 
 	/* wait for link down before proceeding */
 	i = 0;
 	while (phba->link_state != LPFC_LINK_DOWN) {
 		if (i++ > timeout) {
 			rc = -ETIMEDOUT;
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"3131 Timeout waiting for link to "
+					"diagnostic mode, timeout:%d ms\n",
+					timeout * 10);
 			goto loopback_mode_exit;
 		}
 		msleep(10);
 	}
+
 	/* set up loopback mode */
-	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-	if (!pmboxq) {
-		rc = -ENOMEM;
-		goto loopback_mode_exit;
-	}
-	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
-		   sizeof(struct lpfc_sli4_cfg_mhdr));
-	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
-				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
-				req_len, LPFC_SLI4_MBX_EMBED);
-	if (alloc_len != req_len) {
-		rc = -ENOMEM;
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"3132 Set up loopback mode:x%x\n", link_flags);
+
+	if (link_flags == INTERNAL_LOOP_BACK)
+		rc = lpfc_sli4_bsg_set_internal_loopback(phba);
+	else if (link_flags == EXTERNAL_LOOP_BACK)
+		rc = lpfc_hba_init_link_fc_topology(phba,
+						    FLAGS_TOPOLOGY_MODE_PT_PT,
+						    MBX_NOWAIT);
+	else {
+		rc = -EINVAL;
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"3141 Loopback mode:x%x not supported\n",
+				link_flags);
 		goto loopback_mode_exit;
 	}
-	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
-	bf_set(lpfc_mbx_set_diag_state_link_num,
-	       &link_diag_loopback->u.req, phba->sli4_hba.link_state.number);
-	bf_set(lpfc_mbx_set_diag_state_link_type,
-	       &link_diag_loopback->u.req, phba->sli4_hba.link_state.type);
-	if (link_flags == INTERNAL_LOOP_BACK)
-		bf_set(lpfc_mbx_set_diag_lpbk_type,
-		       &link_diag_loopback->u.req,
-		       LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
-	else
-		bf_set(lpfc_mbx_set_diag_lpbk_type,
-		       &link_diag_loopback->u.req,
-		       LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL);
 
-	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
-	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
-		rc = -ENODEV;
-	else {
-		phba->link_flag |= LS_LOOPBACK_MODE;
+	if (!rc) {
 		/* wait for the link attention interrupt */
 		msleep(100);
 		i = 0;
+		while (phba->link_state < LPFC_LINK_UP) {
+			if (i++ > timeout) {
+				rc = -ETIMEDOUT;
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"3137 Timeout waiting for link up "
+					"in loopback mode, timeout:%d ms\n",
+					timeout * 10);
+				break;
+			}
+			msleep(10);
+		}
+	}
+
+	/* port resource registration setup for loopback diagnostic */
+	if (!rc) {
+		/* set up a non-zero myDID for loopback test */
+		phba->pport->fc_myDID = 1;
+		rc = lpfc_sli4_diag_fcport_reg_setup(phba);
+	} else
+		goto loopback_mode_exit;
+
+	if (!rc) {
+		/* wait for the port ready */
+		msleep(100);
+		i = 0;
 		while (phba->link_state != LPFC_HBA_READY) {
 			if (i++ > timeout) {
 				rc = -ETIMEDOUT;
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"3133 Timeout waiting for port "
+					"loopback mode ready, timeout:%d ms\n",
+					timeout * 10);
 				break;
 			}
 			msleep(10);
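
The function above repeats the same poll-with-timeout idiom three times (link down, link up, port ready): the bsg-supplied timeout is scaled by 100 and each pass sleeps 10 ms, bounding the wait to roughly timeout seconds. The idiom factored out as a sketch (hypothetical helper, not in the driver; the state comparison varies per loop):

	/* Poll until the HBA reaches 'target' link state, sleeping 10 ms
	 * per iteration; 'ticks' is loopback_mode->timeout * 100. */
	static int lpfc_wait_link_state(struct lpfc_hba *phba,
					uint32_t target, uint32_t ticks)
	{
		uint32_t i = 0;

		while (phba->link_state != target) {
			if (i++ > ticks)
				return -ETIMEDOUT;
			msleep(10);
		}
		return 0;
	}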
@@ -1828,14 +1943,14 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
 	}
 
 loopback_mode_exit:
+	/* clear loopback diagnostic mode */
+	if (rc) {
+		spin_lock_irq(&phba->hbalock);
+		phba->link_flag &= ~LS_LOOPBACK_MODE;
+		spin_unlock_irq(&phba->hbalock);
+	}
 	lpfc_bsg_diag_mode_exit(phba);
 
-	/*
-	 * Let SLI layer release mboxq if mbox command completed after timeout.
-	 */
-	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
-		mempool_free(pmboxq, phba->mbox_mem_pool);
-
 job_error:
 	/* make error code available to userspace */
 	job->reply->result = rc;
@@ -1879,7 +1994,6 @@ lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
 		rc = -ENODEV;
 
 	return rc;
-
 }
 
 /**
@@ -1895,7 +2009,9 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
 	struct Scsi_Host *shost;
 	struct lpfc_vport *vport;
 	struct lpfc_hba *phba;
-	int rc;
+	struct diag_mode_set *loopback_mode_end_cmd;
+	uint32_t timeout;
+	int rc, i;
 
 	shost = job->shost;
 	if (!shost)
@@ -1913,11 +2029,47 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
 	    LPFC_SLI_INTF_IF_TYPE_2)
 		return -ENODEV;
 
+	/* clear loopback diagnostic mode */
+	spin_lock_irq(&phba->hbalock);
+	phba->link_flag &= ~LS_LOOPBACK_MODE;
+	spin_unlock_irq(&phba->hbalock);
+	loopback_mode_end_cmd = (struct diag_mode_set *)
+			job->request->rqst_data.h_vendor.vendor_cmd;
+	timeout = loopback_mode_end_cmd->timeout * 100;
+
 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3139 Failed to bring link to diagnostic "
+				"state, rc:x%x\n", rc);
+		goto loopback_mode_end_exit;
+	}
 
-	if (!rc)
-		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+	/* wait for link down before proceeding */
+	i = 0;
+	while (phba->link_state != LPFC_LINK_DOWN) {
+		if (i++ > timeout) {
+			rc = -ETIMEDOUT;
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"3140 Timeout waiting for link to "
+					"diagnostic mode_end, timeout:%d ms\n",
+					timeout * 10);
+			/* there is nothing much we can do here */
+			break;
+		}
+		msleep(10);
+	}
+
+	/* reset port resource registrations */
+	rc = lpfc_selective_reset(phba);
+	phba->pport->fc_myDID = 0;
 
+loopback_mode_end_exit:
+	/* make return code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace if no error */
+	if (rc == 0)
+		job->job_done(job);
 	return rc;
 }
 
@@ -2012,9 +2164,9 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
 	}
 	run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
 	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
-	       phba->sli4_hba.link_state.number);
+	       phba->sli4_hba.lnk_info.lnk_no);
 	bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
-	       phba->sli4_hba.link_state.type);
+	       phba->sli4_hba.lnk_info.lnk_tp);
 	bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
 	       link_diag_test_cmd->test_id);
 	bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
@@ -2091,10 +2243,18 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
 	if (!mbox)
 		return -ENOMEM;
 
-	if (phba->sli_rev == LPFC_SLI_REV4)
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
+				(uint8_t *)&phba->pport->fc_sparam,
+				mbox, *rpi);
+	else {
 		*rpi = lpfc_sli4_alloc_rpi(phba);
-	status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
-			      (uint8_t *)&phba->pport->fc_sparam, mbox, *rpi);
+		status = lpfc_reg_rpi(phba, phba->pport->vpi,
+				phba->pport->fc_myDID,
+				(uint8_t *)&phba->pport->fc_sparam,
+				mbox, *rpi);
+	}
+
 	if (status) {
 		mempool_free(mbox, phba->mbox_mem_pool);
 		if (phba->sli_rev == LPFC_SLI_REV4)
@@ -2117,7 +2277,8 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
 		return -ENODEV;
 	}
 
-	*rpi = mbox->u.mb.un.varWords[0];
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		*rpi = mbox->u.mb.un.varWords[0];
 
 	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
 	kfree(dmabuff);
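
The split above captures the central SLI3/SLI4 difference in RPI handling: SLI3 hands the registered RPI back in mailbox varWords[0], while SLI4 allocates a logical RPI up front and translates it to the hardware value through the rpi_ids[] table, just as the unregister path in the next hunk does. In sketch form:

	/* Logical-to-physical RPI translation; only SLI4 indirects. */
	uint16_t hw_rpi = (phba->sli_rev < LPFC_SLI_REV4) ?
			  rpi : phba->sli4_hba.rpi_ids[rpi];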
@@ -2142,7 +2303,12 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
 	if (mbox == NULL)
 		return -ENOMEM;
 
-	lpfc_unreg_login(phba, 0, rpi, mbox);
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		lpfc_unreg_login(phba, 0, rpi, mbox);
+	else
+		lpfc_unreg_login(phba, phba->pport->vpi,
+				 phba->sli4_hba.rpi_ids[rpi], mbox);
+
 	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
 
 	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
@@ -2630,15 +2796,15 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 	uint32_t full_size;
 	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
 	uint16_t rpi = 0;
-	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
-	IOCB_t *cmd, *rsp;
+	struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
+	IOCB_t *cmd, *rsp = NULL;
 	struct lpfc_sli_ct_request *ctreq;
 	struct lpfc_dmabuf *txbmp;
 	struct ulp_bde64 *txbpl = NULL;
 	struct lpfc_dmabufext *txbuffer = NULL;
 	struct list_head head;
 	struct lpfc_dmabuf  *curr;
-	uint16_t txxri, rxxri;
+	uint16_t txxri = 0, rxxri;
 	uint32_t num_bde;
 	uint8_t *ptr = NULL, *rx_databuf = NULL;
 	int rc = 0;
@@ -2665,7 +2831,6 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 		rc = -EINVAL;
 		goto loopback_test_exit;
 	}
-
 	diag_mode = (struct diag_mode_test *)
 		job->request->rqst_data.h_vendor.vendor_cmd;
 
@@ -2720,18 +2885,19 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 	if (rc)
 		goto loopback_test_exit;
 
-	rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
-	if (rc) {
-		lpfcdiag_loop_self_unreg(phba, rpi);
-		goto loopback_test_exit;
-	}
+	if (phba->sli_rev < LPFC_SLI_REV4) {
+		rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
+		if (rc) {
+			lpfcdiag_loop_self_unreg(phba, rpi);
+			goto loopback_test_exit;
+		}
 
-	rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
-	if (rc) {
-		lpfcdiag_loop_self_unreg(phba, rpi);
-		goto loopback_test_exit;
+		rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
+		if (rc) {
+			lpfcdiag_loop_self_unreg(phba, rpi);
+			goto loopback_test_exit;
+		}
 	}
-
 	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
 				SLI_CT_ELX_LOOPBACK);
 	if (!evt) {
@@ -2746,7 +2912,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
 	cmdiocbq = lpfc_sli_get_iocbq(phba);
-	rspiocbq = lpfc_sli_get_iocbq(phba);
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		rspiocbq = lpfc_sli_get_iocbq(phba);
 	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
 
 	if (txbmp) {
@@ -2759,14 +2926,18 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 		}
 	}
 
-	if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer ||
-		!txbmp->virt) {
+	if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
+		rc = -ENOMEM;
+		goto err_loopback_test_exit;
+	}
+	if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
 		rc = -ENOMEM;
 		goto err_loopback_test_exit;
 	}
 
 	cmd = &cmdiocbq->iocb;
-	rsp = &rspiocbq->iocb;
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		rsp = &rspiocbq->iocb;
 
 	INIT_LIST_HEAD(&head);
 	list_add_tail(&head, &txbuffer->dma.list);
@@ -2796,7 +2967,6 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 	list_del(&head);
 
 	/* Build the XMIT_SEQUENCE iocb */
-
 	num_bde = (uint32_t)txbuffer->flag;
 
 	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
@@ -2813,16 +2983,27 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 	cmd->ulpBdeCount = 1;
 	cmd->ulpLe = 1;
 	cmd->ulpClass = CLASS3;
-	cmd->ulpContext = txxri;
 
+	if (phba->sli_rev < LPFC_SLI_REV4) {
+		cmd->ulpContext = txxri;
+	} else {
+		cmd->un.xseq64.bdl.ulpIoTag32 = 0;
+		cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
+		cmdiocbq->context3 = txbmp;
+		cmdiocbq->sli4_xritag = NO_XRI;
+		cmd->unsli3.rcvsli3.ox_id = 0xffff;
+	}
 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
 	cmdiocbq->vport = phba->pport;
-
 	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 					     rspiocbq, (phba->fc_ratov * 2) +
 					     LPFC_DRVR_TIMEOUT);
 
-	if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
+	if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) &&
+					   (rsp->ulpStatus != IOCB_SUCCESS))) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"3126 Failed loopback test issue iocb: "
+				"iocb_stat:x%x\n", iocb_stat);
 		rc = -EIO;
 		goto err_loopback_test_exit;
 	}
@@ -2832,9 +3013,12 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 		evt->wq, !list_empty(&evt->events_to_see),
 		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
 	evt->waiting = 0;
-	if (list_empty(&evt->events_to_see))
+	if (list_empty(&evt->events_to_see)) {
 		rc = (time_left) ? -EINTR : -ETIMEDOUT;
-	else {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"3125 Not receiving unsolicited event, "
+				"rc:x%x\n", rc);
+	} else {
 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
 		list_move(evt->events_to_see.prev, &evt->events_to_get);
 		evdat = list_entry(evt->events_to_get.prev,
@@ -2891,7 +3075,7 @@ loopback_test_exit:
 	job->reply->result = rc;
 	job->dd_data = NULL;
 	/* complete the job back to userspace if no error */
-	if (rc == 0)
+	if (rc == IOCB_SUCCESS)
 		job->job_done(job);
 	return rc;
 }
@@ -3078,7 +3262,9 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
 			&& (mb->un.varWords[1] == 1)) {
 			phba->wait_4_mlo_maint_flg = 1;
 		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
+			spin_lock_irq(&phba->hbalock);
 			phba->link_flag &= ~LS_LOOPBACK_MODE;
+			spin_unlock_irq(&phba->hbalock);
 			phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
 		}
 		break;
@@ -3140,6 +3326,9 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 	unsigned long flags;
 	uint32_t size;
 	int rc = 0;
+	struct lpfc_dmabuf *dmabuf;
+	struct lpfc_sli_config_mbox *sli_cfg_mbx;
+	uint8_t *pmbx;
 
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	dd_data = pmboxq->context1;
@@ -3156,7 +3345,19 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 	 */
 	pmb = (uint8_t *)&pmboxq->u.mb;
 	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+	/* Copy the byte swapped response mailbox back to the user */
 	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
+	/* if there is any non-embedded extended data copy that too */
+	dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
+	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
+	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
+		pmbx = (uint8_t *)dmabuf->virt;
+		/* byte swap the extended data following the mailbox command */
+		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
+			&pmbx[sizeof(MAILBOX_t)],
+			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
+	}
 
 	job = dd_data->context_un.mbox.set_job;
 	if (job) {
@@ -3519,6 +3720,18 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
 	/* state change */
 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
 
+	/*
+	 * Non-embedded mailbox subcommand data gets byte swapped here because
+	 * the lower level driver code only does the first 64 mailbox words.
+	 */
+	if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
+	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
+		(nemb_tp == nemb_mse))
+		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
+			&pmbx[sizeof(MAILBOX_t)],
+				sli_cfg_mbx->un.sli_config_emb0_subsys.
+					mse[0].buf_len);
+
 	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
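
The two byte-swap hunks above are matching halves of one fix: for non-embedded SLI_CONFIG commands the lower-level driver only converts the first 64 mailbox words, so the extended data following the MAILBOX_t must be swapped explicitly, once before issue and once in the completion handler. lpfc_sli_pcimem_bcopy() converts 32-bit words in place, roughly along these lines (simplified sketch; src and dest may alias):

	/* Per-word endian conversion of 'cnt' bytes. */
	static void pcimem_bcopy_sketch(uint32_t *src, uint32_t *dest,
					uint32_t cnt)
	{
		uint32_t i;

		for (i = 0; i < cnt / sizeof(uint32_t); i++)
			dest[i] = le32_to_cpu(src[i]);
	}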
@@ -3575,7 +3788,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
 			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
-					"2953 Handled SLI_CONFIG(mse) wr, "
+					"2953 Failed SLI_CONFIG(mse) wr, "
 					"ext_buf_cnt(%d) out of range(%d)\n",
 					ext_buf_cnt,
 					LPFC_MBX_SLI_CONFIG_MAX_MSE);
@@ -3593,7 +3806,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
 		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
-					"2954 Handled SLI_CONFIG(hbd) wr, "
+					"2954 Failed SLI_CONFIG(hbd) wr, "
 					"ext_buf_cnt(%d) out of range(%d)\n",
 					ext_buf_cnt,
 					LPFC_MBX_SLI_CONFIG_MAX_HBD);
@@ -3687,6 +3900,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
 				"2956 Failed to issue SLI_CONFIG ext-buffer "
 				"maibox command, rc:x%x\n", rc);
 		rc = -EPIPE;
+		goto job_error;
 	}
 
	/* wait for additional external buffers */
@@ -3721,7 +3935,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 	uint32_t opcode;
 	int rc = SLI_CONFIG_NOT_HANDLED;
 
-	/* state change */
+	/* state change on new multi-buffer pass-through mailbox command */
 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
 
 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
@@ -3752,18 +3966,36 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 				break;
 			default:
 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
-						"2959 Not handled SLI_CONFIG "
+						"2959 Reject SLI_CONFIG "
 						"subsys_fcoe, opcode:x%x\n",
 						opcode);
-				rc = SLI_CONFIG_NOT_HANDLED;
+				rc = -EPERM;
+				break;
+			}
+		} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
+			switch (opcode) {
+			case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+						"3106 Handled SLI_CONFIG "
+						"subsys_fcoe, opcode:x%x\n",
+						opcode);
+				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+							nemb_mse, dmabuf);
+				break;
+			default:
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+						"3107 Reject SLI_CONFIG "
+						"subsys_fcoe, opcode:x%x\n",
+						opcode);
+				rc = -EPERM;
 				break;
 			}
 		} else {
 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
-					"2977 Handled SLI_CONFIG "
+					"2977 Reject SLI_CONFIG "
 					"subsys:x%d, opcode:x%x\n",
 					subsys, opcode);
-			rc = SLI_CONFIG_NOT_HANDLED;
+			rc = -EPERM;
 		}
 	} else {
 		subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
@@ -3799,12 +4031,17 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 			}
 		} else {
 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
-					"2978 Handled SLI_CONFIG "
+					"2978 Not handled SLI_CONFIG "
 					"subsys:x%d, opcode:x%x\n",
 					subsys, opcode);
 			rc = SLI_CONFIG_NOT_HANDLED;
 		}
 	}
+
+	/* reset state if the new multi-buffer mailbox command was not handled */
+	if (rc != SLI_CONFIG_HANDLED)
+		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
+
 	return rc;
 }
 
@@ -4262,11 +4499,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 
 	/* extended mailbox commands will need an extended buffer */
 	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
-		/* any data for the device? */
-		if (mbox_req->inExtWLen) {
-			from = pmbx;
-			ext = from + sizeof(MAILBOX_t);
-		}
+		from = pmbx;
+		ext = from + sizeof(MAILBOX_t);
 		pmboxq->context2 = ext;
 		pmboxq->in_ext_byte_len =
 			mbox_req->inExtWLen * sizeof(uint32_t);

+ 2 - 1
drivers/scsi/lpfc/lpfc_bsg.h

@@ -96,7 +96,7 @@ struct get_mgmt_rev {
 };
 
 #define MANAGEMENT_MAJOR_REV   1
-#define MANAGEMENT_MINOR_REV   0
+#define MANAGEMENT_MINOR_REV   1
 
 /* the MgmtRevInfo structure */
 struct MgmtRevInfo {
@@ -248,6 +248,7 @@ struct lpfc_sli_config_emb1_subsys {
 #define COMN_OPCODE_WRITE_OBJECT	0xAC
 #define COMN_OPCODE_READ_OBJECT_LIST	0xAD
 #define COMN_OPCODE_DELETE_OBJECT	0xAE
+#define COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES	0x79
 	uint32_t timeout;
 	uint32_t request_length;
 	uint32_t word9;

+ 3 - 2
drivers/scsi/lpfc/lpfc_compat.h

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2005 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -82,7 +82,8 @@ lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes)
 static inline void
 lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes)
 {
-	__iowrite32_copy(dest, src, bytes);
+	/* convert bytes in argument list to word count for copy function */
+	__iowrite32_copy(dest, src, bytes / sizeof(uint32_t));
 }
 
 static inline void
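
The one-line fix above corrects a unit mismatch: __iowrite32_copy() takes its count in 32-bit words, not bytes, so the old call copied four times too much data to the SLIM region. A hedged usage sketch (buffer name and size are illustrative):

	uint32_t mbox_cmd[MAILBOX_CMD_SIZE / sizeof(uint32_t)];

	/* Copies sizeof(mbox_cmd) bytes, i.e. sizeof(mbox_cmd) / 4 words. */
	lpfc_memcpy_to_slim(phba->MBslimaddr, mbox_cmd, sizeof(mbox_cmd));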

+ 11 - 2
drivers/scsi/lpfc/lpfc_crtn.h

@@ -26,7 +26,7 @@ void lpfc_sli_read_link_ste(struct lpfc_hba *);
 void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
 void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
 int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
-int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
+int lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *, struct lpfcMboxq *);
 void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 
@@ -78,6 +78,7 @@ void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
@@ -106,7 +107,7 @@ void lpfc_cleanup(struct lpfc_vport *);
 void lpfc_disc_timeout(unsigned long);
 
 struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
-
+struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
 void lpfc_worker_wake_up(struct lpfc_hba *);
 int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
 int lpfc_do_work(void *);
@@ -453,3 +454,11 @@ int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
 uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *);
 int lpfc_sli4_queue_create(struct lpfc_hba *);
 void lpfc_sli4_queue_destroy(struct lpfc_hba *);
+void lpfc_sli4_abts_err_handler(struct lpfc_hba *, struct lpfc_nodelist *,
+				struct sli4_wcqe_xri_aborted *);
+int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t);
+int lpfc_issue_reg_vfi(struct lpfc_vport *);
+int lpfc_issue_unreg_vfi(struct lpfc_vport *);
+int lpfc_selective_reset(struct lpfc_hba *);
+int lpfc_sli4_read_config(struct lpfc_hba *phba);
+int lpfc_scsi_buf_update(struct lpfc_hba *phba);

+ 113 - 59
drivers/scsi/lpfc/lpfc_debugfs.c

@@ -1997,7 +1997,8 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
 	/* Get slow-path event queue information */
 	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"Slow-path EQ information:\n");
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+	if (phba->sli4_hba.sp_eq) {
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"\tEQID[%02d], "
 			"QE-COUNT[%04d], QE-SIZE[%04d], "
 			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2006,12 +2007,17 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
 			phba->sli4_hba.sp_eq->entry_size,
 			phba->sli4_hba.sp_eq->host_index,
 			phba->sli4_hba.sp_eq->hba_index);
+	}
 
 	/* Get fast-path event queue information */
 	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"Fast-path EQ information:\n");
-	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+	if (phba->sli4_hba.fp_eq) {
+		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+		     fcp_qidx++) {
+			if (phba->sli4_hba.fp_eq[fcp_qidx]) {
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
 				"\tEQID[%02d], "
 				"QE-COUNT[%04d], QE-SIZE[%04d], "
 				"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2020,16 +2026,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
 				phba->sli4_hba.fp_eq[fcp_qidx]->entry_size,
 				phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
 				phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
+			}
+		}
 	}
 	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
 
 	/* Get mailbox complete queue information */
 	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"Slow-path MBX CQ information:\n");
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+	if (phba->sli4_hba.mbx_cq) {
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"Associated EQID[%02d]:\n",
 			phba->sli4_hba.mbx_cq->assoc_qid);
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"\tCQID[%02d], "
 			"QE-COUNT[%04d], QE-SIZE[%04d], "
 			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2038,14 +2047,16 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
 			phba->sli4_hba.mbx_cq->entry_size,
 			phba->sli4_hba.mbx_cq->host_index,
 			phba->sli4_hba.mbx_cq->hba_index);
+	}
 
 	/* Get slow-path complete queue information */
 	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"Slow-path ELS CQ information:\n");
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+	if (phba->sli4_hba.els_cq) {
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"Associated EQID[%02d]:\n",
 			phba->sli4_hba.els_cq->assoc_qid);
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"\tCQID [%02d], "
 			"QE-COUNT[%04d], QE-SIZE[%04d], "
 			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2054,16 +2065,21 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
 			phba->sli4_hba.els_cq->entry_size,
 			phba->sli4_hba.els_cq->host_index,
 			phba->sli4_hba.els_cq->hba_index);
+	}
 
 	/* Get fast-path complete queue information */
 	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"Fast-path FCP CQ information:\n");
 	fcp_qidx = 0;
-	do {
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+	if (phba->sli4_hba.fcp_cq) {
+		do {
+			if (phba->sli4_hba.fcp_cq[fcp_qidx]) {
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
 				"Associated EQID[%02d]:\n",
 				phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
 				"\tCQID[%02d], "
 				"QE-COUNT[%04d], QE-SIZE[%04d], "
 				"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2072,16 +2088,20 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
 				phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
 				phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
 				phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
-	} while (++fcp_qidx < phba->cfg_fcp_eq_count);
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+			}
+		} while (++fcp_qidx < phba->cfg_fcp_eq_count);
+		len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+	}
 
 	/* Get mailbox queue information */
 	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"Slow-path MBX MQ information:\n");
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+	if (phba->sli4_hba.mbx_wq) {
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"Associated CQID[%02d]:\n",
 			phba->sli4_hba.mbx_wq->assoc_qid);
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"\tWQID[%02d], "
 			"QE-COUNT[%04d], QE-SIZE[%04d], "
 			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2090,14 +2110,16 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
 			phba->sli4_hba.mbx_wq->entry_size,
 			phba->sli4_hba.mbx_wq->host_index,
 			phba->sli4_hba.mbx_wq->hba_index);
+	}
 
 	/* Get slow-path work queue information */
 	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"Slow-path ELS WQ information:\n");
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+	if (phba->sli4_hba.els_wq) {
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"Associated CQID[%02d]:\n",
 			phba->sli4_hba.els_wq->assoc_qid);
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"\tWQID[%02d], "
 			"QE-COUNT[%04d], QE-SIZE[%04d], "
 			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2106,15 +2128,22 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
 			phba->sli4_hba.els_wq->entry_size,
 			phba->sli4_hba.els_wq->host_index,
 			phba->sli4_hba.els_wq->hba_index);
+	}
 
 	/* Get fast-path work queue information */
 	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"Fast-path FCP WQ information:\n");
-	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) {
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+	if (phba->sli4_hba.fcp_wq) {
+		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
+		     fcp_qidx++) {
+			if (!phba->sli4_hba.fcp_wq[fcp_qidx])
+				continue;
+			len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
 				"Associated CQID[%02d]:\n",
 				phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
 				"\tWQID[%02d], "
 				"QE-COUNT[%04d], WQE-SIZE[%04d], "
 				"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2123,16 +2152,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
 				phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size,
 				phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
 				phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
+		}
+		len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
 	}
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
 
 	/* Get receive queue information */
 	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"Slow-path RQ information:\n");
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+	if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"Associated CQID[%02d]:\n",
 			phba->sli4_hba.hdr_rq->assoc_qid);
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"\tHQID[%02d], "
 			"QE-COUNT[%04d], QE-SIZE[%04d], "
 			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2141,7 +2173,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
 			phba->sli4_hba.hdr_rq->entry_size,
 			phba->sli4_hba.hdr_rq->host_index,
 			phba->sli4_hba.hdr_rq->hba_index);
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"\tDQID[%02d], "
 			"QE-COUNT[%04d], QE-SIZE[%04d], "
 			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2150,7 +2182,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
 			phba->sli4_hba.dat_rq->entry_size,
 			phba->sli4_hba.dat_rq->host_index,
 			phba->sli4_hba.dat_rq->hba_index);
-
+	}
 	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
 }
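
Every branch above leans on the same bounded-append idiom, now wrapped in NULL checks so that dumping queue info on a half-torn-down SLI4 port no longer dereferences missing queues. The idiom in isolation:

	/* Append into a fixed buffer; snprintf() never writes past
	 * size - len, and 'len' tracks the running offset. */
	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
			"\tEQID[%02d]\n", qid);

One caveat: snprintf() returns the length that would have been written, so if len ever exceeded the buffer size the size argument would wrap; the buffer here is sized so that in practice it does not.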
 
@@ -2360,7 +2392,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 	switch (quetp) {
 	case LPFC_IDIAG_EQ:
 		/* Slow-path event queue */
-		if (phba->sli4_hba.sp_eq->queue_id == queid) {
+		if (phba->sli4_hba.sp_eq &&
+		    phba->sli4_hba.sp_eq->queue_id == queid) {
 			/* Sanity check */
 			rc = lpfc_idiag_que_param_check(
 					phba->sli4_hba.sp_eq, index, count);
@@ -2370,23 +2403,29 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 			goto pass_check;
 		}
 		/* Fast-path event queue */
-		for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
-			if (phba->sli4_hba.fp_eq[qidx]->queue_id == queid) {
-				/* Sanity check */
-				rc = lpfc_idiag_que_param_check(
+		if (phba->sli4_hba.fp_eq) {
+			for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
+				if (phba->sli4_hba.fp_eq[qidx] &&
+				    phba->sli4_hba.fp_eq[qidx]->queue_id ==
+				    queid) {
+					/* Sanity check */
+					rc = lpfc_idiag_que_param_check(
 						phba->sli4_hba.fp_eq[qidx],
 						index, count);
-				if (rc)
-					goto error_out;
-				idiag.ptr_private = phba->sli4_hba.fp_eq[qidx];
-				goto pass_check;
+					if (rc)
+						goto error_out;
+					idiag.ptr_private =
+						phba->sli4_hba.fp_eq[qidx];
+					goto pass_check;
+				}
 			}
 		}
 		goto error_out;
 		break;
 	case LPFC_IDIAG_CQ:
 		/* MBX complete queue */
-		if (phba->sli4_hba.mbx_cq->queue_id == queid) {
+		if (phba->sli4_hba.mbx_cq &&
+		    phba->sli4_hba.mbx_cq->queue_id == queid) {
 			/* Sanity check */
 			rc = lpfc_idiag_que_param_check(
 					phba->sli4_hba.mbx_cq, index, count);
@@ -2396,7 +2435,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 			goto pass_check;
 		}
 		/* ELS complete queue */
-		if (phba->sli4_hba.els_cq->queue_id == queid) {
+		if (phba->sli4_hba.els_cq &&
+		    phba->sli4_hba.els_cq->queue_id == queid) {
 			/* Sanity check */
 			rc = lpfc_idiag_que_param_check(
 					phba->sli4_hba.els_cq, index, count);
@@ -2406,25 +2446,30 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 			goto pass_check;
 		}
 		/* FCP complete queue */
-		qidx = 0;
-		do {
-			if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) {
-				/* Sanity check */
-				rc = lpfc_idiag_que_param_check(
+		if (phba->sli4_hba.fcp_cq) {
+			qidx = 0;
+			do {
+				if (phba->sli4_hba.fcp_cq[qidx] &&
+				    phba->sli4_hba.fcp_cq[qidx]->queue_id ==
+				    queid) {
+					/* Sanity check */
+					rc = lpfc_idiag_que_param_check(
 						phba->sli4_hba.fcp_cq[qidx],
 						index, count);
-				if (rc)
-					goto error_out;
-				idiag.ptr_private =
+					if (rc)
+						goto error_out;
+					idiag.ptr_private =
 						phba->sli4_hba.fcp_cq[qidx];
-				goto pass_check;
-			}
-		} while (++qidx < phba->cfg_fcp_eq_count);
+					goto pass_check;
+				}
+			} while (++qidx < phba->cfg_fcp_eq_count);
+		}
 		goto error_out;
 		break;
 	case LPFC_IDIAG_MQ:
 		/* MBX work queue */
-		if (phba->sli4_hba.mbx_wq->queue_id == queid) {
+		if (phba->sli4_hba.mbx_wq &&
+		    phba->sli4_hba.mbx_wq->queue_id == queid) {
 			/* Sanity check */
 			rc = lpfc_idiag_que_param_check(
 					phba->sli4_hba.mbx_wq, index, count);
@@ -2433,10 +2478,12 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 			idiag.ptr_private = phba->sli4_hba.mbx_wq;
 			goto pass_check;
 		}
+		goto error_out;
 		break;
 	case LPFC_IDIAG_WQ:
 		/* ELS work queue */
-		if (phba->sli4_hba.els_wq->queue_id == queid) {
+		if (phba->sli4_hba.els_wq &&
+		    phba->sli4_hba.els_wq->queue_id == queid) {
 			/* Sanity check */
 			rc = lpfc_idiag_que_param_check(
 					phba->sli4_hba.els_wq, index, count);
@@ -2446,24 +2493,30 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 			goto pass_check;
 		}
 		/* FCP work queue */
-		for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
-			if (phba->sli4_hba.fcp_wq[qidx]->queue_id == queid) {
-				/* Sanity check */
-				rc = lpfc_idiag_que_param_check(
+		if (phba->sli4_hba.fcp_wq) {
+			for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
+				if (!phba->sli4_hba.fcp_wq[qidx])
+					continue;
+				if (phba->sli4_hba.fcp_wq[qidx]->queue_id ==
+				    queid) {
+					/* Sanity check */
+					rc = lpfc_idiag_que_param_check(
 						phba->sli4_hba.fcp_wq[qidx],
 						index, count);
-				if (rc)
-					goto error_out;
-				idiag.ptr_private =
-					phba->sli4_hba.fcp_wq[qidx];
-				goto pass_check;
+					if (rc)
+						goto error_out;
+					idiag.ptr_private =
+						phba->sli4_hba.fcp_wq[qidx];
+					goto pass_check;
+				}
 			}
 		}
 		goto error_out;
 		break;
 	case LPFC_IDIAG_RQ:
 		/* HDR queue */
-		if (phba->sli4_hba.hdr_rq->queue_id == queid) {
+		if (phba->sli4_hba.hdr_rq &&
+		    phba->sli4_hba.hdr_rq->queue_id == queid) {
 			/* Sanity check */
 			rc = lpfc_idiag_que_param_check(
 					phba->sli4_hba.hdr_rq, index, count);
@@ -2473,7 +2526,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 			goto pass_check;
 		}
 		/* DAT queue */
-		if (phba->sli4_hba.dat_rq->queue_id == queid) {
+		if (phba->sli4_hba.dat_rq &&
+		    phba->sli4_hba.dat_rq->queue_id == queid) {
 			/* Sanity check */
 			rc = lpfc_idiag_que_param_check(
 					phba->sli4_hba.dat_rq, index, count);

+ 124 - 90
drivers/scsi/lpfc/lpfc_els.c

@@ -421,13 +421,13 @@ fail:
  * @vport: pointer to a host virtual N_Port data structure.
  *
  * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
- * the @vport. This mailbox command is necessary for FCoE only.
+ * the @vport. This mailbox command is necessary for SLI4 port only.
  *
  * Return code
  *   0 - successfully issued REG_VFI for @vport
  *   A failure code otherwise.
  **/
-static int
+int
 lpfc_issue_reg_vfi(struct lpfc_vport *vport)
 {
 	struct lpfc_hba  *phba = vport->phba;
@@ -438,10 +438,14 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
 	int rc = 0;
 
 	sp = &phba->fc_fabparam;
-	ndlp = lpfc_findnode_did(vport, Fabric_DID);
-	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
-		rc = -ENODEV;
-		goto fail;
+	/* move forward in case of SLI4 FC port loopback test */
+	if ((phba->sli_rev == LPFC_SLI_REV4) &&
+	    !(phba->link_flag & LS_LOOPBACK_MODE)) {
+		ndlp = lpfc_findnode_did(vport, Fabric_DID);
+		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+			rc = -ENODEV;
+			goto fail;
+		}
 	}
 
 	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
@@ -486,6 +490,54 @@ fail:
 	return rc;
 }
 
+/**
+ * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
+ * the @vport. This mailbox command is necessary for SLI4 port only.
+ *
+ * Return code
+ *   0 - successfully issued UNREG_VFI for @vport
+ *   A failure code otherwise.
+ **/
+int
+lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct Scsi_Host *shost;
+	LPFC_MBOXQ_t *mboxq;
+	int rc;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+				"2556 UNREG_VFI mbox allocation failed"
+				"HBA state x%x\n", phba->pport->port_state);
+		return -ENOMEM;
+	}
+
+	lpfc_unreg_vfi(mboxq, vport);
+	mboxq->vport = vport;
+	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
+
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+				"2557 UNREG_VFI issue mbox failed rc x%x "
+				"HBA state x%x\n",
+				rc, phba->pport->port_state);
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		return -EIO;
+	}
+
+	shost = lpfc_shost_from_vport(vport);
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_VFI_REGISTERED;
+	spin_unlock_irq(shost->host_lock);
+	return 0;
+}
+
 /**
  * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
  * @vport: pointer to a host virtual N_Port data structure.
@@ -615,7 +667,9 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 					 "1816 FLOGI NPIV supported, "
 					 "response data 0x%x\n",
 					 sp->cmn.response_multiple_NPort);
+			spin_lock_irq(&phba->hbalock);
 			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
+			spin_unlock_irq(&phba->hbalock);
 		} else {
 			/* Because we asked f/w for NPIV it still expects us
			to call reg_vnpid at least for the physical host */
@@ -623,7 +677,9 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 					 LOG_ELS | LOG_VPORT,
 					 "1817 Fabric does not support NPIV "
 					 "- configuring single port mode.\n");
+			spin_lock_irq(&phba->hbalock);
 			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
+			spin_unlock_irq(&phba->hbalock);
 		}
 	}
 
@@ -686,11 +742,16 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			lpfc_do_scr_ns_plogi(phba, vport);
 		} else if (vport->fc_flag & FC_VFI_REGISTERED)
 			lpfc_issue_init_vpi(vport);
-		else
+		else {
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+					"3135 Need register VFI: (x%x/%x)\n",
+					vport->fc_prevDID, vport->fc_myDID);
 			lpfc_issue_reg_vfi(vport);
+		}
 	}
 	return 0;
 }
+
 /**
  * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
  * @vport: pointer to a host virtual N_Port data structure.
@@ -907,17 +968,16 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
 		 * alpa map would take too long otherwise.
 		 */
-		if (phba->alpa_map[0] == 0) {
+		if (phba->alpa_map[0] == 0)
 			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
-			if ((phba->sli_rev == LPFC_SLI_REV4) &&
-			    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
-			     (vport->fc_prevDID != vport->fc_myDID))) {
-				if (vport->fc_flag & FC_VFI_REGISTERED)
-					lpfc_sli4_unreg_all_rpis(vport);
-				lpfc_issue_reg_vfi(vport);
-				lpfc_nlp_put(ndlp);
-				goto out;
-			}
+		if ((phba->sli_rev == LPFC_SLI_REV4) &&
+		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
+		     (vport->fc_prevDID != vport->fc_myDID))) {
+			if (vport->fc_flag & FC_VFI_REGISTERED)
+				lpfc_sli4_unreg_all_rpis(vport);
+			lpfc_issue_reg_vfi(vport);
+			lpfc_nlp_put(ndlp);
+			goto out;
 		}
 		goto flogifail;
 	}
@@ -1075,6 +1135,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	/* Setup CSPs accordingly for Fabric */
 	sp->cmn.e_d_tov = 0;
 	sp->cmn.w2.r_a_tov = 0;
+	sp->cmn.virtual_fabric_support = 0;
 	sp->cls1.classValid = 0;
 	sp->cls2.seqDelivery = 1;
 	sp->cls3.seqDelivery = 1;
@@ -1163,8 +1224,7 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
 	spin_lock_irq(&phba->hbalock);
 	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
 		icmd = &iocb->iocb;
-		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
-		    icmd->un.elsreq64.bdl.ulpIoTag32) {
+		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
 			ndlp = (struct lpfc_nodelist *)(iocb->context1);
 			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
 			    (ndlp->nlp_DID == Fabric_DID))
@@ -3066,17 +3126,22 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	if (did == FDMI_DID)
 		retry = 1;
 
-	if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
+	if ((cmd == ELS_CMD_FLOGI) &&
 	    (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
 	    !lpfc_error_lost_link(irsp)) {
 		/* FLOGI retry policy */
 		retry = 1;
-		/* retry forever */
+		/* retry FLOGI forever */
 		maxretry = 0;
 		if (cmdiocb->retry >= 100)
 			delay = 5000;
 		else if (cmdiocb->retry >= 32)
 			delay = 1000;
+	} else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
+		/* retry FDISCs every second up to devloss */
+		retry = 1;
+		maxretry = vport->cfg_devloss_tmo;
+		delay = 1000;
 	}
 
 	cmdiocb->retry++;
@@ -3389,11 +3454,17 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
 	/*
 	 * The driver received a LOGO from the rport and has ACK'd it.
-	 * At this point, the driver is done so release the IOCB and
-	 * remove the ndlp reference.
+	 * At this point, the driver is done so release the IOCB
 	 */
 	lpfc_els_free_iocb(phba, cmdiocb);
-	lpfc_nlp_put(ndlp);
+
+	/*
+	 * Remove the ndlp reference if it's a fabric node that has
+ * sent us an unsolicited LOGO.
+	 */
+	if (ndlp->nlp_type & NLP_FABRIC)
+		lpfc_nlp_put(ndlp);
+
 	return;
 }
 
@@ -4867,23 +4938,31 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 			    sizeof(struct lpfc_name));
 
 		if (!rc) {
-			mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-			if (!mbox)
+			if (phba->sli_rev < LPFC_SLI_REV4) {
+				mbox = mempool_alloc(phba->mbox_mem_pool,
+						     GFP_KERNEL);
+				if (!mbox)
+					return 1;
+				lpfc_linkdown(phba);
+				lpfc_init_link(phba, mbox,
+					       phba->cfg_topology,
+					       phba->cfg_link_speed);
+				mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
+				mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+				mbox->vport = vport;
+				rc = lpfc_sli_issue_mbox(phba, mbox,
+							 MBX_NOWAIT);
+				lpfc_set_loopback_flag(phba);
+				if (rc == MBX_NOT_FINISHED)
+					mempool_free(mbox, phba->mbox_mem_pool);
 				return 1;
-
-			lpfc_linkdown(phba);
-			lpfc_init_link(phba, mbox,
-				       phba->cfg_topology,
-				       phba->cfg_link_speed);
-			mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
-			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-			mbox->vport = vport;
-			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
-			lpfc_set_loopback_flag(phba);
-			if (rc == MBX_NOT_FINISHED) {
-				mempool_free(mbox, phba->mbox_mem_pool);
+			} else {
+				/* abort the flogi coming back to ourselves
+				 * due to external loopback on the port.
+				 */
+				lpfc_els_abort_flogi(phba);
+				return 0;
 			}
-			return 1;
 		} else if (rc > 0) {	/* greater than */
 			spin_lock_irq(shost->host_lock);
 			vport->fc_flag |= FC_PT2PT_PLOGI;
@@ -5838,8 +5917,12 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 			vport->fc_myDID = vport->fc_prevDID;
 			if (phba->sli_rev < LPFC_SLI_REV4)
 				lpfc_issue_fabric_reglogin(vport);
-			else
+			else {
+				lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+					"3138 Need register VFI: (x%x/%x)\n",
+					vport->fc_prevDID, vport->fc_myDID);
 				lpfc_issue_reg_vfi(vport);
+			}
 		}
 	}
 	return 0;
@@ -6595,56 +6678,6 @@ dropit:
 	phba->fc_stat.elsRcvDrop++;
 }
 
-/**
- * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
- * @phba: pointer to lpfc hba data structure.
- * @vpi: host virtual N_Port identifier.
- *
- * This routine finds a vport on a HBA (referred by @phba) through a
- * @vpi. The function walks the HBA's vport list and returns the address
- * of the vport with the matching @vpi.
- *
- * Return code
- *    NULL - No vport with the matching @vpi found
- *    Otherwise - Address to the vport with the matching @vpi.
- **/
-struct lpfc_vport *
-lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
-{
-	struct lpfc_vport *vport;
-	unsigned long flags;
-	int i = 0;
-
-	/* The physical ports are always vpi 0 - translate is unnecessary. */
-	if (vpi > 0) {
-		/*
-		 * Translate the physical vpi to the logical vpi.  The
-		 * vport stores the logical vpi.
-		 */
-		for (i = 0; i < phba->max_vpi; i++) {
-			if (vpi == phba->vpi_ids[i])
-				break;
-		}
-
-		if (i >= phba->max_vpi) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
-					 "2936 Could not find Vport mapped "
-					 "to vpi %d\n", vpi);
-			return NULL;
-		}
-	}
-
-	spin_lock_irqsave(&phba->hbalock, flags);
-	list_for_each_entry(vport, &phba->port_list, listentry) {
-		if (vport->vpi == i) {
-			spin_unlock_irqrestore(&phba->hbalock, flags);
-			return vport;
-		}
-	}
-	spin_unlock_irqrestore(&phba->hbalock, flags);
-	return NULL;
-}
-
 /**
  * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
  * @phba: pointer to lpfc hba data structure.
@@ -7281,6 +7314,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	/* Setup CSPs accordingly for Fabric */
 	sp->cmn.e_d_tov = 0;
 	sp->cmn.w2.r_a_tov = 0;
+	sp->cmn.virtual_fabric_support = 0;
 	sp->cls1.classValid = 0;
 	sp->cls2.seqDelivery = 1;
 	sp->cls3.seqDelivery = 1;
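
The lpfc_els_retry hunk above splits the retry policy: FLOGI keeps retrying forever, backing off to 1 s after 32 attempts and 5 s after 100, while FDISC now retries once per second only until the devloss timeout expires. A minimal stand-alone sketch of that schedule follows; the function names and the millisecond return convention are illustrative, not driver API.

#include <stdio.h>

/* Stand-alone model of the retry schedule; names are illustrative.
 * Returns the delay in milliseconds before the next attempt, or -1
 * to stop retrying. */
static int flogi_retry_delay_ms(unsigned int retries)
{
	if (retries >= 100)
		return 5000;	/* heavy backoff after 100 attempts */
	if (retries >= 32)
		return 1000;	/* mild backoff after 32 attempts */
	return 0;		/* early attempts retry immediately */
}

static int fdisc_retry_delay_ms(unsigned int retries,
				unsigned int devloss_tmo_secs)
{
	if (retries >= devloss_tmo_secs)
		return -1;	/* give up once devloss would expire */
	return 1000;		/* one FDISC retry per second */
}

int main(void)
{
	printf("FLOGI attempt 40: wait %d ms\n", flogi_retry_delay_ms(40));
	printf("FDISC attempt 29 (tmo 30 s): wait %d ms\n",
	       fdisc_retry_delay_ms(29, 30));
	return 0;
}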

+ 112 - 45
drivers/scsi/lpfc/lpfc_hbadisc.c

@@ -1074,6 +1074,12 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
 	mempool_free(pmb, phba->mbox_mem_pool);
 
+	/* don't perform discovery for SLI4 loopback diagnostic test */
+	if ((phba->sli_rev == LPFC_SLI_REV4) &&
+	    !(phba->hba_flag & HBA_FCOE_MODE) &&
+	    (phba->link_flag & LS_LOOPBACK_MODE))
+		return;
+
 	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
 	    vport->fc_flag & FC_PUBLIC_LOOP &&
 	    !(vport->fc_flag & FC_LBIT)) {
@@ -2646,9 +2652,14 @@ lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 {
 	struct lpfc_vport *vport = mboxq->vport;
 
-	/* VFI not supported on interface type 0, just do the flogi */
-	if (mboxq->u.mb.mbxStatus && (bf_get(lpfc_sli_intf_if_type,
-	    &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_IF_TYPE_0)) {
+	/*
+	 * VFI not supported on interface type 0, just do the flogi
+	 * Also continue if the VFI is in use - just use the same one.
+	 */
+	if (mboxq->u.mb.mbxStatus &&
+	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+			LPFC_SLI_INTF_IF_TYPE_0) &&
+	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
 		lpfc_printf_vlog(vport, KERN_ERR,
 				LOG_MBOX,
 				"2891 Init VFI mailbox failed 0x%x\n",
@@ -2842,10 +2853,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 			lpfc_disc_list_loopmap(vport);
 			/* Start discovery */
 			lpfc_disc_start(vport);
-			goto fail_free_mem;
+			goto out_free_mem;
 		}
 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
-		goto fail_free_mem;
+		goto out_free_mem;
 	}
 	/* The VPI is implicitly registered when the VFI is registered */
 	spin_lock_irq(shost->host_lock);
@@ -2855,10 +2866,16 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
 	spin_unlock_irq(shost->host_lock);
 
+	/* In the case of an SLI4 FC loopback test, we are ready */
+	if ((phba->sli_rev == LPFC_SLI_REV4) &&
+	    (phba->link_flag & LS_LOOPBACK_MODE)) {
+		phba->link_state = LPFC_HBA_READY;
+		goto out_free_mem;
+	}
+
 	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
 		/* For private loop just start discovery and we are done. */
 		if ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
-		    (phba->alpa_map[0] == 0) &&
 		    !(vport->fc_flag & FC_PUBLIC_LOOP)) {
 			/* Use loop map to make discovery list */
 			lpfc_disc_list_loopmap(vport);
@@ -2870,7 +2887,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		}
 	}
 
-fail_free_mem:
+out_free_mem:
 	mempool_free(mboxq, phba->mbox_mem_pool);
 	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
 	kfree(dmabuf);
@@ -2923,6 +2940,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
 {
 	struct lpfc_vport *vport = phba->pport;
 	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
+	struct Scsi_Host *shost;
 	int i;
 	struct lpfc_dmabuf *mp;
 	int rc;
@@ -2946,6 +2964,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
 	phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
 	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
 
+	shost = lpfc_shost_from_vport(vport);
 	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
 
@@ -2957,8 +2976,11 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
 				"1309 Link Up Event npiv not supported in loop "
 				"topology\n");
 				/* Get Loop Map information */
-		if (bf_get(lpfc_mbx_read_top_il, la))
+		if (bf_get(lpfc_mbx_read_top_il, la)) {
+			spin_lock_irq(shost->host_lock);
 			vport->fc_flag |= FC_LBIT;
+			spin_unlock_irq(shost->host_lock);
+		}
 
 		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
 		i = la->lilpBde64.tus.f.bdeSize;
@@ -3003,11 +3025,13 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
 	} else {
 		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
 			if (phba->max_vpi && phba->cfg_enable_npiv &&
-			   (phba->sli_rev == 3))
+			   (phba->sli_rev >= LPFC_SLI_REV3))
 				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
 		}
 		vport->fc_myDID = phba->fc_pref_DID;
+		spin_lock_irq(shost->host_lock);
 		vport->fc_flag |= FC_LBIT;
+		spin_unlock_irq(shost->host_lock);
 	}
 	spin_unlock_irq(&phba->hbalock);
 
@@ -3224,15 +3248,14 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	} else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
 		   LPFC_ATT_LINK_DOWN) {
 		phba->fc_stat.LinkDown++;
-		if (phba->link_flag & LS_LOOPBACK_MODE) {
+		if (phba->link_flag & LS_LOOPBACK_MODE)
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
 				"1308 Link Down Event in loop back mode "
 				"x%x received "
 				"Data: x%x x%x x%x\n",
 				la->eventTag, phba->fc_eventTag,
 				phba->pport->port_state, vport->fc_flag);
-		}
-		else {
+		else
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
 				"1305 Link Down Event x%x received "
 				"Data: x%x x%x x%x x%x x%x\n",
@@ -3240,7 +3263,6 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 				phba->pport->port_state, vport->fc_flag,
 				bf_get(lpfc_mbx_read_top_mm, la),
 				bf_get(lpfc_mbx_read_top_fa, la));
-		}
 		lpfc_mbx_issue_link_down(phba);
 	}
 	if ((bf_get(lpfc_mbx_read_top_mm, la)) &&
@@ -3594,6 +3616,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	MAILBOX_t *mb = &pmb->u.mb;
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
 	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
 
 	ndlp = (struct lpfc_nodelist *) pmb->context2;
 	pmb->context1 = NULL;
@@ -3639,8 +3662,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		 * vport discovery */
 		if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
 			lpfc_start_fdiscs(phba);
-		else
+		else {
+			shost = lpfc_shost_from_vport(vport);
+			spin_lock_irq(shost->host_lock);
 			vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ;
+			spin_unlock_irq(shost->host_lock);
+		}
 		lpfc_do_scr_ns_plogi(phba, vport);
 	}
 
@@ -5353,6 +5380,73 @@ lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
 	return ndlp;
 }
 
+/*
+ * This routine looks up the ndlp lists for the given RPI. If the rpi
+ * is found, the routine returns the node element list pointer else
+ * return NULL.
+ */
+struct lpfc_nodelist *
+lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp;
+
+	spin_lock_irq(shost->host_lock);
+	ndlp = __lpfc_findnode_rpi(vport, rpi);
+	spin_unlock_irq(shost->host_lock);
+	return ndlp;
+}
+
+/**
+ * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: the physical host virtual N_Port identifier.
+ *
+ * This routine finds a vport on a HBA (referred by @phba) through a
+ * @vpi. The function walks the HBA's vport list and returns the address
+ * of the vport with the matching @vpi.
+ *
+ * Return code
+ *    NULL - No vport with the matching @vpi found
+ *    Otherwise - Address to the vport with the matching @vpi.
+ **/
+struct lpfc_vport *
+lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
+{
+	struct lpfc_vport *vport;
+	unsigned long flags;
+	int i = 0;
+
+	/* The physical ports are always vpi 0 - translate is unnecessary. */
+	if (vpi > 0) {
+		/*
+		 * Translate the physical vpi to the logical vpi.  The
+		 * vport stores the logical vpi.
+		 */
+		for (i = 0; i < phba->max_vpi; i++) {
+			if (vpi == phba->vpi_ids[i])
+				break;
+		}
+
+		if (i >= phba->max_vpi) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+					 "2936 Could not find Vport mapped "
+					 "to vpi %d\n", vpi);
+			return NULL;
+		}
+	}
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_for_each_entry(vport, &phba->port_list, listentry) {
+		if (vport->vpi == i) {
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			return vport;
+		}
+	}
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	return NULL;
+}
+
 void
 lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	      uint32_t did)
@@ -5599,7 +5693,7 @@ out:
  *
  * This function frees memory associated with the mailbox command.
  */
-static void
+void
 lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 {
 	struct lpfc_vport *vport = mboxq->vport;
@@ -5651,7 +5745,6 @@ lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 int
 lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
 {
-	LPFC_MBOXQ_t *mbox;
 	struct lpfc_vport **vports;
 	struct lpfc_nodelist *ndlp;
 	struct Scsi_Host *shost;
@@ -5687,35 +5780,9 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
 	/* Cleanup any outstanding ELS commands */
 	lpfc_els_flush_all_cmd(phba);
 
-	/* Unregister VFI */
-	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-	if (!mbox) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
-				"2556 UNREG_VFI mbox allocation failed"
-				"HBA state x%x\n", phba->pport->port_state);
-		return -ENOMEM;
-	}
-
-	lpfc_unreg_vfi(mbox, phba->pport);
-	mbox->vport = phba->pport;
-	mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
-
-	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
-	if (rc == MBX_NOT_FINISHED) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
-				"2557 UNREG_VFI issue mbox failed rc x%x "
-				"HBA state x%x\n",
-				rc, phba->pport->port_state);
-		mempool_free(mbox, phba->mbox_mem_pool);
-		return -EIO;
-	}
-
-	shost = lpfc_shost_from_vport(phba->pport);
-	spin_lock_irq(shost->host_lock);
-	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
-	spin_unlock_irq(shost->host_lock);
-
-	return 0;
+	/* Unregister the physical port VFI */
+	rc = lpfc_issue_unreg_vfi(phba->pport);
+	return rc;
 }
 
 /**

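The relocated lpfc_find_vport_by_vpid above translates a physical VPI to the logical VPI stored in each vport by scanning the phba->vpi_ids[] map, with VPI 0 reserved for the physical port. A stand-alone model of that lookup, with illustrative names and array contents:

#include <stdint.h>
#include <stdio.h>

/* Stand-alone model of the physical-to-logical VPI translation;
 * array contents are illustrative, not real firmware values. */
static int logical_vpi(uint16_t phys_vpi, const uint16_t *vpi_ids,
		       int max_vpi)
{
	int i;

	if (phys_vpi == 0)
		return 0;	/* physical port is always vpi 0 */
	for (i = 0; i < max_vpi; i++)
		if (phys_vpi == vpi_ids[i])
			return i;	/* vport stores this logical vpi */
	return -1;		/* no vport mapped to this vpi */
}

int main(void)
{
	uint16_t vpi_ids[] = { 0, 100, 101, 102 };

	printf("physical vpi 101 -> logical vpi %d\n",
	       logical_vpi(101, vpi_ids, 4));
	return 0;
}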
+ 16 - 5
drivers/scsi/lpfc/lpfc_hw.h

@@ -349,6 +349,12 @@ struct csp {
  * Word 1 Bit 31 in FLOGI response is clean address bit
  */
 #define clean_address_bit request_multiple_Nport /* Word 1, bit 31 */
+/*
+ * Word 1 Bit 30 in common service parameter is overloaded.
+ * Word 1 Bit 30 in FLOGI request is Virtual Fabrics
+ * Word 1 Bit 30 in PLOGI request is random offset
+ */
+#define virtual_fabric_support randomOffset /* Word 1, bit 30 */
 #ifdef __BIG_ENDIAN_BITFIELD
 	uint16_t request_multiple_Nport:1;	/* FC Word 1, bit 31 */
 	uint16_t randomOffset:1;	/* FC Word 1, bit 30 */
@@ -1852,8 +1858,8 @@ typedef struct {
 	uint8_t fabric_AL_PA;	/* If using a Fabric Assigned AL_PA */
 #endif
 
-#define FLAGS_LOCAL_LB               0x01 /* link_flags (=1) ENDEC loopback */
 #define FLAGS_TOPOLOGY_MODE_LOOP_PT  0x00 /* Attempt loop then pt-pt */
+#define FLAGS_LOCAL_LB               0x01 /* link_flags (=1) ENDEC loopback */
 #define FLAGS_TOPOLOGY_MODE_PT_PT    0x02 /* Attempt pt-pt only */
 #define FLAGS_TOPOLOGY_MODE_LOOP     0x04 /* Attempt loop only */
 #define FLAGS_TOPOLOGY_MODE_PT_LOOP  0x06 /* Attempt pt-pt then loop */
@@ -2819,7 +2825,8 @@ typedef struct {
 #ifdef __BIG_ENDIAN_BITFIELD
 	uint32_t rsvd1     : 19;  /* Reserved                             */
 	uint32_t cdss      :  1;  /* Configure Data Security SLI          */
-	uint32_t rsvd2     :  3;  /* Reserved                             */
+	uint32_t casabt    :  1;  /* Configure async abts status notice   */
+	uint32_t rsvd2     :  2;  /* Reserved                             */
 	uint32_t cbg       :  1;  /* Configure BlockGuard                 */
 	uint32_t cmv       :  1;  /* Configure Max VPIs                   */
 	uint32_t ccrp      :  1;  /* Config Command Ring Polling          */
@@ -2839,14 +2846,16 @@ typedef struct {
 	uint32_t ccrp      :  1;  /* Config Command Ring Polling          */
 	uint32_t cmv	   :  1;  /* Configure Max VPIs                   */
 	uint32_t cbg       :  1;  /* Configure BlockGuard                 */
-	uint32_t rsvd2     :  3;  /* Reserved                             */
+	uint32_t rsvd2     :  2;  /* Reserved                             */
+	uint32_t casabt    :  1;  /* Configure async abts status notice   */
 	uint32_t cdss      :  1;  /* Configure Data Security SLI          */
 	uint32_t rsvd1     : 19;  /* Reserved                             */
 #endif
 #ifdef __BIG_ENDIAN_BITFIELD
 	uint32_t rsvd3     : 19;  /* Reserved                             */
 	uint32_t gdss      :  1;  /* Configure Data Security SLI          */
-	uint32_t rsvd4     :  3;  /* Reserved                             */
+	uint32_t gasabt    :  1;  /* Grant async abts status notice       */
+	uint32_t rsvd4     :  2;  /* Reserved                             */
 	uint32_t gbg       :  1;  /* Grant BlockGuard                     */
 	uint32_t gmv	   :  1;  /* Grant Max VPIs                       */
 	uint32_t gcrp	   :  1;  /* Grant Command Ring Polling           */
@@ -2866,7 +2875,8 @@ typedef struct {
 	uint32_t gcrp	   :  1;  /* Grant Command Ring Polling           */
 	uint32_t gmv	   :  1;  /* Grant Max VPIs                       */
 	uint32_t gbg       :  1;  /* Grant BlockGuard                     */
-	uint32_t rsvd4     :  3;  /* Reserved                             */
+	uint32_t rsvd4     :  2;  /* Reserved                             */
+	uint32_t gasabt    :  1;  /* Grant async abts status notice       */
 	uint32_t gdss      :  1;  /* Configure Data Security SLI          */
 	uint32_t rsvd3     : 19;  /* Reserved                             */
 #endif
@@ -3465,6 +3475,7 @@ typedef struct {
 } ASYNCSTAT_FIELDS;
 #define ASYNC_TEMP_WARN		0x100
 #define ASYNC_TEMP_SAFE		0x101
+#define ASYNC_STATUS_CN		0x102
 
 /* IOCB Command template for CMD_IOCB_RCV_ELS64_CX (0xB7)
    or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
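
The new virtual_fabric_support alias above documents that FC word 1 bit 30 of the common service parameters is overloaded: it means Virtual Fabrics in a FLOGI but random offset in a PLOGI, so the reader must interpret it per command. A small stand-alone illustration; the mask value is FC word-layout arithmetic, not a driver constant.

#include <stdint.h>
#include <stdio.h>

#define CSP_WORD1_BIT30	(1u << 30)	/* illustrative mask, FC word 1 */

enum els_cmd { ELS_FLOGI, ELS_PLOGI };

/* The same bit reads differently depending on the carrying command. */
static const char *describe_bit30(enum els_cmd cmd, uint32_t word1)
{
	if (!(word1 & CSP_WORD1_BIT30))
		return "clear";
	return cmd == ELS_FLOGI ? "virtual fabric support"
				: "random offset";
}

int main(void)
{
	uint32_t word1 = CSP_WORD1_BIT30;

	printf("FLOGI: %s\n", describe_bit30(ELS_FLOGI, word1));
	printf("PLOGI: %s\n", describe_bit30(ELS_PLOGI, word1));
	return 0;
}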

+ 9 - 2
drivers/scsi/lpfc/lpfc_hw4.h

@@ -1351,11 +1351,11 @@ struct lpfc_mbx_set_link_diag_loopback {
 		struct {
 			uint32_t word0;
 #define lpfc_mbx_set_diag_lpbk_type_SHIFT	0
-#define lpfc_mbx_set_diag_lpbk_type_MASK	0x00000001
+#define lpfc_mbx_set_diag_lpbk_type_MASK	0x00000003
 #define lpfc_mbx_set_diag_lpbk_type_WORD	word0
 #define LPFC_DIAG_LOOPBACK_TYPE_DISABLE		0x0
 #define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL	0x1
-#define LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL	0x2
+#define LPFC_DIAG_LOOPBACK_TYPE_SERDES		0x2
 #define lpfc_mbx_set_diag_lpbk_link_num_SHIFT	16
 #define lpfc_mbx_set_diag_lpbk_link_num_MASK	0x0000003F
 #define lpfc_mbx_set_diag_lpbk_link_num_WORD	word0
@@ -1830,6 +1830,8 @@ struct lpfc_mbx_init_vfi {
 #define lpfc_init_vfi_hop_count_MASK	0x000000FF
 #define lpfc_init_vfi_hop_count_WORD	word4
 };
+#define MBX_VFI_IN_USE			0x9F02
+
 
 struct lpfc_mbx_reg_vfi {
 	uint32_t word1;
@@ -2104,6 +2106,8 @@ struct lpfc_mbx_read_config {
 #define lpfc_mbx_rd_conf_lnk_type_SHIFT		6
 #define lpfc_mbx_rd_conf_lnk_type_MASK		0x00000003
 #define lpfc_mbx_rd_conf_lnk_type_WORD		word2
+#define LPFC_LNK_TYPE_GE	0
+#define LPFC_LNK_TYPE_FC	1
 #define lpfc_mbx_rd_conf_lnk_ldv_SHIFT		8
 #define lpfc_mbx_rd_conf_lnk_ldv_MASK		0x00000001
 #define lpfc_mbx_rd_conf_lnk_ldv_WORD		word2
@@ -3320,6 +3324,9 @@ struct wqe_rctl_dfctl {
 #define wqe_la_SHIFT 3
 #define wqe_la_MASK  0x000000001
 #define wqe_la_WORD  word5
+#define wqe_xo_SHIFT	6
+#define wqe_xo_MASK	0x000000001
+#define wqe_xo_WORD	word5
 #define wqe_ls_SHIFT 7
 #define wqe_ls_MASK  0x000000001
 #define wqe_ls_WORD  word5
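
The widened lpfc_mbx_set_diag_lpbk_type_MASK above grows the loopback type field from one bit to two so the new SERDES value (0x2) fits. These _SHIFT/_MASK/_WORD triplets are consumed by the driver's bf_get()/bf_set() bitfield accessors; below is a stand-alone sketch of that convention, using simplified function-style accessors instead of the real macros.

#include <stdint.h>
#include <stdio.h>

#define LPBK_TYPE_SHIFT	0
#define LPBK_TYPE_MASK	0x00000003	/* two bits: types 0x0..0x2 */

/* Simplified function-style stand-ins for the driver's bf_get/bf_set
 * macros, which combine a _SHIFT/_MASK/_WORD triplet the same way. */
static uint32_t bf_get(uint32_t word, int shift, uint32_t mask)
{
	return (word >> shift) & mask;
}

static uint32_t bf_set(uint32_t word, int shift, uint32_t mask,
		       uint32_t val)
{
	return (word & ~(mask << shift)) | ((val & mask) << shift);
}

int main(void)
{
	uint32_t word0 = 0;

	/* 0x2 (SERDES) only fits once the mask is two bits wide. */
	word0 = bf_set(word0, LPBK_TYPE_SHIFT, LPBK_TYPE_MASK, 0x2);
	printf("loopback type: 0x%x\n",
	       bf_get(word0, LPBK_TYPE_SHIFT, LPBK_TYPE_MASK));
	return 0;
}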

+ 228 - 110
drivers/scsi/lpfc/lpfc_init.c

@@ -62,7 +62,6 @@ static int lpfc_post_rcv_buf(struct lpfc_hba *);
 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
 static int lpfc_setup_endian_order(struct lpfc_hba *);
-static int lpfc_sli4_read_config(struct lpfc_hba *);
 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
 static void lpfc_free_sgl_list(struct lpfc_hba *);
 static int lpfc_init_sgl_list(struct lpfc_hba *);
@@ -475,27 +474,6 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 	/* Get the default values for Model Name and Description */
 	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
 
-	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G)
-	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G)
-		&& !(phba->lmt & LMT_1Gb))
-	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G)
-		&& !(phba->lmt & LMT_2Gb))
-	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G)
-		&& !(phba->lmt & LMT_4Gb))
-	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G)
-		&& !(phba->lmt & LMT_8Gb))
-	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G)
-		&& !(phba->lmt & LMT_10Gb))
-	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
-		&& !(phba->lmt & LMT_16Gb))) {
-		/* Reset link speed to auto */
-		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
-			"1302 Invalid speed for this board: "
-			"Reset link speed to auto: x%x\n",
-			phba->cfg_link_speed);
-			phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
-	}
-
 	phba->link_state = LPFC_LINK_DOWN;
 
 	/* Only process IOCBs on ELS ring till hba_state is READY */
@@ -585,28 +563,10 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 			return -EIO;
 		}
 	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
-		lpfc_init_link(phba, pmb, phba->cfg_topology,
-			phba->cfg_link_speed);
-		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-		lpfc_set_loopback_flag(phba);
-		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-		if (rc != MBX_SUCCESS) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0454 Adapter failed to init, mbxCmd x%x "
-				"INIT_LINK, mbxStatus x%x\n",
-				mb->mbxCommand, mb->mbxStatus);
-
-			/* Clear all interrupt enable conditions */
-			writel(0, phba->HCregaddr);
-			readl(phba->HCregaddr); /* flush */
-			/* Clear all pending interrupts */
-			writel(0xffffffff, phba->HAregaddr);
-			readl(phba->HAregaddr); /* flush */
-			phba->link_state = LPFC_HBA_ERROR;
-			if (rc != MBX_BUSY)
-				mempool_free(pmb, phba->mbox_mem_pool);
-			return -EIO;
-		}
+		mempool_free(pmb, phba->mbox_mem_pool);
+		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+		if (rc)
+			return rc;
 	}
 	/* MBOX buffer will be freed in mbox compl */
 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -667,6 +627,28 @@ lpfc_config_port_post(struct lpfc_hba *phba)
  **/
 int
 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
+{
+	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
+}
+
+/**
+ * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
+ * @phba: pointer to lpfc hba data structure.
+ * @fc_topology: desired fc topology.
+ * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
+ *
+ * This routine will issue the INIT_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use as a delayed link up mechanism with the
+ * module parameter lpfc_suppress_link_up.
+ *
+ * Return code
+ *              0 - success
+ *              Any other value - error
+ **/
+int
+lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
+			       uint32_t flag)
 {
 	struct lpfc_vport *vport = phba->pport;
 	LPFC_MBOXQ_t *pmb;
@@ -681,9 +663,30 @@ lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
 	mb = &pmb->u.mb;
 	pmb->vport = vport;
 
-	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
+	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
+	     !(phba->lmt & LMT_1Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
+	     !(phba->lmt & LMT_2Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
+	     !(phba->lmt & LMT_4Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
+	     !(phba->lmt & LMT_8Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
+	     !(phba->lmt & LMT_10Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
+	     !(phba->lmt & LMT_16Gb))) {
+		/* Reset link speed to auto */
+		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+			"1302 Invalid speed for this board:%d "
+			"Reset link speed to auto.\n",
+			phba->cfg_link_speed);
+		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
+	}
+	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
 	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-	lpfc_set_loopback_flag(phba);
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		lpfc_set_loopback_flag(phba);
 	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1437,7 +1440,10 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
 	uint32_t event_data;
 	struct Scsi_Host *shost;
 	uint32_t if_type;
-	struct lpfc_register portstat_reg;
+	struct lpfc_register portstat_reg = {0};
+	uint32_t reg_err1, reg_err2;
+	uint32_t uerrlo_reg, uemasklo_reg;
+	uint32_t pci_rd_rc1, pci_rd_rc2;
 	int rc;
 
 	/* If the pci channel is offline, ignore possible errors, since
@@ -1449,38 +1455,52 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
 	if (!phba->cfg_enable_hba_reset)
 		return;
 
-	/* Send an internal error event to mgmt application */
-	lpfc_board_errevt_to_mgmt(phba);
-
-	/* For now, the actual action for SLI4 device handling is not
-	 * specified yet, just treated it as adaptor hardware failure
-	 */
-	event_data = FC_REG_DUMP_EVENT;
-	shost = lpfc_shost_from_vport(vport);
-	fc_host_post_vendor_event(shost, fc_get_event_number(),
-				  sizeof(event_data), (char *) &event_data,
-				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
-
 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
 	switch (if_type) {
 	case LPFC_SLI_INTF_IF_TYPE_0:
+		pci_rd_rc1 = lpfc_readl(
+				phba->sli4_hba.u.if_type0.UERRLOregaddr,
+				&uerrlo_reg);
+		pci_rd_rc2 = lpfc_readl(
+				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
+				&uemasklo_reg);
+		/* consider PCI bus read error as pci_channel_offline */
+		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
+			return;
 		lpfc_sli4_offline_eratt(phba);
 		break;
 	case LPFC_SLI_INTF_IF_TYPE_2:
-		portstat_reg.word0 =
-			readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
-
+		pci_rd_rc1 = lpfc_readl(
+				phba->sli4_hba.u.if_type2.STATUSregaddr,
+				&portstat_reg.word0);
+		/* consider PCI bus read error as pci_channel_offline */
+		if (pci_rd_rc1 == -EIO)
+			return;
+		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
+		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
 		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
 			/* TODO: Register for Overtemp async events. */
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2889 Port Overtemperature event, "
-				"taking port\n");
+				"taking port offline\n");
 			spin_lock_irq(&phba->hbalock);
 			phba->over_temp_state = HBA_OVER_TEMP;
 			spin_unlock_irq(&phba->hbalock);
 			lpfc_sli4_offline_eratt(phba);
-			return;
+			break;
 		}
+		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3143 Port Down: Firmware Restarted\n");
+		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3144 Port Down: Debug Dump\n");
+		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3145 Port Down: Provisioning\n");
 		/*
 		 * On error status condition, driver need to wait for port
 		 * ready before performing reset.
@@ -1489,14 +1509,19 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
 		if (!rc) {
 			/* need reset: attempt for port recovery */
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"2887 Port Error: Attempting "
-					"Port Recovery\n");
+					"2887 Reset Needed: Attempting Port "
+					"Recovery...\n");
 			lpfc_offline_prep(phba);
 			lpfc_offline(phba);
 			lpfc_sli_brdrestart(phba);
 			if (lpfc_online(phba) == 0) {
 				lpfc_unblock_mgmt_io(phba);
-				return;
+				/* don't report event on forced debug dump */
+				if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+				    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+					return;
+				else
+					break;
 			}
 			/* fall through for not able to recover */
 		}
@@ -1506,6 +1531,16 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
 	default:
 		break;
 	}
+	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+			"3123 Report dump event to upper layer\n");
+	/* Send an internal error event to mgmt application */
+	lpfc_board_errevt_to_mgmt(phba);
+
+	event_data = FC_REG_DUMP_EVENT;
+	shost = lpfc_shost_from_vport(vport);
+	fc_host_post_vendor_event(shost, fc_get_event_number(),
+				  sizeof(event_data), (char *) &event_data,
+				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
 }
 
 /**
@@ -2673,6 +2708,32 @@ lpfc_offline(struct lpfc_hba *phba)
 	lpfc_destroy_vport_work_array(phba, vports);
 }
 
+/**
+ * lpfc_scsi_buf_update - Update the scsi_buffers that are already allocated.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine goes through all the scsi buffers in the system and updates the
+ * Physical XRIs assigned to the SCSI buffer because these may change after any
+ * firmware reset
+ *
+ * Return codes
+ *   0 - successful (for now, it always returns 0)
+ **/
+int
+lpfc_scsi_buf_update(struct lpfc_hba *phba)
+{
+	struct lpfc_scsi_buf *sb, *sb_next;
+
+	spin_lock_irq(&phba->hbalock);
+	spin_lock(&phba->scsi_buf_list_lock);
+	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list)
+		sb->cur_iocbq.sli4_xritag =
+			phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag];
+	spin_unlock(&phba->scsi_buf_list_lock);
+	spin_unlock_irq(&phba->hbalock);
+	return 0;
+}
+
 /**
  * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
  * @phba: pointer to lpfc hba data structure.
@@ -5040,15 +5101,8 @@ lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
 	struct lpfc_rpi_hdr *rpi_hdr;
 
 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
-	/*
-	 * If the SLI4 port supports extents, posting the rpi header isn't
-	 * required.  Set the expected maximum count and let the actual value
-	 * get set when extents are fully allocated.
-	 */
-	if (!phba->sli4_hba.rpi_hdrs_in_use) {
-		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+	if (!phba->sli4_hba.rpi_hdrs_in_use)
 		return rc;
-	}
 	if (phba->sli4_hba.extents_in_use)
 		return -EIO;
 
@@ -5942,7 +5996,7 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
  * 	-ENOMEM - No available memory
  *      -EIO - The mailbox failed to complete successfully.
  **/
-static int
+int
 lpfc_sli4_read_config(struct lpfc_hba *phba)
 {
 	LPFC_MBOXQ_t *pmb;
@@ -5974,6 +6028,20 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 		rc = -EIO;
 	} else {
 		rd_config = &pmb->u.mqe.un.rd_config;
+		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
+			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
+			phba->sli4_hba.lnk_info.lnk_tp =
+				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
+			phba->sli4_hba.lnk_info.lnk_no =
+				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
+			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+					"3081 lnk_type:%d, lnk_numb:%d\n",
+					phba->sli4_hba.lnk_info.lnk_tp,
+					phba->sli4_hba.lnk_info.lnk_no);
+		} else
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"3082 Mailbox (x%x) returned ldv:x0\n",
+					bf_get(lpfc_mqe_command, &pmb->u.mqe));
 		phba->sli4_hba.extents_in_use =
 			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
 		phba->sli4_hba.max_cfg_param.max_xri =
@@ -6462,6 +6530,7 @@ out_free_fcp_wq:
 		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
 	}
 	kfree(phba->sli4_hba.fcp_wq);
+	phba->sli4_hba.fcp_wq = NULL;
 out_free_els_wq:
 	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
 	phba->sli4_hba.els_wq = NULL;
@@ -6474,6 +6543,7 @@ out_free_fcp_cq:
 		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
 	}
 	kfree(phba->sli4_hba.fcp_cq);
+	phba->sli4_hba.fcp_cq = NULL;
 out_free_els_cq:
 	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
 	phba->sli4_hba.els_cq = NULL;
@@ -6486,6 +6556,7 @@ out_free_fp_eq:
 		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
 	}
 	kfree(phba->sli4_hba.fp_eq);
+	phba->sli4_hba.fp_eq = NULL;
 out_free_sp_eq:
 	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
 	phba->sli4_hba.sp_eq = NULL;
@@ -6519,8 +6590,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 	phba->sli4_hba.els_wq = NULL;
 
 	/* Release FCP work queue */
-	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
-		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
+	if (phba->sli4_hba.fcp_wq != NULL)
+		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
+		     fcp_qidx++)
+			lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
 	kfree(phba->sli4_hba.fcp_wq);
 	phba->sli4_hba.fcp_wq = NULL;
 
@@ -6540,15 +6613,18 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 
 	/* Release FCP response complete queue */
 	fcp_qidx = 0;
-	do
-		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
-	while (++fcp_qidx < phba->cfg_fcp_eq_count);
+	if (phba->sli4_hba.fcp_cq != NULL)
+		do
+			lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+		while (++fcp_qidx < phba->cfg_fcp_eq_count);
 	kfree(phba->sli4_hba.fcp_cq);
 	phba->sli4_hba.fcp_cq = NULL;
 
 	/* Release fast-path event queue */
-	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
-		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+	if (phba->sli4_hba.fp_eq != NULL)
+		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+		     fcp_qidx++)
+			lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
 	kfree(phba->sli4_hba.fp_eq);
 	phba->sli4_hba.fp_eq = NULL;
 
@@ -6601,11 +6677,18 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 			phba->sli4_hba.sp_eq->queue_id);
 
 	/* Set up fast-path event queue */
+	if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3147 Fast-path EQs not allocated\n");
+		rc = -ENOMEM;
+		goto out_destroy_sp_eq;
+	}
 	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
 		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"0522 Fast-path EQ (%d) not "
 					"allocated\n", fcp_eqidx);
+			rc = -ENOMEM;
 			goto out_destroy_fp_eq;
 		}
 		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
@@ -6630,6 +6713,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 	if (!phba->sli4_hba.mbx_cq) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0528 Mailbox CQ not allocated\n");
+		rc = -ENOMEM;
 		goto out_destroy_fp_eq;
 	}
 	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
@@ -6649,6 +6733,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 	if (!phba->sli4_hba.els_cq) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0530 ELS CQ not allocated\n");
+		rc = -ENOMEM;
 		goto out_destroy_mbx_cq;
 	}
 	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
@@ -6665,12 +6750,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 			phba->sli4_hba.sp_eq->queue_id);
 
 	/* Set up fast-path FCP Response Complete Queue */
+	if (!phba->sli4_hba.fcp_cq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3148 Fast-path FCP CQ array not "
+				"allocated\n");
+		rc = -ENOMEM;
+		goto out_destroy_els_cq;
+	}
 	fcp_cqidx = 0;
 	do {
 		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"0526 Fast-path FCP CQ (%d) not "
 					"allocated\n", fcp_cqidx);
+			rc = -ENOMEM;
 			goto out_destroy_fcp_cq;
 		}
 		if (phba->cfg_fcp_eq_count)
@@ -6709,6 +6802,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 	if (!phba->sli4_hba.mbx_wq) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0538 Slow-path MQ not allocated\n");
+		rc = -ENOMEM;
 		goto out_destroy_fcp_cq;
 	}
 	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
@@ -6728,6 +6822,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 	if (!phba->sli4_hba.els_wq) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0536 Slow-path ELS WQ not allocated\n");
+		rc = -ENOMEM;
 		goto out_destroy_mbx_wq;
 	}
 	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
@@ -6744,11 +6839,19 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 			phba->sli4_hba.els_cq->queue_id);
 
 	/* Set up fast-path FCP Work Queue */
+	if (!phba->sli4_hba.fcp_wq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3149 Fast-path FCP WQ array not "
+				"allocated\n");
+		rc = -ENOMEM;
+		goto out_destroy_els_wq;
+	}
 	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
 		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"0534 Fast-path FCP WQ (%d) not "
 					"allocated\n", fcp_wqidx);
+			rc = -ENOMEM;
 			goto out_destroy_fcp_wq;
 		}
 		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
@@ -6779,6 +6882,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0540 Receive Queue not allocated\n");
+		rc = -ENOMEM;
 		goto out_destroy_fcp_wq;
 	}
 
@@ -6805,18 +6909,21 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 out_destroy_fcp_wq:
 	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
 		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
+out_destroy_els_wq:
 	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
 out_destroy_mbx_wq:
 	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
 out_destroy_fcp_cq:
 	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
 		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
+out_destroy_els_cq:
 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 out_destroy_mbx_cq:
 	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
 out_destroy_fp_eq:
 	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
 		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
+out_destroy_sp_eq:
 	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
 out_error:
 	return rc;
@@ -6853,13 +6960,18 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
 	/* Unset ELS complete queue */
 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 	/* Unset FCP response complete queue */
-	fcp_qidx = 0;
-	do {
-		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
-	} while (++fcp_qidx < phba->cfg_fcp_eq_count);
+	if (phba->sli4_hba.fcp_cq) {
+		fcp_qidx = 0;
+		do {
+			lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
+		} while (++fcp_qidx < phba->cfg_fcp_eq_count);
+	}
 	/* Unset fast-path event queue */
-	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
-		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+	if (phba->sli4_hba.fp_eq) {
+		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+		     fcp_qidx++)
+			lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+	}
 	/* Unset slow-path event queue */
 	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
 }
@@ -7398,22 +7510,25 @@ out:
 static void
 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
 {
-	struct pci_dev *pdev;
-
-	/* Obtain PCI device reference */
-	if (!phba->pcidev)
-		return;
-	else
-		pdev = phba->pcidev;
-
-	/* Free coherent DMA memory allocated */
-
-	/* Unmap I/O memory space */
-	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
-	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
-	iounmap(phba->sli4_hba.conf_regs_memmap_p);
+	uint32_t if_type;
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
 
-	return;
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+		iounmap(phba->sli4_hba.conf_regs_memmap_p);
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_2:
+		iounmap(phba->sli4_hba.conf_regs_memmap_p);
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
+		dev_printk(KERN_ERR, &phba->pcidev->dev,
+			   "FATAL - unsupported SLI4 interface type - %d\n",
+			   if_type);
+		break;
+	}
 }
 
 /**
@@ -9198,12 +9313,15 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	/* Perform post initialization setup */
 	lpfc_post_init_setup(phba);
 
-	/* check for firmware upgrade or downgrade */
-	snprintf(file_name, 16, "%s.grp", phba->ModelName);
-	error = request_firmware(&fw, file_name, &phba->pcidev->dev);
-	if (!error) {
-		lpfc_write_firmware(phba, fw);
-		release_firmware(fw);
+	/* check for firmware upgrade or downgrade (if_type 2 only) */
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+	    LPFC_SLI_INTF_IF_TYPE_2) {
+		snprintf(file_name, 16, "%s.grp", phba->ModelName);
+		error = request_firmware(&fw, file_name, &phba->pcidev->dev);
+		if (!error) {
+			lpfc_write_firmware(phba, fw);
+			release_firmware(fw);
+		}
 	}
 
 	/* Check if there are static vports to be created. */
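
Several lpfc_sli4_queue_setup hunks above add an explicit rc = -ENOMEM before each goto, and the teardown hunks gain NULL checks so a partially built queue set can be unwound safely. A minimal stand-alone model of that goto-ladder pattern, assuming invented queue_create()/queue_destroy() helpers:

#include <errno.h>
#include <stdlib.h>

struct queue { int id; };

static struct queue *queue_create(void)
{
	return calloc(1, sizeof(struct queue));
}

static void queue_destroy(struct queue *q)
{
	free(q);
}

/* Each failure sets rc explicitly and jumps to the label that tears
 * down only what was already built, like the out_destroy_* ladder. */
static int setup_queues(struct queue **eq, struct queue **cq,
			struct queue **wq)
{
	int rc;

	*eq = queue_create();
	if (!*eq) {
		rc = -ENOMEM;
		goto out_error;
	}
	*cq = queue_create();
	if (!*cq) {
		rc = -ENOMEM;
		goto out_destroy_eq;
	}
	*wq = queue_create();
	if (!*wq) {
		rc = -ENOMEM;
		goto out_destroy_cq;
	}
	return 0;

out_destroy_cq:
	queue_destroy(*cq);
out_destroy_eq:
	queue_destroy(*eq);
out_error:
	return rc;
}

int main(void)
{
	struct queue *eq, *cq, *wq;

	return setup_queues(&eq, &cq, &wq) ? 1 : 0;
}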

+ 18 - 7
drivers/scsi/lpfc/lpfc_mbox.c

@@ -1293,6 +1293,10 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		phba->sli_rev = LPFC_SLI_REV2;
 	mb->un.varCfgPort.sli_mode = phba->sli_rev;
 
+	/* If this is an SLI3 port, configure async status notification. */
+	if (phba->sli_rev == LPFC_SLI_REV3)
+		mb->un.varCfgPort.casabt = 1;
+
 	/* Now setup pcb */
 	phba->pcb->type = TYPE_NATIVE_SLI2;
 	phba->pcb->feature = FEATURE_INITIAL_SLI2;
@@ -2129,6 +2133,14 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
 	reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
 	reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 	bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
+			"3134 Register VFI, mydid:x%x, fcfi:%d, "
+			" vfi:%d, vpi:%d, fc_pname:%x%x\n",
+			vport->fc_myDID,
+			vport->phba->fcf.fcfi,
+			vport->phba->sli4_hba.vfi_ids[vport->vfi],
+			vport->phba->vpi_ids[vport->vpi],
+			reg_vfi->wwn[0], reg_vfi->wwn[1]);
 }
 
 /**
@@ -2175,16 +2187,15 @@ lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
 }
 
 /**
- * lpfc_dump_fcoe_param - Dump config region 23 to get FCoe parameters.
+ * lpfc_sli4_dump_cfg_rg23 - Dump sli4 port config region 23
  * @phba: pointer to the hba structure containing.
  * @mbox: pointer to lpfc mbox command to initialize.
  *
- * This function create a SLI4 dump mailbox command to dump FCoE
- * parameters stored in region 23.
+ * This function creates a SLI4 dump mailbox command to dump config
+ * region 23.
  **/
 int
-lpfc_dump_fcoe_param(struct lpfc_hba *phba,
-		struct lpfcMboxq *mbox)
+lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
 {
 	struct lpfc_dmabuf *mp = NULL;
 	MAILBOX_t *mb;
@@ -2198,9 +2209,9 @@ lpfc_dump_fcoe_param(struct lpfc_hba *phba,
 
 	if (!mp || !mp->virt) {
 		kfree(mp);
-		/* dump_fcoe_param failed to allocate memory */
+		/* dump config region 23 failed to allocate memory */
 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
-			"2569 lpfc_dump_fcoe_param: memory"
+			"2569 lpfc dump config region 23: memory"
 			" allocation failed\n");
 		return 1;
 	}

+ 2 - 2
drivers/scsi/lpfc/lpfc_mem.c

@@ -389,7 +389,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
 {
 	struct hbq_dmabuf *hbqbp;
 
-	hbqbp = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+	hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
 	if (!hbqbp)
 		return NULL;
 
@@ -441,7 +441,7 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
 {
 	struct hbq_dmabuf *dma_buf;
 
-	dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+	dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
 	if (!dma_buf)
 		return NULL;
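
The lpfc_mem.c hunks swap kmalloc() for kzalloc() so the hbq_dmabuf starts fully zeroed and later field tests cannot see stale heap contents. A user-space analogue using calloc(), with an illustrative struct standing in for hbq_dmabuf:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct hbq_dmabuf. */
struct dmabuf_model {
	void *virt;
	unsigned long phys;
	int tag;
};

int main(void)
{
	/* calloc() plays the role of kzalloc(): every field starts at
	 * zero, so "has this been set yet?" checks are reliable. */
	struct dmabuf_model *buf = calloc(1, sizeof(*buf));

	if (!buf)
		return 1;
	printf("tag starts at %d, virt starts %s\n",
	       buf->tag, buf->virt ? "set" : "NULL");
	free(buf);
	return 0;
}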
 

+ 9 - 1
drivers/scsi/lpfc/lpfc_nportdisc.c

@@ -782,6 +782,14 @@ lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	return NLP_STE_FREED_NODE;
 }
 
+static uint32_t
+lpfc_device_recov_unused_node(struct lpfc_vport *vport,
+			struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	return ndlp->nlp_state;
+}
+
 static uint32_t
 lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			   void *arg, uint32_t evt)
@@ -2147,7 +2155,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
 	lpfc_disc_illegal,		/* CMPL_ADISC      */
 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
 	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
-	lpfc_disc_illegal,		/* DEVICE_RECOVERY */
+	lpfc_device_recov_unused_node,	/* DEVICE_RECOVERY */
 
 	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE    */
 	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI        */
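
The lpfc_nportdisc.c hunk replaces the lpfc_disc_illegal entry for DEVICE_RECOVERY in the unused-node state with a handler that simply returns the current state. The lpfc_disc_action table is indexed by state and event; here is a stand-alone sketch of that dispatch scheme, with invented states, events, and handlers:

#include <stdio.h>

enum { EVT_DEVICE_RM, EVT_DEVICE_RECOVERY, NUM_EVENTS };
enum { STE_UNUSED_NODE, NUM_STATES };

typedef int (*disc_action_fn)(int cur_state);

static int device_rm_unused(int cur_state)
{
	(void)cur_state;
	return -1;		/* stands in for NLP_STE_FREED_NODE */
}

/* The fix: DEVICE_RECOVERY in the unused state is a harmless no-op
 * that keeps the current state instead of an "illegal" handler. */
static int device_recov_unused(int cur_state)
{
	return cur_state;
}

static disc_action_fn disc_action[NUM_STATES * NUM_EVENTS] = {
	[STE_UNUSED_NODE * NUM_EVENTS + EVT_DEVICE_RM] = device_rm_unused,
	[STE_UNUSED_NODE * NUM_EVENTS + EVT_DEVICE_RECOVERY] =
		device_recov_unused,
};

int main(void)
{
	int idx = STE_UNUSED_NODE * NUM_EVENTS + EVT_DEVICE_RECOVERY;

	printf("next state: %d\n", disc_action[idx](STE_UNUSED_NODE));
	return 0;
}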

+ 14 - 3
drivers/scsi/lpfc/lpfc_scsi.c

@@ -681,8 +681,10 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
 
 			rrq_empty = list_empty(&phba->active_rrq_list);
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
-			if (ndlp)
+			if (ndlp) {
 				lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
+				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+			}
 			lpfc_release_scsi_buf_s4(phba, psb);
 			if (rrq_empty)
 				lpfc_worker_wake_up(phba);
@@ -2911,8 +2913,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
 			&lpfc_cmd->fcp_cmnd->fcp_lun);
 
-	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
-
+	memset(&fcp_cmnd->fcpCdb[0], 0, LPFC_FCP_CDB_LEN);
+	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
 	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
 		switch (tag[0]) {
 		case HEAD_OF_QUEUE_TAG:
@@ -3236,6 +3238,15 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 		cmnd->result = err;
 		goto out_fail_command;
 	}
+	/*
+	 * Do not let the mid-layer retry I/O too fast. If an I/O is retried
+	 * without waiting a bit then indicate that the device is busy.
+	 */
+	if (cmnd->retries &&
+	    time_before(jiffies, (cmnd->jiffies_at_alloc +
+				  msecs_to_jiffies(LPFC_RETRY_PAUSE *
+						   cmnd->retries))))
+		return SCSI_MLQUEUE_DEVICE_BUSY;
 	ndlp = rdata->pnode;
 
 	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&

+ 3 - 2
drivers/scsi/lpfc/lpfc_scsi.h

@@ -21,6 +21,7 @@
 #include <asm/byteorder.h>
 
 struct lpfc_hba;
+#define LPFC_FCP_CDB_LEN 16
 
 #define list_remove_head(list, entry, type, member)		\
 	do {							\
@@ -102,7 +103,7 @@ struct fcp_cmnd {
 #define  WRITE_DATA      0x01	/* Bit 0 */
 #define  READ_DATA       0x02	/* Bit 1 */
 
-	uint8_t fcpCdb[16];	/* SRB cdb field is copied here */
+	uint8_t fcpCdb[LPFC_FCP_CDB_LEN]; /* SRB cdb field is copied here */
 	uint32_t fcpDl;		/* Total transfer length */
 
 };
@@ -153,5 +154,5 @@ struct lpfc_scsi_buf {
 
 #define LPFC_SCSI_DMA_EXT_SIZE 264
 #define LPFC_BPL_SIZE          1024
-
+#define LPFC_RETRY_PAUSE       300
 #define MDAC_DIRECT_CMD                  0x22
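
The matching lpfc_scsi.c hunk replaces a fixed 16-byte CDB copy with a memset of the whole LPFC_FCP_CDB_LEN field followed by a copy of only cmd_len bytes, so short CDBs carry no stale trailing bytes. A stand-alone sketch of that zero-then-copy pattern, with a defensive clamp added here for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FCP_CDB_LEN 16	/* mirrors LPFC_FCP_CDB_LEN */

static void copy_cdb(uint8_t dst[FCP_CDB_LEN], const uint8_t *cmnd,
		     size_t cmd_len)
{
	if (cmd_len > FCP_CDB_LEN)
		cmd_len = FCP_CDB_LEN;	/* defensive clamp (added here) */
	memset(dst, 0, FCP_CDB_LEN);	/* no stale trailing bytes */
	memcpy(dst, cmnd, cmd_len);	/* copy only the live command */
}

int main(void)
{
	uint8_t cdb[FCP_CDB_LEN];
	const uint8_t inquiry[6] = { 0x12, 0, 0, 0, 36, 0 };

	copy_cdb(cdb, inquiry, sizeof(inquiry));
	printf("byte 0: 0x%02x, byte 15: 0x%02x\n", cdb[0], cdb[15]);
	return 0;
}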

File diff suppressed because it is too large
+ 390 - 177
drivers/scsi/lpfc/lpfc_sli.c


+ 10 - 1
drivers/scsi/lpfc/lpfc_sli4.h

@@ -291,7 +291,7 @@ struct lpfc_bmbx {
 #define LPFC_RQE_SIZE		8
 
 #define LPFC_EQE_DEF_COUNT	1024
-#define LPFC_CQE_DEF_COUNT      256
+#define LPFC_CQE_DEF_COUNT      1024
 #define LPFC_WQE_DEF_COUNT      256
 #define LPFC_MQE_DEF_COUNT      16
 #define LPFC_RQE_DEF_COUNT	512
@@ -420,7 +420,16 @@ struct lpfc_sli4_hba {
 			void __iomem *STATUSregaddr;
 			void __iomem *CTRLregaddr;
 			void __iomem *ERR1regaddr;
+#define SLIPORT_ERR1_REG_ERR_CODE_1		0x1
+#define SLIPORT_ERR1_REG_ERR_CODE_2		0x2
 			void __iomem *ERR2regaddr;
+#define SLIPORT_ERR2_REG_FW_RESTART		0x0
+#define SLIPORT_ERR2_REG_FUNC_PROVISON		0x1
+#define SLIPORT_ERR2_REG_FORCED_DUMP		0x2
+#define SLIPORT_ERR2_REG_FAILURE_EQ		0x3
+#define SLIPORT_ERR2_REG_FAILURE_CQ		0x4
+#define SLIPORT_ERR2_REG_FAILURE_BUS		0x5
+#define SLIPORT_ERR2_REG_FAILURE_RQ		0x6
 		} if_type2;
 	} u;
 

+ 1 - 1
drivers/scsi/lpfc/lpfc_version.h

@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.27"
+#define LPFC_DRIVER_VERSION "8.3.28"
 #define LPFC_DRIVER_NAME		"lpfc"
 #define LPFC_SP_DRIVER_HANDLER_NAME	"lpfc:sp"
 #define LPFC_FP_DRIVER_HANDLER_NAME	"lpfc:fp"

+ 3 - 3
drivers/scsi/lpfc/lpfc_vport.c

@@ -774,10 +774,10 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
 		return NULL;
 	spin_lock_irq(&phba->hbalock);
 	list_for_each_entry(port_iterator, &phba->port_list, listentry) {
+		if (port_iterator->load_flag & FC_UNLOADING)
+			continue;
 		if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
-			if (!(port_iterator->load_flag & FC_UNLOADING))
-				lpfc_printf_vlog(port_iterator, KERN_ERR,
-					 LOG_VPORT,
+			lpfc_printf_vlog(port_iterator, KERN_ERR, LOG_VPORT,
 					 "1801 Create vport work array FAILED: "
 					 "cannot do scsi_host_get\n");
 			continue;

+ 1 - 2
drivers/scsi/mac_scsi.c

@@ -291,8 +291,7 @@ int __init macscsi_detect(struct scsi_host_template * tpnt)
     ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0;
 
     if (instance->irq != SCSI_IRQ_NONE)
-	if (request_irq(instance->irq, NCR5380_intr, IRQ_FLG_SLOW, 
-			"ncr5380", instance)) {
+	if (request_irq(instance->irq, NCR5380_intr, 0, "ncr5380", instance)) {
 	    printk(KERN_WARNING "scsi%d: IRQ%d not free, interrupts disabled\n",
 		   instance->host_no, instance->irq);
 	    instance->irq = SCSI_IRQ_NONE;

+ 7 - 3
drivers/scsi/mpt2sas/mpi/mpi2.h

@@ -8,7 +8,7 @@
  *                  scatter/gather formats.
  *  Creation Date:  June 21, 2006
  *
- *  mpi2.h Version:  02.00.20
+ *  mpi2.h Version:  02.00.22
  *
  *  Version History
  *  ---------------
@@ -69,6 +69,8 @@
  *  02-23-11  02.00.19  Bumped MPI2_HEADER_VERSION_UNIT.
  *                      Added MPI2_FUNCTION_SEND_HOST_MESSAGE.
  *  03-09-11  02.00.20  Bumped MPI2_HEADER_VERSION_UNIT.
+ *  05-25-11  02.00.21  Bumped MPI2_HEADER_VERSION_UNIT.
+ *  08-24-11  02.00.22  Bumped MPI2_HEADER_VERSION_UNIT.
  *  --------------------------------------------------------------------------
  */
 
@@ -94,7 +96,7 @@
 #define MPI2_VERSION_02_00                  (0x0200)
 
 /* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x14)
+#define MPI2_HEADER_VERSION_UNIT            (0x16)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
@@ -1073,8 +1075,10 @@ typedef struct _MPI2_IEEE_SGE_UNION
 #define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR         (0x02)
 #define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR      (0x03)
 						/* IEEE Simple Element only */
-#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR   (0x03)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR   (0x03)
 						/* IEEE Chain Element only */
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR   \
+	(MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR) /* typo in name */
 
 /****************************************************************************
 *  IEEE SGE operation Macros
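
The mpi2.h hunk above corrects the SYSTEMPLBCPI misspelling to SYSTEMPLBPCI while keeping the old name as an alias so existing users of the header still compile. A stand-alone illustration of that rename-with-alias pattern, reusing the values from the hunk:

#include <stdio.h>

#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR	(0x03)
/* Deprecated misspelling kept as an alias for source compatibility. */
#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR	\
	(MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR)

int main(void)
{
	printf("old and new names agree: %d\n",
	       MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR ==
	       MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR);
	return 0;
}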

+ 19 - 9
drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h

@@ -6,7 +6,7 @@
  *          Title:  MPI Configuration messages and pages
  *  Creation Date:  November 10, 2006
  *
- *    mpi2_cnfg.h Version:  02.00.19
+ *    mpi2_cnfg.h Version:  02.00.21
  *
  *  Version History
  *  ---------------
@@ -140,6 +140,13 @@
  *                      Added SASNotifyPrimitiveMasks field to
  *                      MPI2_CONFIG_PAGE_IOC_7.
  *  03-09-11  02.00.19  Fixed IO Unit Page 10 (to match the spec).
+ *  05-25-11  02.00.20  Cleaned up a few comments.
+ *  08-24-11  02.00.21  Marked the IO Unit Page 7 PowerManagementCapabilities
+ *                      for PCIe link as obsolete.
+ *                      Added SpinupFlags field containing a Disable Spin-up
+ *                      bit to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of
+ *                      SAS IO Unit Page 4.
+
  *  --------------------------------------------------------------------------
  */
 
@@ -904,8 +911,8 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
 #define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED    (0x00000400)
 #define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED    (0x00000200)
 #define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED    (0x00000100)
-#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE    (0x00000008)
-#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE    (0x00000004)
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE    (0x00000008) /* obsolete */
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE    (0x00000004) /* obsolete */
 
 /* defines for IO Unit Page 7 IOCTemperatureUnits field */
 #define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT       (0x00)
@@ -1970,10 +1977,14 @@ typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP
 {
     U8          MaxTargetSpinup;            /* 0x00 */
     U8          SpinupDelay;                /* 0x01 */
-    U16         Reserved1;                  /* 0x02 */
+	U8          SpinupFlags;                /* 0x02 */
+	U8          Reserved1;                  /* 0x03 */
 } MPI2_SAS_IOUNIT4_SPINUP_GROUP, MPI2_POINTER PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP,
   Mpi2SasIOUnit4SpinupGroup_t, MPI2_POINTER pMpi2SasIOUnit4SpinupGroup_t;
 
+/* defines for SAS IO Unit Page 4 SpinupFlags */
+#define MPI2_SASIOUNIT4_SPINUP_DISABLE_FLAG         (0x01)
+
 /*
  * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
  * one and check the value returned for NumPhys at runtime.
@@ -2321,13 +2332,12 @@ typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1
 
 /* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
 
-/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
-
 /* values for SAS Expander Page 1 DiscoveryInfo field */
 #define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED    (0x04)
 #define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE  (0x02)
 #define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES  (0x01)
 
+/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
 
 /****************************************************************************
 *   SAS Device Config Pages
@@ -2447,6 +2457,8 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0
 
 #define MPI2_SASPHY0_PAGEVERSION            (0x03)
 
+/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+
 /* use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */
 
 /* use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */
@@ -2454,12 +2466,10 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0
 /* values for SAS PHY Page 0 Flags field */
 #define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC             (0x01)
 
-/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */
 
 /* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
 
-/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */
-
 
 /* SAS PHY Page 1 */
 

+ 29 - 20
drivers/scsi/mpt2sas/mpi/mpi2_ioc.h

@@ -6,7 +6,7 @@
  *          Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
  *  Creation Date:  October 11, 2006
  *
- *  mpi2_ioc.h Version:  02.00.17
+ *  mpi2_ioc.h Version:  02.00.19
  *
  *  Version History
  *  ---------------
@@ -110,6 +110,13 @@
  *                      Added Temperature Threshold Event.
  *                      Added Host Message Event.
  *                      Added Send Host Message request and reply.
+ *  05-25-11  02.00.18  For Extended Image Header, added
+ *                      MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and
+ *                      MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines.
+ *                      Deprecated MPI2_EXT_IMAGE_TYPE_MAX define.
+ *  08-24-11  02.00.19  Added PhysicalPort field to
+ *                      MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
+ *                      Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
  *  --------------------------------------------------------------------------
  */
 
@@ -578,7 +585,7 @@ typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE
 {
     U16                     TaskTag;                        /* 0x00 */
     U8                      ReasonCode;                     /* 0x02 */
-    U8                      Reserved1;                      /* 0x03 */
+    U8                      PhysicalPort;                   /* 0x03 */
     U8                      ASC;                            /* 0x04 */
     U8                      ASCQ;                           /* 0x05 */
     U16                     DevHandle;                      /* 0x06 */
@@ -1366,16 +1373,18 @@ typedef struct _MPI2_EXT_IMAGE_HEADER
 #define MPI2_EXT_IMAGE_HEADER_SIZE              (0x40)
 
 /* defines for the ImageType field */
-#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED         (0x00)
-#define MPI2_EXT_IMAGE_TYPE_FW                  (0x01)
-#define MPI2_EXT_IMAGE_TYPE_NVDATA              (0x03)
-#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER          (0x04)
-#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION      (0x05)
-#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT        (0x06)
-#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES   (0x07)
-#define MPI2_EXT_IMAGE_TYPE_MEGARAID            (0x08)
-
-#define MPI2_EXT_IMAGE_TYPE_MAX                 (MPI2_EXT_IMAGE_TYPE_MEGARAID)
+#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED				(0x00)
+#define MPI2_EXT_IMAGE_TYPE_FW						(0x01)
+#define MPI2_EXT_IMAGE_TYPE_NVDATA					(0x03)
+#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER				(0x04)
+#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION			(0x05)
+#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT			(0x06)
+#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES		(0x07)
+#define MPI2_EXT_IMAGE_TYPE_MEGARAID				(0x08)
+#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC    (0x80)
+#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC    (0xFF)
+#define MPI2_EXT_IMAGE_TYPE_MAX                   \
+	(MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC)	/* deprecated */
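
Note the semantic shift: MPI2_EXT_IMAGE_TYPE_MAX used to equal MEGARAID (0x08) but now equals 0xFF, so a validity test written as "type <= MPI2_EXT_IMAGE_TYPE_MAX" silently starts accepting the whole product-specific window. A sketch of an explicit check under the new defines (helper name hypothetical):

	/* sketch: accept standard types or the product-specific range */
	static int ext_image_type_known(U8 type)
	{
		return type <= MPI2_EXT_IMAGE_TYPE_MEGARAID ||
		       type >= MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC;
	}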
 
 
 
@@ -1568,7 +1577,7 @@ typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST {
 /* defines for the Feature field */
 #define MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND       (0x01)
 #define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION   (0x02)
-#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK               (0x03)
+#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK               (0x03) /* obsolete */
 #define MPI2_PM_CONTROL_FEATURE_IOC_SPEED               (0x04)
 #define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC    (0x80)
 #define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC    (0xFF)
@@ -1597,14 +1606,14 @@ typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST {
 
 /* parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */
 /* Parameter1 indicates desired PCIe link speed using these defines */
-#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS            (0x00)
-#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS            (0x01)
-#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS            (0x02)
+#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS            (0x00) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS            (0x01) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS            (0x02) /* obsolete */
 /* Parameter2 indicates desired PCIe link width using these defines */
-#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1                 (0x01)
-#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2                 (0x02)
-#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4                 (0x04)
-#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8                 (0x08)
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1                 (0x01) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2                 (0x02) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4                 (0x04) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8                 (0x08) /* obsolete */
 /* Parameter3 and Parameter4 are reserved */
 
 /* parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */

+ 60 - 7
drivers/scsi/mpt2sas/mpi/mpi2_raid.h

@@ -6,7 +6,7 @@
  *          Title:  MPI Integrated RAID messages and structures
  *  Creation Date:  April 26, 2007
  *
- *    mpi2_raid.h Version:  02.00.05
+ *    mpi2_raid.h Version:  02.00.06
  *
  *  Version History
  *  ---------------
@@ -23,6 +23,10 @@
  *  07-30-09  02.00.04  Added proper define for the Use Default Settings bit of
  *                      VolumeCreationFlags and marked the old one as obsolete.
  *  05-12-10  02.00.05  Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
+ *  08-24-10  02.00.06  Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with
+ *                      related structures and defines.
+ *                      Added product-specific range to RAID Action values.
+ *
  *  --------------------------------------------------------------------------
  */
 
@@ -176,7 +180,9 @@ typedef struct _MPI2_RAID_ACTION_REQUEST
 #define MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED  (0x20)
 #define MPI2_RAID_ACTION_START_RAID_FUNCTION        (0x21)
 #define MPI2_RAID_ACTION_STOP_RAID_FUNCTION         (0x22)
-
+#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK        (0x23)
+#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC       (0x80)
+#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC       (0xFF)
 
 /* RAID Volume Creation Structure */
 
@@ -244,6 +250,23 @@ typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION
   Mpi2RaidOnlineCapacityExpansion_t,
   MPI2_POINTER pMpi2RaidOnlineCapacityExpansion_t;
 
+/* RAID Compatibility Input Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_INPUT_STRUCT {
+	U16                     SourceDevHandle;                /* 0x00 */
+	U16                     CandidateDevHandle;             /* 0x02 */
+	U32                     Flags;                          /* 0x04 */
+	U32                     Reserved1;                      /* 0x08 */
+	U32                     Reserved2;                      /* 0x0C */
+} MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+Mpi2RaidCompatibilityInputStruct_t,
+MPI2_POINTER pMpi2RaidCompatibilityInputStruct_t;
+
+/* defines for RAID Compatibility Structure Flags field */
+#define MPI2_RAID_COMPAT_SOURCE_IS_VOLUME_FLAG      (0x00000002)
+#define MPI2_RAID_COMPAT_REPORT_SOURCE_INFO_FLAG    (0x00000001)
+
 
 /* RAID Volume Indicator Structure */
 
@@ -263,15 +286,45 @@ typedef struct _MPI2_RAID_VOL_INDICATOR
 #define MPI2_RAID_VOL_FLAGS_OP_RESYNC               (0x00000003)
 #define MPI2_RAID_VOL_FLAGS_OP_MDC                  (0x00000004)
 
+/* RAID Compatibility Result Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT {
+	U8                      State;                          /* 0x00 */
+	U8                      Reserved1;                      /* 0x01 */
+	U16                     Reserved2;                      /* 0x02 */
+	U32                     GenericAttributes;              /* 0x04 */
+	U32                     OEMSpecificAttributes;          /* 0x08 */
+	U32                     Reserved3;                      /* 0x0C */
+	U32                     Reserved4;                      /* 0x10 */
+} MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+Mpi2RaidCompatibilityResultStruct_t,
+MPI2_POINTER pMpi2RaidCompatibilityResultStruct_t;
+
+/* defines for RAID Compatibility Result Structure State field */
+#define MPI2_RAID_COMPAT_STATE_COMPATIBLE           (0x00)
+#define MPI2_RAID_COMPAT_STATE_NOT_COMPATIBLE       (0x01)
+
+/* defines for RAID Compatibility Result Structure GenericAttributes field */
+#define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR            (0x00000010)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK           (0x0000000C)
+#define MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE    (0x00000008)
+#define MPI2_RAID_COMPAT_GENATTRIB_HARD_DISK_DRIVE      (0x00000004)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK        (0x00000003)
+#define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL         (0x00000002)
+#define MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL        (0x00000001)
 
 /* RAID Action Reply ActionData union */
 typedef union _MPI2_RAID_ACTION_REPLY_DATA
 {
-    U32                     Word[5];
-    MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator;
-    U16                     VolDevHandle;
-    U8                      VolumeState;
-    U8                      PhysDiskNum;
+	U32                                     Word[5];
+	MPI2_RAID_VOL_INDICATOR                 RaidVolumeIndicator;
+	U16                                     VolDevHandle;
+	U8                                      VolumeState;
+	U8                                      PhysDiskNum;
+	MPI2_RAID_COMPATIBILITY_RESULT_STRUCT   RaidCompatibilityResult;
 } MPI2_RAID_ACTION_REPLY_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_REPLY_DATA,
   Mpi2RaidActionReplyData_t, MPI2_POINTER pMpi2RaidActionReplyData_t;
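
The media and protocol attributes are two-bit groups, so a reader masks before comparing; a sketch of decoding a reply's GenericAttributes (variable names are illustrative):

	/* sketch: decode the compatibility result attributes */
	u32 attr = le32_to_cpu(reply_data->RaidCompatibilityResult.GenericAttributes);
	int four_k = !!(attr & MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR);
	int is_ssd = (attr & MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK) ==
		     MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE;
	int is_sas = (attr & MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK) ==
		     MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL;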
 

+ 7 - 2
drivers/scsi/mpt2sas/mpi/mpi2_tool.h

@@ -6,7 +6,7 @@
  *          Title:  MPI diagnostic tool structures and definitions
  *  Creation Date:  March 26, 2007
  *
- *    mpi2_tool.h Version:  02.00.06
+ *    mpi2_tool.h Version:  02.00.07
  *
  *  Version History
  *  ---------------
@@ -25,6 +25,8 @@
  *  05-12-10  02.00.05  Added Diagnostic Data Upload tool.
  *  08-11-10  02.00.06  Added defines that were missing for Diagnostic Buffer
  *                      Post Request.
+ *  05-25-11  02.00.07  Added Flags field and related defines to
+ *                      MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST.
  *  --------------------------------------------------------------------------
  */
 
@@ -181,7 +183,7 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
     U8                      DevIndex;                   /* 0x14 */
     U8                      Action;                     /* 0x15 */
     U8                      SGLFlags;                   /* 0x16 */
-    U8                      Reserved7;                  /* 0x17 */
+    U8                      Flags;                      /* 0x17 */
     U16                     TxDataLength;               /* 0x18 */
     U16                     RxDataLength;               /* 0x1A */
     U32                     Reserved8;                  /* 0x1C */
@@ -205,6 +207,9 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
 
 /* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
 
+/* values for the Flags field */
+#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE   (0x80)
+#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK         (0x07)
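
The two flag defines share the one byte: bit 7 asks the IOC to reserve and release the ISTWI (I2C) interface around the transfer automatically, while the low three bits carry a page address. Composing the field might look like this (a sketch, not from the patch):

	mpi_request->Flags = MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE |
	    (page_addr & MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK);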
 
 /* Toolbox ISTWI Read Write Tool reply message */
 typedef struct _MPI2_TOOLBOX_ISTWI_REPLY {

+ 118 - 87
drivers/scsi/mpt2sas/mpt2sas_base.c

@@ -57,6 +57,7 @@
 #include <linux/sort.h>
 #include <linux/io.h>
 #include <linux/time.h>
+#include <linux/kthread.h>
 #include <linux/aer.h>
 
 #include "mpt2sas_base.h"
@@ -65,6 +66,8 @@ static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];
 
 #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
 
+#define MAX_HBA_QUEUE_DEPTH	30000
+#define MAX_CHAIN_DEPTH		100000
 static int max_queue_depth = -1;
 module_param(max_queue_depth, int, 0);
 MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
@@ -89,19 +92,6 @@ static int disable_discovery = -1;
 module_param(disable_discovery, int, 0);
 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
 
-
-/* diag_buffer_enable is bitwise
- * bit 0 set = TRACE
- * bit 1 set = SNAPSHOT
- * bit 2 set = EXTENDED
- *
- * Either bit can be set, or both
- */
-static int diag_buffer_enable;
-module_param(diag_buffer_enable, int, 0);
-MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
-    "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
-
 /**
  * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
  *
@@ -120,9 +110,33 @@ _scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
 		ioc->fwfault_debug = mpt2sas_fwfault_debug;
 	return 0;
 }
+
 module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug,
     param_get_int, &mpt2sas_fwfault_debug, 0644);
 
+/**
+ *  mpt2sas_remove_dead_ioc_func - kthread context to remove dead ioc
+ * @arg: input argument, used to derive ioc
+ *
+ * Return 0 if controller is removed from pci subsystem.
+ * Return -1 for other case.
+ */
+static int mpt2sas_remove_dead_ioc_func(void *arg)
+{
+	struct MPT2SAS_ADAPTER *ioc = (struct MPT2SAS_ADAPTER *)arg;
+	struct pci_dev *pdev;
+
+	if (!ioc)
+		return -1;
+
+	pdev = ioc->pdev;
+	if (!pdev)
+		return -1;
+	pci_remove_bus_device(pdev);
+	return 0;
+}
+
+
 /**
  * _base_fault_reset_work - workq handling ioc fault conditions
  * @work: input argument, used to derive ioc
@@ -138,6 +152,7 @@ _base_fault_reset_work(struct work_struct *work)
 	unsigned long	 flags;
 	u32 doorbell;
 	int rc;
+	struct task_struct *p;
 
 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 	if (ioc->shost_recovery)
@@ -145,6 +160,39 @@ _base_fault_reset_work(struct work_struct *work)
 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 
 	doorbell = mpt2sas_base_get_iocstate(ioc, 0);
+	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
+		printk(MPT2SAS_INFO_FMT "%s : SAS host is non-operational !!!!\n",
+			ioc->name, __func__);
+
+		/*
+		 * Call _scsih_flush_pending_cmds callback so that we flush all
+		 * pending commands back to OS. This call is required to aovid
+		 * pending commands back to OS. This call is required to avoid
+		 * and this call is safe since dead ioc will never return any
+		 * command back from HW.
+		 */
+		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
+		/*
+		 * Set remove_host flag early since kernel thread will
+		 * take some time to execute.
+		 */
+		ioc->remove_host = 1;
+		/*Remove the Dead Host */
+		p = kthread_run(mpt2sas_remove_dead_ioc_func, ioc,
+		    "mpt2sas_dead_ioc_%d", ioc->id);
+		if (IS_ERR(p)) {
+			printk(MPT2SAS_ERR_FMT
+			"%s: Running mpt2sas_dead_ioc thread failed !!!!\n",
+			ioc->name, __func__);
+		} else {
+			printk(MPT2SAS_ERR_FMT
+			"%s: Running mpt2sas_dead_ioc thread succeeded !!!!\n",
+			ioc->name, __func__);
+		}
+
+		return; /* don't rearm timer */
+	}
+
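
The non-operational test reads oddly until you recall that a dead or surprise-removed PCI function returns all ones on MMIO reads, so every bit of the doorbell state field comes back set; no healthy IOC ever reports a state equal to the mask itself:

	/* doorbell read from a dead device comes back as 0xFFFFFFFF, so */
	/* (0xFFFFFFFF & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK     */

Removal is punted to a kthread because pci_remove_bus_device() sleeps and ultimately tears down this driver, which would risk deadlocking if called inline from the fault-reset work item's own workqueue context.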
 	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
 		rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
 		    FORCE_BIG_HAMMER);
@@ -1346,7 +1394,7 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
 	if (_base_check_enable_msix(ioc) != 0)
 		goto try_ioapic;
 
-	ioc->reply_queue_count = min_t(u8, ioc->cpu_count,
+	ioc->reply_queue_count = min_t(int, ioc->cpu_count,
 	    ioc->msix_vector_count);
 
 	entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
@@ -1916,6 +1964,10 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
 			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
 			    MPT2SAS_INTEL_RMS2LL040_BRANDING);
 			break;
+		case MPT2SAS_INTEL_RAMSDALE_SSDID:
+			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+			    MPT2SAS_INTEL_RAMSDALE_BRANDING);
+			break;
 		default:
 			break;
 		}
@@ -1925,6 +1977,22 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
 			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
 			    MPT2SAS_INTEL_RS25GB008_BRANDING);
 			break;
+		case MPT2SAS_INTEL_RMS25JB080_SSDID:
+			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+			    MPT2SAS_INTEL_RMS25JB080_BRANDING);
+			break;
+		case MPT2SAS_INTEL_RMS25JB040_SSDID:
+			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+			    MPT2SAS_INTEL_RMS25JB040_BRANDING);
+			break;
+		case MPT2SAS_INTEL_RMS25KB080_SSDID:
+			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+			    MPT2SAS_INTEL_RMS25KB080_BRANDING);
+			break;
+		case MPT2SAS_INTEL_RMS25KB040_SSDID:
+			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+			    MPT2SAS_INTEL_RMS25KB040_BRANDING);
+			break;
 		default:
 			break;
 		}
@@ -2311,8 +2379,6 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
 		}
 		if (ioc->chain_dma_pool)
 			pci_pool_destroy(ioc->chain_dma_pool);
-	}
-	if (ioc->chain_lookup) {
 		free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
 		ioc->chain_lookup = NULL;
 	}
@@ -2330,9 +2396,7 @@ static int
 _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
 {
 	struct mpt2sas_facts *facts;
-	u32 queue_size, queue_diff;
 	u16 max_sge_elements;
-	u16 num_of_reply_frames;
 	u16 chains_needed_per_io;
 	u32 sz, total_sz, reply_post_free_sz;
 	u32 retry_sz;
@@ -2359,7 +2423,8 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
 		max_request_credit = (max_queue_depth < facts->RequestCredit)
 		    ? max_queue_depth : facts->RequestCredit;
 	else
-		max_request_credit = facts->RequestCredit;
+		max_request_credit = min_t(u16, facts->RequestCredit,
+		    MAX_HBA_QUEUE_DEPTH);
 
 	ioc->hba_queue_depth = max_request_credit;
 	ioc->hi_priority_depth = facts->HighPriorityCredit;
@@ -2400,50 +2465,25 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
 	}
 	ioc->chains_needed_per_io = chains_needed_per_io;
 
-	/* reply free queue sizing - taking into account for events */
-	num_of_reply_frames = ioc->hba_queue_depth + 32;
-
-	/* number of replies frames can't be a multiple of 16 */
-	/* decrease number of reply frames by 1 */
-	if (!(num_of_reply_frames % 16))
-		num_of_reply_frames--;
-
-	/* calculate number of reply free queue entries
-	 *  (must be multiple of 16)
-	 */
-
-	/* (we know reply_free_queue_depth is not a multiple of 16) */
-	queue_size = num_of_reply_frames;
-	queue_size += 16 - (queue_size % 16);
-	ioc->reply_free_queue_depth = queue_size;
-
-	/* reply descriptor post queue sizing */
-	/* this size should be the number of request frames + number of reply
-	 * frames
-	 */
-
-	queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
-	/* round up to 16 byte boundary */
-	if (queue_size % 16)
-		queue_size += 16 - (queue_size % 16);
-
-	/* check against IOC maximum reply post queue depth */
-	if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
-		queue_diff = queue_size -
-		    facts->MaxReplyDescriptorPostQueueDepth;
+	/* reply free queue sizing - taking into account for 64 FW events */
+	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
 
-		/* round queue_diff up to multiple of 16 */
-		if (queue_diff % 16)
-			queue_diff += 16 - (queue_diff % 16);
-
-		/* adjust hba_queue_depth, reply_free_queue_depth,
-		 * and queue_size
-		 */
-		ioc->hba_queue_depth -= (queue_diff / 2);
-		ioc->reply_free_queue_depth -= (queue_diff / 2);
-		queue_size = facts->MaxReplyDescriptorPostQueueDepth;
+	/* align the reply post queue on the next 16 count boundary */
+	if (!(ioc->reply_free_queue_depth % 16))
+		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
+	else
+		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
+				32 - (ioc->reply_free_queue_depth % 16);
+	if (ioc->reply_post_queue_depth >
+	    facts->MaxReplyDescriptorPostQueueDepth) {
+		ioc->reply_post_queue_depth = min_t(u16,
+		    (facts->MaxReplyDescriptorPostQueueDepth -
+		    (facts->MaxReplyDescriptorPostQueueDepth % 16)),
+		    (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
+		ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
+		ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
 	}
-	ioc->reply_post_queue_depth = queue_size;
 
 	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
 	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
@@ -2529,15 +2569,12 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
 	    "depth(%d)\n", ioc->name, ioc->request,
 	    ioc->scsiio_depth));
 
-	/* loop till the allocation succeeds */
-	do {
-		sz = ioc->chain_depth * sizeof(struct chain_tracker);
-		ioc->chain_pages = get_order(sz);
-		ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
-		    GFP_KERNEL, ioc->chain_pages);
-		if (ioc->chain_lookup == NULL)
-			ioc->chain_depth -= 100;
-	} while (ioc->chain_lookup == NULL);
+	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
+	sz = ioc->chain_depth * sizeof(struct chain_tracker);
+	ioc->chain_pages = get_order(sz);
+
+	ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
+	    GFP_KERNEL, ioc->chain_pages);
 	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
 	    ioc->request_sz, 16, 0);
 	if (!ioc->chain_dma_pool) {
@@ -3136,8 +3173,8 @@ mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
 	if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
 	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
 		ioc->ioc_link_reset_in_progress = 1;
-	mpt2sas_base_put_smid_default(ioc, smid);
 	init_completion(&ioc->base_cmds.done);
+	mpt2sas_base_put_smid_default(ioc, smid);
 	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
 	    msecs_to_jiffies(10000));
 	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
@@ -3238,8 +3275,8 @@ mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
 	request = mpt2sas_base_get_msg_frame(ioc, smid);
 	ioc->base_cmds.smid = smid;
 	memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
-	mpt2sas_base_put_smid_default(ioc, smid);
 	init_completion(&ioc->base_cmds.done);
+	mpt2sas_base_put_smid_default(ioc, smid);
 	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
 	    msecs_to_jiffies(10000));
 	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
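
This two-line swap, repeated throughout mpt2sas_base.c, mpt2sas_ctl.c and mpt2sas_transport.c in this series, closes a race: once put_smid hands the frame to the IOC, the interrupt handler may call complete() at any moment, and an init_completion() that runs afterwards re-zeroes the completion and loses that wakeup, so the waiter times out on a command that actually finished. The pattern, side by side:

	/* racy: the reply can land between these two calls */
	mpt2sas_base_put_smid_default(ioc, smid);
	init_completion(&ioc->base_cmds.done);

	/* fixed: arm the completion before the request leaves the host */
	init_completion(&ioc->base_cmds.done);
	mpt2sas_base_put_smid_default(ioc, smid);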
@@ -3746,8 +3783,8 @@ _base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
 		mpi_request->EventMasks[i] =
 		    cpu_to_le32(ioc->event_masks[i]);
-	mpt2sas_base_put_smid_default(ioc, smid);
 	init_completion(&ioc->base_cmds.done);
+	mpt2sas_base_put_smid_default(ioc, smid);
 	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
 	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
 		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
@@ -4062,7 +4099,8 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 		ioc->reply_free[i] = cpu_to_le32(reply_address);
 
 	/* initialize reply queues */
-	_base_assign_reply_queues(ioc);
+	if (ioc->is_driver_loading)
+		_base_assign_reply_queues(ioc);
 
 	/* initialize Reply Post Free Queue */
 	reply_post_free = (long)ioc->reply_post_free;
@@ -4110,24 +4148,17 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 
 
 	if (ioc->is_driver_loading) {
-
-
-
-		ioc->wait_for_discovery_to_complete =
-		    _base_determine_wait_on_discovery(ioc);
-		return r; /* scan_start and scan_finished support */
-	}
-
-
-	if (ioc->wait_for_discovery_to_complete && ioc->is_warpdrive) {
-		if (ioc->manu_pg10.OEMIdentifier  == 0x80) {
+		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
+		    == 0x80) {
 			hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 &
 			    MFG_PAGE10_HIDE_SSDS_MASK);
 			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
 				ioc->mfg_pg10_hide_flag = hide_flag;
 		}
+		ioc->wait_for_discovery_to_complete =
+		    _base_determine_wait_on_discovery(ioc);
+		return r; /* scan_start and scan_finished support */
 	}
-
 	r = _base_send_port_enable(ioc, sleep_flag);
 	if (r)
 		return r;
@@ -4206,7 +4237,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
 
 	r = mpt2sas_base_map_resources(ioc);
 	if (r)
-		return r;
+		goto out_free_resources;
 
 	if (ioc->is_warpdrive) {
 		ioc->reply_post_host_index[0] =

+ 22 - 4
drivers/scsi/mpt2sas/mpt2sas_base.h

@@ -69,8 +69,8 @@
 #define MPT2SAS_DRIVER_NAME		"mpt2sas"
 #define MPT2SAS_AUTHOR	"LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION	"LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION		"10.100.00.00"
-#define MPT2SAS_MAJOR_VERSION		10
+#define MPT2SAS_DRIVER_VERSION		"12.100.00.00"
+#define MPT2SAS_MAJOR_VERSION		12
 #define MPT2SAS_MINOR_VERSION		100
 #define MPT2SAS_BUILD_VERSION		00
 #define MPT2SAS_RELEASE_VERSION		00
@@ -157,20 +157,33 @@
 /*
  * Intel HBA branding
  */
+#define MPT2SAS_INTEL_RMS25JB080_BRANDING    \
+				"Intel(R) Integrated RAID Module RMS25JB080"
+#define MPT2SAS_INTEL_RMS25JB040_BRANDING    \
+				"Intel(R) Integrated RAID Module RMS25JB040"
+#define MPT2SAS_INTEL_RMS25KB080_BRANDING    \
+				"Intel(R) Integrated RAID Module RMS25KB080"
+#define MPT2SAS_INTEL_RMS25KB040_BRANDING    \
+				"Intel(R) Integrated RAID Module RMS25KB040"
 #define MPT2SAS_INTEL_RMS2LL080_BRANDING	\
 				"Intel Integrated RAID Module RMS2LL080"
 #define MPT2SAS_INTEL_RMS2LL040_BRANDING	\
 				"Intel Integrated RAID Module RMS2LL040"
 #define MPT2SAS_INTEL_RS25GB008_BRANDING       \
 				"Intel(R) RAID Controller RS25GB008"
-
+#define MPT2SAS_INTEL_RAMSDALE_BRANDING        \
+				"Intel 720 Series SSD"
 /*
  * Intel HBA SSDIDs
  */
+#define MPT2SAS_INTEL_RMS25JB080_SSDID         0x3516
+#define MPT2SAS_INTEL_RMS25JB040_SSDID         0x3517
+#define MPT2SAS_INTEL_RMS25KB080_SSDID         0x3518
+#define MPT2SAS_INTEL_RMS25KB040_SSDID         0x3519
 #define MPT2SAS_INTEL_RMS2LL080_SSDID          0x350E
 #define MPT2SAS_INTEL_RMS2LL040_SSDID          0x350F
 #define MPT2SAS_INTEL_RS25GB008_SSDID          0x3000
-
+#define MPT2SAS_INTEL_RAMSDALE_SSDID           0x3700
 
 /*
  * HP HBA branding
@@ -373,6 +386,7 @@ struct _sas_device {
  * @percent_complete: resync percent complete
  * @direct_io_enabled: Whether direct io to PDs are allowed or not
  * @stripe_exponent: X where 2powX is the stripe sz in blocks
+ * @block_exponent: X where 2powX is the block sz in bytes
  * @max_lba: Maximum number of LBA in the volume
  * @stripe_sz: Stripe Size of the volume
  * @device_info: Device info of the volume member disk
@@ -394,6 +408,7 @@ struct _raid_device {
 	u8	percent_complete;
 	u8	direct_io_enabled;
 	u8	stripe_exponent;
+	u8	block_exponent;
 	u64	max_lba;
 	u32	stripe_sz;
 	u32	device_info;
@@ -623,6 +638,7 @@ enum mutex_type {
 	TM_MUTEX_ON = 1,
 };
 
+typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc);
 /**
  * struct MPT2SAS_ADAPTER - per adapter struct
  * @list: ioc_list
@@ -665,6 +681,7 @@ enum mutex_type {
  * @msix_vector_count: number msix vectors
  * @cpu_msix_table: table for mapping cpus to msix index
  * @cpu_msix_table_sz: table size
+ * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
  * @scsi_io_cb_idx: shost generated commands
  * @tm_cb_idx: task management commands
  * @scsih_cb_idx: scsih internal commands
@@ -816,6 +833,7 @@ struct MPT2SAS_ADAPTER {
 	resource_size_t	**reply_post_host_index;
 	u16		cpu_msix_table_sz;
 	u32		ioc_reset_count;
+	MPT2SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
 
 	/* internal commands, callback index */
 	u8		scsi_io_cb_idx;

+ 4 - 4
drivers/scsi/mpt2sas/mpt2sas_ctl.c

@@ -818,6 +818,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
 	_ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
 #endif
 
+	init_completion(&ioc->ctl_cmds.done);
 	switch (mpi_request->Function) {
 	case MPI2_FUNCTION_SCSI_IO_REQUEST:
 	case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
@@ -903,7 +904,6 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
 		timeout = MPT2_IOCTL_DEFAULT_TIMEOUT;
 	else
 		timeout = karg.timeout;
-	init_completion(&ioc->ctl_cmds.done);
 	timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
 	    timeout*HZ);
 	if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
@@ -1477,8 +1477,8 @@ _ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc,
 		mpi_request->ProductSpecific[i] =
 			cpu_to_le32(ioc->product_specific[buffer_type][i]);
 
-	mpt2sas_base_put_smid_default(ioc, smid);
 	init_completion(&ioc->ctl_cmds.done);
+	mpt2sas_base_put_smid_default(ioc, smid);
 	timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
 	    MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
 
@@ -1821,8 +1821,8 @@ _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset)
 	mpi_request->VF_ID = 0; /* TODO */
 	mpi_request->VP_ID = 0;
 
-	mpt2sas_base_put_smid_default(ioc, smid);
 	init_completion(&ioc->ctl_cmds.done);
+	mpt2sas_base_put_smid_default(ioc, smid);
 	timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
 	    MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
 
@@ -2095,8 +2095,8 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state)
 	mpi_request->VF_ID = 0; /* TODO */
 	mpi_request->VP_ID = 0;
 
-	mpt2sas_base_put_smid_default(ioc, smid);
 	init_completion(&ioc->ctl_cmds.done);
+	mpt2sas_base_put_smid_default(ioc, smid);
 	timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
 	    MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
 

+ 103 - 65
drivers/scsi/mpt2sas/mpt2sas_scsih.c

@@ -99,7 +99,7 @@ MODULE_PARM_DESC(logging_level, " bits for enabling additional logging info "
 
 static ushort max_sectors = 0xFFFF;
 module_param(max_sectors, ushort, 0);
-MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 8192  default=8192");
+MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767  default=32767");
 
 /* scsi-mid layer global parmeter is max_report_luns, which is 511 */
 #define MPT2SAS_MAX_LUN (16895)
@@ -612,13 +612,17 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
 	if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
 	     sas_device->sas_address_parent)) {
 		_scsih_sas_device_remove(ioc, sas_device);
-		} else if (!sas_device->starget) {
-			if (!ioc->is_driver_loading)
-				mpt2sas_transport_port_remove(ioc,
-				sas_device->sas_address,
-			    sas_device->sas_address_parent);
-			_scsih_sas_device_remove(ioc, sas_device);
-		}
+	} else if (!sas_device->starget) {
+		/* When async scanning is enabled, it's not possible to remove
+		 * devices while scanning is turned on due to an oops in
+		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
+		 */
+		if (!ioc->is_driver_loading)
+			mpt2sas_transport_port_remove(ioc,
+			sas_device->sas_address,
+			sas_device->sas_address_parent);
+		_scsih_sas_device_remove(ioc, sas_device);
+	}
 }
 
 /**
@@ -1007,8 +1011,8 @@ _scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid)
 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
 	if (list_empty(&ioc->free_chain_list)) {
 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-		printk(MPT2SAS_WARN_FMT "chain buffers not available\n",
-		    ioc->name);
+		dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "chain buffers not "
+			"available\n", ioc->name));
 		return NULL;
 	}
 	chain_req = list_entry(ioc->free_chain_list.next,
@@ -1449,7 +1453,7 @@ _scsih_slave_destroy(struct scsi_device *sdev)
 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
 		sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
 		   sas_target_priv_data->sas_address);
-		if (sas_device)
+		if (sas_device && !sas_target_priv_data->num_luns)
 			sas_device->starget = NULL;
 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 	}
@@ -1776,11 +1780,9 @@ _scsih_init_warpdrive_properties(struct MPT2SAS_ADAPTER *ioc,
 	Mpi2ConfigReply_t mpi_reply;
 	u16 sz;
 	u8 num_pds, count;
-	u64 mb = 1024 * 1024;
-	u64 tb_2 = 2 * mb * mb;
-	u64 capacity;
-	u32 stripe_sz;
-	u8 i, stripe_exp;
+	unsigned long stripe_sz, block_sz;
+	u8 stripe_exp, block_exp;
+	u64 dev_max_lba;
 
 	if (!ioc->is_warpdrive)
 		return;
@@ -1844,51 +1846,57 @@ _scsih_init_warpdrive_properties(struct MPT2SAS_ADAPTER *ioc,
 			    vol_pg0->PhysDisk[count].PhysDiskNum);
 			goto out_error;
 		}
+		/* Disable direct I/O if member drive lba exceeds 4 bytes */
+		dev_max_lba = le64_to_cpu(pd_pg0.DeviceMaxLBA);
+		if (dev_max_lba >> 32) {
+			printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is "
+			    "disabled for the drive with handle(0x%04x) member "
+			    "handle (0x%04x) unsupported max lba 0x%016llx\n",
+			    ioc->name, raid_device->handle,
+			    le16_to_cpu(pd_pg0.DevHandle),
+			    (unsigned long long)dev_max_lba);
+			goto out_error;
+		}
+
 		raid_device->pd_handle[count] = le16_to_cpu(pd_pg0.DevHandle);
 	}
 
 	/*
 	 * Assumption for WD: Direct I/O is not supported if the volume is
-	 * not RAID0, if the stripe size is not 64KB, if the block size is
-	 * not 512 and if the volume size is >2TB
+	 * not RAID0
 	 */
-	if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0 ||
-	    le16_to_cpu(vol_pg0->BlockSize) != 512) {
+	if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0) {
 		printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
 		    "for the drive with handle(0x%04x): type=%d, "
 		    "s_sz=%uK, blk_size=%u\n", ioc->name,
 		    raid_device->handle, raid_device->volume_type,
-		    le32_to_cpu(vol_pg0->StripeSize)/2,
+		    (le32_to_cpu(vol_pg0->StripeSize) *
+		    le16_to_cpu(vol_pg0->BlockSize)) / 1024,
 		    le16_to_cpu(vol_pg0->BlockSize));
 		goto out_error;
 	}
 
-	capacity = (u64) le16_to_cpu(vol_pg0->BlockSize) *
-	    (le64_to_cpu(vol_pg0->MaxLBA) + 1);
-
-	if (capacity > tb_2) {
+	stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
+	stripe_exp = find_first_bit(&stripe_sz, 32);
+	if (stripe_exp == 32) {
 		printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
-		"for the drive with handle(0x%04x) since drive sz > 2TB\n",
-		ioc->name, raid_device->handle);
+		"for the drive with handle(0x%04x) invalid stripe sz %uK\n",
+		    ioc->name, raid_device->handle,
+		    (le32_to_cpu(vol_pg0->StripeSize) *
+		    le16_to_cpu(vol_pg0->BlockSize)) / 1024);
 		goto out_error;
 	}
-
-	stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
-	stripe_exp = 0;
-	for (i = 0; i < 32; i++) {
-		if (stripe_sz & 1)
-			break;
-		stripe_exp++;
-		stripe_sz >>= 1;
-	}
-	if (i == 32) {
+	raid_device->stripe_exponent = stripe_exp;
+	block_sz = le16_to_cpu(vol_pg0->BlockSize);
+	block_exp = find_first_bit(&block_sz, 16);
+	if (block_exp == 16) {
 		printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
-		    "for the drive with handle(0x%04x) invalid stripe sz %uK\n",
+		    "for the drive with handle(0x%04x) invalid block sz %u\n",
 		    ioc->name, raid_device->handle,
-		    le32_to_cpu(vol_pg0->StripeSize)/2);
+		    le16_to_cpu(vol_pg0->BlockSize));
 		goto out_error;
 	}
-	raid_device->stripe_exponent = stripe_exp;
+	raid_device->block_exponent = block_exp;
 	raid_device->direct_io_enabled = 1;
 
 	printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is Enabled for the drive"
@@ -3804,8 +3812,9 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
 {
 	u32 v_lba, p_lba, stripe_off, stripe_unit, column, io_size;
 	u32 stripe_sz, stripe_exp;
-	u8 num_pds, *cdb_ptr, *tmp_ptr, *lba_ptr1, *lba_ptr2;
+	u8 num_pds, *cdb_ptr, i;
 	u8 cdb0 = scmd->cmnd[0];
+	u64 v_llba;
 
 	/*
	 * Try Direct I/O to RAID member disks
@@ -3816,15 +3825,11 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
 
 		if ((cdb0 < READ_16) || !(cdb_ptr[2] | cdb_ptr[3] | cdb_ptr[4]
 			| cdb_ptr[5])) {
-			io_size = scsi_bufflen(scmd) >> 9;
+			io_size = scsi_bufflen(scmd) >>
+			    raid_device->block_exponent;
+			i = (cdb0 < READ_16) ? 2 : 6;
 			/* get virtual lba */
-			lba_ptr1 = lba_ptr2 = (cdb0 < READ_16) ? &cdb_ptr[2] :
-			    &cdb_ptr[6];
-			tmp_ptr = (u8 *)&v_lba + 3;
-			*tmp_ptr-- = *lba_ptr1++;
-			*tmp_ptr-- = *lba_ptr1++;
-			*tmp_ptr-- = *lba_ptr1++;
-			*tmp_ptr = *lba_ptr1;
+			v_lba = be32_to_cpu(*(__be32 *)(&cdb_ptr[i]));
 
 			if (((u64)v_lba + (u64)io_size - 1) <=
 			    (u32)raid_device->max_lba) {
@@ -3843,11 +3848,39 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
 					mpi_request->DevHandle =
 						cpu_to_le16(raid_device->
 						    pd_handle[column]);
-					tmp_ptr = (u8 *)&p_lba + 3;
-					*lba_ptr2++ = *tmp_ptr--;
-					*lba_ptr2++ = *tmp_ptr--;
-					*lba_ptr2++ = *tmp_ptr--;
-					*lba_ptr2 = *tmp_ptr;
+					(*(__be32 *)(&cdb_ptr[i])) =
+						cpu_to_be32(p_lba);
+					/*
+					* WD: To indicate this I/O is directI/O
+					*/
+					_scsih_scsi_direct_io_set(ioc, smid, 1);
+				}
+			}
+		} else {
+			io_size = scsi_bufflen(scmd) >>
+			    raid_device->block_exponent;
+			/* get virtual lba */
+			v_llba = be64_to_cpu(*(__be64 *)(&cdb_ptr[2]));
+
+			if ((v_llba + (u64)io_size - 1) <=
+			    raid_device->max_lba) {
+				stripe_sz = raid_device->stripe_sz;
+				stripe_exp = raid_device->stripe_exponent;
+				stripe_off = (u32) (v_llba & (stripe_sz - 1));
+
+				/* Check whether IO falls within a stripe */
+				if ((stripe_off + io_size) <= stripe_sz) {
+					num_pds = raid_device->num_pds;
+					p_lba = (u32)(v_llba >> stripe_exp);
+					stripe_unit = p_lba / num_pds;
+					column = p_lba % num_pds;
+					p_lba = (stripe_unit << stripe_exp) +
+					    stripe_off;
+					mpi_request->DevHandle =
+						cpu_to_le16(raid_device->
+						    pd_handle[column]);
+					(*(__be64 *)(&cdb_ptr[2])) =
+					    cpu_to_be64((u64)p_lba);
 					/*
 					* WD: To indicate this I/O is directI/O
 					*/
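
The RAID0 remapping arithmetic is identical in the 32-bit and the new 64-bit path; with num_pds = 4 and stripe_exp = 7 (128-block stripes), a virtual LBA of 1000 works out as:

	/* stripe_off  = 1000 & 127     = 104   offset inside the stripe */
	/* stripe no.  = 1000 >> 7      = 7                              */
	/* stripe_unit = 7 / 4          = 1     row on each member disk  */
	/* column      = 7 % 4          = 3     which member disk        */
	/* p_lba       = (1 << 7) + 104 = 232   LBA sent to pd_handle[3] */

and the I/O is only redirected when stripe_off + io_size stays within a single stripe, i.e. when it touches exactly one member disk.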
@@ -4403,11 +4436,14 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 		scmd->result = DID_NO_CONNECT << 16;
 		goto out;
 	}
+	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
 	/*
 	 * WARPDRIVE: If direct_io is set then it is directIO,
 	 * the failed direct I/O should be redirected to volume
 	 */
-	if (_scsih_scsi_direct_io_get(ioc, smid)) {
+	if (_scsih_scsi_direct_io_get(ioc, smid) &&
+	    ((ioc_status & MPI2_IOCSTATUS_MASK)
+	    != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
 		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
 		ioc->scsi_lookup[smid - 1].scmd = scmd;
 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
@@ -4441,7 +4477,6 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 
 	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
 	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
-	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
 	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
 		log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
 	else
@@ -4485,6 +4520,8 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
 			goto out;
 		}
+		scmd->result = DID_SOFT_ERROR << 16;
+		break;
 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
 		scmd->result = DID_RESET << 16;
@@ -6714,6 +6751,7 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
 			} else
 				sas_target_priv_data = NULL;
 			raid_device->responding = 1;
+			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 			starget_printk(KERN_INFO, raid_device->starget,
 			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
 			    (unsigned long long)raid_device->wwid);
@@ -6724,16 +6762,16 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
 			 */
 			_scsih_init_warpdrive_properties(ioc, raid_device);
 			if (raid_device->handle == handle)
-				goto out;
+				return;
 			printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
 			    raid_device->handle);
 			raid_device->handle = handle;
 			if (sas_target_priv_data)
 				sas_target_priv_data->handle = handle;
-			goto out;
+			return;
 		}
 	}
- out:
+
 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 }
 
@@ -7418,7 +7456,7 @@ static struct scsi_host_template scsih_driver_template = {
 	.can_queue			= 1,
 	.this_id			= -1,
 	.sg_tablesize			= MPT2SAS_SG_DEPTH,
-	.max_sectors			= 8192,
+	.max_sectors			= 32767,
 	.cmd_per_lun			= 7,
 	.use_clustering			= ENABLE_CLUSTERING,
 	.shost_attrs			= mpt2sas_host_attrs,
@@ -7928,6 +7966,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
 	ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
 	ioc->logging_level = logging_level;
+	ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
 	/* misc semaphores and spin locks */
 	mutex_init(&ioc->reset_in_progress_mutex);
 	spin_lock_init(&ioc->ioc_reset_in_progress_lock);
@@ -7958,11 +7997,11 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 			printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
 			    "for max_sectors, range is 64 to 8192. Assigning "
 			    "value of 64.\n", ioc->name, max_sectors);
-		} else if (max_sectors > 8192) {
-			shost->max_sectors = 8192;
+		} else if (max_sectors > 32767) {
+			shost->max_sectors = 32767;
 			printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
 			    "for max_sectors, range is 64 to 8192. Assigning "
-			    "default value of 8192.\n", ioc->name,
+			    "default value of 32767.\n", ioc->name,
 			    max_sectors);
 		} else {
 			shost->max_sectors = max_sectors & 0xFFFE;
@@ -8000,7 +8039,6 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto out_attach_fail;
 	}
 
-	scsi_scan_host(shost);
 	if (ioc->is_warpdrive) {
 		if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_EXPOSE_ALL_DISKS)
 			ioc->hide_drives = 0;
@@ -8014,8 +8052,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		}
 	} else
 		ioc->hide_drives = 0;
+	scsi_scan_host(shost);
 
-	_scsih_probe_devices(ioc);
 	return 0;
 
  out_attach_fail:

+ 5 - 4
drivers/scsi/mpt2sas/mpt2sas_transport.c

@@ -398,8 +398,8 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
 	dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "report_manufacture - "
 	    "send to sas_addr(0x%016llx)\n", ioc->name,
 	    (unsigned long long)sas_address));
-	mpt2sas_base_put_smid_default(ioc, smid);
 	init_completion(&ioc->transport_cmds.done);
+	mpt2sas_base_put_smid_default(ioc, smid);
 	timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
 	    10*HZ);
 
@@ -1184,8 +1184,8 @@ _transport_get_expander_phy_error_log(struct MPT2SAS_ADAPTER *ioc,
 	dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "phy_error_log - "
 	    "send to sas_addr(0x%016llx), phy(%d)\n", ioc->name,
 	    (unsigned long long)phy->identify.sas_address, phy->number));
-	mpt2sas_base_put_smid_default(ioc, smid);
 	init_completion(&ioc->transport_cmds.done);
+	mpt2sas_base_put_smid_default(ioc, smid);
 	timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
 	    10*HZ);
 
@@ -1509,8 +1509,9 @@ _transport_expander_phy_control(struct MPT2SAS_ADAPTER *ioc,
 	    "send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n", ioc->name,
 	    (unsigned long long)phy->identify.sas_address, phy->number,
 	    phy_operation));
-	mpt2sas_base_put_smid_default(ioc, smid);
+
 	init_completion(&ioc->transport_cmds.done);
+	mpt2sas_base_put_smid_default(ioc, smid);
 	timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
 	    10*HZ);
 
@@ -1949,8 +1950,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 	dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "%s - "
 	    "sending smp request\n", ioc->name, __func__));
 
-	mpt2sas_base_put_smid_default(ioc, smid);
 	init_completion(&ioc->transport_cmds.done);
+	mpt2sas_base_put_smid_default(ioc, smid);
 	timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
 	    10*HZ);
 

+ 5 - 5
drivers/scsi/qla2xxx/qla_attr.c

@@ -107,7 +107,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 		break;
 	}
-	return -EINVAL;
+	return count;
 }
 
 static struct bin_attribute sysfs_fw_dump_attr = {
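
A sysfs write handler that consumed its input must say so: returning -EINVAL after the action already ran makes the writing process see a failure, and some callers retry in a loop. The contract, sketched with a hypothetical handler:

	static ssize_t foo_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
	{
		if (parse_and_apply(buf))	/* parse_and_apply(): hypothetical */
			return -EINVAL;		/* only for genuinely bad input */
		return count;			/* success: report bytes consumed */
	}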
@@ -387,7 +387,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 		break;
 	case 3:
 		if (ha->optrom_state != QLA_SWRITING)
-			return -ENOMEM;
+			return -EINVAL;
 
 		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
 			ql_log(ql_log_warn, vha, 0x7068,
@@ -667,7 +667,7 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
 	    dev, adr, len, opt);
 	if (rval != QLA_SUCCESS) {
 		ql_log(ql_log_warn, vha, 0x7074,
-		    "Unable to write EDC (%x) %02x:%04x:%02x:%02hhx\n",
+		    "Unable to write EDC (%x) %02x:%04x:%02x:%02x:%02hhx\n",
 		    rval, dev, adr, opt, len, buf[8]);
 		return -EIO;
 	}
@@ -724,7 +724,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
 			dev, adr, len, opt);
 	if (rval != QLA_SUCCESS) {
 		ql_log(ql_log_info, vha, 0x7075,
-		    "Unable to write EDC status (%x) %02x:%04x:%02x.\n",
+		    "Unable to write EDC status (%x) %02x:%04x:%02x:%02x.\n",
 		    rval, dev, adr, opt, len);
 		return -EIO;
 	}
@@ -1971,8 +1971,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 			    "Queue delete failed.\n");
 	}
 
-	scsi_host_put(vha->host);
 	ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
+	scsi_host_put(vha->host);
 	return 0;
 }
 

+ 18 - 2
drivers/scsi/qla2xxx/qla_bsg.c

@@ -31,6 +31,7 @@ qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
 	memset(sp, 0, sizeof(*sp));
 	sp->fcport = fcport;
 	sp->ctx = ctx;
+	ctx->iocbs = 1;
 done:
 	return sp;
 }
@@ -102,7 +103,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
 
 	bsg_job->reply->reply_payload_rcv_len = 0;
 
-	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))) {
+	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
 		ret = -EINVAL;
 		goto exit_fcp_prio_cfg;
 	}
@@ -389,6 +390,20 @@ done:
 	return rval;
 }
 
+inline uint16_t
+qla24xx_calc_ct_iocbs(uint16_t dsds)
+{
+	uint16_t iocbs;
+
+	iocbs = 1;
+	if (dsds > 2) {
+		iocbs += (dsds - 2) / 5;
+		if ((dsds - 2) % 5)
+			iocbs++;
+	}
+	return iocbs;
+}
+
 static int
 qla2x00_process_ct(struct fc_bsg_job *bsg_job)
 {
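
The arithmetic reflects the IOCB layout: the command IOCB itself carries two data-segment descriptors and each continuation IOCB carries five more. Worked out:

	/* dsds <=  2  ->  1 IOCB                                  */
	/* dsds == 12  ->  1 + 10/5      = 3 IOCBs  (10 % 5 == 0)  */
	/* dsds == 13  ->  1 + 11/5 + 1  = 4 IOCBs  (11 % 5 != 0)  */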
@@ -489,6 +504,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
 	ct = sp->ctx;
 	ct->type = SRB_CT_CMD;
 	ct->name = "bsg_ct";
+	ct->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
 	ct->u.bsg_job = bsg_job;
 
 	ql_dbg(ql_dbg_user, vha, 0x7016,
@@ -1653,7 +1669,7 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
 	}
 
 	ql_dbg(ql_dbg_user, vha, 0x7000,
-	    "Entered %s msgcode=%d.\n", __func__, bsg_job->request->msgcode);
+	    "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
 
 	switch (bsg_job->request->msgcode) {
 	case FC_BSG_RPT_ELS:

+ 158 - 152
drivers/scsi/qla2xxx/qla_dbg.c

@@ -11,15 +11,17 @@
  * ----------------------------------------------------------------------
  * |             Level            |   Last Value Used  |     Holes	|
  * ----------------------------------------------------------------------
- * | Module Init and Probe        |       0x0116       |  		|
+ * | Module Init and Probe        |       0x0116       | 0xfa           |
  * | Mailbox commands             |       0x112b       |		|
- * | Device Discovery             |       0x2083       |		|
- * | Queue Command and IO tracing |       0x302e       |     0x3008     |
+ * | Device Discovery             |       0x2084       |		|
+ * | Queue Command and IO tracing |       0x302f       | 0x3008,0x302d, |
+ * |                              |                    | 0x302e         |
  * | DPC Thread                   |       0x401c       |		|
- * | Async Events                 |       0x5059       |		|
- * | Timer Routines               |       0x6010       | 0x600e,0x600f  |
- * | User Space Interactions      |       0x709d       |		|
- * | Task Management              |       0x8041       | 0x800b         |
+ * | Async Events                 |       0x5057       | 0x5052		|
+ * | Timer Routines               |       0x6011       | 0x600e,0x600f  |
+ * | User Space Interactions      |       0x709e       |		|
+ * | Task Management              |       0x803c       | 0x8025-0x8026  |
+ * |                              |                    | 0x800b,0x8039  |
  * | AER/EEH                      |       0x900f       |		|
  * | Virtual Port                 |       0xa007       |		|
  * | ISP82XX Specific             |       0xb052       |    		|
@@ -368,7 +370,7 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
 
 	memcpy(iter_reg, ha->fce, ntohl(fcec->size));
 
-	return iter_reg;
+	return (char *)iter_reg + ntohl(fcec->size);
 }
 
 static inline void *
@@ -1650,6 +1652,15 @@ qla81xx_fw_dump_failed:
 /****************************************************************************/
 /*                         Driver Debug Functions.                          */
 /****************************************************************************/
+
+static inline int
+ql_mask_match(uint32_t level)
+{
+	if (ql2xextended_error_logging == 1)
+		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
+	return (level & ql2xextended_error_logging) == level;
+}
+
 /*
  * This function is for formatting and logging debug information.
  * It is to be used when vha is available. It formats the message
@@ -1664,34 +1675,31 @@ qla81xx_fw_dump_failed:
  * msg:   The message to be displayed.
  */
 void
-ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
-
-	char pbuf[QL_DBG_BUF_LEN];
-	va_list ap;
-	uint32_t len;
-	struct pci_dev *pdev = NULL;
-
-	memset(pbuf, 0, QL_DBG_BUF_LEN);
-
-	va_start(ap, msg);
-
-	if ((level & ql2xextended_error_logging) == level) {
-		if (vha != NULL) {
-			pdev = vha->hw->pdev;
-			/* <module-name> <pci-name> <msg-id>:<host> Message */
-			sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
-			    dev_name(&(pdev->dev)), id + ql_dbg_offset,
-			    vha->host_no);
-		} else
-			sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
-			    "0000:00:00.0", id + ql_dbg_offset);
-
-		len = strlen(pbuf);
-		vsprintf(pbuf+len, msg, ap);
-		pr_warning("%s", pbuf);
+ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
+{
+	va_list va;
+	struct va_format vaf;
+
+	if (!ql_mask_match(level))
+		return;
+
+	va_start(va, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &va;
+
+	if (vha != NULL) {
+		const struct pci_dev *pdev = vha->hw->pdev;
+		/* <module-name> <pci-name> <msg-id>:<host> Message */
+		pr_warn("%s [%s]-%04x:%ld: %pV",
+			QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
+			vha->host_no, &vaf);
+	} else {
+		pr_warn("%s [%s]-%04x: : %pV",
+			QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
 	}
 
-	va_end(ap);
+	va_end(va);
 
 }
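
The rewrite drops the 512-byte stack buffer and the unchecked vsprintf() into it; struct va_format with the %pV printk extension defers the expansion to printk itself, so there is no intermediate buffer to overflow and the message is formatted exactly once. The core of the pattern, inside any varargs helper:

	struct va_format vaf;
	va_list va;

	va_start(va, fmt);
	vaf.fmt = fmt;
	vaf.va = &va;
	pr_warn("prefix: %pV", &vaf);	/* printk expands fmt/va in place */
	va_end(va);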
 
@@ -1710,31 +1718,27 @@ ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
  * msg:   The message to be displayed.
  */
 void
-ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
-
-	char pbuf[QL_DBG_BUF_LEN];
-	va_list ap;
-	uint32_t len;
+ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
+	   const char *fmt, ...)
+{
+	va_list va;
+	struct va_format vaf;
 
 	if (pdev == NULL)
 		return;
+	if (!ql_mask_match(level))
+		return;
 
-	memset(pbuf, 0, QL_DBG_BUF_LEN);
-
-	va_start(ap, msg);
-
-	if ((level & ql2xextended_error_logging) == level) {
-		/* <module-name> <dev-name>:<msg-id> Message */
-		sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
-		    dev_name(&(pdev->dev)), id + ql_dbg_offset);
+	va_start(va, fmt);
 
-		len = strlen(pbuf);
-		vsprintf(pbuf+len, msg, ap);
-		pr_warning("%s", pbuf);
-	}
+	vaf.fmt = fmt;
+	vaf.va = &va;
 
-	va_end(ap);
+	/* <module-name> <dev-name>:<msg-id> Message */
+	pr_warn("%s [%s]-%04x: : %pV",
+		QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);
 
+	va_end(va);
 }
 
 /*
@@ -1751,47 +1755,47 @@ ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
  * msg:   The message to be displayed.
  */
 void
-ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
-
-	char pbuf[QL_DBG_BUF_LEN];
-	va_list ap;
-	uint32_t len;
-	struct pci_dev *pdev = NULL;
-
-	memset(pbuf, 0, QL_DBG_BUF_LEN);
-
-	va_start(ap, msg);
-
-	if (level <= ql_errlev) {
-		if (vha != NULL) {
-			pdev = vha->hw->pdev;
-			/* <module-name> <msg-id>:<host> Message */
-			sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
-			    dev_name(&(pdev->dev)), id, vha->host_no);
-		} else
-			sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
-			    "0000:00:00.0", id);
+ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
+{
+	va_list va;
+	struct va_format vaf;
+	char pbuf[128];
 
-		len = strlen(pbuf);
-			vsprintf(pbuf+len, msg, ap);
+	if (level > ql_errlev)
+		return;
 
-		switch (level) {
-		case 0: /* FATAL LOG */
-			pr_crit("%s", pbuf);
-			break;
-		case 1:
-			pr_err("%s", pbuf);
-			break;
-		case 2:
-			pr_warn("%s", pbuf);
-			break;
-		default:
-			pr_info("%s", pbuf);
-			break;
-		}
+	if (vha != NULL) {
+		const struct pci_dev *pdev = vha->hw->pdev;
+		/* <module-name> <msg-id>:<host> Message */
+		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
+			QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
+	} else {
+		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
+			QL_MSGHDR, "0000:00:00.0", id);
+	}
+	pbuf[sizeof(pbuf) - 1] = 0;
+
+	va_start(va, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &va;
+
+	switch (level) {
+	case 0: /* FATAL LOG */
+		pr_crit("%s%pV", pbuf, &vaf);
+		break;
+	case 1:
+		pr_err("%s%pV", pbuf, &vaf);
+		break;
+	case 2:
+		pr_warn("%s%pV", pbuf, &vaf);
+		break;
+	default:
+		pr_info("%s%pV", pbuf, &vaf);
+		break;
 	}
 
-	va_end(ap);
+	va_end(va);
 }
 
 /*
@@ -1809,43 +1813,44 @@ ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
  * msg:   The message to be displayed.
  */
 void
-ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
-
-	char pbuf[QL_DBG_BUF_LEN];
-	va_list ap;
-	uint32_t len;
+ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
+	   const char *fmt, ...)
+{
+	va_list va;
+	struct va_format vaf;
+	char pbuf[128];
 
 	if (pdev == NULL)
 		return;
+	if (level > ql_errlev)
+		return;
 
-	memset(pbuf, 0, QL_DBG_BUF_LEN);
-
-	va_start(ap, msg);
-
-	if (level <= ql_errlev) {
-		/* <module-name> <dev-name>:<msg-id> Message */
-		sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
-		    dev_name(&(pdev->dev)), id);
-
-		len = strlen(pbuf);
-		vsprintf(pbuf+len, msg, ap);
-		switch (level) {
-		case 0: /* FATAL LOG */
-			pr_crit("%s", pbuf);
-			break;
-		case 1:
-			pr_err("%s", pbuf);
-			break;
-		case 2:
-			pr_warn("%s", pbuf);
-			break;
-		default:
-			pr_info("%s", pbuf);
-			break;
-		}
+	/* <module-name> <dev-name>:<msg-id> Message */
+	snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
+		 QL_MSGHDR, dev_name(&(pdev->dev)), id);
+	pbuf[sizeof(pbuf) - 1] = 0;
+
+	va_start(va, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &va;
+
+	switch (level) {
+	case 0: /* FATAL LOG */
+		pr_crit("%s%pV", pbuf, &vaf);
+		break;
+	case 1:
+		pr_err("%s%pV", pbuf, &vaf);
+		break;
+	case 2:
+		pr_warn("%s%pV", pbuf, &vaf);
+		break;
+	default:
+		pr_info("%s%pV", pbuf, &vaf);
+		break;
 	}
 
-	va_end(ap);
+	va_end(va);
 }
 
 void
@@ -1858,20 +1863,20 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
 	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
 	uint16_t __iomem *mbx_reg;
 
-	if ((level & ql2xextended_error_logging) == level) {
-
-		if (IS_QLA82XX(ha))
-			mbx_reg = &reg82->mailbox_in[0];
-		else if (IS_FWI2_CAPABLE(ha))
-			mbx_reg = &reg24->mailbox0;
-		else
-			mbx_reg = MAILBOX_REG(ha, reg, 0);
+	if (!ql_mask_match(level))
+		return;
 
-		ql_dbg(level, vha, id, "Mailbox registers:\n");
-		for (i = 0; i < 6; i++)
-			ql_dbg(level, vha, id,
-			    "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
-	}
+	if (IS_QLA82XX(ha))
+		mbx_reg = &reg82->mailbox_in[0];
+	else if (IS_FWI2_CAPABLE(ha))
+		mbx_reg = &reg24->mailbox0;
+	else
+		mbx_reg = MAILBOX_REG(ha, reg, 0);
+
+	ql_dbg(level, vha, id, "Mailbox registers:\n");
+	for (i = 0; i < 6; i++)
+		ql_dbg(level, vha, id,
+		    "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
 }
 
 
@@ -1881,24 +1886,25 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
 {
 	uint32_t cnt;
 	uint8_t c;
-	if ((level & ql2xextended_error_logging) == level) {
-
-		ql_dbg(level, vha, id, " 0   1   2   3   4   5   6   7   8   "
-		    "9  Ah  Bh  Ch  Dh  Eh  Fh\n");
-		ql_dbg(level, vha, id, "----------------------------------"
-		    "----------------------------\n");
-
-		ql_dbg(level, vha, id, "");
-		for (cnt = 0; cnt < size;) {
-			c = *b++;
-			printk("%02x", (uint32_t) c);
-			cnt++;
-			if (!(cnt % 16))
-				printk("\n");
-			else
-				printk("  ");
-		}
-		if (cnt % 16)
-			ql_dbg(level, vha, id, "\n");
+
+	if (!ql_mask_match(level))
+		return;
+
+	ql_dbg(level, vha, id, " 0   1   2   3   4   5   6   7   8   "
+	    "9  Ah  Bh  Ch  Dh  Eh  Fh\n");
+	ql_dbg(level, vha, id, "----------------------------------"
+	    "----------------------------\n");
+
+	ql_dbg(level, vha, id, " ");
+	for (cnt = 0; cnt < size;) {
+		c = *b++;
+		printk("%02x", (uint32_t) c);
+		cnt++;
+		if (!(cnt % 16))
+			printk("\n");
+		else
+			printk("  ");
 	}
+	if (cnt % 16)
+		ql_dbg(level, vha, id, "\n");
 }

+ 9 - 10
drivers/scsi/qla2xxx/qla_dbg.h

@@ -232,6 +232,7 @@ struct qla2xxx_fw_dump {
 };
 
 #define QL_MSGHDR "qla2xxx"
+#define QL_DBG_DEFAULT1_MASK    0x1e400000
 
 #define ql_log_fatal		0 /* display fatal errors */
 #define ql_log_warn		1 /* display critical errors */
@@ -244,15 +245,15 @@ struct qla2xxx_fw_dump {
 
 extern int ql_errlev;
 
-void
-ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
-void
-ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
 
-void
-ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
-void
-ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
 
 /* Debug Levels */
 /* The 0x40000000 is the max value any debug level can have
@@ -275,5 +276,3 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
 #define ql_dbg_misc	0x00010000 /* For dumping everything that is not
 				    * not covered by upper categories
 				    */
-
-#define QL_DBG_BUF_LEN	512
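Annotation: tagging the four logging prototypes with the printf format attribute makes the compiler type-check every call against its format string; this catches exactly the class of bug fixed in qla_init.c below, where a "%s" had no matching __func__ argument. A self-contained illustration (names hypothetical):

	void my_log(int level, const char *fmt, ...)
		__attribute__((format(printf, 2, 3)));

	void example(void)
	{
		my_log(1, "%s **** FAILED ****.\n");		/* -Wformat: '%s' has no argument */
		my_log(1, "%s **** FAILED ****.\n", __func__);	/* OK */
	}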

+ 2 - 4
drivers/scsi/qla2xxx/qla_def.h

@@ -271,6 +271,7 @@ struct srb_iocb {
 struct srb_ctx {
 	uint16_t type;
 	char *name;
+	int iocbs;
 	union {
 		struct srb_iocb *iocb_cmd;
 		struct fc_bsg_job *bsg_job;
@@ -2244,6 +2245,7 @@ struct isp_operations {
 	int (*get_flash_version) (struct scsi_qla_host *, void *);
 	int (*start_scsi) (srb_t *);
 	int (*abort_isp) (struct scsi_qla_host *);
+	int (*iospace_config)(struct qla_hw_data*);
 };
 
 /* MSI-X Support *************************************************************/
@@ -2978,10 +2980,6 @@ typedef struct scsi_qla_host {
 	atomic_dec(&__vha->vref_count);			     \
 } while (0)
 
-
-#define qla_printk(level, ha, format, arg...) \
-	dev_printk(level , &((ha)->pdev->dev) , format , ## arg)
-
 /*
  * qla2x00 local function return status codes
  */

+ 1 - 1
drivers/scsi/qla2xxx/qla_gbl.h

@@ -572,7 +572,7 @@ extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
     size_t, char *);
 extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
 extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
-extern void qla82xx_start_iocbs(srb_t *);
+extern void qla82xx_start_iocbs(scsi_qla_host_t *);
 extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
 extern int qla82xx_check_md_needed(scsi_qla_host_t *);
 extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);

+ 1 - 1
drivers/scsi/qla2xxx/qla_gs.c

@@ -758,7 +758,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
 		    "GA_NXT Send SNS failed (%d).\n", rval);
 	} else if (sns_cmd->p.gan_data[8] != 0x80 ||
 	    sns_cmd->p.gan_data[9] != 0x02) {
-		ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207d,
+		ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
 		    "GA_NXT failed, rejected request ga_nxt_rsp:\n");
 		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
 		    sns_cmd->p.gan_data, 16);

+ 54 - 41
drivers/scsi/qla2xxx/qla_init.c

@@ -111,6 +111,7 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
 	memset(sp, 0, sizeof(*sp));
 	sp->fcport = fcport;
 	sp->ctx = ctx;
+	ctx->iocbs = 1;
 	ctx->u.iocb_cmd = iocb;
 	iocb->free = qla2x00_ctx_sp_free;
 
@@ -154,8 +155,8 @@ qla2x00_async_iocb_timeout(srb_t *sp)
 	struct srb_ctx *ctx = sp->ctx;
 
 	ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
-	    "Async-%s timeout - portid=%02x%02x%02x.\n",
-	    ctx->name, fcport->d_id.b.domain, fcport->d_id.b.area,
+	    "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n",
+	    ctx->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
 	    fcport->d_id.b.al_pa);
 
 	fcport->flags &= ~FCF_ASYNC_SENT;
@@ -211,9 +212,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
 		goto done_free_sp;
 
 	ql_dbg(ql_dbg_disc, vha, 0x2072,
-	    "Async-login - loopid=%x portid=%02x%02x%02x retries=%d.\n",
-	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
-	    fcport->d_id.b.al_pa, fcport->login_retry);
+	    "Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x "
+	    "retries=%d.\n", sp->handle, fcport->loop_id,
+	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+	    fcport->login_retry);
 	return rval;
 
 done_free_sp:
@@ -258,9 +260,9 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
 		goto done_free_sp;
 
 	ql_dbg(ql_dbg_disc, vha, 0x2070,
-	    "Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
-	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
-	    fcport->d_id.b.al_pa);
+	    "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
 	return rval;
 
 done_free_sp:
@@ -308,9 +310,9 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
 		goto done_free_sp;
 
 	ql_dbg(ql_dbg_disc, vha, 0x206f,
-	    "Async-adisc - loopid=%x portid=%02x%02x%02x.\n",
-	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
-	    fcport->d_id.b.al_pa);
+	    "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
+	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
 	return rval;
 
 done_free_sp:
@@ -360,9 +362,9 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
 		goto done_free_sp;
 
 	ql_dbg(ql_dbg_taskm, vha, 0x802f,
-	    "Async-tmf loop-id=%x portid=%02x%02x%02x.\n",
-	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
-	    fcport->d_id.b.al_pa);
+	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
 	return rval;
 
 done_free_sp:
@@ -514,7 +516,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
 	set_bit(0, ha->req_qid_map);
 	set_bit(0, ha->rsp_qid_map);
 
-	ql_log(ql_log_info, vha, 0x0040,
+	ql_dbg(ql_dbg_init, vha, 0x0040,
 	    "Configuring PCI space...\n");
 	rval = ha->isp_ops->pci_config(vha);
 	if (rval) {
@@ -533,7 +535,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
 	}
 
 	ha->isp_ops->get_flash_version(vha, req->ring);
-	ql_log(ql_log_info, vha, 0x0061,
+	ql_dbg(ql_dbg_init, vha, 0x0061,
 	    "Configure NVRAM parameters...\n");
 
 	ha->isp_ops->nvram_config(vha);
@@ -550,7 +552,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
 		return QLA_FUNCTION_FAILED;
 	}
 
-	ql_log(ql_log_info, vha, 0x0078,
+	ql_dbg(ql_dbg_init, vha, 0x0078,
 	    "Verifying loaded RISC code...\n");
 
 	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
@@ -1294,7 +1296,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
 			ha->flags.fce_enabled = 0;
 			goto try_eft;
 		}
-		ql_log(ql_log_info, vha, 0x00c0,
+		ql_dbg(ql_dbg_init, vha, 0x00c0,
 		    "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
 
 		fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
@@ -1321,7 +1323,7 @@ try_eft:
 			    tc_dma);
 			goto cont_alloc;
 		}
-		ql_log(ql_log_info, vha, 0x00c3,
+		ql_dbg(ql_dbg_init, vha, 0x00c3,
 		    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
 
 		eft_size = EFT_SIZE;
@@ -1358,7 +1360,7 @@ cont_alloc:
 		}
 		return;
 	}
-	ql_log(ql_log_info, vha, 0x00c5,
+	ql_dbg(ql_dbg_init, vha, 0x00c5,
 	    "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
 
 	ha->fw_dump_len = dump_size;
@@ -1929,7 +1931,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
 					rval = qla84xx_init_chip(vha);
 					if (rval != QLA_SUCCESS) {
 						ql_log(ql_log_warn,
-						    vha, 0x8026,
+						    vha, 0x8007,
 						    "Init chip failed.\n");
 						break;
 					}
@@ -1938,7 +1940,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
 					cs84xx_time = jiffies - cs84xx_time;
 					wtime += cs84xx_time;
 					mtime += cs84xx_time;
-					ql_dbg(ql_dbg_taskm, vha, 0x8025,
+					ql_dbg(ql_dbg_taskm, vha, 0x8008,
 					    "Increasing wait time by %ld. "
 					    "New time %ld.\n", cs84xx_time,
 					    wtime);
@@ -1981,16 +1983,13 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
 
 		/* Delay for a while */
 		msleep(500);
-
-		ql_dbg(ql_dbg_taskm, vha, 0x8039,
-		    "fw_state=%x curr time=%lx.\n", state[0], jiffies);
 	} while (1);
 
 	ql_dbg(ql_dbg_taskm, vha, 0x803a,
 	    "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0],
 	    state[1], state[2], state[3], state[4], jiffies);
 
-	if (rval) {
+	if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
 		ql_log(ql_log_warn, vha, 0x803b,
 		    "Firmware ready **** FAILED ****.\n");
 	}
@@ -2386,7 +2385,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
 	 * internal driver logging.
 	 */
 	if (nv->host_p[0] & BIT_7)
-		ql2xextended_error_logging = 0x7fffffff;
+		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
 	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
 	/* Always load RISC code on non ISP2[12]00 chips. */
 	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
@@ -4188,7 +4187,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
 		spin_unlock_irqrestore(&ha->vport_slock, flags);
 
 	} else {
-		ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n");
+		ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
+		       __func__);
 	}
 
 	return(status);
@@ -4638,7 +4638,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
 	struct req_que *req = ha->req_q_map[0];
 
 	ql_dbg(ql_dbg_init, vha, 0x008b,
-	    "Loading firmware from flash (%x).\n", faddr);
+	    "FW: Loading firmware from flash (%x).\n", faddr);
 
 	rval = QLA_SUCCESS;
 
@@ -4836,8 +4836,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 		return QLA_FUNCTION_FAILED;
 	}
 
-	ql_log(ql_log_info, vha, 0x0092,
-	    "Loading via request-firmware.\n");
+	ql_dbg(ql_dbg_init, vha, 0x0092,
+	    "FW: Loading via request-firmware.\n");
 
 	rval = QLA_SUCCESS;
 
@@ -5425,7 +5425,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
 		if ((vha->device_flags & DFLG_NO_CABLE))
 			status = 0;
 
-		ql_log(ql_log_info, vha, 0x803d,
+		ql_log(ql_log_info, vha, 0x8000,
 		    "Configure loop done, status = 0x%x.\n", status);
 	}
 
@@ -5458,7 +5458,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
 			    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
 			    &ha->fce_bufs);
 			if (rval) {
-				ql_log(ql_log_warn, vha, 0x803e,
+				ql_log(ql_log_warn, vha, 0x8001,
 				    "Unable to reinitialize FCE (%d).\n",
 				    rval);
 				ha->flags.fce_enabled = 0;
@@ -5470,7 +5470,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
 			rval = qla2x00_enable_eft_trace(vha,
 			    ha->eft_dma, EFT_NUM_BUFFERS);
 			if (rval) {
-				ql_log(ql_log_warn, vha, 0x803f,
+				ql_log(ql_log_warn, vha, 0x8010,
 				    "Unable to reinitialize EFT (%d).\n",
 				    rval);
 			}
@@ -5478,7 +5478,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
 	}
 
 	if (!status) {
-		ql_dbg(ql_dbg_taskm, vha, 0x8040,
+		ql_dbg(ql_dbg_taskm, vha, 0x8011,
 		    "qla82xx_restart_isp succeeded.\n");
 
 		spin_lock_irqsave(&ha->vport_slock, flags);
@@ -5496,7 +5496,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
 		spin_unlock_irqrestore(&ha->vport_slock, flags);
 
 	} else {
-		ql_log(ql_log_warn, vha, 0x8041,
+		ql_log(ql_log_warn, vha, 0x8016,
 		    "qla82xx_restart_isp **** FAILED ****.\n");
 	}
 
@@ -5643,13 +5643,26 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
 	if (priority < 0)
 		return QLA_FUNCTION_FAILED;
 
+	if (IS_QLA82XX(vha->hw)) {
+		fcport->fcp_prio = priority & 0xf;
+		return QLA_SUCCESS;
+	}
+
 	ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
-	if (ret == QLA_SUCCESS)
-		fcport->fcp_prio = priority;
-	else
+	if (ret == QLA_SUCCESS) {
+		if (fcport->fcp_prio != priority)
+			ql_dbg(ql_dbg_user, vha, 0x709e,
+			    "Updated FCP_CMND priority - value=%d loop_id=%d "
+			    "port_id=%02x%02x%02x.\n", priority,
+			    fcport->loop_id, fcport->d_id.b.domain,
+			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+		fcport->fcp_prio = priority & 0xf;
+	} else
 		ql_dbg(ql_dbg_user, vha, 0x704f,
-		    "Unable to activate fcp priority, ret=0x%x.\n", ret);
-
+		    "Unable to update FCP_CMND priority - ret=0x%x for "
+		    "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
+		    fcport->d_id.b.domain, fcport->d_id.b.area,
+		    fcport->d_id.b.al_pa);
 	return  ret;
 }
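Annotation: the new '& 0xf' mask reflects that the FCP_CMND priority is a 4-bit field; qla_iocb.c below merges it into the task-attribute byte with 'fcp_prio << 3', i.e. bits 3..6 above the 3-bit task attribute. A small sketch of that packing (field layout inferred from the shift and mask in this diff, not from a spec):

	#include <stdint.h>

	/* Bits 0..2: task attribute (TSK_SIMPLE/TSK_ORDERED/...);
	 * bits 3..6: FCP_CMND priority, as inferred from this patch.
	 */
	static uint8_t pack_task_attribute(uint8_t task_attr, uint8_t fcp_prio)
	{
		return (task_attr & 0x7) | ((fcp_prio & 0xf) << 3);
	}

	/* Example: pack_task_attribute(0, 5) == 0x28 (TSK_SIMPLE, priority 5). */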
 

+ 537 - 104
drivers/scsi/qla2xxx/qla_iocb.c

@@ -11,8 +11,6 @@
 
 #include <scsi/scsi_tcq.h>
 
-static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
-
 static void qla25xx_set_que(srb_t *, struct rsp_que **);
 /**
  * qla2x00_get_cmd_direction() - Determine control_flag data direction.
@@ -467,6 +465,42 @@ queuing_error:
 	return (QLA_FUNCTION_FAILED);
 }
 
+/**
+ * qla2x00_start_iocbs() - Execute the IOCB command
+ */
+static void
+qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
+{
+	struct qla_hw_data *ha = vha->hw;
+	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
+	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
+
+	if (IS_QLA82XX(ha)) {
+		qla82xx_start_iocbs(vha);
+	} else {
+		/* Adjust ring index. */
+		req->ring_index++;
+		if (req->ring_index == req->length) {
+			req->ring_index = 0;
+			req->ring_ptr = req->ring;
+		} else
+			req->ring_ptr++;
+
+		/* Set chip new ring index. */
+		if (ha->mqenable) {
+			WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
+			RD_REG_DWORD(&ioreg->hccr);
+		} else if (IS_FWI2_CAPABLE(ha)) {
+			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
+			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+		} else {
+			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+				req->ring_index);
+			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+		}
+	}
+}
+
 /**
  * qla2x00_marker() - Send a marker IOCB to the firmware.
  * @ha: HA context
@@ -489,6 +523,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
 	mrk24 = NULL;
+	req = ha->req_q_map[0];
 	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
 	if (mrk == NULL) {
 		ql_log(ql_log_warn, base_vha, 0x3026,
@@ -515,7 +550,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
 	}
 	wmb();
 
-	qla2x00_isp_cmd(vha, req);
+	qla2x00_start_iocbs(vha, req);
 
 	return (QLA_SUCCESS);
 }
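Annotation: the added qla2x00_start_iocbs() consolidates the ring-advance logic that used to live in qla2x00_isp_cmd() and in the srb-based qla2x00_start_iocbs() removed further down. At its core it is a plain circular-buffer producer advance; a stripped-down model:

	#include <stddef.h>

	/* Minimal model of the request-ring advance performed above. */
	struct ring {
		unsigned int index;	/* producer position */
		unsigned int length;	/* entries in the ring */
		void *base;		/* first entry */
		void *ptr;		/* current entry, base + index */
	};

	static void ring_advance(struct ring *r, size_t entry_size)
	{
		if (++r->index == r->length) {	/* wrap to the start */
			r->index = 0;
			r->ptr = r->base;
		} else {
			r->ptr = (char *)r->ptr + entry_size;
		}
	}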
@@ -536,89 +571,140 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
 }
 
 /**
- * qla2x00_isp_cmd() - Modify the request ring pointer.
- * @ha: HA context
+ * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
+ * Continuation Type 1 IOCBs to allocate.
+ *
+ * @dsds: number of data segment descriptors needed
  *
- * Note: The caller must hold the hardware lock before calling this routine.
+ * Returns the number of IOCB entries needed to store @dsds.
  */
-static void
-qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
+inline uint16_t
+qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
 {
-	struct qla_hw_data *ha = vha->hw;
-	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
-	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
+	uint16_t iocbs;
 
-	ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
-	    "IOCB data:\n");
-	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
-	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);
+	iocbs = 1;
+	if (dsds > 1) {
+		iocbs += (dsds - 1) / 5;
+		if ((dsds - 1) % 5)
+			iocbs++;
+	}
+	return iocbs;
+}
 
-	/* Adjust ring index. */
-	req->ring_index++;
-	if (req->ring_index == req->length) {
-		req->ring_index = 0;
-		req->ring_ptr = req->ring;
-	} else
-		req->ring_ptr++;
+static inline int
+qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
+	uint16_t tot_dsds)
+{
+	uint32_t *cur_dsd = NULL;
+	scsi_qla_host_t	*vha;
+	struct qla_hw_data *ha;
+	struct scsi_cmnd *cmd;
+	struct	scatterlist *cur_seg;
+	uint32_t *dsd_seg;
+	void *next_dsd;
+	uint8_t avail_dsds;
+	uint8_t first_iocb = 1;
+	uint32_t dsd_list_len;
+	struct dsd_dma *dsd_ptr;
+	struct ct6_dsd *ctx;
 
-	/* Set chip new ring index. */
-	if (IS_QLA82XX(ha)) {
-		uint32_t dbval = 0x04 | (ha->portnum << 5);
+	cmd = sp->cmd;
 
-		/* write, read and verify logic */
-		dbval = dbval | (req->id << 8) | (req->ring_index << 16);
-		if (ql2xdbwr)
-			qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
-		else {
-			WRT_REG_DWORD(
-				(unsigned long __iomem *)ha->nxdb_wr_ptr,
-				dbval);
-			wmb();
-			while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
-				WRT_REG_DWORD((unsigned long __iomem *)
-					ha->nxdb_wr_ptr, dbval);
-				wmb();
-			}
-		}
-	} else if (ha->mqenable) {
-		/* Set chip new ring index. */
-		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
-		RD_REG_DWORD(&ioreg->hccr);
-	} else {
-		if (IS_FWI2_CAPABLE(ha)) {
-			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
-			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+	/* Update entry type to indicate Command Type 3 IOCB */
+	*((uint32_t *)(&cmd_pkt->entry_type)) =
+		__constant_cpu_to_le32(COMMAND_TYPE_6);
+
+	/* No data transfer */
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+		return 0;
+	}
+
+	vha = sp->fcport->vha;
+	ha = vha->hw;
+
+	/* Set transfer direction */
+	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+		cmd_pkt->control_flags =
+		    __constant_cpu_to_le16(CF_WRITE_DATA);
+		ha->qla_stats.output_bytes += scsi_bufflen(cmd);
+	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		cmd_pkt->control_flags =
+		    __constant_cpu_to_le16(CF_READ_DATA);
+		ha->qla_stats.input_bytes += scsi_bufflen(cmd);
+	}
+
+	cur_seg = scsi_sglist(cmd);
+	ctx = sp->ctx;
+
+	while (tot_dsds) {
+		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
+		    QLA_DSDS_PER_IOCB : tot_dsds;
+		tot_dsds -= avail_dsds;
+		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
+
+		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
+		    struct dsd_dma, list);
+		next_dsd = dsd_ptr->dsd_addr;
+		list_del(&dsd_ptr->list);
+		ha->gbl_dsd_avail--;
+		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
+		ctx->dsd_use_cnt++;
+		ha->gbl_dsd_inuse++;
+
+		if (first_iocb) {
+			first_iocb = 0;
+			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
 		} else {
-			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
-				req->ring_index);
-			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+			*cur_dsd++ = cpu_to_le32(dsd_list_len);
+		}
+		cur_dsd = (uint32_t *)next_dsd;
+		while (avail_dsds) {
+			dma_addr_t	sle_dma;
+
+			sle_dma = sg_dma_address(cur_seg);
+			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
+			cur_seg = sg_next(cur_seg);
+			avail_dsds--;
 		}
 	}
 
+	/* Null termination */
+	*cur_dsd++ =  0;
+	*cur_dsd++ = 0;
+	*cur_dsd++ = 0;
+	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
+	return 0;
 }
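Annotation: the chained-DSD walk above writes each data segment as a triple of little-endian words and terminates the final list with an all-zero descriptor. The layout implied by those cpu_to_le32() stores (inferred from this diff, not from a driver header):

	#include <stdint.h>

	/* One data segment descriptor as written above: three LE32 words. */
	struct dsd32 {
		uint32_t addr_lo;	/* LSD(dma address) */
		uint32_t addr_hi;	/* MSD(dma address) */
		uint32_t len;		/* segment length in bytes */
	};

	/* A Command Type 6 IOCB points at a chain of lists of these, each
	 * list holding up to QLA_DSDS_PER_IOCB entries; the chain ends with
	 * one all-zero descriptor, matching the null termination above.
	 */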
 
-/**
- * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
- * Continuation Type 1 IOCBs to allocate.
+/*
+ * qla24xx_calc_dsd_lists() - Determine number of DSD list required
+ * for Command Type 6.
  *
  * @dsds: number of data segment descriptors needed
  *
- * Returns the number of IOCB entries needed to store @dsds.
+ * Returns the number of DSD lists needed to store @dsds.
  */
 inline uint16_t
-qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
+qla24xx_calc_dsd_lists(uint16_t dsds)
 {
-	uint16_t iocbs;
+	uint16_t dsd_lists = 0;
 
-	iocbs = 1;
-	if (dsds > 1) {
-		iocbs += (dsds - 1) / 5;
-		if ((dsds - 1) % 5)
-			iocbs++;
-	}
-	return iocbs;
+	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
+	if (dsds % QLA_DSDS_PER_IOCB)
+		dsd_lists++;
+	return dsd_lists;
 }
 
+
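Annotation: both helpers are ceiling divisions. qla24xx_calc_iocbs() assumes the command IOCB carries one data-segment descriptor and each Continuation Type 1 IOCB carries five more, so iocbs = 1 + ceil((dsds - 1) / 5); qla24xx_calc_dsd_lists() is simply ceil(dsds / QLA_DSDS_PER_IOCB). A quick worked check, assuming QLA_DSDS_PER_IOCB is 37 as defined elsewhere in the driver:

	/* qla24xx_calc_iocbs(vha, 12):
	 *   1 + (12 - 1) / 5 = 3, and 11 % 5 != 0 adds one more -> 4 IOCBs,
	 *   i.e. one command IOCB plus three continuations (1 + 5 + 5 + 1 DSDs).
	 *
	 * qla24xx_calc_dsd_lists(12):
	 *   12 / 37 = 0 with a remainder -> 1 DSD list.
	 */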
 /**
  * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
  * IOCB types.
@@ -945,6 +1031,7 @@ alloc_and_fill:
 	*cur_dsd++ = 0;
 	return 0;
 }
+
 static int
 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
 	uint16_t tot_dsds)
@@ -1004,7 +1091,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
 		sle_dma = sg_dma_address(sg);
 		ql_dbg(ql_dbg_io, vha, 0x300a,
 		    "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
-		    cur_dsd, i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
+		    i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
 		    sp->cmd);
 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
@@ -1731,6 +1818,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
 	uint32_t index, handle;
 	request_t *pkt;
 	uint16_t cnt, req_cnt;
+	struct srb_ctx *ctx;
 
 	pkt = NULL;
 	req_cnt = 1;
@@ -1759,6 +1847,12 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
 	req->outstanding_cmds[handle] = sp;
 	sp->handle = handle;
 
+	/* Adjust entry-counts as needed. */
+	if (sp->ctx) {
+		ctx = sp->ctx;
+		req_cnt = ctx->iocbs;
+	}
+
 skip_cmd_array:
 	/* Check for room on request queue. */
 	if (req->cnt < req_cnt) {
@@ -1792,42 +1886,6 @@ queuing_error:
 	return pkt;
 }
 
-static void
-qla2x00_start_iocbs(srb_t *sp)
-{
-	struct qla_hw_data *ha = sp->fcport->vha->hw;
-	struct req_que *req = ha->req_q_map[0];
-	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
-	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
-
-	if (IS_QLA82XX(ha)) {
-		qla82xx_start_iocbs(sp);
-	} else {
-		/* Adjust ring index. */
-		req->ring_index++;
-		if (req->ring_index == req->length) {
-			req->ring_index = 0;
-			req->ring_ptr = req->ring;
-		} else
-			req->ring_ptr++;
-
-		/* Set chip new ring index. */
-		if (ha->mqenable) {
-			WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
-			RD_REG_DWORD(&ioreg->hccr);
-		} else if (IS_QLA82XX(ha)) {
-			qla82xx_start_iocbs(sp);
-		} else if (IS_FWI2_CAPABLE(ha)) {
-			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
-			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
-		} else {
-			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
-				req->ring_index);
-			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
-		}
-	}
-}
-
 static void
 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
 {
@@ -2160,6 +2218,381 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
         ct_iocb->entry_count = entry_count;
 }
 
+/*
+ * qla82xx_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla82xx_start_scsi(srb_t *sp)
+{
+	int		ret, nseg;
+	unsigned long   flags;
+	struct scsi_cmnd *cmd;
+	uint32_t	*clr_ptr;
+	uint32_t        index;
+	uint32_t	handle;
+	uint16_t	cnt;
+	uint16_t	req_cnt;
+	uint16_t	tot_dsds;
+	struct device_reg_82xx __iomem *reg;
+	uint32_t dbval;
+	uint32_t *fcp_dl;
+	uint8_t additional_cdb_len;
+	struct ct6_dsd *ctx;
+	struct scsi_qla_host *vha = sp->fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = NULL;
+	struct rsp_que *rsp = NULL;
+	char		tag[2];
+
+	/* Setup device pointers. */
+	ret = 0;
+	reg = &ha->iobase->isp82;
+	cmd = sp->cmd;
+	req = vha->req;
+	rsp = ha->rsp_q_map[0];
+
+	/* So we know we haven't pci_map'ed anything yet */
+	tot_dsds = 0;
+
+	dbval = 0x04 | (ha->portnum << 5);
+
+	/* Send marker if required */
+	if (vha->marker_needed != 0) {
+		if (qla2x00_marker(vha, req,
+			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x300c,
+			    "qla2x00_marker failed for cmd=%p.\n", cmd);
+			return QLA_FUNCTION_FAILED;
+		}
+		vha->marker_needed = 0;
+	}
+
+	/* Acquire ring specific lock */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Check for room in outstanding command list. */
+	handle = req->current_outstanding_cmd;
+	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+		handle++;
+		if (handle == MAX_OUTSTANDING_COMMANDS)
+			handle = 1;
+		if (!req->outstanding_cmds[handle])
+			break;
+	}
+	if (index == MAX_OUTSTANDING_COMMANDS)
+		goto queuing_error;
+
+	/* Map the sg table so we have an accurate count of sg entries needed */
+	if (scsi_sg_count(cmd)) {
+		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+		    scsi_sg_count(cmd), cmd->sc_data_direction);
+		if (unlikely(!nseg))
+			goto queuing_error;
+	} else
+		nseg = 0;
+
+	tot_dsds = nseg;
+
+	if (tot_dsds > ql2xshiftctondsd) {
+		struct cmd_type_6 *cmd_pkt;
+		uint16_t more_dsd_lists = 0;
+		struct dsd_dma *dsd_ptr;
+		uint16_t i;
+
+		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
+		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
+			ql_dbg(ql_dbg_io, vha, 0x300d,
+			    "Num of DSD lists %d is more than %d for cmd=%p.\n",
+			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
+			    cmd);
+			goto queuing_error;
+		}
+
+		if (more_dsd_lists <= ha->gbl_dsd_avail)
+			goto sufficient_dsds;
+		else
+			more_dsd_lists -= ha->gbl_dsd_avail;
+
+		for (i = 0; i < more_dsd_lists; i++) {
+			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+			if (!dsd_ptr) {
+				ql_log(ql_log_fatal, vha, 0x300e,
+				    "Failed to allocate memory for dsd_dma "
+				    "for cmd=%p.\n", cmd);
+				goto queuing_error;
+			}
+
+			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
+				GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
+			if (!dsd_ptr->dsd_addr) {
+				kfree(dsd_ptr);
+				ql_log(ql_log_fatal, vha, 0x300f,
+				    "Failed to allocate memory for dsd_addr "
+				    "for cmd=%p.\n", cmd);
+				goto queuing_error;
+			}
+			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
+			ha->gbl_dsd_avail++;
+		}
+
+sufficient_dsds:
+		req_cnt = 1;
+
+		if (req->cnt < (req_cnt + 2)) {
+			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+				&reg->req_q_out[0]);
+			if (req->ring_index < cnt)
+				req->cnt = cnt - req->ring_index;
+			else
+				req->cnt = req->length -
+					(req->ring_index - cnt);
+		}
+
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
+
+		ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
+		if (!sp->ctx) {
+			ql_log(ql_log_fatal, vha, 0x3010,
+			    "Failed to allocate ctx for cmd=%p.\n", cmd);
+			goto queuing_error;
+		}
+		memset(ctx, 0, sizeof(struct ct6_dsd));
+		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
+			GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+		if (!ctx->fcp_cmnd) {
+			ql_log(ql_log_fatal, vha, 0x3011,
+			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
+			goto queuing_error_fcp_cmnd;
+		}
+
+		/* Initialize the DSD list and dma handle */
+		INIT_LIST_HEAD(&ctx->dsd_list);
+		ctx->dsd_use_cnt = 0;
+
+		if (cmd->cmd_len > 16) {
+			additional_cdb_len = cmd->cmd_len - 16;
+			if ((cmd->cmd_len % 4) != 0) {
+				/* SCSI command bigger than 16 bytes must be
+				 * multiple of 4
+				 */
+				ql_log(ql_log_warn, vha, 0x3012,
+				    "scsi cmd len %d not multiple of 4 "
+				    "for cmd=%p.\n", cmd->cmd_len, cmd);
+				goto queuing_error_fcp_cmnd;
+			}
+			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+		} else {
+			additional_cdb_len = 0;
+			ctx->fcp_cmnd_len = 12 + 16 + 4;
+		}
+
+		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+		/* Zero out remaining portion of packet. */
+		/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
+		clr_ptr = (uint32_t *)cmd_pkt + 2;
+		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+		/* Set NPORT-ID and LUN number*/
+		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+		cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+		/* Build IOCB segments */
+		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
+			goto queuing_error_fcp_cmnd;
+
+		int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+		/* build FCP_CMND IU */
+		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+		int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
+		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+		if (cmd->sc_data_direction == DMA_TO_DEVICE)
+			ctx->fcp_cmnd->additional_cdb_len |= 1;
+		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+			ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+		/*
+		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
+		 */
+		if (scsi_populate_tag_msg(cmd, tag)) {
+			switch (tag[0]) {
+			case HEAD_OF_QUEUE_TAG:
+				ctx->fcp_cmnd->task_attribute =
+				    TSK_HEAD_OF_QUEUE;
+				break;
+			case ORDERED_QUEUE_TAG:
+				ctx->fcp_cmnd->task_attribute =
+				    TSK_ORDERED;
+				break;
+			}
+		}
+
+		/* Populate the FCP_PRIO. */
+		if (ha->flags.fcp_prio_enabled)
+			ctx->fcp_cmnd->task_attribute |=
+			    sp->fcport->fcp_prio << 3;
+
+		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
+		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
+		    additional_cdb_len);
+		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+		cmd_pkt->fcp_cmnd_dseg_address[0] =
+		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
+		cmd_pkt->fcp_cmnd_dseg_address[1] =
+		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
+
+		sp->flags |= SRB_FCP_CMND_DMA_VALID;
+		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+		/* Set total data segment count. */
+		cmd_pkt->entry_count = (uint8_t)req_cnt;
+		/* Specify response queue number where
+		 * completion should happen
+		 */
+		cmd_pkt->entry_status = (uint8_t) rsp->id;
+	} else {
+		struct cmd_type_7 *cmd_pkt;
+		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+		if (req->cnt < (req_cnt + 2)) {
+			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+			    &reg->req_q_out[0]);
+			if (req->ring_index < cnt)
+				req->cnt = cnt - req->ring_index;
+			else
+				req->cnt = req->length -
+					(req->ring_index - cnt);
+		}
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
+
+		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+		/* Zero out remaining portion of packet. */
+		/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
+		clr_ptr = (uint32_t *)cmd_pkt + 2;
+		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+		/* Set NPORT-ID and LUN number*/
+		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+		cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+		int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
+			sizeof(cmd_pkt->lun));
+
+		/*
+		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
+		 */
+		if (scsi_populate_tag_msg(cmd, tag)) {
+			switch (tag[0]) {
+			case HEAD_OF_QUEUE_TAG:
+				cmd_pkt->task = TSK_HEAD_OF_QUEUE;
+				break;
+			case ORDERED_QUEUE_TAG:
+				cmd_pkt->task = TSK_ORDERED;
+				break;
+			}
+		}
+
+		/* Populate the FCP_PRIO. */
+		if (ha->flags.fcp_prio_enabled)
+			cmd_pkt->task |= sp->fcport->fcp_prio << 3;
+
+		/* Load SCSI command packet. */
+		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+		/* Build IOCB segments */
+		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+
+		/* Set total data segment count. */
+		cmd_pkt->entry_count = (uint8_t)req_cnt;
+		/* Specify response queue number where
+		 * completion should happen.
+		 */
+		cmd_pkt->entry_status = (uint8_t) rsp->id;
+
+	}
+	/* Build command packet. */
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->handle = handle;
+	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+	req->cnt -= req_cnt;
+	wmb();
+
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else
+		req->ring_ptr++;
+
+	sp->flags |= SRB_DMA_VALID;
+
+	/* Set chip new ring index. */
+	/* write, read and verify logic */
+	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
+	if (ql2xdbwr)
+		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+	else {
+		WRT_REG_DWORD(
+			(unsigned long __iomem *)ha->nxdb_wr_ptr,
+			dbval);
+		wmb();
+		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+			WRT_REG_DWORD(
+				(unsigned long __iomem *)ha->nxdb_wr_ptr,
+				dbval);
+			wmb();
+		}
+	}
+
+	/* Manage unprocessed RIO/ZIO commands in response queue. */
+	if (vha->flags.process_response_queue &&
+	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+		qla24xx_process_response_queue(vha, rsp);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
+queuing_error:
+	if (tot_dsds)
+		scsi_dma_unmap(cmd);
+
+	if (sp->ctx) {
+		mempool_free(sp->ctx, ha->ctx_mempool);
+		sp->ctx = NULL;
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_FUNCTION_FAILED;
+}
+
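Annotation: the tail of qla82xx_start_scsi() rings the ISP82xx doorbell with a write-and-verify loop — dbval packs the command type, port number, queue id and new ring index into one word, and in the non-ql2xdbwr path the driver re-posts the value until the read-back matches. A condensed model of that handshake (barriers omitted; names abbreviated):

	#include <stdint.h>

	/* wr/rd stand in for WRT_REG_DWORD/RD_REG_DWORD on ha->nxdb_wr_ptr
	 * and ha->nxdb_rd_ptr; the wmb() calls of the real code are omitted.
	 */
	static void ring_doorbell(volatile uint32_t *wr, volatile uint32_t *rd,
				  uint32_t portnum, uint32_t qid, uint32_t ring_index)
	{
		uint32_t dbval = 0x04 | (portnum << 5) | (qid << 8) |
				 (ring_index << 16);

		*wr = dbval;
		while (*rd != dbval)	/* firmware echoes the value once seen */
			*wr = dbval;	/* re-post until the read-back matches */
	}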
 int
 qla2x00_start_sp(srb_t *sp)
 {
@@ -2196,8 +2629,8 @@ qla2x00_start_sp(srb_t *sp)
 		break;
 	case SRB_CT_CMD:
 		IS_FWI2_CAPABLE(ha) ?
-		qla24xx_ct_iocb(sp, pkt) :
-		qla2x00_ct_iocb(sp, pkt);
+		    qla24xx_ct_iocb(sp, pkt) :
+		    qla2x00_ct_iocb(sp, pkt);
 		break;
 	case SRB_ADISC_CMD:
 		IS_FWI2_CAPABLE(ha) ?
@@ -2212,7 +2645,7 @@ qla2x00_start_sp(srb_t *sp)
 	}
 
 	wmb();
-	qla2x00_start_iocbs(sp);
+	qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
 done:
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	return rval;

+ 144 - 109
drivers/scsi/qla2xxx/qla_isr.c

@@ -242,32 +242,34 @@ static void
 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
 {
 	uint16_t	cnt;
+	uint32_t	mboxes;
 	uint16_t __iomem *wptr;
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 
+	/* Read all mbox registers? */
+	mboxes = (1 << ha->mbx_count) - 1;
+	if (!ha->mcp)
+		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
+	else
+		mboxes = ha->mcp->in_mb;
+
 	/* Load return mailbox registers. */
 	ha->flags.mbox_int = 1;
 	ha->mailbox_out[0] = mb0;
+	mboxes >>= 1;
 	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
 
 	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
 		if (IS_QLA2200(ha) && cnt == 8)
 			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
-		if (cnt == 4 || cnt == 5)
+		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
 			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
-		else
+		else if (mboxes & BIT_0)
 			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
 
 		wptr++;
-	}
-
-	if (ha->mcp) {
-		ql_dbg(ql_dbg_async, vha, 0x5000,
-		    "Got mbx completion. cmd=%x.\n", ha->mcp->mb[0]);
-	} else {
-		ql_dbg(ql_dbg_async, vha, 0x5001,
-		    "MBX pointer ERROR.\n");
+		mboxes >>= 1;
 	}
 }
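Annotation: rather than unconditionally reading every mailbox register, the handler now consults the in_mb bitmask of the active command — bit N set means mailbox N carries output — and shifts the mask right as it walks the registers (the pre-loop shift accounts for mb0 arriving as the function argument). A minimal model of the walk:

	#include <stdint.h>

	/* 'in_mb' bit N set means hardware mailbox N holds output for the
	 * current command; untouched registers are never read.
	 */
	static void read_mailboxes(const volatile uint16_t *regs, uint16_t *out,
				   unsigned int count, uint32_t in_mb)
	{
		unsigned int n;

		for (n = 0; n < count; n++) {
			if (in_mb & 1)
				out[n] = regs[n];
			in_mb >>= 1;
		}
	}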
 
@@ -298,7 +300,7 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
 		return;
 
 	ql_dbg(ql_dbg_async, vha, 0x5022,
-	    "Inter-Driver Commucation %s -- ACK timeout=%d.\n",
+	    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
 	    vha->host_no, event[aen & 0xff], timeout);
 
 	rval = qla2x00_post_idc_ack_work(vha, mb);
@@ -453,7 +455,7 @@ skip_rio:
 		break;
 
 	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
-		ql_log(ql_log_info, vha, 0x5009,
+		ql_dbg(ql_dbg_async, vha, 0x5009,
 		    "LIP occurred (%x).\n", mb[1]);
 
 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -487,7 +489,7 @@ skip_rio:
 			ha->link_data_rate = mb[1];
 		}
 
-		ql_log(ql_log_info, vha, 0x500a,
+		ql_dbg(ql_dbg_async, vha, 0x500a,
 		    "LOOP UP detected (%s Gbps).\n", link_speed);
 
 		vha->flags.management_server_logged_in = 0;
@@ -497,7 +499,7 @@ skip_rio:
 	case MBA_LOOP_DOWN:		/* Loop Down Event */
 		mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
 		mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
-		ql_log(ql_log_info, vha, 0x500b,
+		ql_dbg(ql_dbg_async, vha, 0x500b,
 		    "LOOP DOWN detected (%x %x %x %x).\n",
 		    mb[1], mb[2], mb[3], mbx);
 
@@ -519,7 +521,7 @@ skip_rio:
 		break;
 
 	case MBA_LIP_RESET:		/* LIP reset occurred */
-		ql_log(ql_log_info, vha, 0x500c,
+		ql_dbg(ql_dbg_async, vha, 0x500c,
 		    "LIP reset occurred (%x).\n", mb[1]);
 
 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -587,7 +589,7 @@ skip_rio:
 		if (IS_QLA2100(ha))
 			break;
 
-		ql_log(ql_log_info, vha, 0x500f,
+		ql_dbg(ql_dbg_async, vha, 0x500f,
 		    "Configuration change detected: value=%x.\n", mb[1]);
 
 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -920,15 +922,15 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 	    QLA_LOGIO_LOGIN_RETRIED : 0;
 	if (mbx->entry_status) {
 		ql_dbg(ql_dbg_async, vha, 0x5043,
-		    "Async-%s error entry - portid=%02x%02x%02x "
+		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
 		    "entry-status=%x status=%x state-flag=%x "
-		    "status-flags=%x.\n",
-		    type, fcport->d_id.b.domain, fcport->d_id.b.area,
+		    "status-flags=%x.\n", type, sp->handle,
+		    fcport->d_id.b.domain, fcport->d_id.b.area,
 		    fcport->d_id.b.al_pa, mbx->entry_status,
 		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
 		    le16_to_cpu(mbx->status_flags));
 
-		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5057,
+		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
 		    (uint8_t *)mbx, sizeof(*mbx));
 
 		goto logio_done;
@@ -940,9 +942,10 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 		status = 0;
 	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
 		ql_dbg(ql_dbg_async, vha, 0x5045,
-		    "Async-%s complete - portid=%02x%02x%02x mbx1=%x.\n",
-		    type, fcport->d_id.b.domain, fcport->d_id.b.area,
-		    fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1));
+		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
+		    type, sp->handle, fcport->d_id.b.domain,
+		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
+		    le16_to_cpu(mbx->mb1));
 
 		data[0] = MBS_COMMAND_COMPLETE;
 		if (ctx->type == SRB_LOGIN_CMD) {
@@ -968,11 +971,10 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 	}
 
 	ql_log(ql_log_warn, vha, 0x5046,
-	    "Async-%s failed - portid=%02x%02x%02x status=%x "
-	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n",
-	    type, fcport->d_id.b.domain,
-	    fcport->d_id.b.area, fcport->d_id.b.al_pa, status,
-	    le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
+	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
+	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
+	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
 	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
 	    le16_to_cpu(mbx->mb7));
 
@@ -1036,7 +1038,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 			bsg_job->reply->result = DID_ERROR << 16;
 			bsg_job->reply->reply_payload_rcv_len = 0;
 		}
-		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5058,
+		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
 		    (uint8_t *)pkt, sizeof(*pkt));
 	} else {
 		bsg_job->reply->result =  DID_OK << 16;
@@ -1111,9 +1113,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 				le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
 
 			ql_log(ql_log_info, vha, 0x503f,
-			    "ELS-CT pass-through-%s error comp_status-status=0x%x "
+			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
 			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
-			    type, comp_status, fw_status[1], fw_status[2],
+			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
 			    le16_to_cpu(((struct els_sts_entry_24xx *)
 				pkt)->total_byte_count));
 			fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
@@ -1121,9 +1123,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 		}
 		else {
 			ql_log(ql_log_info, vha, 0x5040,
-			    "ELS-CT pass-through-%s error comp_status-status=0x%x "
+			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
 			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
-			    type, comp_status,
+			    type, sp->handle, comp_status,
 			    le16_to_cpu(((struct els_sts_entry_24xx *)
 				pkt)->error_subcode_1),
 			    le16_to_cpu(((struct els_sts_entry_24xx *)
@@ -1184,11 +1186,12 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
 		QLA_LOGIO_LOGIN_RETRIED : 0;
 	if (logio->entry_status) {
 		ql_log(ql_log_warn, vha, 0x5034,
-		    "Async-%s error entry - "
+		    "Async-%s error entry - hdl=%x "
 		    "portid=%02x%02x%02x entry-status=%x.\n",
-		    type, fcport->d_id.b.domain, fcport->d_id.b.area,
-		    fcport->d_id.b.al_pa, logio->entry_status);
-		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5059,
+		    type, sp->handle, fcport->d_id.b.domain,
+		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
+		    logio->entry_status);
+		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
 		    (uint8_t *)logio, sizeof(*logio));
 
 		goto logio_done;
@@ -1196,10 +1199,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
 
 	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
 		ql_dbg(ql_dbg_async, vha, 0x5036,
-		    "Async-%s complete - portid=%02x%02x%02x "
-		    "iop0=%x.\n",
-		    type, fcport->d_id.b.domain, fcport->d_id.b.area,
-		    fcport->d_id.b.al_pa,
+		    "Async-%s complete - hdl=%x portid=%02x%02x%02x "
+		    "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
 		    le32_to_cpu(logio->io_parameter[0]));
 
 		data[0] = MBS_COMMAND_COMPLETE;
@@ -1238,9 +1240,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
 	}
 
 	ql_dbg(ql_dbg_async, vha, 0x5037,
-	    "Async-%s failed - portid=%02x%02x%02x comp=%x "
-	    "iop0=%x iop1=%x.\n",
-	    type, fcport->d_id.b.domain,
+	    "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
+	    "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
 	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
 	    le16_to_cpu(logio->comp_status),
 	    le32_to_cpu(logio->io_parameter[0]),
@@ -1274,25 +1275,25 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 
 	if (sts->entry_status) {
 		ql_log(ql_log_warn, vha, 0x5038,
-		    "Async-%s error - entry-status(%x).\n",
-		    type, sts->entry_status);
+		    "Async-%s error - hdl=%x entry-status(%x).\n",
+		    type, sp->handle, sts->entry_status);
 	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
 		ql_log(ql_log_warn, vha, 0x5039,
-		    "Async-%s error - completion status(%x).\n",
-		    type, sts->comp_status);
+		    "Async-%s error - hdl=%x completion status(%x).\n",
+		    type, sp->handle, sts->comp_status);
 	} else if (!(le16_to_cpu(sts->scsi_status) &
 	    SS_RESPONSE_INFO_LEN_VALID)) {
 		ql_log(ql_log_warn, vha, 0x503a,
-		    "Async-%s error - no response info(%x).\n",
-		    type, sts->scsi_status);
+		    "Async-%s error - hdl=%x no response info(%x).\n",
+		    type, sp->handle, sts->scsi_status);
 	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
 		ql_log(ql_log_warn, vha, 0x503b,
-		    "Async-%s error - not enough response(%d).\n",
-		    type, sts->rsp_data_len);
+		    "Async-%s error - hdl=%x not enough response(%d).\n",
+		    type, sp->handle, sts->rsp_data_len);
 	} else if (sts->data[3]) {
 		ql_log(ql_log_warn, vha, 0x503c,
-		    "Async-%s error - response(%x).\n",
-		    type, sts->data[3]);
+		    "Async-%s error - hdl=%x response(%x).\n",
+		    type, sp->handle, sts->data[3]);
 	} else {
 		error = 0;
 	}
@@ -1337,9 +1338,6 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
 		}
 
 		if (pkt->entry_status != 0) {
-			ql_log(ql_log_warn, vha, 0x5035,
-			    "Process error entry.\n");
-
 			qla2x00_error_entry(vha, rsp, pkt);
 			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
 			wmb();
@@ -1391,7 +1389,6 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
 }
 
 static inline void
-
 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
     uint32_t sense_len, struct rsp_que *rsp)
 {
@@ -1413,13 +1410,14 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
 	if (sp->request_sense_length != 0)
 		rsp->status_srb = sp;
 
-	ql_dbg(ql_dbg_io, vha, 0x301c,
-	    "Check condition Sense data, scsi(%ld:%d:%d:%d) cmd=%p.\n",
-	    sp->fcport->vha->host_no, cp->device->channel, cp->device->id,
-	    cp->device->lun, cp);
-	if (sense_len)
+	if (sense_len) {
+		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
+		    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
+		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
+		    cp);
 		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
 		    cp->sense_buffer, sense_len);
+	}
 }
 
 struct scsi_dif_tuple {
@@ -1506,7 +1504,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
 			}
 
 			if (k != blocks_done) {
-				qla_printk(KERN_WARNING, sp->fcport->vha->hw,
+				ql_log(ql_log_warn, vha, 0x302f,
 				    "unexpected tag values tag:lba=%x:%llx)\n",
 				    e_ref_tag, (unsigned long long)lba_s);
 				return 1;
@@ -1611,7 +1609,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 		sp = NULL;
 
 	if (sp == NULL) {
-		ql_log(ql_log_warn, vha, 0x3017,
+		ql_dbg(ql_dbg_io, vha, 0x3017,
 		    "Invalid status handle (0x%x).\n", sts->handle);
 
 		if (IS_QLA82XX(ha))
@@ -1623,7 +1621,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 	}
 	cp = sp->cmd;
 	if (cp == NULL) {
-		ql_log(ql_log_warn, vha, 0x3018,
+		ql_dbg(ql_dbg_io, vha, 0x3018,
 		    "Command already returned (0x%x/%p).\n",
 		    sts->handle, sp);
 
@@ -1670,7 +1668,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 			par_sense_len -= rsp_info_len;
 		}
 		if (rsp_info_len > 3 && rsp_info[3]) {
-			ql_log(ql_log_warn, vha, 0x3019,
+			ql_dbg(ql_dbg_io, vha, 0x3019,
 			    "FCP I/O protocol failure (0x%x/0x%x).\n",
 			    rsp_info_len, rsp_info[3]);
 
@@ -1701,7 +1699,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 			if (!lscsi_status &&
 			    ((unsigned)(scsi_bufflen(cp) - resid) <
 			     cp->underflow)) {
-				ql_log(ql_log_warn, vha, 0x301a,
+				ql_dbg(ql_dbg_io, vha, 0x301a,
 				    "Mid-layer underflow "
 				    "detected (0x%x of 0x%x bytes).\n",
 				    resid, scsi_bufflen(cp));
@@ -1713,7 +1711,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 		cp->result = DID_OK << 16 | lscsi_status;
 
 		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
-			ql_log(ql_log_warn, vha, 0x301b,
+			ql_dbg(ql_dbg_io, vha, 0x301b,
 			    "QUEUE FULL detected.\n");
 			break;
 		}
@@ -1735,7 +1733,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 		scsi_set_resid(cp, resid);
 		if (scsi_status & SS_RESIDUAL_UNDER) {
 			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
-				ql_log(ql_log_warn, vha, 0x301d,
+				ql_dbg(ql_dbg_io, vha, 0x301d,
 				    "Dropped frame(s) detected "
 				    "(0x%x of 0x%x bytes).\n",
 				    resid, scsi_bufflen(cp));
@@ -1747,7 +1745,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 			if (!lscsi_status &&
 			    ((unsigned)(scsi_bufflen(cp) - resid) <
 			    cp->underflow)) {
-				ql_log(ql_log_warn, vha, 0x301e,
+				ql_dbg(ql_dbg_io, vha, 0x301e,
 				    "Mid-layer underflow "
 				    "detected (0x%x of 0x%x bytes).\n",
 				    resid, scsi_bufflen(cp));
@@ -1756,7 +1754,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 				break;
 			}
 		} else {
-			ql_log(ql_log_warn, vha, 0x301f,
+			ql_dbg(ql_dbg_io, vha, 0x301f,
 			    "Dropped frame(s) detected (0x%x "
 			    "of 0x%x bytes).\n", resid, scsi_bufflen(cp));
 
@@ -1774,7 +1772,7 @@ check_scsi_status:
 		 */
 		if (lscsi_status != 0) {
 			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
-				ql_log(ql_log_warn, vha, 0x3020,
+				ql_dbg(ql_dbg_io, vha, 0x3020,
 				    "QUEUE FULL detected.\n");
 				logit = 1;
 				break;
@@ -1838,10 +1836,15 @@ out:
 	if (logit)
 		ql_dbg(ql_dbg_io, vha, 0x3022,
 		    "FCP command status: 0x%x-0x%x (0x%x) "
-		    "oxid=0x%x cdb=%02x%02x%02x len=0x%x "
+		    "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
+		    "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
 		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
-		    comp_status, scsi_status, cp->result, ox_id, cp->cmnd[0],
-		    cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
+		    comp_status, scsi_status, cp->result, vha->host_no,
+		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
+		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
+		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
+		    cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
+		    cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
 		    resid_len, fw_resid_len);
 
 	if (rsp->status_srb == NULL)
@@ -1899,6 +1902,45 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
 	}
 }
 
+static int
+qla2x00_free_sp_ctx(scsi_qla_host_t *vha, srb_t *sp)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct srb_ctx *ctx;
+
+	if (!sp->ctx)
+		return 1;
+
+	ctx = sp->ctx;
+
+	if (ctx->type == SRB_LOGIN_CMD ||
+	    ctx->type == SRB_LOGOUT_CMD ||
+	    ctx->type == SRB_TM_CMD) {
+		ctx->u.iocb_cmd->done(sp);
+		return 0;
+	} else if (ctx->type == SRB_ADISC_CMD) {
+		ctx->u.iocb_cmd->free(sp);
+		return 0;
+	} else {
+		struct fc_bsg_job *bsg_job;
+
+		bsg_job = ctx->u.bsg_job;
+		if (ctx->type == SRB_ELS_CMD_HST ||
+		    ctx->type == SRB_CT_CMD)
+			kfree(sp->fcport);
+
+		bsg_job->reply->reply_data.ctels_reply.status =
+		    FC_CTELS_STATUS_OK;
+		bsg_job->reply->result = DID_ERROR << 16;
+		bsg_job->reply->reply_payload_rcv_len = 0;
+		kfree(sp->ctx);
+		mempool_free(sp, ha->srb_mempool);
+		bsg_job->job_done(bsg_job);
+		return 0;
+	}
+	return 1;
+}
+
 /**
  * qla2x00_error_entry() - Process an error entry.
  * @ha: SCSI driver HA context
@@ -1909,7 +1951,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
 {
 	srb_t *sp;
 	struct qla_hw_data *ha = vha->hw;
-	uint32_t handle = LSW(pkt->handle);
+	const char func[] = "ERROR-IOCB";
 	uint16_t que = MSW(pkt->handle);
 	struct req_que *req = ha->req_q_map[que];
 
@@ -1932,28 +1974,20 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
 		ql_dbg(ql_dbg_async, vha, 0x502f,
 		    "UNKNOWN flag error.\n");
 
-	/* Validate handle. */
-	if (handle < MAX_OUTSTANDING_COMMANDS)
-		sp = req->outstanding_cmds[handle];
-	else
-		sp = NULL;
-
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
 	if (sp) {
-		/* Free outstanding command slot. */
-		req->outstanding_cmds[handle] = NULL;
-
-		/* Bad payload or header */
-		if (pkt->entry_status &
-		    (RF_INV_E_ORDER | RF_INV_E_COUNT |
-		     RF_INV_E_PARAM | RF_INV_E_TYPE)) {
-			sp->cmd->result = DID_ERROR << 16;
-		} else if (pkt->entry_status & RF_BUSY) {
-			sp->cmd->result = DID_BUS_BUSY << 16;
-		} else {
-			sp->cmd->result = DID_ERROR << 16;
+		if (qla2x00_free_sp_ctx(vha, sp)) {
+			if (pkt->entry_status &
+			    (RF_INV_E_ORDER | RF_INV_E_COUNT |
+			     RF_INV_E_PARAM | RF_INV_E_TYPE)) {
+				sp->cmd->result = DID_ERROR << 16;
+			} else if (pkt->entry_status & RF_BUSY) {
+				sp->cmd->result = DID_BUS_BUSY << 16;
+			} else {
+				sp->cmd->result = DID_ERROR << 16;
+			}
+			qla2x00_sp_compl(ha, sp);
 		}
-		qla2x00_sp_compl(ha, sp);
-
 	} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
 		COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
 		|| pkt->entry_type == COMMAND_TYPE_6) {
@@ -1977,26 +2011,30 @@ static void
 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
 {
 	uint16_t	cnt;
+	uint32_t	mboxes;
 	uint16_t __iomem *wptr;
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
+	/* Read all mbox registers? */
+	mboxes = (1 << ha->mbx_count) - 1;
+	if (!ha->mcp)
+		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
+	else
+		mboxes = ha->mcp->in_mb;
+
 	/* Load return mailbox registers. */
 	ha->flags.mbox_int = 1;
 	ha->mailbox_out[0] = mb0;
+	mboxes >>= 1;
 	wptr = (uint16_t __iomem *)&reg->mailbox1;
 
 	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
-		ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
-		wptr++;
-	}
+		if (mboxes & BIT_0)
+			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
 
-	if (ha->mcp) {
-		ql_dbg(ql_dbg_async, vha, 0x504d,
-		    "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
-	} else {
-		ql_dbg(ql_dbg_async, vha, 0x504e,
-		    "MBX pointer ERROR.\n");
+		mboxes >>= 1;
+		wptr++;
 	}
 }
 
@@ -2025,9 +2063,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 		}
 
 		if (pkt->entry_status != 0) {
-			ql_dbg(ql_dbg_async, vha, 0x5029,
-			    "Process error entry.\n");
-
 			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
 			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
 			wmb();

+ 1 - 1
drivers/scsi/qla2xxx/qla_mbx.c

@@ -2887,7 +2887,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 		if (vp_idx == 0 && (MSB(stat) != 1))
 			goto reg_needed;
 
-		if (MSB(stat) == 1) {
+		if (MSB(stat) != 0) {
 			ql_dbg(ql_dbg_mbx, vha, 0x10ba,
 			    "Could not acquire ID for VP[%d].\n", vp_idx);
 			return;
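Annotation: the old test treated only MSB(stat) == 1 as a failed VP-ID acquisition and silently fell through for any other non-zero completion code; '!= 0' now rejects them all. MSB() is the driver's byte-extraction helper — assuming the usual definition from qla_def.h:

	/* Assumed helper shapes; qla_def.h defines the real ones. */
	#define LSB(x)	((uint8_t)(x))
	#define MSB(x)	((uint8_t)((uint16_t)(x) >> 8))

	/* stat = 0x0200: MSB(stat) == 2 -- a failure the old '== 1' check
	 * missed but 'MSB(stat) != 0' catches.
	 */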

+ 13 - 497
drivers/scsi/qla2xxx/qla_nx.c

@@ -369,7 +369,7 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
 		ql_dbg(ql_dbg_p3p, vha, 0xb000,
 		    "%s: Written crbwin (0x%x) "
 		    "!= Read crbwin (0x%x), off=0x%lx.\n",
-		    ha->crb_win, win_read, *off);
+		    __func__, ha->crb_win, win_read, *off);
 	}
 	*off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
 }
@@ -409,7 +409,7 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
 	}
 	/* strange address given */
 	ql_dbg(ql_dbg_p3p, vha, 0xb001,
-	    "%x: Warning: unm_nic_pci_set_crbwindow "
+	    "%s: Warning: unm_nic_pci_set_crbwindow "
 	    "called with an unknown address(%llx).\n",
 	    QLA2XXX_DRIVER_NAME, off);
 	return off;
@@ -1711,12 +1711,12 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
 	ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
 	    "nx_pci_base=%p iobase=%p "
 	    "max_req_queues=%d msix_count=%d.\n",
-	    ha->nx_pcibase, ha->iobase,
+	    (void *)ha->nx_pcibase, ha->iobase,
 	    ha->max_req_queues, ha->msix_count);
 	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
 	    "nx_pci_base=%p iobase=%p "
 	    "max_req_queues=%d msix_count=%d.\n",
-	    ha->nx_pcibase, ha->iobase,
+	    (void *)ha->nx_pcibase, ha->iobase,
 	    ha->max_req_queues, ha->msix_count);
 	return 0;
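Annotation: the two (void *) casts quiet a format mismatch — nx_pcibase is stored as an integer holding the mapped base address, while %p expects an actual pointer and would otherwise consume the wrong argument width on some ABIs. Minimal illustration of the rule (the field's integer type is assumed from the cast):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uintptr_t nx_pcibase = 0xfed00000;	/* integer-typed base */

		/* printf("%p", nx_pcibase) would be undefined behavior. */
		printf("nx_pci_base=%p\n", (void *)nx_pcibase);
		return 0;
	}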
 
@@ -1744,7 +1744,7 @@ qla82xx_pci_config(scsi_qla_host_t *vha)
 	ret = pci_set_mwi(ha->pdev);
 	ha->chip_revision = ha->pdev->revision;
 	ql_dbg(ql_dbg_init, vha, 0x0043,
-	    "Chip revision:%ld.\n",
+	    "Chip revision:%d.\n",
 	    ha->chip_revision);
 	return 0;
 }
@@ -2023,13 +2023,9 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
 		wptr++;
 	}
 
-	if (ha->mcp) {
-		ql_dbg(ql_dbg_async, vha, 0x5052,
-		    "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
-	} else {
+	if (!ha->mcp)
 		ql_dbg(ql_dbg_async, vha, 0x5053,
 		    "MBX pointer ERROR.\n");
-	}
 }
 
 /*
@@ -2543,484 +2539,6 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
 	return qla82xx_check_rcvpeg_state(ha);
 }
 
-static inline int
-qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
-	uint16_t tot_dsds)
-{
-	uint32_t *cur_dsd = NULL;
-	scsi_qla_host_t	*vha;
-	struct qla_hw_data *ha;
-	struct scsi_cmnd *cmd;
-	struct	scatterlist *cur_seg;
-	uint32_t *dsd_seg;
-	void *next_dsd;
-	uint8_t avail_dsds;
-	uint8_t first_iocb = 1;
-	uint32_t dsd_list_len;
-	struct dsd_dma *dsd_ptr;
-	struct ct6_dsd *ctx;
-
-	cmd = sp->cmd;
-
-	/* Update entry type to indicate Command Type 3 IOCB */
-	*((uint32_t *)(&cmd_pkt->entry_type)) =
-		__constant_cpu_to_le32(COMMAND_TYPE_6);
-
-	/* No data transfer */
-	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
-		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
-		return 0;
-	}
-
-	vha = sp->fcport->vha;
-	ha = vha->hw;
-
-	/* Set transfer direction */
-	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
-		cmd_pkt->control_flags =
-		    __constant_cpu_to_le16(CF_WRITE_DATA);
-		ha->qla_stats.output_bytes += scsi_bufflen(cmd);
-	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
-		cmd_pkt->control_flags =
-		    __constant_cpu_to_le16(CF_READ_DATA);
-		ha->qla_stats.input_bytes += scsi_bufflen(cmd);
-	}
-
-	cur_seg = scsi_sglist(cmd);
-	ctx = sp->ctx;
-
-	while (tot_dsds) {
-		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
-		    QLA_DSDS_PER_IOCB : tot_dsds;
-		tot_dsds -= avail_dsds;
-		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
-
-		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
-		    struct dsd_dma, list);
-		next_dsd = dsd_ptr->dsd_addr;
-		list_del(&dsd_ptr->list);
-		ha->gbl_dsd_avail--;
-		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
-		ctx->dsd_use_cnt++;
-		ha->gbl_dsd_inuse++;
-
-		if (first_iocb) {
-			first_iocb = 0;
-			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
-			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
-		} else {
-			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = cpu_to_le32(dsd_list_len);
-		}
-		cur_dsd = (uint32_t *)next_dsd;
-		while (avail_dsds) {
-			dma_addr_t	sle_dma;
-
-			sle_dma = sg_dma_address(cur_seg);
-			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
-			cur_seg = sg_next(cur_seg);
-			avail_dsds--;
-		}
-	}
-
-	/* Null termination */
-	*cur_dsd++ =  0;
-	*cur_dsd++ = 0;
-	*cur_dsd++ = 0;
-	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
-	return 0;
-}
-
-/*
- * qla82xx_calc_dsd_lists() - Determine number of DSD list required
- * for Command Type 6.
- *
- * @dsds: number of data segment decriptors needed
- *
- * Returns the number of dsd list needed to store @dsds.
- */
-inline uint16_t
-qla82xx_calc_dsd_lists(uint16_t dsds)
-{
-	uint16_t dsd_lists = 0;
-
-	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
-	if (dsds % QLA_DSDS_PER_IOCB)
-		dsd_lists++;
-	return dsd_lists;
-}
-
-/*
- * qla82xx_start_scsi() - Send a SCSI command to the ISP
- * @sp: command to send to the ISP
- *
- * Returns non-zero if a failure occurred, else zero.
- */
-int
-qla82xx_start_scsi(srb_t *sp)
-{
-	int		ret, nseg;
-	unsigned long   flags;
-	struct scsi_cmnd *cmd;
-	uint32_t	*clr_ptr;
-	uint32_t        index;
-	uint32_t	handle;
-	uint16_t	cnt;
-	uint16_t	req_cnt;
-	uint16_t	tot_dsds;
-	struct device_reg_82xx __iomem *reg;
-	uint32_t dbval;
-	uint32_t *fcp_dl;
-	uint8_t additional_cdb_len;
-	struct ct6_dsd *ctx;
-	struct scsi_qla_host *vha = sp->fcport->vha;
-	struct qla_hw_data *ha = vha->hw;
-	struct req_que *req = NULL;
-	struct rsp_que *rsp = NULL;
-	char		tag[2];
-
-	/* Setup device pointers. */
-	ret = 0;
-	reg = &ha->iobase->isp82;
-	cmd = sp->cmd;
-	req = vha->req;
-	rsp = ha->rsp_q_map[0];
-
-	/* So we know we haven't pci_map'ed anything yet */
-	tot_dsds = 0;
-
-	dbval = 0x04 | (ha->portnum << 5);
-
-	/* Send marker if required */
-	if (vha->marker_needed != 0) {
-		if (qla2x00_marker(vha, req,
-			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
-			ql_log(ql_log_warn, vha, 0x300c,
-			    "qla2x00_marker failed for cmd=%p.\n", cmd);
-			return QLA_FUNCTION_FAILED;
-		}
-		vha->marker_needed = 0;
-	}
-
-	/* Acquire ring specific lock */
-	spin_lock_irqsave(&ha->hardware_lock, flags);
-
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
-		handle++;
-		if (handle == MAX_OUTSTANDING_COMMANDS)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-	if (index == MAX_OUTSTANDING_COMMANDS)
-		goto queuing_error;
-
-	/* Map the sg table so we have an accurate count of sg entries needed */
-	if (scsi_sg_count(cmd)) {
-		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
-		    scsi_sg_count(cmd), cmd->sc_data_direction);
-		if (unlikely(!nseg))
-			goto queuing_error;
-	} else
-		nseg = 0;
-
-	tot_dsds = nseg;
-
-	if (tot_dsds > ql2xshiftctondsd) {
-		struct cmd_type_6 *cmd_pkt;
-		uint16_t more_dsd_lists = 0;
-		struct dsd_dma *dsd_ptr;
-		uint16_t i;
-
-		more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
-		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
-			ql_dbg(ql_dbg_io, vha, 0x300d,
-			    "Num of DSD list %d is than %d for cmd=%p.\n",
-			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
-			    cmd);
-			goto queuing_error;
-		}
-
-		if (more_dsd_lists <= ha->gbl_dsd_avail)
-			goto sufficient_dsds;
-		else
-			more_dsd_lists -= ha->gbl_dsd_avail;
-
-		for (i = 0; i < more_dsd_lists; i++) {
-			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
-			if (!dsd_ptr) {
-				ql_log(ql_log_fatal, vha, 0x300e,
-				    "Failed to allocate memory for dsd_dma "
-				    "for cmd=%p.\n", cmd);
-				goto queuing_error;
-			}
-
-			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
-				GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
-			if (!dsd_ptr->dsd_addr) {
-				kfree(dsd_ptr);
-				ql_log(ql_log_fatal, vha, 0x300f,
-				    "Failed to allocate memory for dsd_addr "
-				    "for cmd=%p.\n", cmd);
-				goto queuing_error;
-			}
-			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
-			ha->gbl_dsd_avail++;
-		}
-
-sufficient_dsds:
-		req_cnt = 1;
-
-		if (req->cnt < (req_cnt + 2)) {
-			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
-				&reg->req_q_out[0]);
-			if (req->ring_index < cnt)
-				req->cnt = cnt - req->ring_index;
-			else
-				req->cnt = req->length -
-					(req->ring_index - cnt);
-		}
-
-		if (req->cnt < (req_cnt + 2))
-			goto queuing_error;
-
-		ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
-		if (!sp->ctx) {
-			ql_log(ql_log_fatal, vha, 0x3010,
-			    "Failed to allocate ctx for cmd=%p.\n", cmd);
-			goto queuing_error;
-		}
-		memset(ctx, 0, sizeof(struct ct6_dsd));
-		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
-			GFP_ATOMIC, &ctx->fcp_cmnd_dma);
-		if (!ctx->fcp_cmnd) {
-			ql_log(ql_log_fatal, vha, 0x3011,
-			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
-			goto queuing_error_fcp_cmnd;
-		}
-
-		/* Initialize the DSD list and dma handle */
-		INIT_LIST_HEAD(&ctx->dsd_list);
-		ctx->dsd_use_cnt = 0;
-
-		if (cmd->cmd_len > 16) {
-			additional_cdb_len = cmd->cmd_len - 16;
-			if ((cmd->cmd_len % 4) != 0) {
-				/* SCSI command bigger than 16 bytes must be
-				 * multiple of 4
-				 */
-				ql_log(ql_log_warn, vha, 0x3012,
-				    "scsi cmd len %d not multiple of 4 "
-				    "for cmd=%p.\n", cmd->cmd_len, cmd);
-				goto queuing_error_fcp_cmnd;
-			}
-			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
-		} else {
-			additional_cdb_len = 0;
-			ctx->fcp_cmnd_len = 12 + 16 + 4;
-		}
-
-		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
-		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
-
-		/* Zero out remaining portion of packet. */
-		/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
-		clr_ptr = (uint32_t *)cmd_pkt + 2;
-		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
-		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
-
-		/* Set NPORT-ID and LUN number*/
-		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
-		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
-		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
-		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
-		cmd_pkt->vp_index = sp->fcport->vp_idx;
-
-		/* Build IOCB segments */
-		if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
-			goto queuing_error_fcp_cmnd;
-
-		int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
-		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
-
-		/* build FCP_CMND IU */
-		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
-		int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
-		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
-
-		if (cmd->sc_data_direction == DMA_TO_DEVICE)
-			ctx->fcp_cmnd->additional_cdb_len |= 1;
-		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
-			ctx->fcp_cmnd->additional_cdb_len |= 2;
-
-		/*
-		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
-		 */
-		if (scsi_populate_tag_msg(cmd, tag)) {
-			switch (tag[0]) {
-			case HEAD_OF_QUEUE_TAG:
-				ctx->fcp_cmnd->task_attribute =
-				    TSK_HEAD_OF_QUEUE;
-				break;
-			case ORDERED_QUEUE_TAG:
-				ctx->fcp_cmnd->task_attribute =
-				    TSK_ORDERED;
-				break;
-			}
-		}
-
-		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
-
-		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
-		    additional_cdb_len);
-		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
-
-		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
-		cmd_pkt->fcp_cmnd_dseg_address[0] =
-		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
-		cmd_pkt->fcp_cmnd_dseg_address[1] =
-		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
-
-		sp->flags |= SRB_FCP_CMND_DMA_VALID;
-		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
-		/* Set total data segment count. */
-		cmd_pkt->entry_count = (uint8_t)req_cnt;
-		/* Specify response queue number where
-		 * completion should happen
-		 */
-		cmd_pkt->entry_status = (uint8_t) rsp->id;
-	} else {
-		struct cmd_type_7 *cmd_pkt;
-		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
-		if (req->cnt < (req_cnt + 2)) {
-			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
-			    &reg->req_q_out[0]);
-			if (req->ring_index < cnt)
-				req->cnt = cnt - req->ring_index;
-			else
-				req->cnt = req->length -
-					(req->ring_index - cnt);
-		}
-		if (req->cnt < (req_cnt + 2))
-			goto queuing_error;
-
-		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
-		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
-
-		/* Zero out remaining portion of packet. */
-		/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
-		clr_ptr = (uint32_t *)cmd_pkt + 2;
-		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
-		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
-
-		/* Set NPORT-ID and LUN number*/
-		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
-		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
-		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
-		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
-		cmd_pkt->vp_index = sp->fcport->vp_idx;
-
-		int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
-		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
-			sizeof(cmd_pkt->lun));
-
-		/*
-		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
-		 */
-		if (scsi_populate_tag_msg(cmd, tag)) {
-			switch (tag[0]) {
-			case HEAD_OF_QUEUE_TAG:
-				cmd_pkt->task = TSK_HEAD_OF_QUEUE;
-				break;
-			case ORDERED_QUEUE_TAG:
-				cmd_pkt->task = TSK_ORDERED;
-				break;
-			}
-		}
-
-		/* Load SCSI command packet. */
-		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
-		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
-
-		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
-
-		/* Build IOCB segments */
-		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
-
-		/* Set total data segment count. */
-		cmd_pkt->entry_count = (uint8_t)req_cnt;
-		/* Specify response queue number where
-		 * completion should happen.
-		 */
-		cmd_pkt->entry_status = (uint8_t) rsp->id;
-
-	}
-	/* Build command packet. */
-	req->current_outstanding_cmd = handle;
-	req->outstanding_cmds[handle] = sp;
-	sp->handle = handle;
-	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
-	req->cnt -= req_cnt;
-	wmb();
-
-	/* Adjust ring index. */
-	req->ring_index++;
-	if (req->ring_index == req->length) {
-		req->ring_index = 0;
-		req->ring_ptr = req->ring;
-	} else
-		req->ring_ptr++;
-
-	sp->flags |= SRB_DMA_VALID;
-
-	/* Set chip new ring index. */
-	/* write, read and verify logic */
-	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
-	if (ql2xdbwr)
-		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
-	else {
-		WRT_REG_DWORD(
-			(unsigned long __iomem *)ha->nxdb_wr_ptr,
-			dbval);
-		wmb();
-		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
-			WRT_REG_DWORD(
-				(unsigned long __iomem *)ha->nxdb_wr_ptr,
-				dbval);
-			wmb();
-		}
-	}
-
-	/* Manage unprocessed RIO/ZIO commands in response queue. */
-	if (vha->flags.process_response_queue &&
-	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
-		qla24xx_process_response_queue(vha, rsp);
-
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-	return QLA_SUCCESS;
-
-queuing_error_fcp_cmnd:
-	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
-queuing_error:
-	if (tot_dsds)
-		scsi_dma_unmap(cmd);
-
-	if (sp->ctx) {
-		mempool_free(sp->ctx, ha->ctx_mempool);
-		sp->ctx = NULL;
-	}
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
-	return QLA_FUNCTION_FAILED;
-}
-
 static uint32_t *
 qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
 	uint32_t length)
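
The large deletion above drops qla82xx's private copies of the Command Type 6 IOCB builder, the DSD-list arithmetic and `qla82xx_start_scsi()`, presumably consolidated with the common IOCB code elsewhere in the series. The deleted `qla82xx_calc_dsd_lists()` was plain ceiling division; restated standalone below (the `QLA_DSDS_PER_IOCB` value is assumed for illustration):

```c
#include <stdio.h>

#define QLA_DSDS_PER_IOCB 37	/* assumed per-IOCB descriptor capacity */

static unsigned short calc_dsd_lists(unsigned short dsds)
{
	/* lists = ceil(dsds / QLA_DSDS_PER_IOCB) */
	return (dsds + QLA_DSDS_PER_IOCB - 1) / QLA_DSDS_PER_IOCB;
}

int main(void)
{
	unsigned short n;

	for (n = 1; n <= 112; n += 37)
		printf("%3u data segments -> %u DSD list(s)\n",
		       n, calc_dsd_lists(n));
	return 0;
}
```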
@@ -3272,9 +2790,9 @@ qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
 }
 
 void
-qla82xx_start_iocbs(srb_t *sp)
+qla82xx_start_iocbs(scsi_qla_host_t *vha)
 {
-	struct qla_hw_data *ha = sp->fcport->vha->hw;
+	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = ha->req_q_map[0];
 	struct device_reg_82xx __iomem *reg;
 	uint32_t dbval;
@@ -3659,11 +3177,10 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
 				qla82xx_md_free(vha);
 				/* ALlocate MiniDump resources */
 				qla82xx_md_prep(vha);
-			} else
-				ql_log(ql_log_info, vha, 0xb02e,
-				    "Firmware dump available to retrieve\n",
-				    vha->host_no);
-		}
+			}
+		} else
+			ql_log(ql_log_info, vha, 0xb02e,
+			    "Firmware dump available to retrieve\n");
 	}
 	return rval;
 }
@@ -3758,7 +3275,6 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
 
 		switch (dev_state) {
 		case QLA82XX_DEV_READY:
-			qla82xx_check_md_needed(vha);
 			ha->flags.isp82xx_reset_owner = 0;
 			goto exit;
 		case QLA82XX_DEV_COLD:
@@ -4067,7 +3583,7 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
 		}
 	}
 	ql_dbg(ql_dbg_p3p, vha, 0xb027,
-	    "%s status=%d.\n", status);
+	       "%s: status=%d.\n", __func__, status);
 
 	return status;
 }
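
Most of the surviving qla_nx.c hunks are format-string repairs: a `%s` that had no matching `__func__` argument, a `%p` fed an integer `nx_pcibase` without a cast, `%ld` on an `int` chip revision. Wrapping the log call with a printf attribute is what lets the compiler flag exactly this class of bug; a minimal sketch (the wrapper name is invented):

```c
#include <stdarg.h>
#include <stdio.h>

__attribute__((format(printf, 1, 2)))
static void ql_dbg_demo(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

int main(void)
{
	int chip_revision = 2;

	ql_dbg_demo("Chip revision:%d.\n", chip_revision);
	/* ql_dbg_demo("%s status=%d.\n", 7); <- -Wformat would warn here:
	 * missing argument, the same bug as the pre-fix call sites. */
	return 0;
}
```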

+ 175 - 188
drivers/scsi/qla2xxx/qla_os.c

@@ -83,6 +83,9 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
 		"\t\t0x00080000 - P3P Specific.  0x00040000 - Virtual Port.\n"
 		"\t\t0x00020000 - Buffer Dump.   0x00010000 - Misc.\n"
 		"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
+		"\t\t0x1e400000 - Preferred value for capturing essential "
+		"debug information (equivalent to old "
+		"ql2xextended_error_logging=1).\n"
 		"\t\tDo LOGICAL OR of the value to enable more than one level");
 
 int ql2xshiftctondsd = 6;
@@ -199,7 +202,7 @@ int ql2xmdcapmask = 0x1F;
 module_param(ql2xmdcapmask, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xmdcapmask,
 		"Set the Minidump driver capture mask level. "
-		"Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
+		"Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
 
 int ql2xmdenable = 1;
 module_param(ql2xmdenable, int, S_IRUGO);
@@ -847,14 +850,10 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 	int wait = 0;
 	struct qla_hw_data *ha = vha->hw;
 
-	ql_dbg(ql_dbg_taskm, vha, 0x8000,
-	    "Entered %s for cmd=%p.\n", __func__, cmd);
 	if (!CMD_SP(cmd))
 		return SUCCESS;
 
 	ret = fc_block_scsi_eh(cmd);
-	ql_dbg(ql_dbg_taskm, vha, 0x8001,
-	    "Return value of fc_block_scsi_eh=%d.\n", ret);
 	if (ret != 0)
 		return ret;
 	ret = SUCCESS;
@@ -870,7 +869,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 	}
 
 	ql_dbg(ql_dbg_taskm, vha, 0x8002,
-	    "Aborting sp=%p cmd=%p from RISC ", sp, cmd);
+	    "Aborting from RISC nexus=%ld:%d:%d sp=%p cmd=%p\n",
+	    vha->host_no, id, lun, sp, cmd);
 
 	/* Get a reference to the sp and drop the lock.*/
 	sp_get(sp);
@@ -878,10 +878,10 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	if (ha->isp_ops->abort_command(sp)) {
 		ql_dbg(ql_dbg_taskm, vha, 0x8003,
-		    "Abort command mbx failed for cmd=%p.\n", cmd);
+		    "Abort command mbx failed cmd=%p.\n", cmd);
 	} else {
 		ql_dbg(ql_dbg_taskm, vha, 0x8004,
-		    "Abort command mbx success.\n");
+		    "Abort command mbx success cmd=%p.\n", cmd);
 		wait = 1;
 	}
 
@@ -897,13 +897,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 	if (wait) {
 		if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
 			ql_log(ql_log_warn, vha, 0x8006,
-			    "Abort handler timed out for cmd=%p.\n", cmd);
+			    "Abort handler timed out cmd=%p.\n", cmd);
 			ret = FAILED;
 		}
 	}
 
 	ql_log(ql_log_info, vha, 0x801c,
-	    "Abort command issued --  %d %x.\n", wait, ret);
+	    "Abort command issued nexus=%ld:%d:%d --  %d %x.\n",
+	    vha->host_no, id, lun, wait, ret);
 
 	return ret;
 }
@@ -972,19 +973,15 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
 	int err;
 
 	if (!fcport) {
-		ql_log(ql_log_warn, vha, 0x8007,
-		    "fcport is NULL.\n");
 		return FAILED;
 	}
 
 	err = fc_block_scsi_eh(cmd);
-	ql_dbg(ql_dbg_taskm, vha, 0x8008,
-	    "fc_block_scsi_eh ret=%d.\n", err);
 	if (err != 0)
 		return err;
 
 	ql_log(ql_log_info, vha, 0x8009,
-	    "%s RESET ISSUED for id %d lun %d cmd=%p.\n", name,
+	    "%s RESET ISSUED nexus=%ld:%d:%d cmd=%p.\n", name, vha->host_no,
 	    cmd->device->id, cmd->device->lun, cmd);
 
 	err = 0;
@@ -1009,15 +1006,16 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
 	}
 
 	ql_log(ql_log_info, vha, 0x800e,
-	    "%s RESET SUCCEEDED for id %d lun %d cmd=%p.\n", name,
-	    cmd->device->id, cmd->device->lun, cmd);
+	    "%s RESET SUCCEEDED nexus=%ld:%d:%d cmd=%p.\n", name,
+	    vha->host_no, cmd->device->id, cmd->device->lun, cmd);
 
 	return SUCCESS;
 
 eh_reset_failed:
 	ql_log(ql_log_info, vha, 0x800f,
-	    "%s RESET FAILED: %s for id %d lun %d cmd=%p.\n", name,
-	    reset_errors[err], cmd->device->id, cmd->device->lun);
+	    "%s RESET FAILED: %s nexus=%ld:%d:%d cmd=%p.\n", name,
+	    reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
+	    cmd);
 	return FAILED;
 }
 
@@ -1068,20 +1066,16 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
 	lun = cmd->device->lun;
 
 	if (!fcport) {
-		ql_log(ql_log_warn, vha, 0x8010,
-		    "fcport is NULL.\n");
 		return ret;
 	}
 
 	ret = fc_block_scsi_eh(cmd);
-	ql_dbg(ql_dbg_taskm, vha, 0x8011,
-	    "fc_block_scsi_eh ret=%d.\n", ret);
 	if (ret != 0)
 		return ret;
 	ret = FAILED;
 
 	ql_log(ql_log_info, vha, 0x8012,
-	    "BUS RESET ISSUED for id %d lun %d.\n", id, lun);
+	    "BUS RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
 
 	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
 		ql_log(ql_log_fatal, vha, 0x8013,
@@ -1105,7 +1099,8 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
 
 eh_bus_reset_done:
 	ql_log(ql_log_warn, vha, 0x802b,
-	    "BUS RESET %s.\n", (ret == FAILED) ? "FAILED" : "SUCCEDED");
+	    "BUS RESET %s nexus=%ld:%d:%d.\n",
+	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
 
 	return ret;
 }
@@ -1139,20 +1134,16 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
 	lun = cmd->device->lun;
 
 	if (!fcport) {
-		ql_log(ql_log_warn, vha, 0x8016,
-		    "fcport is NULL.\n");
 		return ret;
 	}
 
 	ret = fc_block_scsi_eh(cmd);
-	ql_dbg(ql_dbg_taskm, vha, 0x8017,
-	    "fc_block_scsi_eh ret=%d.\n", ret);
 	if (ret != 0)
 		return ret;
 	ret = FAILED;
 
 	ql_log(ql_log_info, vha, 0x8018,
-	    "ADAPTER RESET ISSUED for id %d lun %d.\n", id, lun);
+	    "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
 
 	if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
 		goto eh_host_reset_lock;
@@ -1193,8 +1184,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
 		ret = SUCCESS;
 
 eh_host_reset_lock:
-	qla_printk(KERN_INFO, ha, "%s: reset %s.\n", __func__,
-	    (ret == FAILED) ? "failed" : "succeeded");
+	ql_log(ql_log_info, vha, 0x8017,
+	    "ADAPTER RESET %s nexus=%ld:%d:%d.\n",
+	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
 
 	return ret;
 }
@@ -1344,10 +1336,8 @@ static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
 		return;
 
 	ql_dbg(ql_dbg_io, fcport->vha, 0x3029,
-	    "Queue depth adjusted-down "
-	    "to %d for scsi(%ld:%d:%d:%d).\n",
-	    sdev->queue_depth, fcport->vha->host_no,
-	    sdev->channel, sdev->id, sdev->lun);
+	    "Queue depth adjusted-down to %d for nexus=%ld:%d:%d.\n",
+	    sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
 }
 
 static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
@@ -1369,10 +1359,8 @@ static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
 		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
 
 	ql_dbg(ql_dbg_io, vha, 0x302a,
-	    "Queue depth adjusted-up to %d for "
-	    "scsi(%ld:%d:%d:%d).\n",
-	    sdev->queue_depth, fcport->vha->host_no,
-	    sdev->channel, sdev->id, sdev->lun);
+	    "Queue depth adjusted-up to %d for nexus=%ld:%d:%d.\n",
+	    sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
 }
 
 static int
@@ -1496,6 +1484,118 @@ qla24xx_disable_intrs(struct qla_hw_data *ha)
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
+static int
+qla2x00_iospace_config(struct qla_hw_data *ha)
+{
+	resource_size_t pio;
+	uint16_t msix;
+	int cpus;
+
+	if (IS_QLA82XX(ha))
+		return qla82xx_iospace_config(ha);
+
+	if (pci_request_selected_regions(ha->pdev, ha->bars,
+	    QLA2XXX_DRIVER_NAME)) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
+		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+	if (!(ha->bars & 1))
+		goto skip_pio;
+
+	/* We only need PIO for Flash operations on ISP2312 v2 chips. */
+	pio = pci_resource_start(ha->pdev, 0);
+	if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
+		if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
+			ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
+			    "Invalid pci I/O region size (%s).\n",
+			    pci_name(ha->pdev));
+			pio = 0;
+		}
+	} else {
+		ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
+		    "Region #0 not a PIO resource (%s).\n",
+		    pci_name(ha->pdev));
+		pio = 0;
+	}
+	ha->pio_address = pio;
+	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
+	    "PIO address=%llu.\n",
+	    (unsigned long long)ha->pio_address);
+
+skip_pio:
+	/* Use MMIO operations for all accesses. */
+	if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
+		    "Region #1 not an MMIO resource (%s), aborting.\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+	if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
+		    "Invalid PCI mem region size (%s), aborting.\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+
+	ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
+	if (!ha->iobase) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
+		    "Cannot remap MMIO (%s), aborting.\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+
+	/* Determine queue resources */
+	ha->max_req_queues = ha->max_rsp_queues = 1;
+	if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
+		(ql2xmaxqueues > 1 && ql2xmultique_tag) ||
+		(!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+		goto mqiobase_exit;
+
+	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
+			pci_resource_len(ha->pdev, 3));
+	if (ha->mqiobase) {
+		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
+		    "MQIO Base=%p.\n", ha->mqiobase);
+		/* Read MSIX vector size of the board */
+		pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
+		ha->msix_count = msix;
+		/* Max queues are bounded by available msix vectors */
+		/* queue 0 uses two msix vectors */
+		if (ql2xmultique_tag) {
+			cpus = num_online_cpus();
+			ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
+				(cpus + 1) : (ha->msix_count - 1);
+			ha->max_req_queues = 2;
+		} else if (ql2xmaxqueues > 1) {
+			ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
+			    QLA_MQ_SIZE : ql2xmaxqueues;
+			ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
+			    "QoS mode set, max no of request queues:%d.\n",
+			    ha->max_req_queues);
+			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
+			    "QoS mode set, max no of request queues:%d.\n",
+			    ha->max_req_queues);
+		}
+		ql_log_pci(ql_log_info, ha->pdev, 0x001a,
+		    "MSI-X vector count: %d.\n", msix);
+	} else
+		ql_log_pci(ql_log_info, ha->pdev, 0x001b,
+		    "BAR 3 not enabled.\n");
+
+mqiobase_exit:
+	ha->msix_count = ha->max_rsp_queues + 1;
+	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
+	    "MSIX Count:%d.\n", ha->msix_count);
+	return (0);
+
+iospace_error_exit:
+	return (-ENOMEM);
+}
+
+
 static struct isp_operations qla2100_isp_ops = {
 	.pci_config		= qla2100_pci_config,
 	.reset_chip		= qla2x00_reset_chip,
@@ -1530,6 +1630,7 @@ static struct isp_operations qla2100_isp_ops = {
 	.get_flash_version	= qla2x00_get_flash_version,
 	.start_scsi		= qla2x00_start_scsi,
 	.abort_isp		= qla2x00_abort_isp,
+	.iospace_config     	= qla2x00_iospace_config,
 };
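
`iospace_config` joins the per-chip `isp_operations` table (here and in the five sibling tables below), so the probe path can call `ha->isp_ops->iospace_config(ha)` and the ISP82xx variant is picked by table lookup instead of an `IS_QLA82XX()` branch; the qla2x00_probe_one hunks further down correspondingly defer the "Configure PCI I/O space" step until after `ha->isp_ops` is assigned. A minimal sketch of the dispatch pattern with simplified types:

```c
#include <stdio.h>

struct hw_data;

struct isp_ops {
	int (*iospace_config)(struct hw_data *ha);
};

struct hw_data {
	const char *name;
	const struct isp_ops *ops;
};

static int generic_iospace_config(struct hw_data *ha)
{
	printf("%s: mapping BARs the common way\n", ha->name);
	return 0;
}

static int isp82xx_iospace_config(struct hw_data *ha)
{
	printf("%s: mapping the 82xx register windows\n", ha->name);
	return 0;
}

static const struct isp_ops ops_generic = { generic_iospace_config };
static const struct isp_ops ops_82xx = { isp82xx_iospace_config };

int main(void)
{
	struct hw_data a = { "ISP2432", &ops_generic };
	struct hw_data b = { "ISP8021", &ops_82xx };

	a.ops->iospace_config(&a);	/* no chip-type branch at the call site */
	b.ops->iospace_config(&b);
	return 0;
}
```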
 
 static struct isp_operations qla2300_isp_ops = {
@@ -1566,6 +1667,7 @@ static struct isp_operations qla2300_isp_ops = {
 	.get_flash_version	= qla2x00_get_flash_version,
 	.start_scsi		= qla2x00_start_scsi,
 	.abort_isp		= qla2x00_abort_isp,
+	.iospace_config     	= qla2x00_iospace_config,
 };
 
 static struct isp_operations qla24xx_isp_ops = {
@@ -1602,6 +1704,7 @@ static struct isp_operations qla24xx_isp_ops = {
 	.get_flash_version	= qla24xx_get_flash_version,
 	.start_scsi		= qla24xx_start_scsi,
 	.abort_isp		= qla2x00_abort_isp,
+	.iospace_config     	= qla2x00_iospace_config,
 };
 
 static struct isp_operations qla25xx_isp_ops = {
@@ -1638,6 +1741,7 @@ static struct isp_operations qla25xx_isp_ops = {
 	.get_flash_version	= qla24xx_get_flash_version,
 	.start_scsi		= qla24xx_dif_start_scsi,
 	.abort_isp		= qla2x00_abort_isp,
+	.iospace_config     	= qla2x00_iospace_config,
 };
 
 static struct isp_operations qla81xx_isp_ops = {
@@ -1674,6 +1778,7 @@ static struct isp_operations qla81xx_isp_ops = {
 	.get_flash_version	= qla24xx_get_flash_version,
 	.start_scsi		= qla24xx_dif_start_scsi,
 	.abort_isp		= qla2x00_abort_isp,
+	.iospace_config     	= qla2x00_iospace_config,
 };
 
 static struct isp_operations qla82xx_isp_ops = {
@@ -1710,6 +1815,7 @@ static struct isp_operations qla82xx_isp_ops = {
 	.get_flash_version	= qla24xx_get_flash_version,
 	.start_scsi             = qla82xx_start_scsi,
 	.abort_isp		= qla82xx_abort_isp,
+	.iospace_config     	= qla82xx_iospace_config,
 };
 
 static inline void
@@ -1819,121 +1925,10 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
 	else
 		ha->flags.port0 = 0;
 	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
-	    "device_type=0x%x port=%d fw_srisc_address=%p.\n",
+	    "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
 	    ha->device_type, ha->flags.port0, ha->fw_srisc_address);
 }
 
-static int
-qla2x00_iospace_config(struct qla_hw_data *ha)
-{
-	resource_size_t pio;
-	uint16_t msix;
-	int cpus;
-
-	if (IS_QLA82XX(ha))
-		return qla82xx_iospace_config(ha);
-
-	if (pci_request_selected_regions(ha->pdev, ha->bars,
-	    QLA2XXX_DRIVER_NAME)) {
-		ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
-		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
-		    pci_name(ha->pdev));
-		goto iospace_error_exit;
-	}
-	if (!(ha->bars & 1))
-		goto skip_pio;
-
-	/* We only need PIO for Flash operations on ISP2312 v2 chips. */
-	pio = pci_resource_start(ha->pdev, 0);
-	if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
-		if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
-			ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
-			    "Invalid pci I/O region size (%s).\n",
-			    pci_name(ha->pdev));
-			pio = 0;
-		}
-	} else {
-		ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
-		    "Region #0 no a PIO resource (%s).\n",
-		    pci_name(ha->pdev));
-		pio = 0;
-	}
-	ha->pio_address = pio;
-	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
-	    "PIO address=%p.\n",
-	    ha->pio_address);
-
-skip_pio:
-	/* Use MMIO operations for all accesses. */
-	if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
-		ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
-		    "Region #1 not an MMIO resource (%s), aborting.\n",
-		    pci_name(ha->pdev));
-		goto iospace_error_exit;
-	}
-	if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
-		ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
-		    "Invalid PCI mem region size (%s), aborting.\n",
-		    pci_name(ha->pdev));
-		goto iospace_error_exit;
-	}
-
-	ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
-	if (!ha->iobase) {
-		ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
-		    "Cannot remap MMIO (%s), aborting.\n",
-		    pci_name(ha->pdev));
-		goto iospace_error_exit;
-	}
-
-	/* Determine queue resources */
-	ha->max_req_queues = ha->max_rsp_queues = 1;
-	if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
-		(ql2xmaxqueues > 1 && ql2xmultique_tag) ||
-		(!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
-		goto mqiobase_exit;
-
-	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
-			pci_resource_len(ha->pdev, 3));
-	if (ha->mqiobase) {
-		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
-		    "MQIO Base=%p.\n", ha->mqiobase);
-		/* Read MSIX vector size of the board */
-		pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
-		ha->msix_count = msix;
-		/* Max queues are bounded by available msix vectors */
-		/* queue 0 uses two msix vectors */
-		if (ql2xmultique_tag) {
-			cpus = num_online_cpus();
-			ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
-				(cpus + 1) : (ha->msix_count - 1);
-			ha->max_req_queues = 2;
-		} else if (ql2xmaxqueues > 1) {
-			ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
-			    QLA_MQ_SIZE : ql2xmaxqueues;
-			ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
-			    "QoS mode set, max no of request queues:%d.\n",
-			    ha->max_req_queues);
-			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
-			    "QoS mode set, max no of request queues:%d.\n",
-			    ha->max_req_queues);
-		}
-		ql_log_pci(ql_log_info, ha->pdev, 0x001a,
-		    "MSI-X vector count: %d.\n", msix);
-	} else
-		ql_log_pci(ql_log_info, ha->pdev, 0x001b,
-		    "BAR 3 not enabled.\n");
-
-mqiobase_exit:
-	ha->msix_count = ha->max_rsp_queues + 1;
-	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
-	    "MSIX Count:%d.\n", ha->msix_count);
-	return (0);
-
-iospace_error_exit:
-	return (-ENOMEM);
-}
-
 static void
 qla2xxx_scan_start(struct Scsi_Host *shost)
 {
@@ -2032,14 +2027,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		pdev->needs_freset = 1;
 	}
 
-	/* Configure PCI I/O space */
-	ret = qla2x00_iospace_config(ha);
-	if (ret)
-		goto probe_hw_failed;
-
-	ql_log_pci(ql_log_info, pdev, 0x001d,
-	    "Found an ISP%04X irq %d iobase 0x%p.\n",
-	    pdev->device, pdev->irq, ha->iobase);
 	ha->prev_topology = 0;
 	ha->init_cb_size = sizeof(init_cb_t);
 	ha->link_data_rate = PORT_SPEED_UNKNOWN;
@@ -2152,6 +2139,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	    "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
 	    ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
 	    ha->nvram_conf_off, ha->nvram_data_off);
+
+	/* Configure PCI I/O space */
+	ret = ha->isp_ops->iospace_config(ha);
+	if (ret)
+		goto probe_hw_failed;
+
+	ql_log_pci(ql_log_info, pdev, 0x001d,
+	    "Found an ISP%04X irq %d iobase 0x%p.\n",
+	    pdev->device, pdev->irq, ha->iobase);
 	mutex_init(&ha->vport_lock);
 	init_completion(&ha->mbx_cmd_comp);
 	complete(&ha->mbx_cmd_comp);
@@ -2227,7 +2223,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	ql_dbg(ql_dbg_init, base_vha, 0x0033,
 	    "max_id=%d this_id=%d "
 	    "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d "
-	    "max_lun=%d transportt=%p, vendor_id=%d.\n", host->max_id,
+	    "max_lun=%d transportt=%p, vendor_id=%llu.\n", host->max_id,
 	    host->this_id, host->cmd_per_lun, host->unique_id,
 	    host->max_cmd_len, host->max_channel, host->max_lun,
 	    host->transportt, sht->vendor_id);
@@ -2382,9 +2378,6 @@ skip_dpc:
 
 	qla2x00_dfs_setup(base_vha);
 
-	ql_log(ql_log_info, base_vha, 0x00fa,
-	    "QLogic Fibre Channed HBA Driver: %s.\n",
-	    qla2x00_version_str);
 	ql_log(ql_log_info, base_vha, 0x00fb,
 	    "QLogic %s - %s.\n",
 	    ha->model_number, ha->model_desc ? ha->model_desc : "");
@@ -2833,7 +2826,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 		if (!ha->sns_cmd)
 			goto fail_dma_pool;
 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
-		    "sns_cmd.\n", ha->sns_cmd);
+		    "sns_cmd: %p.\n", ha->sns_cmd);
 	} else {
 	/* Get consistent memory allocated for MS IOCB */
 		ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
@@ -3460,27 +3453,21 @@ qla2x00_do_dpc(void *data)
 		schedule();
 		__set_current_state(TASK_RUNNING);
 
-		ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
-		    "DPC handler waking up.\n");
-		ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
-		    "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
-
-		/* Initialization not yet finished. Don't do anything yet. */
-		if (!base_vha->flags.init_done)
-			continue;
+		if (!base_vha->flags.init_done || ha->flags.mbox_busy)
+			goto end_loop;
 
 		if (ha->flags.eeh_busy) {
 			ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
 			    "eeh_busy=%d.\n", ha->flags.eeh_busy);
-			continue;
+			goto end_loop;
 		}
 
 		ha->dpc_active = 1;
 
-		if (ha->flags.mbox_busy) {
-			ha->dpc_active = 0;
-			continue;
-		}
+		ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
+		    "DPC handler waking up.\n");
+		ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
+		    "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
 
 		qla2x00_do_work(base_vha);
 
@@ -3622,6 +3609,7 @@ qla2x00_do_dpc(void *data)
 		qla2x00_do_dpc_all_vps(base_vha);
 
 		ha->dpc_active = 0;
+end_loop:
 		set_current_state(TASK_INTERRUPTIBLE);
 	} /* End of while(1) */
 	__set_current_state(TASK_RUNNING);
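
The `end_loop` label is the point of this hunk: the old `continue` paths jumped back to the top of the `while` without re-running `set_current_state(TASK_INTERRUPTIBLE)`, so `schedule()` could return immediately and the DPC thread would spin instead of sleeping. Every early-out now funnels through `end_loop`, which rearms the state before the next sleep. The corrected loop shape, as a plain-C stand-in for the kthread primitives:

```c
#include <stdio.h>

enum task_state { RUNNING, INTERRUPTIBLE };

static enum task_state state = INTERRUPTIBLE;

int main(void)
{
	int i;

	for (i = 0; i < 4; i++) {
		/* schedule() would sleep here while state == INTERRUPTIBLE */
		state = RUNNING;

		if (i % 2 == 0)		/* init not done / mailbox busy */
			goto end_loop;	/* NOT 'continue': that would skip
					 * the rearm below and busy-spin */

		printf("iteration %d: running DPC work, state=%d\n", i, state);
end_loop:
		state = INTERRUPTIBLE;	/* rearm before sleeping again */
	}
	return 0;
}
```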
@@ -3705,16 +3693,6 @@ qla2x00_sp_free_dma(srb_t *sp)
 		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
 	}
 
-	CMD_SP(cmd) = NULL;
-}
-
-static void
-qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
-{
-	struct scsi_cmnd *cmd = sp->cmd;
-
-	qla2x00_sp_free_dma(sp);
-
 	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
 		struct ct6_dsd *ctx = sp->ctx;
 		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
@@ -3726,6 +3704,15 @@ qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
 		sp->ctx = NULL;
 	}
 
+	CMD_SP(cmd) = NULL;
+}
+
+static void
+qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
+{
+	struct scsi_cmnd *cmd = sp->cmd;
+
+	qla2x00_sp_free_dma(sp);
 	mempool_free(sp, ha->srb_mempool);
 	cmd->scsi_done(cmd);
 }

+ 3 - 2
drivers/scsi/qla2xxx/qla_sup.c

@@ -904,8 +904,9 @@ no_flash_data:
 	}
 done:
 	ql_dbg(ql_dbg_init, vha, 0x004d,
-	    "FDT[%x]: (0x%x/0x%x) erase=0x%x "
-	    "pr=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
+	    "FDT[%s]: (0x%x/0x%x) erase=0x%x "
+	    "pr=%x wrtd=0x%x blk=0x%x.\n",
+	    loc, mid, fid,
 	    ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
 	    ha->fdt_wrt_disable, ha->fdt_block_size);
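
Same class of repair as the qla_nx.c logging fixes: the old FDT line had eight conversions feeding on seven arguments and printed the location string `loc` with `%x`; dropping the orphaned `upro=` field and switching to `FDT[%s]` rebalances format and arguments.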
 

+ 3 - 3
drivers/scsi/qla4xxx/ql4_dbg.c

@@ -20,12 +20,12 @@ void qla4xxx_dump_buffer(void *b, uint32_t size)
 	printk("------------------------------------------------------------"
 	       "--\n");
 	for (cnt = 0; cnt < size; c++) {
-		printk(KERN_INFO "%02x", *c);
+		printk("%02x", *c);
 		if (!(++cnt % 16))
-			printk(KERN_INFO "\n");
+			printk("\n");
 
 		else
-			printk(KERN_INFO "  ");
+			printk("  ");
 	}
 	printk(KERN_INFO "\n");
 }
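
The dump fix is about log-level markers, not the bytes: every `printk(KERN_INFO ...)` opens a new log line, so tagging each two-character fragment with `KERN_INFO` printed one byte per row. With the level only on the first fragment, sixteen bytes share a row again. A userspace restatement of the fixed loop, with `printf` standing in for the continuation `printk`s:

```c
#include <stdio.h>

static void dump_buffer(const void *b, unsigned int size)
{
	const unsigned char *c = b;
	unsigned int cnt;

	for (cnt = 0; cnt < size; c++) {
		printf("%02x", *c);	/* bare fragment: stays on the row */
		if (!(++cnt % 16))
			printf("\n");
		else
			printf("  ");
	}
	printf("\n");
}

int main(void)
{
	unsigned char buf[32];
	unsigned int i;

	for (i = 0; i < sizeof(buf); i++)
		buf[i] = (unsigned char)i;
	dump_buffer(buf, sizeof(buf));
	return 0;
}
```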

+ 1 - 0
drivers/scsi/qla4xxx/ql4_def.h

@@ -177,6 +177,7 @@
 #define LOGIN_TOV			12
 
 #define MAX_RESET_HA_RETRIES		2
+#define FW_ALIVE_WAIT_TOV		3
 
 #define CMD_SP(Cmnd)			((Cmnd)->SCp.ptr)
 

+ 19 - 11
drivers/scsi/qla4xxx/ql4_isr.c

@@ -123,13 +123,13 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
 
 	srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
 	if (!srb) {
-		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
-			      "handle 0x%x, sp=%p. This cmd may have already "
-			      "been completed.\n", ha->host_no, __func__,
-			      le32_to_cpu(sts_entry->handle), srb));
-		ql4_printk(KERN_WARNING, ha, "%s invalid status entry:"
-		    " handle=0x%0x\n", __func__, sts_entry->handle);
-		set_bit(DPC_RESET_HA, &ha->dpc_flags);
+		ql4_printk(KERN_WARNING, ha, "%s invalid status entry: "
+			   "handle=0x%0x, srb=%p\n", __func__,
+			   sts_entry->handle, srb);
+		if (is_qla8022(ha))
+			set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
+		else
+			set_bit(DPC_RESET_HA, &ha->dpc_flags);
 		return;
 	}
 
@@ -563,7 +563,11 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
 		case MBOX_ASTS_DHCP_LEASE_EXPIRED:
 			DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
 				      "Reset HA\n", ha->host_no, mbox_status));
-			set_bit(DPC_RESET_HA, &ha->dpc_flags);
+			if (is_qla8022(ha))
+				set_bit(DPC_RESET_HA_FW_CONTEXT,
+					&ha->dpc_flags);
+			else
+				set_bit(DPC_RESET_HA, &ha->dpc_flags);
 			break;
 
 		case MBOX_ASTS_LINK_UP:
@@ -617,9 +621,13 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
 			    (mbox_sts[2] == ACB_STATE_ACQUIRING)))
 				set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
 			else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
-			    (mbox_sts[2] == ACB_STATE_VALID))
-				set_bit(DPC_RESET_HA, &ha->dpc_flags);
-			else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED))
+				 (mbox_sts[2] == ACB_STATE_VALID)) {
+				if (is_qla8022(ha))
+					set_bit(DPC_RESET_HA_FW_CONTEXT,
+						&ha->dpc_flags);
+				else
+					set_bit(DPC_RESET_HA, &ha->dpc_flags);
+			} else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED))
 				complete(&ha->disable_acb_comp);
 			break;
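
All three ql4_isr.c hunks make the same substitution: on ISP8022 hardware these error paths now raise `DPC_RESET_HA_FW_CONTEXT` instead of the heavier `DPC_RESET_HA`, letting the DPC try a firmware-context recovery before resorting to a full chip reset; non-8022 chips keep the existing full-reset behavior. The invalid-status-entry path also collapses its duplicated warning into one `ql4_printk()` that carries the stale handle and srb pointer.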
 

+ 15 - 8
drivers/scsi/qla4xxx/ql4_nx.c

@@ -1792,8 +1792,11 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
 	int rval = QLA_SUCCESS;
 	unsigned long dev_init_timeout;
 
-	if (!test_bit(AF_INIT_DONE, &ha->flags))
+	if (!test_bit(AF_INIT_DONE, &ha->flags)) {
+		qla4_8xxx_idc_lock(ha);
 		qla4_8xxx_set_drv_active(ha);
+		qla4_8xxx_idc_unlock(ha);
+	}
 
 	dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
 	ql4_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
@@ -1802,8 +1805,8 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
 	/* wait for 30 seconds for device to go ready */
 	dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
 
+	qla4_8xxx_idc_lock(ha);
 	while (1) {
-		qla4_8xxx_idc_lock(ha);
 
 		if (time_after_eq(jiffies, dev_init_timeout)) {
 			ql4_printk(KERN_WARNING, ha, "Device init failed!\n");
@@ -1819,15 +1822,14 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
 		/* NOTE: Make sure idc unlocked upon exit of switch statement */
 		switch (dev_state) {
 		case QLA82XX_DEV_READY:
-			qla4_8xxx_idc_unlock(ha);
 			goto exit;
 		case QLA82XX_DEV_COLD:
 			rval = qla4_8xxx_device_bootstrap(ha);
-			qla4_8xxx_idc_unlock(ha);
 			goto exit;
 		case QLA82XX_DEV_INITIALIZING:
 			qla4_8xxx_idc_unlock(ha);
 			msleep(1000);
+			qla4_8xxx_idc_lock(ha);
 			break;
 		case QLA82XX_DEV_NEED_RESET:
 			if (!ql4xdontresethba) {
@@ -1836,32 +1838,37 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
 				 * reset handler */
 				dev_init_timeout = jiffies +
 					(ha->nx_dev_init_timeout * HZ);
+			} else {
+				qla4_8xxx_idc_unlock(ha);
+				msleep(1000);
+				qla4_8xxx_idc_lock(ha);
 			}
-			qla4_8xxx_idc_unlock(ha);
 			break;
 		case QLA82XX_DEV_NEED_QUIESCENT:
-			qla4_8xxx_idc_unlock(ha);
 			/* idc locked/unlocked in handler */
 			qla4_8xxx_need_qsnt_handler(ha);
-			qla4_8xxx_idc_lock(ha);
-			/* fall thru needs idc_locked */
+			break;
 		case QLA82XX_DEV_QUIESCENT:
 			qla4_8xxx_idc_unlock(ha);
 			msleep(1000);
+			qla4_8xxx_idc_lock(ha);
 			break;
 		case QLA82XX_DEV_FAILED:
 			qla4_8xxx_idc_unlock(ha);
 			qla4xxx_dead_adapter_cleanup(ha);
 			rval = QLA_ERROR;
+			qla4_8xxx_idc_lock(ha);
 			goto exit;
 		default:
 			qla4_8xxx_idc_unlock(ha);
 			qla4xxx_dead_adapter_cleanup(ha);
 			rval = QLA_ERROR;
+			qla4_8xxx_idc_lock(ha);
 			goto exit;
 		}
 	}
 exit:
+	qla4_8xxx_idc_unlock(ha);
 	return rval;
 }
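
The locking rework in `qla4_8xxx_device_state_handler()` establishes a single invariant: the IDC lock is held at the top of every loop iteration and at the `exit` label, and is dropped only around `msleep()` waits, with a matching re-acquire. That replaces the old per-iteration lock/unlock pairs that each `case` had to balance by hand. The shape, sketched with a pthread mutex standing in for the hardware IDC semaphore:

```c
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t idc_lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	int dev_state = 0;	/* simulated; 3 plays the READY state */

	pthread_mutex_lock(&idc_lock);
	while (1) {
		if (dev_state == 3)
			goto out;	/* leave the loop with the lock held */

		/* transient state: drop the lock only while waiting */
		pthread_mutex_unlock(&idc_lock);
		usleep(1000);
		pthread_mutex_lock(&idc_lock);

		dev_state++;
		printf("device state now %d\n", dev_state);
	}
out:
	pthread_mutex_unlock(&idc_lock);
	return 0;
}
```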
 

+ 102 - 25
drivers/scsi/qla4xxx/ql4_os.c

@@ -935,7 +935,16 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
 		goto exit_init_fw_cb;
 	}
 
-	qla4xxx_disable_acb(ha);
+	rval = qla4xxx_disable_acb(ha);
+	if (rval != QLA_SUCCESS) {
+		ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
+			   __func__);
+		rval = -EIO;
+		goto exit_init_fw_cb;
+	}
+
+	wait_for_completion_timeout(&ha->disable_acb_comp,
+				    DISABLE_ACB_TOV * HZ);
 
 	qla4xxx_initcb_to_acb(init_fw_cb);
 
@@ -1966,9 +1975,10 @@ mem_alloc_error_exit:
  *
  * Context: Interrupt
  **/
-static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
+static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
 {
-	uint32_t fw_heartbeat_counter, halt_status;
+	uint32_t fw_heartbeat_counter;
+	int status = QLA_SUCCESS;
 
 	fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
 	/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
@@ -1976,7 +1986,7 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
 		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
 		    "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
 		    ha->host_no, __func__));
-		return;
+		return status;
 	}
 
 	if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
@@ -1984,8 +1994,6 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
 		/* FW not alive after 2 seconds */
 		if (ha->seconds_since_last_heartbeat == 2) {
 			ha->seconds_since_last_heartbeat = 0;
-			halt_status = qla4_8xxx_rd_32(ha,
-						      QLA82XX_PEG_HALT_STATUS1);
 
 			ql4_printk(KERN_INFO, ha,
 				   "scsi(%ld): %s, Dumping hw/fw registers:\n "
@@ -1993,7 +2001,9 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
 				   " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
 				   " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
 				   " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
-				   ha->host_no, __func__, halt_status,
+				   ha->host_no, __func__,
+				   qla4_8xxx_rd_32(ha,
+						   QLA82XX_PEG_HALT_STATUS1),
 				   qla4_8xxx_rd_32(ha,
 						   QLA82XX_PEG_HALT_STATUS2),
 				   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
@@ -2006,24 +2016,13 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
 						   0x3c),
 				   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
 						   0x3c));
-
-			/* Since we cannot change dev_state in interrupt
-			 * context, set appropriate DPC flag then wakeup
-			 * DPC */
-			if (halt_status & HALT_STATUS_UNRECOVERABLE)
-				set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
-			else {
-				printk("scsi%ld: %s: detect abort needed!\n",
-				    ha->host_no, __func__);
-				set_bit(DPC_RESET_HA, &ha->dpc_flags);
-			}
-			qla4xxx_wake_dpc(ha);
-			qla4xxx_mailbox_premature_completion(ha);
+			status = QLA_ERROR;
 		}
 	} else
 		ha->seconds_since_last_heartbeat = 0;
 
 	ha->fw_heartbeat_counter = fw_heartbeat_counter;
+	return status;
 }
 
 /**
@@ -2034,14 +2033,13 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
  **/
 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
 {
-	uint32_t dev_state;
-
-	dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+	uint32_t dev_state, halt_status;
 
 	/* don't poll if reset is going on */
 	if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
 	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
 	    test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
+		dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
 		if (dev_state == QLA82XX_DEV_NEED_RESET &&
 		    !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
 			if (!ql4xdontresethba) {
@@ -2049,7 +2047,6 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
 				    "NEED RESET!\n", __func__);
 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
 				qla4xxx_wake_dpc(ha);
-				qla4xxx_mailbox_premature_completion(ha);
 			}
 		} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
 		    !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
@@ -2059,7 +2056,24 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
 			qla4xxx_wake_dpc(ha);
 		} else  {
 			/* Check firmware health */
-			qla4_8xxx_check_fw_alive(ha);
+			if (qla4_8xxx_check_fw_alive(ha)) {
+				halt_status = qla4_8xxx_rd_32(ha,
+						QLA82XX_PEG_HALT_STATUS1);
+
+				/* Since we cannot change dev_state in interrupt
+				 * context, set appropriate DPC flag then wakeup
+				 * DPC */
+				if (halt_status & HALT_STATUS_UNRECOVERABLE)
+					set_bit(DPC_HA_UNRECOVERABLE,
+						&ha->dpc_flags);
+				else {
+					ql4_printk(KERN_INFO, ha, "%s: detect "
+						   "abort needed!\n", __func__);
+					set_bit(DPC_RESET_HA, &ha->dpc_flags);
+				}
+				qla4xxx_mailbox_premature_completion(ha);
+				qla4xxx_wake_dpc(ha);
+			}
 		}
 	}
 }
@@ -2414,6 +2428,8 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
 {
 	int status = QLA_ERROR;
 	uint8_t reset_chip = 0;
+	uint32_t dev_state;
+	unsigned long wait;
 
 	/* Stall incoming I/O until we are done */
 	scsi_block_requests(ha->host);
@@ -2464,8 +2480,29 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
 	 * or if stop_firmware fails for ISP-82xx.
 	 * This is the default case for ISP-4xxx */
 	if (!is_qla8022(ha) || reset_chip) {
+		if (!is_qla8022(ha))
+			goto chip_reset;
+
+		/* Check if 82XX firmware is alive or not
+		 * We may have arrived here from NEED_RESET
+		 * detection only */
+		if (test_bit(AF_FW_RECOVERY, &ha->flags))
+			goto chip_reset;
+
+		wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
+		while (time_before(jiffies, wait)) {
+			if (qla4_8xxx_check_fw_alive(ha)) {
+				qla4xxx_mailbox_premature_completion(ha);
+				break;
+			}
+
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(HZ);
+		}
+
 		if (!test_bit(AF_FW_RECOVERY, &ha->flags))
 			qla4xxx_cmd_wait(ha);
+chip_reset:
 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
 		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
 		DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -2501,6 +2538,25 @@ recover_ha_init_adapter:
 		 * Since we don't want to block the DPC for too long
 		 * with multiple resets in the same thread,
 		 * utilize DPC to retry */
+		if (is_qla8022(ha)) {
+			qla4_8xxx_idc_lock(ha);
+			dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+			qla4_8xxx_idc_unlock(ha);
+			if (dev_state == QLA82XX_DEV_FAILED) {
+				ql4_printk(KERN_INFO, ha, "%s: don't retry "
+					   "recover adapter. H/W is in Failed "
+					   "state\n", __func__);
+				qla4xxx_dead_adapter_cleanup(ha);
+				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+				clear_bit(DPC_RESET_HA_FW_CONTEXT,
+						&ha->dpc_flags);
+				status = QLA_ERROR;
+
+				goto exit_recover;
+			}
+		}
+
 		if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
 			ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
 			DEBUG2(printk("scsi%ld: recover adapter - retrying "
@@ -2539,6 +2595,7 @@ recover_ha_init_adapter:
 		clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
 	}
 
+exit_recover:
 	ha->adapter_error_count++;
 
 	if (test_bit(AF_ONLINE, &ha->flags))
@@ -2806,6 +2863,7 @@ dpc_post_reset_ha:
  **/
 static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
 {
+	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
 
 	if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
 		/* Turn-off interrupts on the card. */
@@ -4815,6 +4873,20 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
 	return SUCCESS;
 }
 
+/**
+ * qla4xxx_is_eh_active - check if error handler is running
+ * @shost: Pointer to SCSI Host struct
+ *
+ * This routine finds that if reset host is called in EH
+ * scenario or from some application like sg_reset
+ **/
+static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
+{
+	if (shost->shost_state == SHOST_RECOVERY)
+		return 1;
+	return 0;
+}
+
 /**
  * qla4xxx_eh_host_reset - kernel callback
  * @cmd: Pointer to Linux's SCSI command structure
@@ -4832,6 +4904,11 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
 	if (ql4xdontresethba) {
 		DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
 		     ha->host_no, __func__));
+
+		/* Clear outstanding srb in queues */
+		if (qla4xxx_is_eh_active(cmd->device->host))
+			qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
+
 		return FAILED;
 	}
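
The `ql4xdontresethba` branch above now flushes outstanding srbs with `DID_ABORT` when the reset request came from the SCSI error handler (rather than an application such as sg_reset), so EH can finish even though the HBA is left alone. Earlier in the file, two recovery changes work together: `qla4_8xxx_check_fw_alive()` now returns pass/fail instead of acting on its own, and `qla4xxx_recover_adapter()` polls it for up to `FW_ALIVE_WAIT_TOV` (3) seconds before deciding whether outstanding commands can still be waited on or must be force-completed. The heartbeat test itself is just "the alive counter must keep moving"; a standalone sketch with simulated counter reads:

```c
#include <stdio.h>

static unsigned int read_peg_alive_counter(int tick)
{
	return tick < 3 ? (unsigned int)tick : 3u;	/* hangs after tick 3 */
}

int main(void)
{
	unsigned int last = 0, now;
	int stalled = 0, tick;

	for (tick = 1; tick <= 6; tick++) {
		now = read_peg_alive_counter(tick);
		if (now == last) {
			/* two consecutive ticks without movement = hung */
			if (++stalled >= 2) {
				printf("tick %d: firmware hung\n", tick);
				return 1;
			}
		} else {
			stalled = 0;
		}
		last = now;
		printf("tick %d: heartbeat 0x%x ok\n", tick, now);
	}
	return 0;
}
```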
 

+ 1 - 1
drivers/scsi/qla4xxx/ql4_version.h

@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION	"5.02.00-k9"
+#define QLA4XXX_DRIVER_VERSION	"5.02.00-k10"

+ 0 - 1
drivers/scsi/scsi_priv.h

@@ -45,7 +45,6 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
 enum {
 	SCSI_DEVINFO_GLOBAL = 0,
 	SCSI_DEVINFO_SPI,
-	SCSI_DEVINFO_DH,
 };
 
 extern int scsi_get_device_flags(struct scsi_device *sdev,

+ 19 - 2
drivers/scsi/scsi_transport_iscsi.c

@@ -1030,6 +1030,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
 		return NULL;
 
 	session->transport = transport;
+	session->creator = -1;
 	session->recovery_tmo = 120;
 	session->state = ISCSI_SESSION_FREE;
 	INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
@@ -1634,8 +1635,9 @@ EXPORT_SYMBOL_GPL(iscsi_session_event);
 
 static int
 iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
-			struct iscsi_uevent *ev, uint32_t initial_cmdsn,
-			uint16_t cmds_max, uint16_t queue_depth)
+			struct iscsi_uevent *ev, pid_t pid,
+			uint32_t initial_cmdsn,	uint16_t cmds_max,
+			uint16_t queue_depth)
 {
 	struct iscsi_transport *transport = priv->iscsi_transport;
 	struct iscsi_cls_session *session;
@@ -1646,6 +1648,7 @@ iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
 	if (!session)
 		return -ENOMEM;
 
+	session->creator = pid;
 	shost = iscsi_session_to_shost(session);
 	ev->r.c_session_ret.host_no = shost->host_no;
 	ev->r.c_session_ret.sid = session->sid;
@@ -1938,6 +1941,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
 	switch (nlh->nlmsg_type) {
 	case ISCSI_UEVENT_CREATE_SESSION:
 		err = iscsi_if_create_session(priv, ep, ev,
+					      NETLINK_CREDS(skb)->pid,
 					      ev->u.c_session.initial_cmdsn,
 					      ev->u.c_session.cmds_max,
 					      ev->u.c_session.queue_depth);
@@ -1950,6 +1954,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
 		}
 
 		err = iscsi_if_create_session(priv, ep, ev,
+					NETLINK_CREDS(skb)->pid,
 					ev->u.c_bound_session.initial_cmdsn,
 					ev->u.c_bound_session.cmds_max,
 					ev->u.c_bound_session.queue_depth);
@@ -2298,6 +2303,15 @@ show_priv_session_state(struct device *dev, struct device_attribute *attr,
 }
 static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
 			NULL);
+static ssize_t
+show_priv_session_creator(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+	return sprintf(buf, "%d\n", session->creator);
+}
+static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator,
+			NULL);
 
 #define iscsi_priv_session_attr_show(field, format)			\
 static ssize_t								\
@@ -2367,6 +2381,7 @@ static struct attribute *iscsi_session_attrs[] = {
 	&dev_attr_sess_targetalias.attr,
 	&dev_attr_priv_sess_recovery_tmo.attr,
 	&dev_attr_priv_sess_state.attr,
+	&dev_attr_priv_sess_creator.attr,
 	NULL,
 };
 
@@ -2424,6 +2439,8 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
 		return S_IRUGO | S_IWUSR;
 	else if (attr == &dev_attr_priv_sess_state.attr)
 		return S_IRUGO;
+	else if (attr == &dev_attr_priv_sess_creator.attr)
+		return S_IRUGO;
 	else {
 		WARN_ONCE(1, "Invalid session attr");
 		return 0;
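
The iscsi class change records, at session-creation time, the pid carried in the netlink request's `NETLINK_CREDS()` (or -1 for kernel-created sessions) and exposes it as a read-only `creator` attribute on the session device, so userspace can tell which daemon owns a session. An illustrative read (the sysfs path and session number are examples, not guaranteed):

```c
#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/class/iscsi_session/session1/creator", "r");

	if (!f) {
		perror("creator attribute");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("session1 created by pid %s", buf);
	fclose(f);
	return 0;
}
```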

+ 0 - 1
include/scsi/scsi_device.h

@@ -185,7 +185,6 @@ typedef void (*activate_complete)(void *, int);
 struct scsi_device_handler {
 	/* Used by the infrastructure */
 	struct list_head list; /* list of scsi_device_handlers */
-	int idx;
 
 	/* Filled by the hardware handler */
 	struct module *module;

+ 5 - 0
include/scsi/scsi_transport_iscsi.h

@@ -211,6 +211,11 @@ struct iscsi_cls_session {
 	unsigned int target_id;
 	bool ida_used;
 
+	/*
+	 * pid of userspace process that created session or -1 if
+	 * created by the kernel.
+	 */
+	pid_t creator;
 	int state;
 	int sid;				/* session id */
 	void *dd_data;				/* LLD private data */

Some files were not shown because of the large number of changes