
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull first round of SCSI updates from James Bottomley:
 "This patch set is a set of driver updates (ufs, zfcp, lpfc, mpt2/3sas,
  qla4xxx, qla2xxx [adding support for ISP8044 + other things]).

  We also have a new driver: esas2r which has a number of static checker
  problems, but which I expect to resolve over the -rc course of 3.12
  under the new driver exception.

  We also have the error-return changes that were discussed at LSF"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (118 commits)
  [SCSI] sg: push file descriptor list locking down to per-device locking
  [SCSI] sg: checking sdp->detached isn't protected when open
  [SCSI] sg: no need sg_open_exclusive_lock
  [SCSI] sg: use rwsem to solve race during exclusive open
  [SCSI] scsi_debug: fix logical block provisioning support when unmap_alignment != 0
  [SCSI] scsi_debug: fix endianness bug in sdebug_build_parts()
  [SCSI] qla2xxx: Update the driver version to 8.06.00.08-k.
  [SCSI] qla2xxx: print MAC via %pMR.
  [SCSI] qla2xxx: Correction to message ids.
  [SCSI] qla2xxx: Correctly print out/in mailbox registers.
  [SCSI] qla2xxx: Add a new interface to update versions.
  [SCSI] qla2xxx: Move queue depth ramp down message to i/o debug level.
  [SCSI] qla2xxx: Select link initialization option bits from current operating mode.
  [SCSI] qla2xxx: Add loopback IDC-TIME-EXTEND aen handling support.
  [SCSI] qla2xxx: Set default critical temperature value in cases when ISPFX00 firmware doesn't provide it
  [SCSI] qla2xxx: QLAFX00 make over temperature AEN handling informational, add log for normal temperature AEN
  [SCSI] qla2xxx: Correct Interrupt Register offset for ISPFX00
  [SCSI] qla2xxx: Remove handling of Shutdown Requested AEN from qlafx00_process_aen().
  [SCSI] qla2xxx: Send all AENs for ISPFx00 to above layers.
  [SCSI] qla2xxx: Add changes in initialization for ISPFX00 cards with BIOS
  ...
Linus Torvalds
commit f66c83d059
100 changed files with 21358 additions and 617 deletions

   1. Documentation/scsi/LICENSE.qla4xxx (+1 -1)
   2. MAINTAINERS (+14 -1)
   3. block/blk-core.c (+6 -0)
   4. drivers/infiniband/ulp/iser/iscsi_iser.c (+2 -1)
   5. drivers/infiniband/ulp/iser/iser_initiator.c (+10 -1)
   6. drivers/md/dm-mpath.c (+15 -1)
   7. drivers/s390/scsi/zfcp_aux.c (+2 -2)
   8. drivers/s390/scsi/zfcp_ext.h (+0 -2)
   9. drivers/s390/scsi/zfcp_fsf.c (+2 -10)
  10. drivers/s390/scsi/zfcp_qdio.c (+2 -2)
  11. drivers/s390/scsi/zfcp_sysfs.c (+6 -6)
  12. drivers/scsi/Kconfig (+1 -0)
  13. drivers/scsi/Makefile (+1 -0)
  14. drivers/scsi/bfa/bfad.c (+3 -3)
  15. drivers/scsi/bnx2i/57xx_iscsi_constants.h (+1 -1)
  16. drivers/scsi/bnx2i/57xx_iscsi_hsi.h (+1 -1)
  17. drivers/scsi/bnx2i/bnx2i.h (+1 -1)
  18. drivers/scsi/bnx2i/bnx2i_hwi.c (+1 -1)
  19. drivers/scsi/bnx2i/bnx2i_init.c (+3 -3)
  20. drivers/scsi/bnx2i/bnx2i_iscsi.c (+1 -1)
  21. drivers/scsi/bnx2i/bnx2i_sysfs.c (+1 -1)
  22. drivers/scsi/eata_pio.c (+1 -1)
  23. drivers/scsi/esas2r/Kconfig (+5 -0)
  24. drivers/scsi/esas2r/Makefile (+5 -0)
  25. drivers/scsi/esas2r/atioctl.h (+1254 -0)
  26. drivers/scsi/esas2r/atvda.h (+1319 -0)
  27. drivers/scsi/esas2r/esas2r.h (+1441 -0)
  28. drivers/scsi/esas2r/esas2r_disc.c (+1189 -0)
  29. drivers/scsi/esas2r/esas2r_flash.c (+1512 -0)
  30. drivers/scsi/esas2r/esas2r_init.c (+1773 -0)
  31. drivers/scsi/esas2r/esas2r_int.c (+941 -0)
  32. drivers/scsi/esas2r/esas2r_io.c (+880 -0)
  33. drivers/scsi/esas2r/esas2r_ioctl.c (+2110 -0)
  34. drivers/scsi/esas2r/esas2r_log.c (+254 -0)
  35. drivers/scsi/esas2r/esas2r_log.h (+118 -0)
  36. drivers/scsi/esas2r/esas2r_main.c (+2032 -0)
  37. drivers/scsi/esas2r/esas2r_targdb.c (+306 -0)
  38. drivers/scsi/esas2r/esas2r_vda.c (+521 -0)
  39. drivers/scsi/hpsa.c (+3 -5)
  40. drivers/scsi/hpsa.h (+0 -2)
  41. drivers/scsi/ipr.c (+14 -0)
  42. drivers/scsi/ipr.h (+7 -0)
  43. drivers/scsi/isci/port_config.c (+1 -1)
  44. drivers/scsi/libiscsi.c (+109 -0)
  45. drivers/scsi/lpfc/lpfc.h (+1 -2)
  46. drivers/scsi/lpfc/lpfc_attr.c (+23 -34)
  47. drivers/scsi/lpfc/lpfc_bsg.c (+5 -3)
  48. drivers/scsi/lpfc/lpfc_ct.c (+1 -1)
  49. drivers/scsi/lpfc/lpfc_disc.h (+2 -1)
  50. drivers/scsi/lpfc/lpfc_els.c (+2 -0)
  51. drivers/scsi/lpfc/lpfc_hw4.h (+13 -0)
  52. drivers/scsi/lpfc/lpfc_init.c (+25 -17)
  53. drivers/scsi/lpfc/lpfc_mbox.c (+4 -3)
  54. drivers/scsi/lpfc/lpfc_nportdisc.c (+10 -2)
  55. drivers/scsi/lpfc/lpfc_scsi.c (+6 -6)
  56. drivers/scsi/lpfc/lpfc_sli.c (+125 -22)
  57. drivers/scsi/lpfc/lpfc_sli.h (+5 -2)
  58. drivers/scsi/lpfc/lpfc_sli4.h (+6 -0)
  59. drivers/scsi/lpfc/lpfc_version.h (+1 -1)
  60. drivers/scsi/lpfc/lpfc_vport.c (+4 -1)
  61. drivers/scsi/mpt2sas/mpi/mpi2.h (+4 -3)
  62. drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h (+8 -2)
  63. drivers/scsi/mpt2sas/mpi/mpi2_init.h (+1 -1)
  64. drivers/scsi/mpt2sas/mpi/mpi2_ioc.h (+1 -1)
  65. drivers/scsi/mpt2sas/mpi/mpi2_raid.h (+1 -1)
  66. drivers/scsi/mpt2sas/mpi/mpi2_sas.h (+1 -1)
  67. drivers/scsi/mpt2sas/mpi/mpi2_tool.h (+1 -1)
  68. drivers/scsi/mpt2sas/mpi/mpi2_type.h (+1 -1)
  69. drivers/scsi/mpt2sas/mpt2sas_base.c (+23 -17)
  70. drivers/scsi/mpt2sas/mpt2sas_base.h (+5 -5)
  71. drivers/scsi/mpt2sas/mpt2sas_config.c (+1 -1)
  72. drivers/scsi/mpt2sas/mpt2sas_ctl.c (+9 -5)
  73. drivers/scsi/mpt2sas/mpt2sas_ctl.h (+1 -1)
  74. drivers/scsi/mpt2sas/mpt2sas_debug.h (+1 -1)
  75. drivers/scsi/mpt2sas/mpt2sas_scsih.c (+44 -38)
  76. drivers/scsi/mpt2sas/mpt2sas_transport.c (+5 -2)
  77. drivers/scsi/mpt3sas/mpt3sas_base.c (+31 -10)
  78. drivers/scsi/mpt3sas/mpt3sas_scsih.c (+1 -0)
  79. drivers/scsi/mpt3sas/mpt3sas_transport.c (+4 -1)
  80. drivers/scsi/pm8001/pm8001_init.c (+3 -2)
  81. drivers/scsi/qla2xxx/Makefile (+1 -1)
  82. drivers/scsi/qla2xxx/qla_attr.c (+60 -21)
  83. drivers/scsi/qla2xxx/qla_bsg.c (+28 -15)
  84. drivers/scsi/qla2xxx/qla_dbg.c (+38 -20)
  85. drivers/scsi/qla2xxx/qla_def.h (+27 -12)
  86. drivers/scsi/qla2xxx/qla_fw.h (+2 -0)
  87. drivers/scsi/qla2xxx/qla_gbl.h (+57 -17)
  88. drivers/scsi/qla2xxx/qla_gs.c (+21 -76)
  89. drivers/scsi/qla2xxx/qla_init.c (+48 -38)
  90. drivers/scsi/qla2xxx/qla_inline.h (+1 -1)
  91. drivers/scsi/qla2xxx/qla_iocb.c (+15 -2)
  92. drivers/scsi/qla2xxx/qla_isr.c (+54 -37)
  93. drivers/scsi/qla2xxx/qla_mbx.c (+226 -71)
  94. drivers/scsi/qla2xxx/qla_mid.c (+2 -0)
  95. drivers/scsi/qla2xxx/qla_mr.c (+138 -26)
  96. drivers/scsi/qla2xxx/qla_mr.h (+37 -4)
  97. drivers/scsi/qla2xxx/qla_nx.c (+78 -39)
  98. drivers/scsi/qla2xxx/qla_nx.h (+10 -0)
  99. drivers/scsi/qla2xxx/qla_nx2.c (+3716 -0)
 100. drivers/scsi/qla2xxx/qla_nx2.h (+551 -0)

+ 1 - 1
Documentation/scsi/LICENSE.qla4xxx

@@ -1,4 +1,4 @@
-Copyright (c) 2003-2012 QLogic Corporation
+Copyright (c) 2003-2013 QLogic Corporation
 QLogic Linux iSCSI Driver
 
 This program includes a device driver for Linux 3.x.

+ 14 - 1
MAINTAINERS

@@ -1547,6 +1547,13 @@ W:	http://atmelwlandriver.sourceforge.net/
 S:	Maintained
 F:	drivers/net/wireless/atmel*
 
+ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
+M:      Bradley Grove <linuxdrivers@attotech.com>
+L:      linux-scsi@vger.kernel.org
+W:      http://www.attotech.com
+S:      Supported
+F:      drivers/scsi/esas2r
+
 AUDIT SUBSYSTEM
 M:	Al Viro <viro@zeniv.linux.org.uk>
 M:	Eric Paris <eparis@redhat.com>
@@ -1823,6 +1830,12 @@ L:	linux-scsi@vger.kernel.org
 S:	Supported
 F:	drivers/scsi/bnx2fc/
 
+BROADCOM BNX2I 1/10 GIGABIT iSCSI DRIVER
+M:	Eddie Wai <eddie.wai@broadcom.com>
+L:	linux-scsi@vger.kernel.org
+S:	Supported
+F:	drivers/scsi/bnx2i/
+
 BROADCOM SPECIFIC AMBA DRIVER (BCMA)
 M:	Rafał Miłecki <zajec5@gmail.com>
 L:	linux-wireless@vger.kernel.org
@@ -6676,11 +6689,11 @@ F:	Documentation/scsi/LICENSE.qla2xxx
 F:	drivers/scsi/qla2xxx/
 
 QLOGIC QLA4XXX iSCSI DRIVER
-M:	Ravi Anand <ravi.anand@qlogic.com>
 M:	Vikas Chaudhary <vikas.chaudhary@qlogic.com>
 M:	iscsi-driver@qlogic.com
 L:	linux-scsi@vger.kernel.org
 S:	Supported
+F:	Documentation/scsi/LICENSE.qla4xxx
 F:	drivers/scsi/qla4xxx/
 
 QLOGIC QLA3XXX NETWORK DRIVER

+ 6 - 0
block/blk-core.c

@@ -2318,6 +2318,12 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 		case -ETIMEDOUT:
 			error_type = "timeout";
 			break;
+		case -ENOSPC:
+			error_type = "critical space allocation";
+			break;
+		case -ENODATA:
+			error_type = "critical medium";
+			break;
 		case -EIO:
 		default:
 			error_type = "I/O";
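
A note on what this buys: the strings are just the logging side; the
substantive change is that lower layers can now complete requests with
-ENOSPC and -ENODATA instead of collapsing everything into -EIO.  Below
is a sketch of the kind of sense-data classification that feeds these
errnos up from SCSI; the function name is hypothetical and the real
mapping lives in the SCSI midlayer:

	/* Hypothetical sketch: classify SCSI sense data into the errnos
	 * labeled by blk_update_request() above. */
	static int sense_to_errno(u8 sense_key, u8 asc, u8 ascq)
	{
		switch (sense_key) {
		case 0x03:			/* MEDIUM ERROR */
			return -ENODATA;	/* "critical medium" */
		case 0x07:			/* DATA PROTECT */
			if (asc == 0x27 && ascq == 0x07)
				return -ENOSPC;	/* space allocation failed */
			return -EIO;
		default:
			return -EIO;
		}
	}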

+ 2 - 1
drivers/infiniband/ulp/iser/iscsi_iser.c

@@ -672,6 +672,7 @@ static umode_t iser_attr_is_visible(int param_type, int param)
 		case ISCSI_PARAM_TGT_RESET_TMO:
 		case ISCSI_PARAM_IFACE_NAME:
 		case ISCSI_PARAM_INITIATOR_NAME:
+		case ISCSI_PARAM_DISCOVERY_SESS:
 			return S_IRUGO;
 		default:
 			return 0;
@@ -701,7 +702,7 @@ static struct scsi_host_template iscsi_iser_sht = {
 static struct iscsi_transport iscsi_iser_transport = {
 	.owner                  = THIS_MODULE,
 	.name                   = "iser",
-	.caps                   = CAP_RECOVERY_L0 | CAP_MULTI_R2T,
+	.caps                   = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_TEXT_NEGO,
 	/* session management */
 	.create_session         = iscsi_iser_session_create,
 	.destroy_session        = iscsi_iser_session_destroy,

+ 10 - 1
drivers/infiniband/ulp/iser/iser_initiator.c

@@ -234,6 +234,7 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn)
 static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
 {
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
+	struct iscsi_session *session = conn->session;
 
 	iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
 	/* check if this is the last login - going to full feature phase */
@@ -248,7 +249,13 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
 	WARN_ON(iser_conn->ib_conn->post_recv_buf_count != 1);
 	WARN_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);
 
-	iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX);
+	if (session->discovery_sess) {
+		iser_info("Discovery session, re-using login RX buffer\n");
+		return 0;
+	} else
+		iser_info("Normal session, posting batch of RX %d buffers\n",
+			  ISER_MIN_POSTED_RX);
+
 	/* Initial post receive buffers */
 	if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX))
 		return -ENOMEM;
@@ -425,6 +432,8 @@ int iser_send_control(struct iscsi_conn *conn,
 	}
 
 	if (task == conn->login_task) {
+		iser_dbg("op %x dsl %lx, posting login rx buffer\n",
+			 task->hdr->opcode, data_seg_len);
 		err = iser_post_recvl(iser_conn->ib_conn);
 		if (err)
 			goto send_control_error;

+ 15 - 1
drivers/md/dm-mpath.c

@@ -1261,6 +1261,20 @@ static void activate_path(struct work_struct *work)
 				pg_init_done, pgpath);
 }
 
+static int noretry_error(int error)
+{
+	switch (error) {
+	case -EOPNOTSUPP:
+	case -EREMOTEIO:
+	case -EILSEQ:
+	case -ENODATA:
+		return 1;
+	}
+
+	/* Anything else could be a path failure, so should be retried */
+	return 0;
+}
+
 /*
  * end_io handling
  */
@@ -1284,7 +1298,7 @@ static int do_end_io(struct multipath *m, struct request *clone,
 	if (!error && !clone->errors)
 		return 0;	/* I/O complete */
 
-	if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
+	if (noretry_error(error))
 		return error;
 
 	if (mpio->pgpath)
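
The helper consolidates the retry policy visible in the hunk above:
errors that describe a target-side condition (now including the new
-ENODATA "critical medium" code) are passed straight back to the
caller, while anything else is treated as a possible path failure and
retried on another path.  Sketch of the resulting behavior:

	/* do_end_io() outcome sketch (not the full function):
	 *   noretry_error(error)  -> error returned to the caller;
	 *                            no path failover is triggered
	 *   otherwise             -> the path is failed and the request
	 *                            is retried on a remaining path
	 */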

+ 2 - 2
drivers/s390/scsi/zfcp_aux.c

@@ -104,11 +104,11 @@ static void __init zfcp_init_device_setup(char *devstr)
 	strncpy(busid, token, ZFCP_BUS_ID_SIZE);
 
 	token = strsep(&str, ",");
-	if (!token || strict_strtoull(token, 0, (unsigned long long *) &wwpn))
+	if (!token || kstrtoull(token, 0, (unsigned long long *) &wwpn))
 		goto err_out;
 
 	token = strsep(&str, ",");
-	if (!token || strict_strtoull(token, 0, (unsigned long long *) &lun))
+	if (!token || kstrtoull(token, 0, (unsigned long long *) &lun))
 		goto err_out;
 
 	kfree(str_saved);
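
kstrtoull() is the modern name for the deprecated strict_strtoull() and
keeps the same semantics (0 on success, -EINVAL or -ERANGE on failure),
so these are mechanical renames.  Minimal usage sketch (the WWPN
literal is just an example value):

	unsigned long long wwpn;

	if (kstrtoull("0x500507630300c562", 0, &wwpn))
		return -EINVAL;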

+ 0 - 2
drivers/s390/scsi/zfcp_ext.h

@@ -126,8 +126,6 @@ extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
 extern int zfcp_qdio_open(struct zfcp_qdio *);
 extern void zfcp_qdio_close(struct zfcp_qdio *);
 extern void zfcp_qdio_siosl(struct zfcp_adapter *);
-extern struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *,
-					     struct qdio_buffer *);
 
 /* zfcp_scsi.c */
 extern struct scsi_transport_template *zfcp_scsi_transport_template;

+ 2 - 10
drivers/s390/scsi/zfcp_fsf.c

@@ -770,7 +770,8 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 	if (zfcp_qdio_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, 0,
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
+				  SBAL_SFLAGS0_TYPE_STATUS,
 				  adapter->pool.status_read_req);
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
@@ -2387,12 +2388,3 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
 			break;
 	}
 }
-
-struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *qdio,
-				      struct qdio_buffer *sbal)
-{
-	struct qdio_buffer_element *sbale = &sbal->element[0];
-	u64 req_id = (unsigned long) sbale->addr;
-
-	return zfcp_reqlist_find(qdio->adapter->req_list, req_id);
-}

+ 2 - 2
drivers/s390/scsi/zfcp_qdio.c

@@ -16,9 +16,9 @@
 
 #define QBUFF_PER_PAGE		(PAGE_SIZE / sizeof(struct qdio_buffer))
 
-static bool enable_multibuffer;
+static bool enable_multibuffer = 1;
 module_param_named(datarouter, enable_multibuffer, bool, 0400);
-MODULE_PARM_DESC(datarouter, "Enable hardware data router support");
+MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
 
 static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
 {
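
This flips hardware data router support on by default.  The 0400
permissions make the parameter readable (by root) under
/sys/module/zfcp/parameters/ but not writable at runtime, so restoring
the old behavior means setting it at load time, e.g.:

	modprobe zfcp datarouter=0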

+ 6 - 6
drivers/s390/scsi/zfcp_sysfs.c

@@ -107,7 +107,7 @@ static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
 	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
 	unsigned long val;
 
-	if (strict_strtoul(buf, 0, &val) || val != 0)
+	if (kstrtoul(buf, 0, &val) || val != 0)
 		return -EINVAL;
 
 	zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING);
@@ -146,7 +146,7 @@ static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev,
 	unsigned long val;
 	struct scsi_device *sdev;
 
-	if (strict_strtoul(buf, 0, &val) || val != 0)
+	if (kstrtoul(buf, 0, &val) || val != 0)
 		return -EINVAL;
 
 	sdev = zfcp_unit_sdev(unit);
@@ -196,7 +196,7 @@ static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
 	if (!adapter)
 		return -ENODEV;
 
-	if (strict_strtoul(buf, 0, &val) || val != 0) {
+	if (kstrtoul(buf, 0, &val) || val != 0) {
 		retval = -EINVAL;
 		goto out;
 	}
@@ -248,7 +248,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
 	if (!adapter)
 		return -ENODEV;
 
-	if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn))
+	if (kstrtoull(buf, 0, (unsigned long long *) &wwpn))
 		goto out;
 
 	port = zfcp_get_port_by_wwpn(adapter, wwpn);
@@ -309,7 +309,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
 	u64 fcp_lun;
 	int retval;
 
-	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
+	if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun))
 		return -EINVAL;
 
 	retval = zfcp_unit_add(port, fcp_lun);
@@ -327,7 +327,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
 	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
 	u64 fcp_lun;
 
-	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
+	if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun))
 		return -EINVAL;
 
 	if (zfcp_unit_remove(port, fcp_lun))

+ 1 - 0
drivers/scsi/Kconfig

@@ -601,6 +601,7 @@ config SCSI_ARCMSR
 	  To compile this driver as a module, choose M here: the
 	  module will be called arcmsr (modprobe arcmsr).
 
+source "drivers/scsi/esas2r/Kconfig"
 source "drivers/scsi/megaraid/Kconfig.megaraid"
 source "drivers/scsi/mpt2sas/Kconfig"
 source "drivers/scsi/mpt3sas/Kconfig"

+ 1 - 0
drivers/scsi/Makefile

@@ -141,6 +141,7 @@ obj-$(CONFIG_SCSI_CXGB3_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgbi/
 obj-$(CONFIG_SCSI_CXGB4_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgbi/
 obj-$(CONFIG_SCSI_BNX2_ISCSI)	+= libiscsi.o bnx2i/
 obj-$(CONFIG_BE2ISCSI)		+= libiscsi.o be2iscsi/
+obj-$(CONFIG_SCSI_ESAS2R)	+= esas2r/
 obj-$(CONFIG_SCSI_PMCRAID)	+= pmcraid.o
 obj-$(CONFIG_SCSI_VIRTIO)	+= virtio_scsi.o
 obj-$(CONFIG_VMWARE_PVSCSI)	+= vmw_pvscsi.o

+ 3 - 3
drivers/scsi/bfa/bfad.c

@@ -63,9 +63,9 @@ int		max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
 u32	bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
 u32	*bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
 
-#define BFAD_FW_FILE_CB		"cbfw-3.2.1.0.bin"
-#define BFAD_FW_FILE_CT		"ctfw-3.2.1.0.bin"
-#define BFAD_FW_FILE_CT2	"ct2fw-3.2.1.0.bin"
+#define BFAD_FW_FILE_CB		"cbfw-3.2.1.1.bin"
+#define BFAD_FW_FILE_CT		"ctfw-3.2.1.1.bin"
+#define BFAD_FW_FILE_CT2	"ct2fw-3.2.1.1.bin"
 
 static u32 *bfad_load_fwimg(struct pci_dev *pdev);
 static void bfad_free_fwimg(void);

+ 1 - 1
drivers/scsi/bnx2i/57xx_iscsi_constants.h

@@ -1,6 +1,6 @@
 /* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
  *
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by

+ 1 - 1
drivers/scsi/bnx2i/57xx_iscsi_hsi.h

@@ -1,6 +1,6 @@
 /* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
  *
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by

+ 1 - 1
drivers/scsi/bnx2i/bnx2i.h

@@ -1,6 +1,6 @@
 /* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *

+ 1 - 1
drivers/scsi/bnx2i/bnx2i_hwi.c

@@ -1,6 +1,6 @@
 /* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *

+ 3 - 3
drivers/scsi/bnx2i/bnx2i_init.c

@@ -1,6 +1,6 @@
 /* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
@@ -18,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
 static u32 adapter_count;
 
 #define DRV_MODULE_NAME		"bnx2i"
-#define DRV_MODULE_VERSION	"2.7.2.2"
-#define DRV_MODULE_RELDATE	"Apr 25, 2012"
+#define DRV_MODULE_VERSION	"2.7.6.2"
+#define DRV_MODULE_RELDATE	"Jun 06, 2013"
 
 static char version[] =
 		"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \

+ 1 - 1
drivers/scsi/bnx2i/bnx2i_iscsi.c

@@ -1,7 +1,7 @@
 /*
  * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *

+ 1 - 1
drivers/scsi/bnx2i/bnx2i_sysfs.c

@@ -1,6 +1,6 @@
 /* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2004 - 2012 Broadcom Corporation
+ * Copyright (c) 2004 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by

+ 1 - 1
drivers/scsi/eata_pio.c

@@ -919,7 +919,7 @@ static int eata_pio_detect(struct scsi_host_template *tpnt)
 	find_pio_EISA(&gc);
 	find_pio_ISA(&gc);
 
-	for (i = 0; i <= MAXIRQ; i++)
+	for (i = 0; i < MAXIRQ; i++)
 		if (reg_IRQ[i])
 			request_irq(i, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", NULL);
 

+ 5 - 0
drivers/scsi/esas2r/Kconfig

@@ -0,0 +1,5 @@
+config SCSI_ESAS2R
+	tristate "ATTO Technology's ExpressSAS RAID adapter driver"
+	depends on PCI && SCSI
+	---help---
+	  This driver supports the ATTO ExpressSAS R6xx SAS/SATA RAID controllers.

+ 5 - 0
drivers/scsi/esas2r/Makefile

@@ -0,0 +1,5 @@
+obj-$(CONFIG_SCSI_ESAS2R)	+= esas2r.o
+
+esas2r-objs := esas2r_log.o esas2r_disc.o esas2r_flash.o esas2r_init.o \
+	 esas2r_int.o esas2r_io.o esas2r_ioctl.o esas2r_targdb.o   \
+	 esas2r_vda.o esas2r_main.o
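
Together with the Kconfig entry above and the obj-$(CONFIG_SCSI_ESAS2R)
hook added to drivers/scsi/Makefile, this builds the driver like any
other SCSI module.  Minimal configuration sketch:

	CONFIG_PCI=y
	CONFIG_SCSI=y
	CONFIG_SCSI_ESAS2R=m	# builds esas2r.ko from the objects above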

+ 1254 - 0
drivers/scsi/esas2r/atioctl.h

@@ -0,0 +1,1254 @@
+/*  linux/drivers/scsi/esas2r/atioctl.h
+ *      ATTO IOCTL Handling
+ *
+ *  Copyright (c) 2001-2013 ATTO Technology, Inc.
+ *  (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  NO WARRANTY
+ *  THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ *  CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ *  LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ *  MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ *  solely responsible for determining the appropriateness of using and
+ *  distributing the Program and assumes all risks associated with its
+ *  exercise of rights under this Agreement, including but not limited to
+ *  the risks and costs of program errors, damage to or loss of data,
+ *  programs or equipment, and unavailability or interruption of operations.
+ *
+ *  DISCLAIMER OF LIABILITY
+ *  NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ *  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ *  DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ *  USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ *  HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "atvda.h"
+
+#ifndef ATIOCTL_H
+#define ATIOCTL_H
+
+#define EXPRESS_IOCTL_SIGNATURE        "Express"
+#define EXPRESS_IOCTL_SIGNATURE_SIZE   8
+
+/* structure definitions for IOCTls */
+
+struct __packed atto_express_ioctl_header {
+	u8 signature[EXPRESS_IOCTL_SIGNATURE_SIZE];
+	u8 return_code;
+
+#define IOCTL_SUCCESS               0
+#define IOCTL_ERR_INVCMD          101
+#define IOCTL_INIT_FAILED         102
+#define IOCTL_NOT_IMPLEMENTED     103
+#define IOCTL_BAD_CHANNEL         104
+#define IOCTL_TARGET_OVERRUN      105
+#define IOCTL_TARGET_NOT_ENABLED  106
+#define IOCTL_BAD_FLASH_IMGTYPE   107
+#define IOCTL_OUT_OF_RESOURCES    108
+#define IOCTL_GENERAL_ERROR       109
+#define IOCTL_INVALID_PARAM       110
+
+	u8 channel;
+	u8 retries;
+	u8 pad[5];
+};
+
+/*
+ * NOTE - if channel == 0xFF, the request is
+ * handled on the adapter it came in on.
+ */
+#define MAX_NODE_NAMES  256
+
+struct __packed atto_firmware_rw_request {
+	u8 function;
+	#define FUNC_FW_DOWNLOAD        0x09
+	#define FUNC_FW_UPLOAD          0x12
+
+	u8 img_type;
+	#define FW_IMG_FW               0x01
+	#define FW_IMG_BIOS             0x02
+	#define FW_IMG_NVR              0x03
+	#define FW_IMG_RAW              0x04
+	#define FW_IMG_FM_API           0x05
+	#define FW_IMG_FS_API           0x06
+
+	u8 pad[2];
+	u32 img_offset;
+	u32 img_size;
+	u8 image[0x80000];
+};
+
+struct __packed atto_param_rw_request {
+	u16 code;
+	char data_buffer[512];
+};
+
+#define MAX_CHANNEL 256
+
+struct __packed atto_channel_list {
+	u32 num_channels;
+	u8 channel[MAX_CHANNEL];
+};
+
+struct __packed atto_channel_info {
+	u8 major_rev;
+	u8 minor_rev;
+	u8 IRQ;
+	u8 revision_id;
+	u8 pci_bus;
+	u8 pci_dev_func;
+	u8 core_rev;
+	u8 host_no;
+	u16 device_id;
+	u16 vendor_id;
+	u16 ven_dev_id;
+	u8 pad[3];
+	u32 hbaapi_rev;
+};
+
+/*
+ * CSMI control codes
+ * class independent
+ */
+#define CSMI_CC_GET_DRVR_INFO        1
+#define CSMI_CC_GET_CNTLR_CFG        2
+#define CSMI_CC_GET_CNTLR_STS        3
+#define CSMI_CC_FW_DOWNLOAD          4
+
+/* RAID class */
+#define CSMI_CC_GET_RAID_INFO        10
+#define CSMI_CC_GET_RAID_CFG         11
+
+/* HBA class */
+#define CSMI_CC_GET_PHY_INFO         20
+#define CSMI_CC_SET_PHY_INFO         21
+#define CSMI_CC_GET_LINK_ERRORS      22
+#define CSMI_CC_SMP_PASSTHRU         23
+#define CSMI_CC_SSP_PASSTHRU         24
+#define CSMI_CC_STP_PASSTHRU         25
+#define CSMI_CC_GET_SATA_SIG         26
+#define CSMI_CC_GET_SCSI_ADDR        27
+#define CSMI_CC_GET_DEV_ADDR         28
+#define CSMI_CC_TASK_MGT             29
+#define CSMI_CC_GET_CONN_INFO        30
+
+/* PHY class */
+#define CSMI_CC_PHY_CTRL             60
+
+/*
+ * CSMI status codes
+ * class independent
+ */
+#define CSMI_STS_SUCCESS             0
+#define CSMI_STS_FAILED              1
+#define CSMI_STS_BAD_CTRL_CODE       2
+#define CSMI_STS_INV_PARAM           3
+#define CSMI_STS_WRITE_ATTEMPTED     4
+
+/* RAID class */
+#define CSMI_STS_INV_RAID_SET        1000
+
+/* HBA class */
+#define CSMI_STS_PHY_CHANGED         CSMI_STS_SUCCESS
+#define CSMI_STS_PHY_UNCHANGEABLE    2000
+#define CSMI_STS_INV_LINK_RATE       2001
+#define CSMI_STS_INV_PHY             2002
+#define CSMI_STS_INV_PHY_FOR_PORT    2003
+#define CSMI_STS_PHY_UNSELECTABLE    2004
+#define CSMI_STS_SELECT_PHY_OR_PORT  2005
+#define CSMI_STS_INV_PORT            2006
+#define CSMI_STS_PORT_UNSELECTABLE   2007
+#define CSMI_STS_CONNECTION_FAILED   2008
+#define CSMI_STS_NO_SATA_DEV         2009
+#define CSMI_STS_NO_SATA_SIGNATURE   2010
+#define CSMI_STS_SCSI_EMULATION      2011
+#define CSMI_STS_NOT_AN_END_DEV      2012
+#define CSMI_STS_NO_SCSI_ADDR        2013
+#define CSMI_STS_NO_DEV_ADDR         2014
+
+/* CSMI class independent structures */
+struct atto_csmi_get_driver_info {
+	char name[81];
+	char description[81];
+	u16 major_rev;
+	u16 minor_rev;
+	u16 build_rev;
+	u16 release_rev;
+	u16 csmi_major_rev;
+	u16 csmi_minor_rev;
+	#define CSMI_MAJOR_REV_0_81      0
+	#define CSMI_MINOR_REV_0_81      81
+
+	#define CSMI_MAJOR_REV           CSMI_MAJOR_REV_0_81
+	#define CSMI_MINOR_REV           CSMI_MINOR_REV_0_81
+};
+
+struct atto_csmi_get_pci_bus_addr {
+	u8 bus_num;
+	u8 device_num;
+	u8 function_num;
+	u8 reserved;
+};
+
+struct atto_csmi_get_cntlr_cfg {
+	u32 base_io_addr;
+
+	struct {
+		u32 base_memaddr_lo;
+		u32 base_memaddr_hi;
+	};
+
+	u32 board_id;
+	u16 slot_num;
+	#define CSMI_SLOT_NUM_UNKNOWN    0xFFFF
+
+	u8 cntlr_class;
+	#define CSMI_CNTLR_CLASS_HBA     5
+
+	u8 io_bus_type;
+	#define CSMI_BUS_TYPE_PCI        3
+	#define CSMI_BUS_TYPE_PCMCIA     4
+
+	union {
+		struct atto_csmi_get_pci_bus_addr pci_addr;
+		u8 reserved[32];
+	};
+
+	char serial_num[81];
+	u16 major_rev;
+	u16 minor_rev;
+	u16 build_rev;
+	u16 release_rev;
+	u16 bios_major_rev;
+	u16 bios_minor_rev;
+	u16 bios_build_rev;
+	u16 bios_release_rev;
+	u32 cntlr_flags;
+	#define CSMI_CNTLRF_SAS_HBA      0x00000001
+	#define CSMI_CNTLRF_SAS_RAID     0x00000002
+	#define CSMI_CNTLRF_SATA_HBA     0x00000004
+	#define CSMI_CNTLRF_SATA_RAID    0x00000008
+	#define CSMI_CNTLRF_FWD_SUPPORT  0x00010000
+	#define CSMI_CNTLRF_FWD_ONLINE   0x00020000
+	#define CSMI_CNTLRF_FWD_SRESET   0x00040000
+	#define CSMI_CNTLRF_FWD_HRESET   0x00080000
+	#define CSMI_CNTLRF_FWD_RROM     0x00100000
+
+	u16 rrom_major_rev;
+	u16 rrom_minor_rev;
+	u16 rrom_build_rev;
+	u16 rrom_release_rev;
+	u16 rrom_biosmajor_rev;
+	u16 rrom_biosminor_rev;
+	u16 rrom_biosbuild_rev;
+	u16 rrom_biosrelease_rev;
+	u8 reserved2[7];
+};
+
+struct atto_csmi_get_cntlr_sts {
+	u32 status;
+	#define CSMI_CNTLR_STS_GOOD          1
+	#define CSMI_CNTLR_STS_FAILED        2
+	#define CSMI_CNTLR_STS_OFFLINE       3
+	#define CSMI_CNTLR_STS_POWEROFF      4
+
+	u32 offline_reason;
+	#define CSMI_OFFLINE_NO_REASON       0
+	#define CSMI_OFFLINE_INITIALIZING    1
+	#define CSMI_OFFLINE_BUS_DEGRADED    2
+	#define CSMI_OFFLINE_BUS_FAILURE     3
+
+	u8 reserved[28];
+};
+
+struct atto_csmi_fw_download {
+	u32 buffer_len;
+	u32 download_flags;
+	#define CSMI_FWDF_VALIDATE       0x00000001
+	#define CSMI_FWDF_SOFT_RESET     0x00000002
+	#define CSMI_FWDF_HARD_RESET     0x00000004
+
+	u8 reserved[32];
+	u16 status;
+	#define CSMI_FWD_STS_SUCCESS     0
+	#define CSMI_FWD_STS_FAILED      1
+	#define CSMI_FWD_STS_USING_RROM  2
+	#define CSMI_FWD_STS_REJECT      3
+	#define CSMI_FWD_STS_DOWNREV     4
+
+	u16 severity;
+	#define CSMI_FWD_SEV_INFO        0
+	#define CSMI_FWD_SEV_WARNING     1
+	#define CSMI_FWD_SEV_ERROR       2
+	#define CSMI_FWD_SEV_FATAL       3
+
+};
+
+/* CSMI RAID class structures */
+struct atto_csmi_get_raid_info {
+	u32 num_raid_sets;
+	u32 max_drivesper_set;
+	u8 reserved[92];
+};
+
+struct atto_csmi_raid_drives {
+	char model[40];
+	char firmware[8];
+	char serial_num[40];
+	u8 sas_addr[8];
+	u8 lun[8];
+	u8 drive_sts;
+	#define CSMI_DRV_STS_OK          0
+	#define CSMI_DRV_STS_REBUILDING  1
+	#define CSMI_DRV_STS_FAILED      2
+	#define CSMI_DRV_STS_DEGRADED    3
+
+	u8 drive_usage;
+	#define CSMI_DRV_USE_NOT_USED    0
+	#define CSMI_DRV_USE_MEMBER      1
+	#define CSMI_DRV_USE_SPARE       2
+
+	u8 reserved[30]; /* spec says 22 */
+};
+
+struct atto_csmi_get_raid_cfg {
+	u32 raid_set_index;
+	u32 capacity;
+	u32 stripe_size;
+	u8 raid_type;
+	u8 status;
+	u8 information;
+	u8 drive_cnt;
+	u8 reserved[20];
+
+	struct atto_csmi_raid_drives drives[1];
+};
+
+/* CSMI HBA class structures */
+struct atto_csmi_phy_entity {
+	u8 ident_frame[0x1C];
+	u8 port_id;
+	u8 neg_link_rate;
+	u8 min_link_rate;
+	u8 max_link_rate;
+	u8 phy_change_cnt;
+	u8 auto_discover;
+	#define CSMI_DISC_NOT_SUPPORTED  0x00
+	#define CSMI_DISC_NOT_STARTED    0x01
+	#define CSMI_DISC_IN_PROGRESS    0x02
+	#define CSMI_DISC_COMPLETE       0x03
+	#define CSMI_DISC_ERROR          0x04
+
+	u8 reserved[2];
+	u8 attach_ident_frame[0x1C];
+};
+
+struct atto_csmi_get_phy_info {
+	u8 number_of_phys;
+	u8 reserved[3];
+	struct atto_csmi_phy_entity
+		phy[32];
+};
+
+struct atto_csmi_set_phy_info {
+	u8 phy_id;
+	u8 neg_link_rate;
+	#define CSMI_NEG_RATE_NEGOTIATE  0x00
+	#define CSMI_NEG_RATE_PHY_DIS    0x01
+
+	u8 prog_minlink_rate;
+	u8 prog_maxlink_rate;
+	u8 signal_class;
+	#define CSMI_SIG_CLASS_UNKNOWN   0x00
+	#define CSMI_SIG_CLASS_DIRECT    0x01
+	#define CSMI_SIG_CLASS_SERVER    0x02
+	#define CSMI_SIG_CLASS_ENCLOSURE 0x03
+
+	u8 reserved[3];
+};
+
+struct atto_csmi_get_link_errors {
+	u8 phy_id;
+	u8 reset_cnts;
+	#define CSMI_RESET_CNTS_NO       0x00
+	#define CSMI_RESET_CNTS_YES      0x01
+
+	u8 reserved[2];
+	u32 inv_dw_cnt;
+	u32 disp_err_cnt;
+	u32 loss_ofdw_sync_cnt;
+	u32 phy_reseterr_cnt;
+
+	/*
+	 * The following field has been added by ATTO for ease of
+	 * implementation of additional statistics.  Drivers must validate
+	 * the length of the IOCTL payload prior to filling them in so CSMI
+	 * compliant applications function correctly.
+	 */
+
+	u32 crc_err_cnt;
+};
+
+struct atto_csmi_smp_passthru {
+	u8 phy_id;
+	u8 port_id;
+	u8 conn_rate;
+	u8 reserved;
+	u8 dest_sas_addr[8];
+	u32 req_len;
+	u8 smp_req[1020];
+	u8 conn_sts;
+	u8 reserved2[3];
+	u32 rsp_len;
+	u8 smp_rsp[1020];
+};
+
+struct atto_csmi_ssp_passthru_sts {
+	u8 conn_sts;
+	u8 reserved[3];
+	u8 data_present;
+	u8 status;
+	u16 rsp_length;
+	u8 rsp[256];
+	u32 data_bytes;
+};
+
+struct atto_csmi_ssp_passthru {
+	u8 phy_id;
+	u8 port_id;
+	u8 conn_rate;
+	u8 reserved;
+	u8 dest_sas_addr[8];
+	u8 lun[8];
+	u8 cdb_len;
+	u8 add_cdb_len;
+	u8 reserved2[2];
+	u8 cdb[16];
+	u32 flags;
+	#define CSMI_SSPF_DD_READ        0x00000001
+	#define CSMI_SSPF_DD_WRITE       0x00000002
+	#define CSMI_SSPF_DD_UNSPECIFIED 0x00000004
+	#define CSMI_SSPF_TA_SIMPLE      0x00000000
+	#define CSMI_SSPF_TA_HEAD_OF_Q   0x00000010
+	#define CSMI_SSPF_TA_ORDERED     0x00000020
+	#define CSMI_SSPF_TA_ACA         0x00000040
+
+	u8 add_cdb[24];
+	u32 data_len;
+
+	struct atto_csmi_ssp_passthru_sts sts;
+};
+
+struct atto_csmi_stp_passthru_sts {
+	u8 conn_sts;
+	u8 reserved[3];
+	u8 sts_fis[20];
+	u32 scr[16];
+	u32 data_bytes;
+};
+
+struct atto_csmi_stp_passthru {
+	u8 phy_id;
+	u8 port_id;
+	u8 conn_rate;
+	u8 reserved;
+	u8 dest_sas_addr[8];
+	u8 reserved2[4];
+	u8 command_fis[20];
+	u32 flags;
+	#define CSMI_STPF_DD_READ        0x00000001
+	#define CSMI_STPF_DD_WRITE       0x00000002
+	#define CSMI_STPF_DD_UNSPECIFIED 0x00000004
+	#define CSMI_STPF_PIO            0x00000010
+	#define CSMI_STPF_DMA            0x00000020
+	#define CSMI_STPF_PACKET         0x00000040
+	#define CSMI_STPF_DMA_QUEUED     0x00000080
+	#define CSMI_STPF_EXECUTE_DIAG   0x00000100
+	#define CSMI_STPF_RESET_DEVICE   0x00000200
+
+	u32 data_len;
+
+	struct atto_csmi_stp_passthru_sts sts;
+};
+
+struct atto_csmi_get_sata_sig {
+	u8 phy_id;
+	u8 reserved[3];
+	u8 reg_dth_fis[20];
+};
+
+struct atto_csmi_get_scsi_addr {
+	u8 sas_addr[8];
+	u8 sas_lun[8];
+	u8 host_index;
+	u8 path_id;
+	u8 target_id;
+	u8 lun;
+};
+
+struct atto_csmi_get_dev_addr {
+	u8 host_index;
+	u8 path_id;
+	u8 target_id;
+	u8 lun;
+	u8 sas_addr[8];
+	u8 sas_lun[8];
+};
+
+struct atto_csmi_task_mgmt {
+	u8 host_index;
+	u8 path_id;
+	u8 target_id;
+	u8 lun;
+	u32 flags;
+	#define CSMI_TMF_TASK_IU         0x00000001
+	#define CSMI_TMF_HARD_RST        0x00000002
+	#define CSMI_TMF_SUPPRESS_RSLT   0x00000004
+
+	u32 queue_tag;
+	u32 reserved;
+	u8 task_mgt_func;
+	u8 reserved2[7];
+	u32 information;
+	#define CSMI_TM_INFO_TEST        1
+	#define CSMI_TM_INFO_EXCEEDED    2
+	#define CSMI_TM_INFO_DEMAND      3
+	#define CSMI_TM_INFO_TRIGGER     4
+
+	struct atto_csmi_ssp_passthru_sts sts;
+
+};
+
+struct atto_csmi_get_conn_info {
+	u32 pinout;
+	#define CSMI_CON_UNKNOWN         0x00000001
+	#define CSMI_CON_SFF_8482        0x00000002
+	#define CSMI_CON_SFF_8470_LANE_1 0x00000100
+	#define CSMI_CON_SFF_8470_LANE_2 0x00000200
+	#define CSMI_CON_SFF_8470_LANE_3 0x00000400
+	#define CSMI_CON_SFF_8470_LANE_4 0x00000800
+	#define CSMI_CON_SFF_8484_LANE_1 0x00010000
+	#define CSMI_CON_SFF_8484_LANE_2 0x00020000
+	#define CSMI_CON_SFF_8484_LANE_3 0x00040000
+	#define CSMI_CON_SFF_8484_LANE_4 0x00080000
+
+	u8 connector[16];
+	u8 location;
+	#define CSMI_CON_INTERNAL        0x02
+	#define CSMI_CON_EXTERNAL        0x04
+	#define CSMI_CON_SWITCHABLE      0x08
+	#define CSMI_CON_AUTO            0x10
+
+	u8 reserved[15];
+};
+
+/* CSMI PHY class structures */
+struct atto_csmi_character {
+	u8 type_flags;
+	#define CSMI_CTF_POS_DISP        0x01
+	#define CSMI_CTF_NEG_DISP        0x02
+	#define CSMI_CTF_CTRL_CHAR       0x04
+
+	u8 value;
+};
+
+struct atto_csmi_pc_ctrl {
+	u8 type;
+	#define CSMI_PC_TYPE_UNDEFINED   0x00
+	#define CSMI_PC_TYPE_SATA        0x01
+	#define CSMI_PC_TYPE_SAS         0x02
+	u8 rate;
+	u8 reserved[6];
+	u32 vendor_unique[8];
+	u32 tx_flags;
+	#define CSMI_PC_TXF_PREEMP_DIS   0x00000001
+
+	signed char tx_amplitude;
+	signed char tx_preemphasis;
+	signed char tx_slew_rate;
+	signed char tx_reserved[13];
+	u8 tx_vendor_unique[64];
+	u32 rx_flags;
+	#define CSMI_PC_RXF_EQ_DIS       0x00000001
+
+	signed char rx_threshold;
+	signed char rx_equalization_gain;
+	signed char rx_reserved[14];
+	u8 rx_vendor_unique[64];
+	u32 pattern_flags;
+	#define CSMI_PC_PATF_FIXED       0x00000001
+	#define CSMI_PC_PATF_DIS_SCR     0x00000002
+	#define CSMI_PC_PATF_DIS_ALIGN   0x00000004
+	#define CSMI_PC_PATF_DIS_SSC     0x00000008
+
+	u8 fixed_pattern;
+	#define CSMI_PC_FP_CJPAT         0x00000001
+	#define CSMI_PC_FP_ALIGN         0x00000002
+
+	u8 user_pattern_len;
+	u8 pattern_reserved[6];
+
+	struct atto_csmi_character user_pattern_buffer[16];
+};
+
+struct atto_csmi_phy_ctrl {
+	u32 function;
+	#define CSMI_PC_FUNC_GET_SETUP   0x00000100
+
+	u8 phy_id;
+	u16 len_of_cntl;
+	u8 num_of_cntls;
+	u8 reserved[4];
+	u32 link_flags;
+	#define CSMI_PHY_ACTIVATE_CTRL   0x00000001
+	#define CSMI_PHY_UPD_SPINUP_RATE 0x00000002
+	#define CSMI_PHY_AUTO_COMWAKE    0x00000004
+
+	u8 spinup_rate;
+	u8 link_reserved[7];
+	u32 vendor_unique[8];
+
+	struct atto_csmi_pc_ctrl control[1];
+};
+
+union atto_ioctl_csmi {
+	struct atto_csmi_get_driver_info drvr_info;
+	struct atto_csmi_get_cntlr_cfg cntlr_cfg;
+	struct atto_csmi_get_cntlr_sts cntlr_sts;
+	struct atto_csmi_fw_download fw_dwnld;
+	struct atto_csmi_get_raid_info raid_info;
+	struct atto_csmi_get_raid_cfg raid_cfg;
+	struct atto_csmi_get_phy_info get_phy_info;
+	struct atto_csmi_set_phy_info set_phy_info;
+	struct atto_csmi_get_link_errors link_errs;
+	struct atto_csmi_smp_passthru smp_pass_thru;
+	struct atto_csmi_ssp_passthru ssp_pass_thru;
+	struct atto_csmi_stp_passthru stp_pass_thru;
+	struct atto_csmi_task_mgmt tsk_mgt;
+	struct atto_csmi_get_sata_sig sata_sig;
+	struct atto_csmi_get_scsi_addr scsi_addr;
+	struct atto_csmi_get_dev_addr dev_addr;
+	struct atto_csmi_get_conn_info conn_info[32];
+	struct atto_csmi_phy_ctrl phy_ctrl;
+};
+
+struct atto_csmi {
+	u32 control_code;
+	u32 status;
+	union atto_ioctl_csmi data;
+};
+
+struct atto_module_info {
+	void *adapter;
+	void *pci_dev;
+	void *scsi_host;
+	unsigned short host_no;
+	union {
+		struct {
+			u64 node_name;
+			u64 port_name;
+		};
+		u64 sas_addr;
+	};
+};
+
+#define ATTO_FUNC_GET_ADAP_INFO      0x00
+#define ATTO_VER_GET_ADAP_INFO0      0
+#define ATTO_VER_GET_ADAP_INFO       ATTO_VER_GET_ADAP_INFO0
+
+struct __packed atto_hba_get_adapter_info {
+
+	struct {
+		u16 vendor_id;
+		u16 device_id;
+		u16 ss_vendor_id;
+		u16 ss_device_id;
+		u8 class_code[3];
+		u8 rev_id;
+		u8 bus_num;
+		u8 dev_num;
+		u8 func_num;
+		u8 link_width_max;
+		u8 link_width_curr;
+	    #define ATTO_GAI_PCILW_UNKNOWN   0x00
+
+		u8 link_speed_max;
+		u8 link_speed_curr;
+	    #define ATTO_GAI_PCILS_UNKNOWN   0x00
+	    #define ATTO_GAI_PCILS_GEN1      0x01
+	    #define ATTO_GAI_PCILS_GEN2      0x02
+	    #define ATTO_GAI_PCILS_GEN3      0x03
+
+		u8 interrupt_mode;
+	    #define ATTO_GAI_PCIIM_UNKNOWN   0x00
+	    #define ATTO_GAI_PCIIM_LEGACY    0x01
+	    #define ATTO_GAI_PCIIM_MSI       0x02
+	    #define ATTO_GAI_PCIIM_MSIX      0x03
+
+		u8 msi_vector_cnt;
+		u8 reserved[19];
+	} pci;
+
+	u8 adap_type;
+	#define ATTO_GAI_AT_EPCIU320     0x00
+	#define ATTO_GAI_AT_ESASRAID     0x01
+	#define ATTO_GAI_AT_ESASRAID2    0x02
+	#define ATTO_GAI_AT_ESASHBA      0x03
+	#define ATTO_GAI_AT_ESASHBA2     0x04
+	#define ATTO_GAI_AT_CELERITY     0x05
+	#define ATTO_GAI_AT_CELERITY8    0x06
+	#define ATTO_GAI_AT_FASTFRAME    0x07
+	#define ATTO_GAI_AT_ESASHBA3     0x08
+	#define ATTO_GAI_AT_CELERITY16   0x09
+	#define ATTO_GAI_AT_TLSASHBA     0x0A
+	#define ATTO_GAI_AT_ESASHBA4     0x0B
+
+	u8 adap_flags;
+	#define ATTO_GAI_AF_DEGRADED     0x01
+	#define ATTO_GAI_AF_SPT_SUPP     0x02
+	#define ATTO_GAI_AF_DEVADDR_SUPP 0x04
+	#define ATTO_GAI_AF_PHYCTRL_SUPP 0x08
+	#define ATTO_GAI_AF_TEST_SUPP    0x10
+	#define ATTO_GAI_AF_DIAG_SUPP    0x20
+	#define ATTO_GAI_AF_VIRT_SES     0x40
+	#define ATTO_GAI_AF_CONN_CTRL    0x80
+
+	u8 num_ports;
+	u8 num_phys;
+	u8 drvr_rev_major;
+	u8 drvr_rev_minor;
+	u8 drvr_revsub_minor;
+	u8 drvr_rev_build;
+	char drvr_rev_ascii[16];
+	char drvr_name[32];
+	char firmware_rev[16];
+	char flash_rev[16];
+	char model_name_short[16];
+	char model_name[32];
+	u32 num_targets;
+	u32 num_targsper_bus;
+	u32 num_lunsper_targ;
+	u8 num_busses;
+	u8 num_connectors;
+	u8 adap_flags2;
+	#define ATTO_GAI_AF2_FCOE_SUPP       0x01
+	#define ATTO_GAI_AF2_NIC_SUPP        0x02
+	#define ATTO_GAI_AF2_LOCATE_SUPP     0x04
+	#define ATTO_GAI_AF2_ADAP_CTRL_SUPP  0x08
+	#define ATTO_GAI_AF2_DEV_INFO_SUPP   0x10
+	#define ATTO_GAI_AF2_NPIV_SUPP       0x20
+	#define ATTO_GAI_AF2_MP_SUPP         0x40
+
+	u8 num_temp_sensors;
+	u32 num_targets_backend;
+	u32 tunnel_flags;
+	#define ATTO_GAI_TF_MEM_RW           0x00000001
+	#define ATTO_GAI_TF_TRACE            0x00000002
+	#define ATTO_GAI_TF_SCSI_PASS_THRU   0x00000004
+	#define ATTO_GAI_TF_GET_DEV_ADDR     0x00000008
+	#define ATTO_GAI_TF_PHY_CTRL         0x00000010
+	#define ATTO_GAI_TF_CONN_CTRL        0x00000020
+	#define ATTO_GAI_TF_GET_DEV_INFO     0x00000040
+
+	u8 reserved3[0x138];
+};
+
+#define ATTO_FUNC_GET_ADAP_ADDR      0x01
+#define ATTO_VER_GET_ADAP_ADDR0      0
+#define ATTO_VER_GET_ADAP_ADDR       ATTO_VER_GET_ADAP_ADDR0
+
+struct __packed atto_hba_get_adapter_address {
+
+	u8 addr_type;
+	#define ATTO_GAA_AT_PORT         0x00
+	#define ATTO_GAA_AT_NODE         0x01
+	#define ATTO_GAA_AT_CURR_MAC     0x02
+	#define ATTO_GAA_AT_PERM_MAC     0x03
+	#define ATTO_GAA_AT_VNIC         0x04
+
+	u8 port_id;
+	u16 addr_len;
+	u8 address[256];
+};
+
+#define ATTO_FUNC_MEM_RW             0x02
+#define ATTO_VER_MEM_RW0             0
+#define ATTO_VER_MEM_RW              ATTO_VER_MEM_RW0
+
+struct __packed atto_hba_memory_read_write {
+	u8 mem_func;
+	u8 mem_type;
+	union {
+		u8 pci_index;
+		u8 i2c_dev;
+	};
+	u8 i2c_status;
+	u32 length;
+	u64 address;
+	u8 reserved[48];
+
+};
+
+#define ATTO_FUNC_TRACE              0x03
+#define ATTO_VER_TRACE0              0
+#define ATTO_VER_TRACE1              1
+#define ATTO_VER_TRACE               ATTO_VER_TRACE1
+
+struct __packed atto_hba_trace {
+	u8 trace_func;
+	#define ATTO_TRC_TF_GET_INFO     0x00
+	#define ATTO_TRC_TF_ENABLE       0x01
+	#define ATTO_TRC_TF_DISABLE      0x02
+	#define ATTO_TRC_TF_SET_MASK     0x03
+	#define ATTO_TRC_TF_UPLOAD       0x04
+	#define ATTO_TRC_TF_RESET        0x05
+
+	u8 trace_type;
+	#define ATTO_TRC_TT_DRIVER       0x00
+	#define ATTO_TRC_TT_FWCOREDUMP   0x01
+
+	u8 reserved[2];
+	u32 current_offset;
+	u32 total_length;
+	u32 trace_mask;
+	u8 reserved2[48];
+};
+
+#define ATTO_FUNC_SCSI_PASS_THRU     0x04
+#define ATTO_VER_SCSI_PASS_THRU0     0
+#define ATTO_VER_SCSI_PASS_THRU      ATTO_VER_SCSI_PASS_THRU0
+
+struct __packed atto_hba_scsi_pass_thru {
+	u8 cdb[32];
+	u8 cdb_length;
+	u8 req_status;
+	#define ATTO_SPT_RS_SUCCESS      0x00
+	#define ATTO_SPT_RS_FAILED       0x01
+	#define ATTO_SPT_RS_OVERRUN      0x02
+	#define ATTO_SPT_RS_UNDERRUN     0x03
+	#define ATTO_SPT_RS_NO_DEVICE    0x04
+	#define ATTO_SPT_RS_NO_LUN       0x05
+	#define ATTO_SPT_RS_TIMEOUT      0x06
+	#define ATTO_SPT_RS_BUS_RESET    0x07
+	#define ATTO_SPT_RS_ABORTED      0x08
+	#define ATTO_SPT_RS_BUSY         0x09
+	#define ATTO_SPT_RS_DEGRADED     0x0A
+
+	u8 scsi_status;
+	u8 sense_length;
+	u32 flags;
+	#define ATTO_SPTF_DATA_IN    0x00000001
+	#define ATTO_SPTF_DATA_OUT   0x00000002
+	#define ATTO_SPTF_SIMPLE_Q   0x00000004
+	#define ATTO_SPTF_HEAD_OF_Q  0x00000008
+	#define ATTO_SPTF_ORDERED_Q  0x00000010
+
+	u32 timeout;
+	u32 target_id;
+	u8 lun[8];
+	u32 residual_length;
+	u8 sense_data[0xFC];
+	u8 reserved[0x28];
+};
+
+#define ATTO_FUNC_GET_DEV_ADDR       0x05
+#define ATTO_VER_GET_DEV_ADDR0       0
+#define ATTO_VER_GET_DEV_ADDR        ATTO_VER_GET_DEV_ADDR0
+
+struct __packed atto_hba_get_device_address {
+	u8 addr_type;
+	#define ATTO_GDA_AT_PORT         0x00
+	#define ATTO_GDA_AT_NODE         0x01
+	#define ATTO_GDA_AT_MAC          0x02
+	#define ATTO_GDA_AT_PORTID       0x03
+	#define ATTO_GDA_AT_UNIQUE       0x04
+
+	u8 reserved;
+	u16 addr_len;
+	u32 target_id;
+	u8 address[256];
+};
+
+/* The following functions are supported by firmware but do not have any
+ * associated driver structures
+ */
+#define ATTO_FUNC_PHY_CTRL           0x06
+#define ATTO_FUNC_CONN_CTRL          0x0C
+#define ATTO_FUNC_ADAP_CTRL          0x0E
+#define ATTO_VER_ADAP_CTRL0          0
+#define ATTO_VER_ADAP_CTRL           ATTO_VER_ADAP_CTRL0
+
+struct __packed atto_hba_adap_ctrl {
+	u8 adap_func;
+	#define ATTO_AC_AF_HARD_RST      0x00
+	#define ATTO_AC_AF_GET_STATE     0x01
+	#define ATTO_AC_AF_GET_TEMP      0x02
+
+	u8 adap_state;
+	#define ATTO_AC_AS_UNKNOWN       0x00
+	#define ATTO_AC_AS_OK            0x01
+	#define ATTO_AC_AS_RST_SCHED     0x02
+	#define ATTO_AC_AS_RST_IN_PROG   0x03
+	#define ATTO_AC_AS_RST_DISC      0x04
+	#define ATTO_AC_AS_DEGRADED      0x05
+	#define ATTO_AC_AS_DISABLED      0x06
+	#define ATTO_AC_AS_TEMP          0x07
+
+	u8 reserved[2];
+
+	union {
+		struct {
+			u8 temp_sensor;
+			u8 temp_state;
+
+	#define ATTO_AC_TS_UNSUPP        0x00
+	#define ATTO_AC_TS_UNKNOWN       0x01
+	#define ATTO_AC_TS_INIT_FAILED   0x02
+	#define ATTO_AC_TS_NORMAL        0x03
+	#define ATTO_AC_TS_OUT_OF_RANGE  0x04
+	#define ATTO_AC_TS_FAULT         0x05
+
+			signed short temp_value;
+			signed short temp_lower_lim;
+			signed short temp_upper_lim;
+			char temp_desc[32];
+			u8 reserved2[20];
+		};
+	};
+};
+
+#define ATTO_FUNC_GET_DEV_INFO       0x0F
+#define ATTO_VER_GET_DEV_INFO0       0
+#define ATTO_VER_GET_DEV_INFO        ATTO_VER_GET_DEV_INFO0
+
+struct __packed atto_hba_sas_device_info {
+
+    #define ATTO_SDI_MAX_PHYS_WIDE_PORT  16
+
+	u8 phy_id[ATTO_SDI_MAX_PHYS_WIDE_PORT]; /* IDs of parent exp/adapt */
+	#define ATTO_SDI_PHY_ID_INV      ATTO_SAS_PHY_ID_INV
+	u32 exp_target_id;
+	u32 sas_port_mask;
+	u8 sas_level;
+	#define ATTO_SDI_SAS_LVL_INV     0xFF
+
+	u8 slot_num;
+	#define ATTO_SDI_SLOT_NUM_INV    ATTO_SLOT_NUM_INV
+
+	u8 dev_type;
+	#define ATTO_SDI_DT_END_DEVICE   0
+	#define ATTO_SDI_DT_EXPANDER     1
+	#define ATTO_SDI_DT_PORT_MULT    2
+
+	u8 ini_flags;
+	u8 tgt_flags;
+	u8 link_rate; /* SMP_RATE_XXX */
+	u8 loc_flags;
+	#define ATTO_SDI_LF_DIRECT       0x01
+	#define ATTO_SDI_LF_EXPANDER     0x02
+	#define ATTO_SDI_LF_PORT_MULT    0x04
+	u8 pm_port;
+	u8 reserved[0x60];
+};
+
+union atto_hba_device_info {
+	struct atto_hba_sas_device_info sas_dev_info;
+};
+
+struct __packed atto_hba_get_device_info {
+	u32 target_id;
+	u8 info_type;
+	#define ATTO_GDI_IT_UNKNOWN      0x00
+	#define ATTO_GDI_IT_SAS          0x01
+	#define ATTO_GDI_IT_FC           0x02
+	#define ATTO_GDI_IT_FCOE         0x03
+
+	u8 reserved[11];
+	union atto_hba_device_info dev_info;
+};
+
+struct atto_ioctl {
+	u8 version;
+	u8 function; /* ATTO_FUNC_XXX */
+	u8 status;
+#define ATTO_STS_SUCCESS         0x00
+#define ATTO_STS_FAILED          0x01
+#define ATTO_STS_INV_VERSION     0x02
+#define ATTO_STS_OUT_OF_RSRC     0x03
+#define ATTO_STS_INV_FUNC        0x04
+#define ATTO_STS_UNSUPPORTED     0x05
+#define ATTO_STS_INV_ADAPTER     0x06
+#define ATTO_STS_INV_DRVR_VER    0x07
+#define ATTO_STS_INV_PARAM       0x08
+#define ATTO_STS_TIMEOUT         0x09
+#define ATTO_STS_NOT_APPL        0x0A
+#define ATTO_STS_DEGRADED        0x0B
+
+	u8 flags;
+	#define HBAF_TUNNEL      0x01
+
+	u32 data_length;
+	u8 reserved2[56];
+
+	union {
+		u8 byte[1];
+		struct atto_hba_get_adapter_info get_adap_info;
+		struct atto_hba_get_adapter_address get_adap_addr;
+		struct atto_hba_scsi_pass_thru scsi_pass_thru;
+		struct atto_hba_get_device_address get_dev_addr;
+		struct atto_hba_adap_ctrl adap_ctrl;
+		struct atto_hba_get_device_info get_dev_info;
+		struct atto_hba_trace trace;
+	} data;
+
+};
+
+struct __packed atto_ioctl_vda_scsi_cmd {
+
+    #define ATTO_VDA_SCSI_VER0   0
+    #define ATTO_VDA_SCSI_VER    ATTO_VDA_SCSI_VER0
+
+	u8 cdb[16];
+	u32 flags;
+	u32 data_length;
+	u32 residual_length;
+	u16 target_id;
+	u8 sense_len;
+	u8 scsi_stat;
+	u8 reserved[8];
+	u8 sense_data[80];
+};
+
+struct __packed atto_ioctl_vda_flash_cmd {
+
+    #define ATTO_VDA_FLASH_VER0  0
+    #define ATTO_VDA_FLASH_VER   ATTO_VDA_FLASH_VER0
+
+	u32 flash_addr;
+	u32 data_length;
+	u8 sub_func;
+	u8 reserved[15];
+
+	union {
+		struct {
+			u32 flash_size;
+			u32 page_size;
+			u8 prod_info[32];
+		} info;
+
+		struct {
+			char file_name[16]; /* 8.3 fname, NULL term, wc=* */
+			u32 file_size;
+		} file;
+	} data;
+
+};
+
+struct __packed atto_ioctl_vda_diag_cmd {
+
+    #define ATTO_VDA_DIAG_VER0   0
+    #define ATTO_VDA_DIAG_VER    ATTO_VDA_DIAG_VER0
+
+	u64 local_addr;
+	u32 data_length;
+	u8 sub_func;
+	u8 flags;
+	u8 reserved[3];
+};
+
+struct __packed atto_ioctl_vda_cli_cmd {
+
+    #define ATTO_VDA_CLI_VER0    0
+    #define ATTO_VDA_CLI_VER     ATTO_VDA_CLI_VER0
+
+	u32 cmd_rsp_len;
+};
+
+struct __packed atto_ioctl_vda_smp_cmd {
+
+    #define ATTO_VDA_SMP_VER0    0
+    #define ATTO_VDA_SMP_VER     ATTO_VDA_SMP_VER0
+
+	u64 dest;
+	u32 cmd_rsp_len;
+};
+
+struct __packed atto_ioctl_vda_cfg_cmd {
+
+    #define ATTO_VDA_CFG_VER0    0
+    #define ATTO_VDA_CFG_VER     ATTO_VDA_CFG_VER0
+
+	u32 data_length;
+	u8 cfg_func;
+	u8 reserved[11];
+
+	union {
+		u8 bytes[112];
+		struct atto_vda_cfg_init init;
+	} data;
+
+};
+
+struct __packed atto_ioctl_vda_mgt_cmd {
+
+    #define ATTO_VDA_MGT_VER0    0
+    #define ATTO_VDA_MGT_VER     ATTO_VDA_MGT_VER0
+
+	u8 mgt_func;
+	u8 scan_generation;
+	u16 dev_index;
+	u32 data_length;
+	u8 reserved[8];
+	union {
+		u8 bytes[112];
+		struct atto_vda_devinfo dev_info;
+		struct atto_vda_grp_info grp_info;
+		struct atto_vdapart_info part_info;
+		struct atto_vda_dh_info dh_info;
+		struct atto_vda_metrics_info metrics_info;
+		struct atto_vda_schedule_info sched_info;
+		struct atto_vda_n_vcache_info nvcache_info;
+		struct atto_vda_buzzer_info buzzer_info;
+		struct atto_vda_adapter_info adapter_info;
+		struct atto_vda_temp_info temp_info;
+		struct atto_vda_fan_info fan_info;
+	} data;
+};
+
+struct __packed atto_ioctl_vda_gsv_cmd {
+
+    #define ATTO_VDA_GSV_VER0    0
+    #define ATTO_VDA_GSV_VER     ATTO_VDA_GSV_VER0
+
+	u8 rsp_len;
+	u8 reserved[7];
+	u8 version_info[1];
+	#define ATTO_VDA_VER_UNSUPPORTED 0xFF
+
+};
+
+struct __packed atto_ioctl_vda {
+	u8 version;
+	u8 function;    /* VDA_FUNC_XXXX */
+	u8 status;      /* ATTO_STS_XXX */
+	u8 vda_status;  /* RS_XXX (if status == ATTO_STS_SUCCESS) */
+	u32 data_length;
+	u8 reserved[8];
+
+	union {
+		struct atto_ioctl_vda_scsi_cmd scsi;
+		struct atto_ioctl_vda_flash_cmd flash;
+		struct atto_ioctl_vda_diag_cmd diag;
+		struct atto_ioctl_vda_cli_cmd cli;
+		struct atto_ioctl_vda_smp_cmd smp;
+		struct atto_ioctl_vda_cfg_cmd cfg;
+		struct atto_ioctl_vda_mgt_cmd mgt;
+		struct atto_ioctl_vda_gsv_cmd gsv;
+		u8 cmd_info[256];
+	} cmd;
+
+	union {
+		u8 data[1];
+		struct atto_vda_devinfo2 dev_info2;
+	} data;
+
+};
+
+struct __packed atto_ioctl_smp {
+	u8 version;
+	#define ATTO_SMP_VERSION0        0
+	#define ATTO_SMP_VERSION1        1
+	#define ATTO_SMP_VERSION2        2
+	#define ATTO_SMP_VERSION         ATTO_SMP_VERSION2
+
+	u8 function;
+#define ATTO_SMP_FUNC_DISC_SMP           0x00
+#define ATTO_SMP_FUNC_DISC_TARG          0x01
+#define ATTO_SMP_FUNC_SEND_CMD           0x02
+#define ATTO_SMP_FUNC_DISC_TARG_DIRECT   0x03
+#define ATTO_SMP_FUNC_SEND_CMD_DIRECT    0x04
+#define ATTO_SMP_FUNC_DISC_SMP_DIRECT    0x05
+
+	u8 status;      /* ATTO_STS_XXX */
+	u8 smp_status;  /* if status == ATTO_STS_SUCCESS */
+	#define ATTO_SMP_STS_SUCCESS     0x00
+	#define ATTO_SMP_STS_FAILURE     0x01
+	#define ATTO_SMP_STS_RESCAN      0x02
+	#define ATTO_SMP_STS_NOT_FOUND   0x03
+
+	u16 target_id;
+	u8 phy_id;
+	u8 dev_index;
+	u64 smp_sas_addr;
+	u64 targ_sas_addr;
+	u32 req_length;
+	u32 rsp_length;
+	u8 flags;
+	#define ATTO_SMPF_ROOT_EXP       0x01 /* expander direct attached */
+
+	u8 reserved[31];
+
+	union {
+		u8 byte[1];
+		u32 dword[1];
+	} data;
+
+};
+
+struct __packed atto_express_ioctl {
+	struct atto_express_ioctl_header header;
+
+	union {
+		struct atto_firmware_rw_request fwrw;
+		struct atto_param_rw_request prw;
+		struct atto_channel_list chanlist;
+		struct atto_channel_info chaninfo;
+		struct atto_ioctl ioctl_hba;
+		struct atto_module_info modinfo;
+		struct atto_ioctl_vda ioctl_vda;
+		struct atto_ioctl_smp ioctl_smp;
+		struct atto_csmi csmi;
+
+	} data;
+};
+
+/* The struct associated with the code is listed after the definition */
+#define EXPRESS_IOCTL_MIN             0x4500
+#define EXPRESS_IOCTL_RW_FIRMWARE     0x4500            /* FIRMWARERW    */
+#define EXPRESS_IOCTL_READ_PARAMS     0x4501            /* PARAMRW       */
+#define EXPRESS_IOCTL_WRITE_PARAMS    0x4502            /* PARAMRW       */
+#define EXPRESS_IOCTL_FC_API          0x4503            /* internal      */
+#define EXPRESS_IOCTL_GET_CHANNELS    0x4504            /* CHANNELLIST   */
+#define EXPRESS_IOCTL_CHAN_INFO       0x4505            /* CHANNELINFO   */
+#define EXPRESS_IOCTL_DEFAULT_PARAMS  0x4506            /* PARAMRW       */
+#define EXPRESS_ADDR_MEMORY           0x4507            /* MEMADDR       */
+#define EXPRESS_RW_MEMORY             0x4508            /* MEMRW         */
+#define EXPRESS_TSDK_DUMP             0x4509            /* TSDKDUMP      */
+#define EXPRESS_IOCTL_SMP             0x450A            /* IOCTL_SMP     */
+#define EXPRESS_CSMI                  0x450B            /* CSMI          */
+#define EXPRESS_IOCTL_HBA             0x450C            /* IOCTL_HBA     */
+#define EXPRESS_IOCTL_VDA             0x450D            /* IOCTL_VDA     */
+#define EXPRESS_IOCTL_GET_ID          0x450E            /* GET_ID        */
+#define EXPRESS_IOCTL_GET_MOD_INFO    0x450F            /* MODULE_INFO   */
+#define EXPRESS_IOCTL_MAX             0x450F
+
+#endif
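
Every request through this interface begins with the header defined at
the top of the file.  A driver-side validation sketch (the function
name is hypothetical; the constants come from the header above):

	static u8 express_ioctl_check(struct atto_express_ioctl_header *hdr)
	{
		if (memcmp(hdr->signature, EXPRESS_IOCTL_SIGNATURE,
			   EXPRESS_IOCTL_SIGNATURE_SIZE))
			return IOCTL_ERR_INVCMD;

		/* channel == 0xFF means "handle on the adapter the
		 * request came in on" */
		return IOCTL_SUCCESS;
	}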

+ 1319 - 0
drivers/scsi/esas2r/atvda.h

@@ -0,0 +1,1319 @@
+/*  linux/drivers/scsi/esas2r/atvda.h
+ *       ATTO VDA interface definitions
+ *
+ *  Copyright (c) 2001-2013 ATTO Technology, Inc.
+ *  (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  NO WARRANTY
+ *  THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ *  CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ *  LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ *  MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ *  solely responsible for determining the appropriateness of using and
+ *  distributing the Program and assumes all risks associated with its
+ *  exercise of rights under this Agreement, including but not limited to
+ *  the risks and costs of program errors, damage to or loss of data,
+ *  programs or equipment, and unavailability or interruption of operations.
+ *
+ *  DISCLAIMER OF LIABILITY
+ *  NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ *  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ *  DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ *  USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ *  HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+
+#ifndef ATVDA_H
+#define ATVDA_H
+
+struct __packed atto_dev_addr {
+	u64 dev_port;
+	u64 hba_port;
+	u8 lun;
+	u8 flags;
+	   #define VDA_DEVADDRF_SATA   0x01
+	   #define VDA_DEVADDRF_SSD    0x02
+	u8 link_speed; /* VDALINKSPEED_xxx */
+	u8 pad[1];
+};
+
+/* dev_addr2 was added for 64-bit alignment */
+
+struct __packed atto_dev_addr2 {
+	u64 dev_port;
+	u64 hba_port;
+	u8 lun;
+	u8 flags;
+	u8 link_speed;
+	u8 pad[5];
+};
+
+struct __packed atto_vda_sge {
+	u32 length;
+	u64 address;
+};
+
+
+/* VDA request function codes */
+
+#define VDA_FUNC_SCSI     0x00
+#define VDA_FUNC_FLASH    0x01
+#define VDA_FUNC_DIAG     0x02
+#define VDA_FUNC_AE       0x03
+#define VDA_FUNC_CLI      0x04
+#define VDA_FUNC_IOCTL    0x05
+#define VDA_FUNC_CFG      0x06
+#define VDA_FUNC_MGT      0x07
+#define VDA_FUNC_GSV      0x08
+
+
+/* VDA request status values.  for the host driver's benefit, values for
+ * SCSI requests start at zero; other request types may use these values
+ * as well. */
+
+#define RS_SUCCESS          0x00        /*! successful completion            */
+#define RS_INV_FUNC         0x01        /*! invalid command function         */
+#define RS_BUSY             0x02        /*! insufficient resources           */
+#define RS_SEL              0x03        /*! no target at target_id           */
+#define RS_NO_LUN           0x04        /*! invalid LUN                      */
+#define RS_TIMEOUT          0x05        /*! request timeout                  */
+#define RS_OVERRUN          0x06        /*! data overrun                     */
+#define RS_UNDERRUN         0x07        /*! data underrun                    */
+#define RS_SCSI_ERROR       0x08        /*! SCSI error occurred              */
+#define RS_ABORTED          0x0A        /*! command aborted                  */
+#define RS_RESID_MISM       0x0B        /*! residual length incorrect        */
+#define RS_TM_FAILED        0x0C        /*! task management failed           */
+#define RS_RESET            0x0D        /*! aborted due to bus reset         */
+#define RS_ERR_DMA_SG       0x0E        /*! error reading SG list            */
+#define RS_ERR_DMA_DATA     0x0F        /*! error transferring data          */
+#define RS_UNSUPPORTED      0x10        /*! unsupported request              */
+#define RS_SEL2             0x70        /*! internal generated RS_SEL        */
+#define RS_VDA_BASE         0x80        /*! base of VDA-specific errors      */
+#define RS_MGT_BASE         0x80        /*! base of VDA management errors    */
+#define RS_SCAN_FAIL        (RS_MGT_BASE + 0x00)
+#define RS_DEV_INVALID      (RS_MGT_BASE + 0x01)
+#define RS_DEV_ASSIGNED     (RS_MGT_BASE + 0x02)
+#define RS_DEV_REMOVE       (RS_MGT_BASE + 0x03)
+#define RS_DEV_LOST         (RS_MGT_BASE + 0x04)
+#define RS_SCAN_GEN         (RS_MGT_BASE + 0x05)
+#define RS_GRP_INVALID      (RS_MGT_BASE + 0x08)
+#define RS_GRP_EXISTS       (RS_MGT_BASE + 0x09)
+#define RS_GRP_LIMIT        (RS_MGT_BASE + 0x0A)
+#define RS_GRP_INTLV        (RS_MGT_BASE + 0x0B)
+#define RS_GRP_SPAN         (RS_MGT_BASE + 0x0C)
+#define RS_GRP_TYPE         (RS_MGT_BASE + 0x0D)
+#define RS_GRP_MEMBERS      (RS_MGT_BASE + 0x0E)
+#define RS_GRP_COMMIT       (RS_MGT_BASE + 0x0F)
+#define RS_GRP_REBUILD      (RS_MGT_BASE + 0x10)
+#define RS_GRP_REBUILD_TYPE (RS_MGT_BASE + 0x11)
+#define RS_GRP_BLOCK_SIZE   (RS_MGT_BASE + 0x12)
+#define RS_CFG_SAVE         (RS_MGT_BASE + 0x14)
+#define RS_PART_LAST        (RS_MGT_BASE + 0x18)
+#define RS_ELEM_INVALID     (RS_MGT_BASE + 0x19)
+#define RS_PART_MAPPED      (RS_MGT_BASE + 0x1A)
+#define RS_PART_TARGET      (RS_MGT_BASE + 0x1B)
+#define RS_PART_LUN         (RS_MGT_BASE + 0x1C)
+#define RS_PART_DUP         (RS_MGT_BASE + 0x1D)
+#define RS_PART_NOMAP       (RS_MGT_BASE + 0x1E)
+#define RS_PART_MAX         (RS_MGT_BASE + 0x1F)
+#define RS_PART_CAP         (RS_MGT_BASE + 0x20)
+#define RS_PART_STATE       (RS_MGT_BASE + 0x21)
+#define RS_TEST_IN_PROG     (RS_MGT_BASE + 0x22)
+#define RS_METRICS_ERROR    (RS_MGT_BASE + 0x23)
+#define RS_HS_ERROR         (RS_MGT_BASE + 0x24)
+#define RS_NO_METRICS_TEST  (RS_MGT_BASE + 0x25)
+#define RS_BAD_PARAM        (RS_MGT_BASE + 0x26)
+#define RS_GRP_MEMBER_SIZE  (RS_MGT_BASE + 0x27)
+#define RS_FLS_BASE         0xB0        /*! base of VDA flash errors         */
+#define RS_FLS_ERR_AREA     (RS_FLS_BASE + 0x00)
+#define RS_FLS_ERR_BUSY     (RS_FLS_BASE + 0x01)
+#define RS_FLS_ERR_RANGE    (RS_FLS_BASE + 0x02)
+#define RS_FLS_ERR_BEGIN    (RS_FLS_BASE + 0x03)
+#define RS_FLS_ERR_CHECK    (RS_FLS_BASE + 0x04)
+#define RS_FLS_ERR_FAIL     (RS_FLS_BASE + 0x05)
+#define RS_FLS_ERR_RSRC     (RS_FLS_BASE + 0x06)
+#define RS_FLS_ERR_NOFILE   (RS_FLS_BASE + 0x07)
+#define RS_FLS_ERR_FSIZE    (RS_FLS_BASE + 0x08)
+#define RS_CFG_BASE         0xC0        /*! base of VDA configuration errors */
+#define RS_CFG_ERR_BUSY     (RS_CFG_BASE + 0)
+#define RS_CFG_ERR_SGE      (RS_CFG_BASE + 1)
+#define RS_CFG_ERR_DATE     (RS_CFG_BASE + 2)
+#define RS_CFG_ERR_TIME     (RS_CFG_BASE + 3)
+#define RS_DEGRADED         0xFB        /*! degraded mode                    */
+#define RS_CLI_INTERNAL     0xFC        /*! VDA CLI internal error           */
+#define RS_VDA_INTERNAL     0xFD        /*! catch-all                        */
+#define RS_PENDING          0xFE        /*! pending, not started             */
+#define RS_STARTED          0xFF        /*! started                          */
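+
+/* Illustrative helpers (an editorial sketch, not part of the original
+ * patch) showing how the ranges above partition: SCSI-style completions
+ * start at zero, management errors begin at RS_MGT_BASE, flash errors at
+ * RS_FLS_BASE, and configuration errors at RS_CFG_BASE.  The guard macro
+ * is hypothetical.
+ */
+#ifdef ESAS2R_EDITOR_EXAMPLES
+static inline bool vda_stat_is_mgt_error(u8 stat)
+{
+	return stat >= RS_MGT_BASE && stat < RS_FLS_BASE;
+}
+
+static inline bool vda_stat_is_flash_error(u8 stat)
+{
+	return stat >= RS_FLS_BASE && stat < RS_CFG_BASE;
+}
+#endif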
+
+
+/* flash request subfunctions.  these are used in both the IOCTL and the
+ * driver-firmware interface (VDA_FUNC_FLASH). */
+
+#define VDA_FLASH_BEGINW  0x00
+#define VDA_FLASH_READ    0x01
+#define VDA_FLASH_WRITE   0x02
+#define VDA_FLASH_COMMIT  0x03
+#define VDA_FLASH_CANCEL  0x04
+#define VDA_FLASH_INFO    0x05
+#define VDA_FLASH_FREAD   0x06
+#define VDA_FLASH_FWRITE  0x07
+#define VDA_FLASH_FINFO   0x08
+
+
+/* IOCTL request subfunctions.  these identify the payload type for
+ * VDA_FUNC_IOCTL.
+ */
+
+#define VDA_IOCTL_HBA     0x00
+#define VDA_IOCTL_CSMI    0x01
+#define VDA_IOCTL_SMP     0x02
+
+struct __packed atto_vda_devinfo {
+	struct atto_dev_addr dev_addr;
+	u8 vendor_id[8];
+	u8 product_id[16];
+	u8 revision[4];
+	u64 capacity;
+	u32 block_size;
+	u8 dev_type;
+
+	union {
+		u8 dev_status;
+	    #define VDADEVSTAT_INVALID   0x00
+	    #define VDADEVSTAT_CORRUPT   VDADEVSTAT_INVALID
+	    #define VDADEVSTAT_ASSIGNED  0x01
+	    #define VDADEVSTAT_SPARE     0x02
+	    #define VDADEVSTAT_UNAVAIL   0x03
+	    #define VDADEVSTAT_PT_MAINT  0x04
+	    #define VDADEVSTAT_LCLSPARE  0x05
+	    #define VDADEVSTAT_UNUSEABLE 0x06
+	    #define VDADEVSTAT_AVAIL     0xFF
+
+		u8 op_ctrl;
+	    #define VDA_DEV_OP_CTRL_START   0x01
+	    #define VDA_DEV_OP_CTRL_HALT    0x02
+	    #define VDA_DEV_OP_CTRL_RESUME  0x03
+	    #define VDA_DEV_OP_CTRL_CANCEL  0x04
+	};
+
+	u8 member_state;
+	#define VDAMBRSTATE_ONLINE   0x00
+	#define VDAMBRSTATE_DEGRADED 0x01
+	#define VDAMBRSTATE_UNAVAIL  0x02
+	#define VDAMBRSTATE_FAULTED  0x03
+	#define VDAMBRSTATE_MISREAD  0x04
+	#define VDAMBRSTATE_INCOMPAT 0x05
+
+	u8 operation;
+	#define VDAOP_NONE           0x00
+	#define VDAOP_REBUILD        0x01
+	#define VDAOP_ERASE          0x02
+	#define VDAOP_PATTERN        0x03
+	#define VDAOP_CONVERSION     0x04
+	#define VDAOP_FULL_INIT      0x05
+	#define VDAOP_QUICK_INIT     0x06
+	#define VDAOP_SECT_SCAN      0x07
+	#define VDAOP_SECT_SCAN_PARITY      0x08
+	#define VDAOP_SECT_SCAN_PARITY_FIX  0x09
+	#define VDAOP_RECOV_REBUILD  0x0A
+
+	u8 op_status;
+	#define VDAOPSTAT_OK         0x00
+	#define VDAOPSTAT_FAULTED    0x01
+	#define VDAOPSTAT_HALTED     0x02
+	#define VDAOPSTAT_INT        0x03
+
+	u8 progress; /* 0 - 100% */
+	u16 ses_dev_index;
+	#define VDASESDI_INVALID     0xFFFF
+
+	u8 serial_no[32];
+
+	union {
+		u16 target_id;
+	#define VDATGTID_INVALID     0xFFFF
+
+		u16 features_mask;
+	};
+
+	u16 lun;
+	u16 features;
+	#define VDADEVFEAT_ENC_SERV  0x0001
+	#define VDADEVFEAT_IDENT     0x0002
+	#define VDADEVFEAT_DH_SUPP   0x0004
+	#define VDADEVFEAT_PHYS_ID   0x0008
+
+	u8 ses_element_id;
+	u8 link_speed;
+	#define VDALINKSPEED_UNKNOWN 0x00
+	#define VDALINKSPEED_1GB     0x01
+	#define VDALINKSPEED_1_5GB   0x02
+	#define VDALINKSPEED_2GB     0x03
+	#define VDALINKSPEED_3GB     0x04
+	#define VDALINKSPEED_4GB     0x05
+	#define VDALINKSPEED_6GB     0x06
+	#define VDALINKSPEED_8GB     0x07
+
+	u16 phys_target_id;
+	u8 reserved[2];
+};
+
+
+/*! struct atto_vda_devinfo2 is a replacement for atto_vda_devinfo.  it
+ * extends beyond the 0x70 bytes allowed in atto_vda_mgmt_req; therefore,
+ * the entire structure is DMAed between the firmware and the host buffer,
+ * and the data is always in little-endian format.  (an access sketch
+ * follows the structure definition below.)
+ */
+
+struct __packed atto_vda_devinfo2 {
+	struct atto_dev_addr dev_addr;
+	u8 vendor_id[8];
+	u8 product_id[16];
+	u8 revision[4];
+	u64 capacity;
+	u32 block_size;
+	u8 dev_type;
+	u8 dev_status;
+	u8 member_state;
+	u8 operation;
+	u8 op_status;
+	u8 progress;
+	u16 ses_dev_index;
+	u8 serial_no[32];
+	union {
+		u16 target_id;
+		u16 features_mask;
+	};
+
+	u16 lun;
+	u16 features;
+	u8 ses_element_id;
+	u8 link_speed;
+	u16 phys_target_id;
+	u8 reserved[2];
+
+/* This is where fields specific to struct atto_vda_devinfo2 begin.  Note
+ * that the structure version started at one so applications that overlay
+ * this structure with atto_vda_devinfo in a union can differentiate them
+ * if desired.
+ */
+
+	u8 version;
+	#define VDADEVINFO_VERSION0         0x00
+	#define VDADEVINFO_VERSION1         0x01
+	#define VDADEVINFO_VERSION2         0x02
+	#define VDADEVINFO_VERSION3         0x03
+	#define VDADEVINFO_VERSION          VDADEVINFO_VERSION3
+
+	u8 reserved2[3];
+
+	/* sector scanning fields */
+
+	u32 ss_curr_errors;
+	u64 ss_curr_scanned;
+	u32 ss_curr_recvrd;
+	u32 ss_scan_length;
+	u32 ss_total_errors;
+	u32 ss_total_recvrd;
+	u32 ss_num_scans;
+
+	/* grp_name was added in version 2 of this structure. */
+
+	char grp_name[15];
+	u8 reserved3[4];
+
+	/* dev_addr_list was added in version 3 of this structure. */
+
+	u8 num_dev_addr;
+	struct atto_dev_addr2 dev_addr_list[8];
+};
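+
+/* A hedged access sketch (editorial, not part of the original patch) for
+ * the endianness and versioning rules noted before the structure: a
+ * big-endian host must byte-swap multi-byte fields of the DMAed buffer,
+ * and the version byte gates which trailing fields exist.  The guard
+ * macro is hypothetical.
+ */
+#ifdef ESAS2R_EDITOR_EXAMPLES
+#include <asm/byteorder.h>
+static inline u64 atto_devinfo2_capacity(const struct atto_vda_devinfo2 *di)
+{
+	return le64_to_cpu((__force __le64)di->capacity);
+}
+
+static inline bool atto_devinfo2_has_grp_name(const struct atto_vda_devinfo2 *di)
+{
+	return di->version >= VDADEVINFO_VERSION2; /* grp_name added in v2 */
+}
+#endif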
+
+
+struct __packed atto_vda_grp_info {
+	u8 grp_index;
+	#define VDA_MAX_RAID_GROUPS         32
+
+	char grp_name[15];
+	u64 capacity;
+	u32 block_size;
+	u32 interleave;
+	u8 type;
+	#define VDA_GRP_TYPE_RAID0          0
+	#define VDA_GRP_TYPE_RAID1          1
+	#define VDA_GRP_TYPE_RAID4          4
+	#define VDA_GRP_TYPE_RAID5          5
+	#define VDA_GRP_TYPE_RAID6          6
+	#define VDA_GRP_TYPE_RAID10         10
+	#define VDA_GRP_TYPE_RAID40         40
+	#define VDA_GRP_TYPE_RAID50         50
+	#define VDA_GRP_TYPE_RAID60         60
+	#define VDA_GRP_TYPE_DVRAID_HS      252
+	#define VDA_GRP_TYPE_DVRAID_NOHS    253
+	#define VDA_GRP_TYPE_JBOD           254
+	#define VDA_GRP_TYPE_SPARE          255
+
+	union {
+		u8 status;
+	    #define VDA_GRP_STAT_INVALID  0x00
+	    #define VDA_GRP_STAT_NEW      0x01
+	    #define VDA_GRP_STAT_WAITING  0x02
+	    #define VDA_GRP_STAT_ONLINE   0x03
+	    #define VDA_GRP_STAT_DEGRADED 0x04
+	    #define VDA_GRP_STAT_OFFLINE  0x05
+	    #define VDA_GRP_STAT_DELETED  0x06
+	    #define VDA_GRP_STAT_RECOV_BASIC    0x07
+	    #define VDA_GRP_STAT_RECOV_EXTREME  0x08
+
+		u8 op_ctrl;
+	    #define VDA_GRP_OP_CTRL_START   0x01
+	    #define VDA_GRP_OP_CTRL_HALT    0x02
+	    #define VDA_GRP_OP_CTRL_RESUME  0x03
+	    #define VDA_GRP_OP_CTRL_CANCEL  0x04
+	};
+
+	u8 rebuild_state;
+	#define VDA_RBLD_NONE      0x00
+	#define VDA_RBLD_REBUILD   0x01
+	#define VDA_RBLD_ERASE     0x02
+	#define VDA_RBLD_PATTERN   0x03
+	#define VDA_RBLD_CONV      0x04
+	#define VDA_RBLD_FULL_INIT 0x05
+	#define VDA_RBLD_QUICK_INIT 0x06
+	#define VDA_RBLD_SECT_SCAN 0x07
+	#define VDA_RBLD_SECT_SCAN_PARITY     0x08
+	#define VDA_RBLD_SECT_SCAN_PARITY_FIX 0x09
+	#define VDA_RBLD_RECOV_REBUILD 0x0A
+	#define VDA_RBLD_RECOV_BASIC   0x0B
+	#define VDA_RBLD_RECOV_EXTREME 0x0C
+
+	u8 span_depth;
+	u8 progress;
+	u8 mirror_width;
+	u8 stripe_width;
+	u8 member_cnt;
+
+	union {
+		u16 members[32];
+	#define VDA_MEMBER_MISSING  0xFFFF
+	#define VDA_MEMBER_NEW      0xFFFE
+		u16 features_mask;
+	};
+
+	u16 features;
+	#define VDA_GRP_FEAT_HOTSWAP    0x0001
+	#define VDA_GRP_FEAT_SPDRD_MASK 0x0006
+	#define VDA_GRP_FEAT_SPDRD_DIS  0x0000
+	#define VDA_GRP_FEAT_SPDRD_ENB  0x0002
+	#define VDA_GRP_FEAT_SPDRD_AUTO 0x0004
+	#define VDA_GRP_FEAT_IDENT      0x0008
+	#define VDA_GRP_FEAT_RBLDPRI_MASK 0x0030
+	#define VDA_GRP_FEAT_RBLDPRI_LOW  0x0010
+	#define VDA_GRP_FEAT_RBLDPRI_SAME 0x0020
+	#define VDA_GRP_FEAT_RBLDPRI_HIGH 0x0030
+	#define VDA_GRP_FEAT_WRITE_CACHE  0x0040
+	#define VDA_GRP_FEAT_RBLD_RESUME  0x0080
+	#define VDA_GRP_FEAT_SECT_RESUME  0x0100
+	#define VDA_GRP_FEAT_INIT_RESUME  0x0200
+	#define VDA_GRP_FEAT_SSD          0x0400
+	#define VDA_GRP_FEAT_BOOT_DEV     0x0800
+
+	/*
+	 * for backward compatibility, a prefetch value of zero means the
+	 * setting is ignored/unsupported.  therefore, the firmware-supported
+	 * values 0-6 are incremented to 1-7 (see the sketch following this
+	 * structure).
+	 */
+
+	u8 prefetch;
+	u8 op_status;
+	#define VDAGRPOPSTAT_MASK       0x0F
+	#define VDAGRPOPSTAT_INVALID    0x00
+	#define VDAGRPOPSTAT_OK         0x01
+	#define VDAGRPOPSTAT_FAULTED    0x02
+	#define VDAGRPOPSTAT_HALTED     0x03
+	#define VDAGRPOPSTAT_INT        0x04
+	#define VDAGRPOPPROC_MASK       0xF0
+	#define VDAGRPOPPROC_STARTABLE  0x10
+	#define VDAGRPOPPROC_CANCELABLE 0x20
+	#define VDAGRPOPPROC_RESUMABLE  0x40
+	#define VDAGRPOPPROC_HALTABLE   0x80
+	u8 over_provision;
+	u8 over_provision;
+	u8 reserved[3];
+};
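+
+/* An editorial pair of helpers (not part of the original patch) restating
+ * the prefetch encoding described in the structure above: firmware values
+ * 0-6 travel on the wire as 1-7 so that zero keeps its legacy meaning of
+ * ignored/unsupported.  The guard macro is hypothetical.
+ */
+#ifdef ESAS2R_EDITOR_EXAMPLES
+static inline u8 vda_prefetch_to_wire(u8 fw_value)	/* 0-6 -> 1-7 */
+{
+	return fw_value + 1;
+}
+
+static inline u8 vda_prefetch_from_wire(u8 wire_value)	/* 1-7 -> 0-6 */
+{
+	return wire_value ? wire_value - 1 : 0;	/* 0 means unsupported */
+}
+#endif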
+
+
+struct __packed atto_vdapart_info {
+	u8 part_no;
+	#define VDA_MAX_PARTITIONS   128
+
+	char grp_name[15];
+	u64 part_size;
+	u64 start_lba;
+	u32 block_size;
+	u16 target_id;
+	u8 LUN;
+	char serial_no[41];
+	u8 features;
+	#define VDAPI_FEAT_WRITE_CACHE   0x01
+
+	u8 reserved[7];
+};
+
+
+struct __packed atto_vda_dh_info {
+	u8 req_type;
+	#define VDADH_RQTYPE_CACHE      0x01
+	#define VDADH_RQTYPE_FETCH      0x02
+	#define VDADH_RQTYPE_SET_STAT   0x03
+	#define VDADH_RQTYPE_GET_STAT   0x04
+
+	u8 req_qual;
+	#define VDADH_RQQUAL_SMART      0x01
+	#define VDADH_RQQUAL_MEDDEF     0x02
+	#define VDADH_RQQUAL_INFOEXC    0x04
+
+	u8 num_smart_attribs;
+	u8 status;
+	#define VDADH_STAT_DISABLE      0x00
+	#define VDADH_STAT_ENABLE       0x01
+
+	u32 med_defect_cnt;
+	u32 info_exc_cnt;
+	u8 smart_status;
+	#define VDADH_SMARTSTAT_OK      0x00
+	#define VDADH_SMARTSTAT_ERR     0x01
+
+	u8 reserved[35];
+	struct atto_vda_sge sge[1];
+};
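+
+/* An editorial aside (not part of the original patch): the trailing
+ * "sge[1]" member here and in the request structures below is the pre-C99
+ * variable-length trailing-array idiom; callers allocate the structure
+ * with room for as many SGEs as the transfer needs and index past the
+ * declared bound.
+ */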
+
+
+struct __packed atto_vda_dh_smart {
+	u8 attrib_id;
+	u8 current_val;
+	u8 worst;
+	u8 threshold;
+	u8 raw_data[6];
+	u8 raw_attrib_status;
+	#define VDADHSM_RAWSTAT_PREFAIL_WARRANTY        0x01
+	#define VDADHSM_RAWSTAT_ONLINE_COLLECTION       0x02
+	#define VDADHSM_RAWSTAT_PERFORMANCE_ATTR        0x04
+	#define VDADHSM_RAWSTAT_ERROR_RATE_ATTR         0x08
+	#define VDADHSM_RAWSTAT_EVENT_COUNT_ATTR        0x10
+	#define VDADHSM_RAWSTAT_SELF_PRESERVING_ATTR    0x20
+
+	u8 calc_attrib_status;
+	#define VDADHSM_CALCSTAT_UNKNOWN                0x00
+	#define VDADHSM_CALCSTAT_GOOD                   0x01
+	#define VDADHSM_CALCSTAT_PREFAIL                0x02
+	#define VDADHSM_CALCSTAT_OLDAGE                 0x03
+
+	u8 reserved[4];
+};
+
+
+struct __packed atto_vda_metrics_info {
+	u8 data_version;
+	#define VDAMET_VERSION0         0x00
+	#define VDAMET_VERSION          VDAMET_VERSION0
+
+	u8 metrics_action;
+	#define VDAMET_METACT_NONE      0x00
+	#define VDAMET_METACT_START     0x01
+	#define VDAMET_METACT_STOP      0x02
+	#define VDAMET_METACT_RETRIEVE  0x03
+	#define VDAMET_METACT_CLEAR     0x04
+
+	u8 test_action;
+	#define VDAMET_TSTACT_NONE              0x00
+	#define VDAMET_TSTACT_STRT_INIT         0x01
+	#define VDAMET_TSTACT_STRT_READ         0x02
+	#define VDAMET_TSTACT_STRT_VERIFY       0x03
+	#define VDAMET_TSTACT_STRT_INIT_VERIFY  0x04
+	#define VDAMET_TSTACT_STOP              0x05
+
+	u8 num_dev_indexes;
+	#define VDAMET_ALL_DEVICES      0xFF
+
+	u16 dev_indexes[32];
+	u8 reserved[12];
+	struct atto_vda_sge sge[1];
+};
+
+
+struct __packed atto_vda_metrics_data {
+	u16 dev_index;
+	u16 length;
+	#define VDAMD_LEN_LAST          0x8000
+	#define VDAMD_LEN_MASK          0x0FFF
+
+	u32 flags;
+	#define VDAMDF_RUN          0x00000007
+	#define VDAMDF_RUN_READ     0x00000001
+	#define VDAMDF_RUN_WRITE    0x00000002
+	#define VDAMDF_RUN_ALL      0x00000004
+	#define VDAMDF_READ         0x00000010
+	#define VDAMDF_WRITE        0x00000020
+	#define VDAMDF_ALL          0x00000040
+	#define VDAMDF_DRIVETEST    0x40000000
+	#define VDAMDF_NEW          0x80000000
+
+	u64 total_read_data;
+	u64 total_write_data;
+	u64 total_read_io;
+	u64 total_write_io;
+	u64 read_start_time;
+	u64 read_stop_time;
+	u64 write_start_time;
+	u64 write_stop_time;
+	u64 read_maxio_time;
+	u64 write_maxio_time;
+	u64 read_totalio_time;
+	u64 write_totalio_time;
+	u64 read_total_errs;
+	u64 write_total_errs;
+	u64 read_recvrd_errs;
+	u64 write_recvrd_errs;
+	u64 miscompares;
+};
+
+
+struct __packed atto_vda_schedule_info {
+	u8 schedule_type;
+	#define VDASI_SCHTYPE_ONETIME   0x01
+	#define VDASI_SCHTYPE_DAILY     0x02
+	#define VDASI_SCHTYPE_WEEKLY    0x03
+
+	u8 operation;
+	#define VDASI_OP_NONE           0x00
+	#define VDASI_OP_CREATE         0x01
+	#define VDASI_OP_CANCEL         0x02
+
+	u8 hour;
+	u8 minute;
+	u8 day;
+	#define VDASI_DAY_NONE          0x00
+
+	u8 progress;
+	#define VDASI_PROG_NONE         0xFF
+
+	u8 event_type;
+	#define VDASI_EVTTYPE_SECT_SCAN             0x01
+	#define VDASI_EVTTYPE_SECT_SCAN_PARITY      0x02
+	#define VDASI_EVTTYPE_SECT_SCAN_PARITY_FIX  0x03
+
+	u8 recurrences;
+	#define VDASI_RECUR_FOREVER     0x00
+
+	u32 id;
+	#define VDASI_ID_NONE           0x00
+
+	char grp_name[15];
+	u8 reserved[85];
+};
+
+
+struct __packed atto_vda_n_vcache_info {
+	u8 super_cap_status;
+	#define VDANVCI_SUPERCAP_NOT_PRESENT       0x00
+	#define VDANVCI_SUPERCAP_FULLY_CHARGED     0x01
+	#define VDANVCI_SUPERCAP_NOT_CHARGED       0x02
+
+	u8 nvcache_module_status;
+	#define VDANVCI_NVCACHEMODULE_NOT_PRESENT  0x00
+	#define VDANVCI_NVCACHEMODULE_PRESENT      0x01
+
+	u8 protection_mode;
+	#define VDANVCI_PROTMODE_HI_PROTECT        0x00
+	#define VDANVCI_PROTMODE_HI_PERFORM        0x01
+
+	u8 reserved[109];
+};
+
+
+struct __packed atto_vda_buzzer_info {
+	u8 status;
+	#define VDABUZZI_BUZZER_OFF           0x00
+	#define VDABUZZI_BUZZER_ON            0x01
+	#define VDABUZZI_BUZZER_LAST          0x02
+
+	u8 reserved[3];
+	u32 duration;
+	#define VDABUZZI_DURATION_INDEFINITE  0xffffffff
+
+	u8 reserved2[104];
+};
+
+
+struct  __packed atto_vda_adapter_info {
+	u8 version;
+	#define VDAADAPINFO_VERSION0         0x00
+	#define VDAADAPINFO_VERSION          VDAADAPINFO_VERSION0
+
+	u8 reserved;
+	signed short utc_offset;
+	u32 utc_time;
+	u32 features;
+	#define VDA_ADAP_FEAT_IDENT     0x0001
+	#define VDA_ADAP_FEAT_BUZZ_ERR  0x0002
+	#define VDA_ADAP_FEAT_UTC_TIME  0x0004
+
+	u32 valid_features;
+	char active_config[33];
+	u8 temp_count;
+	u8 fan_count;
+	u8 reserved3[61];
+};
+
+
+struct __packed atto_vda_temp_info {
+	u8 temp_index;
+	u8 max_op_temp;
+	u8 min_op_temp;
+	u8 op_temp_warn;
+	u8 temperature;
+	u8 type;
+	#define VDA_TEMP_TYPE_CPU  1
+
+	u8 reserved[106];
+};
+
+
+struct __packed atto_vda_fan_info {
+	u8 fan_index;
+	u8 status;
+	#define VDA_FAN_STAT_UNKNOWN 0
+	#define VDA_FAN_STAT_NORMAL  1
+	#define VDA_FAN_STAT_FAIL    2
+
+	u16 crit_threshold;
+	u16 warn_threshold;
+	u16 speed;
+	u8 reserved[104];
+};
+
+
+/* VDA management commands */
+
+#define VDAMGT_DEV_SCAN         0x00
+#define VDAMGT_DEV_INFO         0x01
+#define VDAMGT_DEV_CLEAN        0x02
+#define VDAMGT_DEV_IDENTIFY     0x03
+#define VDAMGT_DEV_IDENTSTOP    0x04
+#define VDAMGT_DEV_PT_INFO      0x05
+#define VDAMGT_DEV_FEATURES     0x06
+#define VDAMGT_DEV_PT_FEATURES  0x07
+#define VDAMGT_DEV_HEALTH_REQ   0x08
+#define VDAMGT_DEV_METRICS      0x09
+#define VDAMGT_DEV_INFO2        0x0A
+#define VDAMGT_DEV_OPERATION    0x0B
+#define VDAMGT_DEV_INFO2_BYADDR 0x0C
+#define VDAMGT_GRP_INFO         0x10
+#define VDAMGT_GRP_CREATE       0x11
+#define VDAMGT_GRP_DELETE       0x12
+#define VDAMGT_ADD_STORAGE      0x13
+#define VDAMGT_MEMBER_ADD       0x14
+#define VDAMGT_GRP_COMMIT       0x15
+#define VDAMGT_GRP_REBUILD      0x16
+#define VDAMGT_GRP_COMMIT_INIT  0x17
+#define VDAMGT_QUICK_RAID       0x18
+#define VDAMGT_GRP_FEATURES     0x19
+#define VDAMGT_GRP_COMMIT_INIT_AUTOMAP  0x1A
+#define VDAMGT_QUICK_RAID_INIT_AUTOMAP  0x1B
+#define VDAMGT_GRP_OPERATION    0x1C
+#define VDAMGT_CFG_SAVE         0x20
+#define VDAMGT_LAST_ERROR       0x21
+#define VDAMGT_ADAP_INFO        0x22
+#define VDAMGT_ADAP_FEATURES    0x23
+#define VDAMGT_TEMP_INFO        0x24
+#define VDAMGT_FAN_INFO         0x25
+#define VDAMGT_PART_INFO        0x30
+#define VDAMGT_PART_MAP         0x31
+#define VDAMGT_PART_UNMAP       0x32
+#define VDAMGT_PART_AUTOMAP     0x33
+#define VDAMGT_PART_SPLIT       0x34
+#define VDAMGT_PART_MERGE       0x35
+#define VDAMGT_SPARE_LIST       0x40
+#define VDAMGT_SPARE_ADD        0x41
+#define VDAMGT_SPARE_REMOVE     0x42
+#define VDAMGT_LOCAL_SPARE_ADD  0x43
+#define VDAMGT_SCHEDULE_EVENT   0x50
+#define VDAMGT_SCHEDULE_INFO    0x51
+#define VDAMGT_NVCACHE_INFO     0x60
+#define VDAMGT_NVCACHE_SET      0x61
+#define VDAMGT_BUZZER_INFO      0x70
+#define VDAMGT_BUZZER_SET       0x71
+
+
+struct __packed atto_vda_ae_hdr {
+	u8 bylength;
+	u8 byflags;
+	#define VDAAE_HDRF_EVENT_ACK    0x01
+
+	u8 byversion;
+	#define VDAAE_HDR_VER_0         0
+
+	u8 bytype;
+	#define VDAAE_HDR_TYPE_RAID     1
+	#define VDAAE_HDR_TYPE_LU       2
+	#define VDAAE_HDR_TYPE_DISK     3
+	#define VDAAE_HDR_TYPE_RESET    4
+	#define VDAAE_HDR_TYPE_LOG_INFO 5
+	#define VDAAE_HDR_TYPE_LOG_WARN 6
+	#define VDAAE_HDR_TYPE_LOG_CRIT 7
+	#define VDAAE_HDR_TYPE_LOG_FAIL 8
+	#define VDAAE_HDR_TYPE_NVC      9
+	#define VDAAE_HDR_TYPE_TLG_INFO 10
+	#define VDAAE_HDR_TYPE_TLG_WARN 11
+	#define VDAAE_HDR_TYPE_TLG_CRIT 12
+	#define VDAAE_HDR_TYPE_PWRMGT   13
+	#define VDAAE_HDR_TYPE_MUTE     14
+	#define VDAAE_HDR_TYPE_DEV      15
+};
+
+
+struct  __packed atto_vda_ae_raid {
+	struct atto_vda_ae_hdr hdr;
+	u32 dwflags;
+	#define VDAAE_GROUP_STATE   0x00000001
+	#define VDAAE_RBLD_STATE    0x00000002
+	#define VDAAE_RBLD_PROG     0x00000004
+	#define VDAAE_MEMBER_CHG    0x00000008
+	#define VDAAE_PART_CHG      0x00000010
+	#define VDAAE_MEM_STATE_CHG 0x00000020
+
+	u8 bygroup_state;
+	#define VDAAE_RAID_INVALID  0
+	#define VDAAE_RAID_NEW      1
+	#define VDAAE_RAID_WAITING  2
+	#define VDAAE_RAID_ONLINE   3
+	#define VDAAE_RAID_DEGRADED 4
+	#define VDAAE_RAID_OFFLINE  5
+	#define VDAAE_RAID_DELETED  6
+	#define VDAAE_RAID_BASIC    7
+	#define VDAAE_RAID_EXTREME  8
+	#define VDAAE_RAID_UNKNOWN  9
+
+	u8 byrebuild_state;
+	#define VDAAE_RBLD_NONE       0
+	#define VDAAE_RBLD_REBUILD    1
+	#define VDAAE_RBLD_ERASE      2
+	#define VDAAE_RBLD_PATTERN    3
+	#define VDAAE_RBLD_CONV       4
+	#define VDAAE_RBLD_FULL_INIT  5
+	#define VDAAE_RBLD_QUICK_INIT 6
+	#define VDAAE_RBLD_SECT_SCAN  7
+	#define VDAAE_RBLD_SECT_SCAN_PARITY     8
+	#define VDAAE_RBLD_SECT_SCAN_PARITY_FIX 9
+	#define VDAAE_RBLD_RECOV_REBUILD 10
+	#define VDAAE_RBLD_UNKNOWN    11
+
+	u8 byrebuild_progress;
+	u8 op_status;
+	#define VDAAE_GRPOPSTAT_MASK       0x0F
+	#define VDAAE_GRPOPSTAT_INVALID    0x00
+	#define VDAAE_GRPOPSTAT_OK         0x01
+	#define VDAAE_GRPOPSTAT_FAULTED    0x02
+	#define VDAAE_GRPOPSTAT_HALTED     0x03
+	#define VDAAE_GRPOPSTAT_INT        0x04
+	#define VDAAE_GRPOPPROC_MASK       0xF0
+	#define VDAAE_GRPOPPROC_STARTABLE  0x10
+	#define VDAAE_GRPOPPROC_CANCELABLE 0x20
+	#define VDAAE_GRPOPPROC_RESUMABLE  0x40
+	#define VDAAE_GRPOPPROC_HALTABLE   0x80
+	char acname[15];
+	u8 byreserved;
+	u8 byreserved2[0x80 - 0x1C];
+};
+
+
+struct __packed atto_vda_ae_lu_tgt_lun {
+	u16 wtarget_id;
+	u8 bylun;
+	u8 byreserved;
+};
+
+
+struct __packed atto_vda_ae_lu_tgt_lun_raid {
+	u16 wtarget_id;
+	u8 bylun;
+	u8 byreserved;
+	u32 dwinterleave;
+	u32 dwblock_size;
+};
+
+
+struct __packed atto_vda_ae_lu {
+	struct atto_vda_ae_hdr hdr;
+	u32 dwevent;
+	#define VDAAE_LU_DISC        0x00000001
+	#define VDAAE_LU_LOST        0x00000002
+	#define VDAAE_LU_STATE       0x00000004
+	#define VDAAE_LU_PASSTHROUGH 0x10000000
+	#define VDAAE_LU_PHYS_ID     0x20000000
+
+	u8 bystate;
+	#define VDAAE_LU_UNDEFINED        0
+	#define VDAAE_LU_NOT_PRESENT      1
+	#define VDAAE_LU_OFFLINE          2
+	#define VDAAE_LU_ONLINE           3
+	#define VDAAE_LU_DEGRADED         4
+	#define VDAAE_LU_FACTORY_DISABLED 5
+	#define VDAAE_LU_DELETED          6
+	#define VDAAE_LU_BUSSCAN          7
+	#define VDAAE_LU_UNKNOWN          8
+
+	u8 byreserved;
+	u16 wphys_target_id;
+
+	union {
+		struct atto_vda_ae_lu_tgt_lun tgtlun;
+		struct atto_vda_ae_lu_tgt_lun_raid tgtlun_raid;
+	} id;
+};
+
+
+struct __packed atto_vda_ae_disk {
+	struct atto_vda_ae_hdr hdr;
+};
+
+
+#define VDAAE_LOG_STRSZ 64
+
+struct __packed atto_vda_ae_log {
+	struct atto_vda_ae_hdr hdr;
+	char aclog_ascii[VDAAE_LOG_STRSZ];
+};
+
+
+#define VDAAE_TLG_STRSZ 56
+
+struct __packed atto_vda_ae_timestamp_log {
+	struct atto_vda_ae_hdr hdr;
+	u32 dwtimestamp;
+	char aclog_ascii[VDAAE_TLG_STRSZ];
+};
+
+
+struct __packed atto_vda_ae_nvc {
+	struct atto_vda_ae_hdr hdr;
+};
+
+
+struct __packed atto_vda_ae_dev {
+	struct atto_vda_ae_hdr hdr;
+	struct atto_dev_addr devaddr;
+};
+
+
+union atto_vda_ae {
+	struct atto_vda_ae_hdr hdr;
+	struct atto_vda_ae_disk disk;
+	struct atto_vda_ae_lu lu;
+	struct atto_vda_ae_raid raid;
+	struct atto_vda_ae_log log;
+	struct atto_vda_ae_timestamp_log tslog;
+	struct atto_vda_ae_nvc nvcache;
+	struct atto_vda_ae_dev dev;
+};
+
+
+struct __packed atto_vda_date_and_time {
+	u8 flags;
+	#define VDA_DT_DAY_MASK   0x07
+	#define VDA_DT_DAY_NONE   0x00
+	#define VDA_DT_DAY_SUN    0x01
+	#define VDA_DT_DAY_MON    0x02
+	#define VDA_DT_DAY_TUE    0x03
+	#define VDA_DT_DAY_WED    0x04
+	#define VDA_DT_DAY_THU    0x05
+	#define VDA_DT_DAY_FRI    0x06
+	#define VDA_DT_DAY_SAT    0x07
+	#define VDA_DT_PM         0x40
+	#define VDA_DT_MILITARY   0x80
+
+	u8 seconds;
+	u8 minutes;
+	u8 hours;
+	u8 day;
+	u8 month;
+	u16 year;
+};
+
+#define SGE_LEN_LIMIT   0x003FFFFF      /*! mask of segment length            */
+#define SGE_LEN_MAX     0x003FF000      /*! maximum segment length            */
+#define SGE_LAST        0x01000000      /*! last entry                        */
+#define SGE_ADDR_64     0x04000000      /*! 64-bit addressing flag            */
+#define SGE_CHAIN       0x80000000      /*! chain descriptor flag             */
+#define SGE_CHAIN_LEN   0x0000FFFF      /*! mask of length in chain entries   */
+#define SGE_CHAIN_SZ    0x00FF0000      /*! mask of size of chained buffer    */
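+
+/* A hedged sketch (editorial, not part of the original patch) of how the
+ * flag bits above combine with a byte count into one 32-bit SGE control
+ * word: the low 22 bits carry the length, and the upper bits mark the
+ * final entry and 64-bit addressing.  The guard macro is hypothetical and
+ * the choice to always set SGE_ADDR_64 is an assumption.
+ */
+#ifdef ESAS2R_EDITOR_EXAMPLES
+static inline u32 vda_sge_ctl(u32 byte_count, bool last)
+{
+	u32 ctl = (byte_count & SGE_LEN_LIMIT) | SGE_ADDR_64;
+
+	if (last)
+		ctl |= SGE_LAST;
+	return ctl;
+}
+#endif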
+
+
+struct __packed atto_vda_cfg_init {
+	struct atto_vda_date_and_time date_time;
+	u32 sgl_page_size;
+	u32 vda_version;
+	u32 fw_version;
+	u32 fw_build;
+	u32 fw_release;
+	u32 epoch_time;
+	u32 ioctl_tunnel;
+	#define VDA_ITF_MEM_RW           0x00000001
+	#define VDA_ITF_TRACE            0x00000002
+	#define VDA_ITF_SCSI_PASS_THRU   0x00000004
+	#define VDA_ITF_GET_DEV_ADDR     0x00000008
+	#define VDA_ITF_PHY_CTRL         0x00000010
+	#define VDA_ITF_CONN_CTRL        0x00000020
+	#define VDA_ITF_GET_DEV_INFO     0x00000040
+
+	u32 num_targets_backend;
+	u8 reserved[0x48];
+};
+
+
+/* configuration commands */
+
+#define VDA_CFG_INIT          0x00
+#define VDA_CFG_GET_INIT      0x01
+#define VDA_CFG_GET_INIT2     0x02
+
+
+/*! physical region descriptor (PRD) aka scatter/gather entry */
+
+struct __packed atto_physical_region_description {
+	u64 address;
+	u32 ctl_len;
+	#define PRD_LEN_LIMIT       0x003FFFFF
+	#define PRD_LEN_MAX         0x003FF000
+	#define PRD_NXT_PRD_CNT     0x0000007F
+	#define PRD_CHAIN           0x01000000
+	#define PRD_DATA            0x00000000
+	#define PRD_INT_SEL         0xF0000000
+	  #define PRD_INT_SEL_F0    0x00000000
+	  #define PRD_INT_SEL_F1    0x40000000
+	  #define PRD_INT_SEL_F2    0x80000000
+	  #define PRD_INT_SEL_F3    0xc0000000
+	  #define PRD_INT_SEL_SRAM  0x10000000
+	  #define PRD_INT_SEL_PBSR  0x20000000
+};
+
+/* Request types. NOTE that ALL requests have the same layout for the first
+ * few bytes.
+ */
+struct __packed atto_vda_req_header {
+	u32 length;
+	u8 function;
+	u8 variable1;
+	u8 chain_offset;
+	u8 sg_list_offset;
+	u32 handle;
+};
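+
+/* An editorial illustration (not part of the original patch) of the NOTE
+ * above: because every request type repeats these first twelve bytes,
+ * generic code can read the function code (or stamp the handle) through
+ * this header without knowing the concrete request type.  The guard macro
+ * is hypothetical.
+ */
+#ifdef ESAS2R_EDITOR_EXAMPLES
+static inline u8 vda_req_function(const void *req)
+{
+	return ((const struct atto_vda_req_header *)req)->function;
+}
+#endif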
+
+
+#define FCP_CDB_SIZE    16
+
+struct __packed atto_vda_scsi_req {
+	u32 length;
+	u8 function;  /* VDA_FUNC_SCSI */
+	u8 sense_len;
+	u8 chain_offset;
+	u8 sg_list_offset;
+	u32 handle;
+	u32 flags;
+     #define FCP_CMND_LUN_MASK    0x000000FF
+     #define FCP_CMND_TA_MASK     0x00000700
+      #define FCP_CMND_TA_SIMPL_Q 0x00000000
+      #define FCP_CMND_TA_HEAD_Q  0x00000100
+      #define FCP_CMND_TA_ORDRD_Q 0x00000200
+      #define FCP_CMND_TA_ACA     0x00000400
+     #define FCP_CMND_PRI_MASK    0x00007800
+     #define FCP_CMND_TM_MASK     0x00FF0000
+      #define FCP_CMND_ATS        0x00020000
+      #define FCP_CMND_CTS        0x00040000
+      #define FCP_CMND_LRS        0x00100000
+      #define FCP_CMND_TRS        0x00200000
+      #define FCP_CMND_CLA        0x00400000
+      #define FCP_CMND_TRM        0x00800000
+     #define FCP_CMND_DATA_DIR    0x03000000
+      #define FCP_CMND_WRD        0x01000000
+      #define FCP_CMND_RDD        0x02000000
+
+	u8 cdb[FCP_CDB_SIZE];
+	union {
+		struct __packed {
+			u64 ppsense_buf;
+			u16 target_id;
+			u8 iblk_cnt_prd;
+			u8 reserved;
+		};
+
+		struct atto_physical_region_description sense_buff_prd;
+	};
+
+	union {
+		struct atto_vda_sge sge[1];
+
+		u32 abort_handle;
+		u32 dwords[245];
+		struct atto_physical_region_description prd[1];
+	} u;
+};
+
+
+struct __packed atto_vda_flash_req {
+	u32 length;
+	u8 function; /* VDA_FUNC_FLASH */
+	u8 sub_func;
+	u8 chain_offset;
+	u8 sg_list_offset;
+	u32 handle;
+	u32 flash_addr;
+	u8 checksum;
+	u8 rsvd[3];
+
+	union {
+		struct {
+			char file_name[16]; /* 8.3 fname, NULL term, wc=* */
+			struct atto_vda_sge sge[1];
+		} file;
+
+		struct atto_vda_sge sge[1];
+		struct atto_physical_region_description prde[2];
+	} data;
+};
+
+
+struct __packed atto_vda_diag_req {
+	u32 length;
+	u8 function; /* VDA_FUNC_DIAG */
+	u8 sub_func;
+	#define VDA_DIAG_STATUS   0x00
+	#define VDA_DIAG_RESET    0x01
+	#define VDA_DIAG_PAUSE    0x02
+	#define VDA_DIAG_RESUME   0x03
+	#define VDA_DIAG_READ     0x04
+	#define VDA_DIAG_WRITE    0x05
+
+	u8 chain_offset;
+	u8 sg_list_offset;
+	u32 handle;
+	u32 rsvd;
+	u64 local_addr;
+	struct atto_vda_sge sge[1];
+};
+
+
+struct __packed atto_vda_ae_req {
+	u32 length;
+	u8 function; /* VDA_FUNC_AE */
+	u8 reserved1;
+	u8 chain_offset;
+	u8 sg_list_offset;
+	u32 handle;
+
+	union {
+		struct atto_vda_sge sge[1];
+		struct atto_physical_region_description prde[1];
+	};
+};
+
+
+struct __packed atto_vda_cli_req {
+	u32 length;
+	u8 function; /* VDA_FUNC_CLI */
+	u8 reserved1;
+	u8 chain_offset;
+	u8 sg_list_offset;
+	u32 handle;
+	u32 cmd_rsp_len;
+	struct atto_vda_sge sge[1];
+};
+
+
+struct __packed atto_vda_ioctl_req {
+	u32 length;
+	u8 function; /* VDA_FUNC_IOCTL */
+	u8 sub_func;
+	u8 chain_offset;
+	u8 sg_list_offset;
+	u32 handle;
+
+	union {
+		struct atto_vda_sge reserved_sge;
+		struct atto_physical_region_description reserved_prde;
+	};
+
+	union {
+		struct {
+			u32 ctrl_code;
+			u16 target_id;
+			u8 lun;
+			u8 reserved;
+		} csmi;
+	};
+
+	union {
+		struct atto_vda_sge sge[1];
+		struct atto_physical_region_description prde[1];
+	};
+};
+
+
+struct __packed atto_vda_cfg_req {
+	u32 length;
+	u8 function; /* VDA_FUNC_CFG */
+	u8 sub_func;
+	u8 rsvd1;
+	u8 sg_list_offset;
+	u32 handle;
+
+	union {
+		u8 bytes[116];
+		struct atto_vda_cfg_init init;
+		struct atto_vda_sge sge;
+		struct atto_physical_region_description prde;
+	} data;
+};
+
+
+struct __packed atto_vda_mgmt_req {
+	u32 length;
+	u8 function; /* VDA_FUNC_MGT */
+	u8 mgt_func;
+	u8 chain_offset;
+	u8 sg_list_offset;
+	u32 handle;
+	u8 scan_generation;
+	u8 payld_sglst_offset;
+	u16 dev_index;
+	u32 payld_length;
+	u32 pad;
+	union {
+		struct atto_vda_sge sge[2];
+		struct atto_physical_region_description prde[2];
+	};
+	struct atto_vda_sge payld_sge[1];
+};
+
+
+union atto_vda_req {
+	struct atto_vda_scsi_req scsi;
+	struct atto_vda_flash_req flash;
+	struct atto_vda_diag_req diag;
+	struct atto_vda_ae_req ae;
+	struct atto_vda_cli_req cli;
+	struct atto_vda_ioctl_req ioctl;
+	struct atto_vda_cfg_req cfg;
+	struct atto_vda_mgmt_req mgt;
+	u8 bytes[1024];
+};
+
+/* Outbound response structures */
+
+struct __packed atto_vda_scsi_rsp {
+	u8 scsi_stat;
+	u8 sense_len;
+	u8 rsvd[2];
+	u32 residual_length;
+};
+
+struct __packed atto_vda_flash_rsp {
+	u32 file_size;
+};
+
+struct __packed atto_vda_ae_rsp {
+	u32 length;
+};
+
+struct __packed atto_vda_cli_rsp {
+	u32 cmd_rsp_len;
+};
+
+struct __packed atto_vda_ioctl_rsp {
+	union {
+		struct {
+			u32 csmi_status;
+			u16 target_id;
+			u8 lun;
+			u8 reserved;
+		} csmi;
+	};
+};
+
+struct __packed atto_vda_cfg_rsp {
+	u16 vda_version;
+	u16 fw_release;
+	u32 fw_build;
+};
+
+struct __packed atto_vda_mgmt_rsp {
+	u32 length;
+	u16 dev_index;
+	u8 scan_generation;
+};
+
+union atto_vda_func_rsp {
+	struct atto_vda_scsi_rsp scsi_rsp;
+	struct atto_vda_flash_rsp flash_rsp;
+	struct atto_vda_ae_rsp ae_rsp;
+	struct atto_vda_cli_rsp cli_rsp;
+	struct atto_vda_ioctl_rsp ioctl_rsp;
+	struct atto_vda_cfg_rsp cfg_rsp;
+	struct atto_vda_mgmt_rsp mgt_rsp;
+	u32 dwords[2];
+};
+
+struct __packed atto_vda_ob_rsp {
+	u32 handle;
+	u8 req_stat;
+	u8 rsvd[3];
+
+	union atto_vda_func_rsp func_rsp;
+};
+
+struct __packed atto_vda_ae_data {
+	u8 event_data[256];
+};
+
+struct __packed atto_vda_mgmt_data {
+	union {
+		u8 bytes[112];
+		struct atto_vda_devinfo dev_info;
+		struct atto_vda_grp_info grp_info;
+		struct atto_vdapart_info part_info;
+		struct atto_vda_dh_info dev_health_info;
+		struct atto_vda_metrics_info metrics_info;
+		struct atto_vda_schedule_info sched_info;
+		struct atto_vda_n_vcache_info nvcache_info;
+		struct atto_vda_buzzer_info buzzer_info;
+	} data;
+};
+
+union atto_vda_rsp_data {
+	struct atto_vda_ae_data ae_data;
+	struct atto_vda_mgmt_data mgt_data;
+	u8 sense_data[252];
+	#define SENSE_DATA_SZ   252
+	u8 bytes[256];
+};
+
+#endif

+ 1441 - 0
drivers/scsi/esas2r/esas2r.h

@@ -0,0 +1,1441 @@
+/*
+ *  linux/drivers/scsi/esas2r/esas2r.h
+ *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ *  Copyright (c) 2001-2013 ATTO Technology, Inc.
+ *  (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+
+#include "esas2r_log.h"
+#include "atioctl.h"
+#include "atvda.h"
+
+#ifndef ESAS2R_H
+#define ESAS2R_H
+
+/* Global Variables */
+extern struct esas2r_adapter *esas2r_adapters[];
+extern u8 *esas2r_buffered_ioctl;
+extern dma_addr_t esas2r_buffered_ioctl_addr;
+extern u32 esas2r_buffered_ioctl_size;
+extern struct pci_dev *esas2r_buffered_ioctl_pcid;
+#define SGL_PG_SZ_MIN   64
+#define SGL_PG_SZ_MAX   1024
+extern int sgl_page_size;
+#define NUM_SGL_MIN     8
+#define NUM_SGL_MAX     2048
+extern int num_sg_lists;
+#define NUM_REQ_MIN     4
+#define NUM_REQ_MAX     256
+extern int num_requests;
+#define NUM_AE_MIN      2
+#define NUM_AE_MAX      8
+extern int num_ae_requests;
+extern int cmd_per_lun;
+extern int can_queue;
+extern int esas2r_max_sectors;
+extern int sg_tablesize;
+extern int interrupt_mode;
+extern int num_io_requests;
+
+/* Macro definitions */
+#define ESAS2R_MAX_ID        255
+#define MAX_ADAPTERS         32
+#define ESAS2R_DRVR_NAME     "esas2r"
+#define ESAS2R_LONGNAME      "ATTO ExpressSAS 6GB RAID Adapter"
+#define ESAS2R_MAX_DEVICES     32
+#define ATTONODE_NAME         "ATTONode"
+#define ESAS2R_MAJOR_REV       1
+#define ESAS2R_MINOR_REV       00
+#define ESAS2R_VERSION_STR     DEFINED_NUM_TO_STR(ESAS2R_MAJOR_REV) "." \
+	DEFINED_NUM_TO_STR(ESAS2R_MINOR_REV)
+#define ESAS2R_COPYRIGHT_YEARS "2001-2013"
+#define ESAS2R_DEFAULT_SGL_PAGE_SIZE 384
+#define ESAS2R_DEFAULT_CMD_PER_LUN   64
+#define ESAS2R_DEFAULT_NUM_SG_LISTS 1024
+#define DEFINED_NUM_TO_STR(num) NUM_TO_STR(num)
+#define NUM_TO_STR(num) #num
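+
+/* An editorial aside (not part of the original patch): the two macro
+ * levels above are the standard C idiom for stringizing a macro's value
+ * rather than its name:
+ *
+ *   NUM_TO_STR(ESAS2R_MAJOR_REV)         -> "ESAS2R_MAJOR_REV"
+ *   DEFINED_NUM_TO_STR(ESAS2R_MAJOR_REV) -> "1"
+ *
+ * so ESAS2R_VERSION_STR expands to "1" "." "00", i.e. "1.00".
+ */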
+
+#define ESAS2R_SGL_ALIGN    16
+#define ESAS2R_LIST_ALIGN   16
+#define ESAS2R_LIST_EXTRA   ESAS2R_NUM_EXTRA
+#define ESAS2R_DATA_BUF_LEN         256
+#define ESAS2R_DEFAULT_TMO          5000
+#define ESAS2R_DISC_BUF_LEN         512
+#define ESAS2R_FWCOREDUMP_SZ        0x80000
+#define ESAS2R_NUM_PHYS             8
+#define ESAS2R_TARG_ID_INV          0xFFFF
+#define ESAS2R_INT_STS_MASK         MU_INTSTAT_MASK
+#define ESAS2R_INT_ENB_MASK         MU_INTSTAT_MASK
+#define ESAS2R_INT_DIS_MASK         0
+#define ESAS2R_MAX_TARGETS          256
+#define ESAS2R_KOBJ_NAME_LEN        20
+
+/* u16 (WORD) component macros */
+#define LOBYTE(w) ((u8)(u16)(w))
+#define HIBYTE(w) ((u8)(((u16)(w)) >> 8))
+#define MAKEWORD(lo, hi) ((u16)((u8)(lo) | ((u16)(u8)(hi) << 8)))
+
+/* u32 (DWORD) component macros */
+#define LOWORD(d) ((u16)(u32)(d))
+#define HIWORD(d) ((u16)(((u32)(d)) >> 16))
+#define MAKEDWORD(lo, hi) ((u32)((u16)(lo) | ((u32)(u16)(hi) << 16)))
+
+/* macro to get the lowest nonzero bit of a value */
+#define LOBIT(x) ((x) & (0 - (x)))
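+
+/* Editorial worked examples (not part of the original patch):
+ *
+ *   MAKEWORD(0x34, 0x12) == 0x1234
+ *   MAKEDWORD(0x5678, 0x1234) == 0x12345678
+ *
+ * For LOBIT, (0 - (x)) is the two's complement of x, so the AND isolates
+ * the lowest set bit: LOBIT(0x68) == 0x08, and LOBIT(0) == 0.
+ */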
+
+/* These functions are provided to access the chip's control registers.
+ * The register is specified by its byte offset from the register base
+ * for the adapter.
+ */
+#define esas2r_read_register_dword(a, reg)                             \
+	readl((void __iomem *)a->regs + (reg) + MW_REG_OFFSET_HWREG)
+
+#define esas2r_write_register_dword(a, reg, data)                      \
+	writel(data, (void __iomem *)(a->regs + (reg) + MW_REG_OFFSET_HWREG))
+
+#define esas2r_flush_register_dword(a, r) esas2r_read_register_dword(a, r)
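+
+/* Editorial usage note (not part of the original patch), using registers
+ * defined later in this header:
+ *
+ *   esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_FORCE_INT);
+ *   esas2r_flush_register_dword(a, MU_DOORBELL_IN);
+ *
+ * writes a doorbell and then reads the register back, flushing the posted
+ * PCI write so the device is guaranteed to have seen it.
+ */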
+
+/* This function is provided to access the chip's data window.   The
+ * register is specified by its byte offset from the window base
+ * for the adapter.
+ */
+#define esas2r_read_data_byte(a, reg)                                  \
+	readb((void __iomem *)a->data_window + (reg))
+
+/* ATTO vendor and device Ids */
+#define ATTO_VENDOR_ID          0x117C
+#define ATTO_DID_INTEL_IOP348   0x002C
+#define ATTO_DID_MV_88RC9580    0x0049
+#define ATTO_DID_MV_88RC9580TS  0x0066
+#define ATTO_DID_MV_88RC9580TSE 0x0067
+#define ATTO_DID_MV_88RC9580TL  0x0068
+
+/* ATTO subsystem device Ids */
+#define ATTO_SSDID_TBT      0x4000
+#define ATTO_TSSC_3808      0x4066
+#define ATTO_TSSC_3808E     0x4067
+#define ATTO_TLSH_1068      0x4068
+#define ATTO_ESAS_R680      0x0049
+#define ATTO_ESAS_R608      0x004A
+#define ATTO_ESAS_R60F      0x004B
+#define ATTO_ESAS_R6F0      0x004C
+#define ATTO_ESAS_R644      0x004D
+#define ATTO_ESAS_R648      0x004E
+
+/*
+ * flash definitions & structures
+ * define the code types
+ */
+#define FBT_CPYR        0xAA00
+#define FBT_SETUP       0xAA02
+#define FBT_FLASH_VER   0xAA04
+
+/* offsets to various locations in flash */
+#define FLS_OFFSET_BOOT (u32)(0x00700000)
+#define FLS_OFFSET_NVR  (u32)(0x007C0000)
+#define FLS_OFFSET_CPYR FLS_OFFSET_NVR
+#define FLS_LENGTH_BOOT (FLS_OFFSET_CPYR - FLS_OFFSET_BOOT)
+#define FLS_BLOCK_SIZE  (u32)(0x00020000)
+#define FI_NVR_2KB  0x0800
+#define FI_NVR_8KB  0x2000
+#define FM_BUF_SZ   0x800
+
+/*
+ * marvell frey (88R9580) register definitions
+ * chip revision identifiers
+ */
+#define MVR_FREY_B2     0xB2
+
+/*
+ * memory window definitions.  window 0 is the data window with definitions
+ * of MW_DATA_XXX.  window 1 is the register window with definitions of
+ * MW_REG_XXX.
+ */
+#define MW_REG_WINDOW_SIZE      (u32)(0x00040000)
+#define MW_REG_OFFSET_HWREG     (u32)(0x00000000)
+#define MW_REG_OFFSET_PCI       (u32)(0x00008000)
+#define MW_REG_PCI_HWREG_DELTA  (MW_REG_OFFSET_PCI - MW_REG_OFFSET_HWREG)
+#define MW_DATA_WINDOW_SIZE     (u32)(0x00020000)
+#define MW_DATA_ADDR_SER_FLASH  (u32)(0xEC000000)
+#define MW_DATA_ADDR_SRAM       (u32)(0xF4000000)
+#define MW_DATA_ADDR_PAR_FLASH  (u32)(0xFC000000)
+
+/*
+ * the following registers are for the communication
+ * list interface (AKA message unit (MU))
+ */
+#define MU_IN_LIST_ADDR_LO      (u32)(0x00004000)
+#define MU_IN_LIST_ADDR_HI      (u32)(0x00004004)
+
+#define MU_IN_LIST_WRITE        (u32)(0x00004018)
+    #define MU_ILW_TOGGLE       (u32)(0x00004000)
+
+#define MU_IN_LIST_READ         (u32)(0x0000401C)
+    #define MU_ILR_TOGGLE       (u32)(0x00004000)
+    #define MU_ILIC_LIST        (u32)(0x0000000F)
+    #define MU_ILIC_LIST_F0     (u32)(0x00000000)
+    #define MU_ILIC_DEST        (u32)(0x00000F00)
+    #define MU_ILIC_DEST_DDR    (u32)(0x00000200)
+#define MU_IN_LIST_IFC_CONFIG   (u32)(0x00004028)
+
+#define MU_IN_LIST_CONFIG       (u32)(0x0000402C)
+    #define MU_ILC_ENABLE       (u32)(0x00000001)
+    #define MU_ILC_ENTRY_MASK   (u32)(0x000000F0)
+    #define MU_ILC_ENTRY_4_DW   (u32)(0x00000020)
+    #define MU_ILC_DYNAMIC_SRC  (u32)(0x00008000)
+    #define MU_ILC_NUMBER_MASK  (u32)(0x7FFF0000)
+    #define MU_ILC_NUMBER_SHIFT            16
+
+#define MU_OUT_LIST_ADDR_LO     (u32)(0x00004050)
+#define MU_OUT_LIST_ADDR_HI     (u32)(0x00004054)
+
+#define MU_OUT_LIST_COPY_PTR_LO (u32)(0x00004058)
+#define MU_OUT_LIST_COPY_PTR_HI (u32)(0x0000405C)
+
+#define MU_OUT_LIST_WRITE       (u32)(0x00004068)
+    #define MU_OLW_TOGGLE       (u32)(0x00004000)
+
+#define MU_OUT_LIST_COPY        (u32)(0x0000406C)
+    #define MU_OLC_TOGGLE       (u32)(0x00004000)
+    #define MU_OLC_WRT_PTR      (u32)(0x00003FFF)
+
+#define MU_OUT_LIST_IFC_CONFIG  (u32)(0x00004078)
+    #define MU_OLIC_LIST        (u32)(0x0000000F)
+    #define MU_OLIC_LIST_F0     (u32)(0x00000000)
+    #define MU_OLIC_SOURCE      (u32)(0x00000F00)
+    #define MU_OLIC_SOURCE_DDR  (u32)(0x00000200)
+
+#define MU_OUT_LIST_CONFIG      (u32)(0x0000407C)
+    #define MU_OLC_ENABLE       (u32)(0x00000001)
+    #define MU_OLC_ENTRY_MASK   (u32)(0x000000F0)
+    #define MU_OLC_ENTRY_4_DW   (u32)(0x00000020)
+    #define MU_OLC_NUMBER_MASK  (u32)(0x7FFF0000)
+    #define MU_OLC_NUMBER_SHIFT            16
+
+#define MU_OUT_LIST_INT_STAT    (u32)(0x00004088)
+    #define MU_OLIS_INT         (u32)(0x00000001)
+
+#define MU_OUT_LIST_INT_MASK    (u32)(0x0000408C)
+    #define MU_OLIS_MASK        (u32)(0x00000001)
+
+/*
+ * the maximum size of the communication lists is two greater than the
+ * maximum number of VDA requests.  the extra entries prevent queue
+ * overflow.
+ */
+#define ESAS2R_MAX_NUM_REQS         256
+#define ESAS2R_NUM_EXTRA            2
+#define ESAS2R_MAX_COMM_LIST_SIZE   (ESAS2R_MAX_NUM_REQS + ESAS2R_NUM_EXTRA)
+
+/*
+ * the following registers are for the CPU interface
+ */
+#define MU_CTL_STATUS_IN        (u32)(0x00010108)
+    #define MU_CTL_IN_FULL_RST  (u32)(0x00000020)
+#define MU_CTL_STATUS_IN_B2     (u32)(0x00010130)
+    #define MU_CTL_IN_FULL_RST2 (u32)(0x80000000)
+#define MU_DOORBELL_IN          (u32)(0x00010460)
+    #define DRBL_RESET_BUS      (u32)(0x00000002)
+    #define DRBL_PAUSE_AE       (u32)(0x00000004)
+    #define DRBL_RESUME_AE      (u32)(0x00000008)
+    #define DRBL_MSG_IFC_DOWN   (u32)(0x00000010)
+    #define DRBL_FLASH_REQ      (u32)(0x00000020)
+    #define DRBL_FLASH_DONE     (u32)(0x00000040)
+    #define DRBL_FORCE_INT      (u32)(0x00000080)
+    #define DRBL_MSG_IFC_INIT   (u32)(0x00000100)
+    #define DRBL_POWER_DOWN     (u32)(0x00000200)
+    #define DRBL_DRV_VER_1      (u32)(0x00010000)
+    #define DRBL_DRV_VER        DRBL_DRV_VER_1
+#define MU_DOORBELL_IN_ENB      (u32)(0x00010464)
+#define MU_DOORBELL_OUT         (u32)(0x00010480)
+ #define DRBL_PANIC_REASON_MASK (u32)(0x00F00000)
+    #define DRBL_UNUSED_HANDLER (u32)(0x00100000)
+    #define DRBL_UNDEF_INSTR    (u32)(0x00200000)
+    #define DRBL_PREFETCH_ABORT (u32)(0x00300000)
+    #define DRBL_DATA_ABORT     (u32)(0x00400000)
+    #define DRBL_JUMP_TO_ZERO   (u32)(0x00500000)
+  #define DRBL_FW_RESET         (u32)(0x00080000)
+  #define DRBL_FW_VER_MSK       (u32)(0x00070000)
+  #define DRBL_FW_VER_0         (u32)(0x00000000)
+  #define DRBL_FW_VER_1         (u32)(0x00010000)
+  #define DRBL_FW_VER           DRBL_FW_VER_1
+#define MU_DOORBELL_OUT_ENB     (u32)(0x00010484)
+    #define DRBL_ENB_MASK       (u32)(0x00F803FF)
+#define MU_INT_STATUS_OUT       (u32)(0x00010200)
+    #define MU_INTSTAT_POST_OUT (u32)(0x00000010)
+    #define MU_INTSTAT_DRBL_IN  (u32)(0x00000100)
+    #define MU_INTSTAT_DRBL     (u32)(0x00001000)
+    #define MU_INTSTAT_MASK     (u32)(0x00001010)
+#define MU_INT_MASK_OUT         (u32)(0x0001020C)
+
+/* PCI express registers accessed via window 1 */
+#define MVR_PCI_WIN1_REMAP      (u32)(0x00008438)
+    #define MVRPW1R_ENABLE      (u32)(0x00000001)
+
+
+/* structures */
+
+/* inbound list dynamic source entry */
+struct esas2r_inbound_list_source_entry {
+	u64 address;
+	u32 length;
+	#define HWILSE_INTERFACE_F0  0x00000000
+	u32 reserved;
+};
+
+/* PCI data structure in expansion ROM images */
+struct __packed esas2r_boot_header {
+	char signature[4];
+	u16 vendor_id;
+	u16 device_id;
+	u16 VPD;
+	u16 struct_length;
+	u8 struct_revision;
+	u8 class_code[3];
+	u16 image_length;
+	u16 code_revision;
+	u8 code_type;
+	#define CODE_TYPE_PC    0
+	#define CODE_TYPE_OPEN  1
+	#define CODE_TYPE_EFI   3
+	u8 indicator;
+	#define INDICATOR_LAST  0x80
+	u8 reserved[2];
+};
+
+struct __packed esas2r_boot_image {
+	u16 signature;
+	u8 reserved[22];
+	u16 header_offset;
+	u16 pnp_offset;
+};
+
+struct __packed esas2r_pc_image {
+	u16 signature;
+	u8 length;
+	u8 entry_point[3];
+	u8 checksum;
+	u16 image_end;
+	u16 min_size;
+	u8 rom_flags;
+	u8 reserved[12];
+	u16 header_offset;
+	u16 pnp_offset;
+	struct esas2r_boot_header boot_image;
+};
+
+struct __packed esas2r_efi_image {
+	u16 signature;
+	u16 length;
+	u32 efi_signature;
+	#define EFI_ROM_SIG     0x00000EF1
+	u16 image_type;
+	#define EFI_IMAGE_APP   10
+	#define EFI_IMAGE_BSD   11
+	#define EFI_IMAGE_RTD   12
+	u16 machine_type;
+	#define EFI_MACHINE_IA32 0x014c
+	#define EFI_MACHINE_IA64 0x0200
+	#define EFI_MACHINE_X64  0x8664
+	#define EFI_MACHINE_EBC  0x0EBC
+	u16 compression;
+	#define EFI_UNCOMPRESSED 0x0000
+	#define EFI_COMPRESSED   0x0001
+	u8 reserved[8];
+	u16 efi_offset;
+	u16 header_offset;
+	u16 reserved2;
+	struct esas2r_boot_header boot_image;
+};
+
+struct esas2r_adapter;
+struct esas2r_sg_context;
+struct esas2r_request;
+
+typedef void (*RQCALLBK)     (struct esas2r_adapter *a,
+			      struct esas2r_request *rq);
+typedef bool (*RQBUILDSGL)   (struct esas2r_adapter *a,
+			      struct esas2r_sg_context *sgc);
+
+struct esas2r_component_header {
+	u8 img_type;
+	#define CH_IT_FW    0x00
+	#define CH_IT_NVR   0x01
+	#define CH_IT_BIOS  0x02
+	#define CH_IT_MAC   0x03
+	#define CH_IT_CFG   0x04
+	#define CH_IT_EFI   0x05
+	u8 status;
+	#define CH_STAT_PENDING 0xff
+	#define CH_STAT_FAILED  0x00
+	#define CH_STAT_SUCCESS 0x01
+	#define CH_STAT_RETRY   0x02
+	#define CH_STAT_INVALID 0x03
+	u8 pad[2];
+	u32 version;
+	u32 length;
+	u32 image_offset;
+};
+
+#define FI_REL_VER_SZ   16
+
+struct esas2r_flash_img_v0 {
+	u8 fi_version;
+	#define FI_VERSION_0    00
+	u8 status;
+	u8 adap_typ;
+	u8 action;
+	u32 length;
+	u16 checksum;
+	u16 driver_error;
+	u16 flags;
+	u16 num_comps;
+	#define FI_NUM_COMPS_V0 5
+	u8 rel_version[FI_REL_VER_SZ];
+	struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V0];
+	u8 scratch_buf[FM_BUF_SZ];
+};
+
+struct esas2r_flash_img {
+	u8 fi_version;
+	#define FI_VERSION_1    01
+	u8 status;
+	#define FI_STAT_SUCCESS  0x00
+	#define FI_STAT_FAILED   0x01
+	#define FI_STAT_REBOOT   0x02
+	#define FI_STAT_ADAPTYP  0x03
+	#define FI_STAT_INVALID  0x04
+	#define FI_STAT_CHKSUM   0x05
+	#define FI_STAT_LENGTH   0x06
+	#define FI_STAT_UNKNOWN  0x07
+	#define FI_STAT_IMG_VER  0x08
+	#define FI_STAT_BUSY     0x09
+	#define FI_STAT_DUAL     0x0A
+	#define FI_STAT_MISSING  0x0B
+	#define FI_STAT_UNSUPP   0x0C
+	#define FI_STAT_ERASE    0x0D
+	#define FI_STAT_FLASH    0x0E
+	#define FI_STAT_DEGRADED 0x0F
+	u8 adap_typ;
+	#define FI_AT_UNKNWN    0xFF
+	#define FI_AT_SUN_LAKE  0x0B
+	#define FI_AT_MV_9580   0x0F
+	u8 action;
+	#define FI_ACT_DOWN     0x00
+	#define FI_ACT_UP       0x01
+	#define FI_ACT_UPSZ     0x02
+	#define FI_ACT_MAX      0x02
+	#define FI_ACT_DOWN1    0x80
+	u32 length;
+	u16 checksum;
+	u16 driver_error;
+	u16 flags;
+	#define FI_FLG_NVR_DEF  0x0001
+	u16 num_comps;
+	#define FI_NUM_COMPS_V1 6
+	u8 rel_version[FI_REL_VER_SZ];
+	struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V1];
+	u8 scratch_buf[FM_BUF_SZ];
+};
+
+/* definitions for flash script (FS) commands */
+struct esas2r_ioctlfs_command {
+	u8 command;
+	#define ESAS2R_FS_CMD_ERASE    0
+	#define ESAS2R_FS_CMD_READ     1
+	#define ESAS2R_FS_CMD_BEGINW   2
+	#define ESAS2R_FS_CMD_WRITE    3
+	#define ESAS2R_FS_CMD_COMMIT   4
+	#define ESAS2R_FS_CMD_CANCEL   5
+	u8 checksum;
+	u8 reserved[2];
+	u32 flash_addr;
+	u32 length;
+	u32 image_offset;
+};
+
+struct esas2r_ioctl_fs {
+	u8 version;
+	#define ESAS2R_FS_VER      0
+	u8 status;
+	u8 driver_error;
+	u8 adap_type;
+	#define ESAS2R_FS_AT_ESASRAID2     3
+	#define ESAS2R_FS_AT_TSSASRAID2    4
+	#define ESAS2R_FS_AT_TSSASRAID2E   5
+	#define ESAS2R_FS_AT_TLSASHBA      6
+	u8 driver_ver;
+	u8 reserved[11];
+	struct esas2r_ioctlfs_command command;
+	u8 data[1];
+};
+
+struct esas2r_sas_nvram {
+	u8 signature[4];
+	u8 version;
+	#define SASNVR_VERSION_0    0x00
+	#define SASNVR_VERSION      SASNVR_VERSION_0
+	u8 checksum;
+	#define SASNVR_CKSUM_SEED   0x5A
+	u8 max_lun_for_target;
+	u8 pci_latency;
+	#define SASNVR_PCILAT_DIS   0x00
+	#define SASNVR_PCILAT_MIN   0x10
+	#define SASNVR_PCILAT_MAX   0xF8
+	u8 options1;
+	#define SASNVR1_BOOT_DRVR   0x01
+	#define SASNVR1_BOOT_SCAN   0x02
+	#define SASNVR1_DIS_PCI_MWI 0x04
+	#define SASNVR1_FORCE_ORD_Q 0x08
+	#define SASNVR1_CACHELINE_0 0x10
+	#define SASNVR1_DIS_DEVSORT 0x20
+	#define SASNVR1_PWR_MGT_EN  0x40
+	#define SASNVR1_WIDEPORT    0x80
+	u8 options2;
+	#define SASNVR2_SINGLE_BUS  0x01
+	#define SASNVR2_SLOT_BIND   0x02
+	#define SASNVR2_EXP_PROG    0x04
+	#define SASNVR2_CMDTHR_LUN  0x08
+	#define SASNVR2_HEARTBEAT   0x10
+	#define SASNVR2_INT_CONNECT 0x20
+	#define SASNVR2_SW_MUX_CTRL 0x40
+	#define SASNVR2_DISABLE_NCQ 0x80
+	u8 int_coalescing;
+	#define SASNVR_COAL_DIS     0x00
+	#define SASNVR_COAL_LOW     0x01
+	#define SASNVR_COAL_MED     0x02
+	#define SASNVR_COAL_HI      0x03
+	u8 cmd_throttle;
+	#define SASNVR_CMDTHR_NONE  0x00
+	u8 dev_wait_time;
+	u8 dev_wait_count;
+	u8 spin_up_delay;
+	#define SASNVR_SPINUP_MAX   0x14
+	u8 ssp_align_rate;
+	u8 sas_addr[8];
+	u8 phy_speed[16];
+	#define SASNVR_SPEED_AUTO   0x00
+	#define SASNVR_SPEED_1_5GB  0x01
+	#define SASNVR_SPEED_3GB    0x02
+	#define SASNVR_SPEED_6GB    0x03
+	#define SASNVR_SPEED_12GB   0x04
+	u8 phy_mux[16];
+	#define SASNVR_MUX_DISABLED 0x00
+	#define SASNVR_MUX_1_5GB    0x01
+	#define SASNVR_MUX_3GB      0x02
+	#define SASNVR_MUX_6GB      0x03
+	u8 phy_flags[16];
+	#define SASNVR_PHF_DISABLED 0x01
+	#define SASNVR_PHF_RD_ONLY  0x02
+	u8 sort_type;
+	#define SASNVR_SORT_SAS_ADDR    0x00
+	#define SASNVR_SORT_H308_CONN   0x01
+	#define SASNVR_SORT_PHY_ID      0x02
+	#define SASNVR_SORT_SLOT_ID     0x03
+	u8 dpm_reqcmd_lmt;
+	u8 dpm_stndby_time;
+	u8 dpm_active_time;
+	u8 phy_target_id[16];
+	#define SASNVR_PTI_DISABLED     0xFF
+	u8 virt_ses_mode;
+	#define SASNVR_VSMH_DISABLED    0x00
+	u8 read_write_mode;
+	#define SASNVR_RWM_DEFAULT      0x00
+	u8 link_down_to;
+	u8 reserved[0xA1];
+};
+
+typedef u32 (*PGETPHYSADDR) (struct esas2r_sg_context *sgc, u64 *addr);
+
+struct esas2r_sg_context {
+	struct esas2r_adapter *adapter;
+	struct esas2r_request *first_req;
+	u32 length;
+	u8 *cur_offset;
+	PGETPHYSADDR get_phys_addr;
+	union {
+		struct {
+			struct atto_vda_sge *curr;
+			struct atto_vda_sge *last;
+			struct atto_vda_sge *limit;
+			struct atto_vda_sge *chain;
+		} a64;
+		struct {
+			struct atto_physical_region_description *curr;
+			struct atto_physical_region_description *chain;
+			u32 sgl_max_cnt;
+			u32 sge_cnt;
+		} prd;
+	} sge;
+	struct scatterlist *cur_sgel;
+	u8 *exp_offset;
+	int num_sgel;
+	int sgel_count;
+};
+
+struct esas2r_target {
+	u8 flags;
+	#define TF_PASS_THRU    0x01
+	#define TF_USED         0x02
+	u8 new_target_state;
+	u8 target_state;
+	u8 buffered_target_state;
+#define TS_NOT_PRESENT      0x00
+#define TS_PRESENT          0x05
+#define TS_LUN_CHANGE       0x06
+#define TS_INVALID          0xFF
+	u32 block_size;
+	u32 inter_block;
+	u32 inter_byte;
+	u16 virt_targ_id;
+	u16 phys_targ_id;
+	u8 identifier_len;
+	u64 sas_addr;
+	u8 identifier[60];
+	struct atto_vda_ae_lu lu_event;
+};
+
+struct esas2r_request {
+	struct list_head comp_list;
+	struct list_head req_list;
+	union atto_vda_req *vrq;
+	struct esas2r_mem_desc *vrq_md;
+	union {
+		void *data_buf;
+		union atto_vda_rsp_data *vda_rsp_data;
+	};
+	u8 *sense_buf;
+	struct list_head sg_table_head;
+	struct esas2r_mem_desc *sg_table;
+	u32 timeout;
+	#define RQ_TIMEOUT_S1     0xFFFFFFFF
+	#define RQ_TIMEOUT_S2     0xFFFFFFFE
+	#define RQ_MAX_TIMEOUT    0xFFFFFFFD
+	u16 target_id;
+	u8 req_type;
+	#define RT_INI_REQ          0x01
+	#define RT_DISC_REQ         0x02
+	u8 sense_len;
+	union atto_vda_func_rsp func_rsp;
+	RQCALLBK comp_cb;
+	RQCALLBK interrupt_cb;
+	void *interrupt_cx;
+	u8 flags;
+	#define RF_1ST_IBLK_BASE    0x04
+	#define RF_FAILURE_OK       0x08
+	u8 req_stat;
+	u16 vda_req_sz;
+	#define RQ_SIZE_DEFAULT   0
+	u64 lba;
+	RQCALLBK aux_req_cb;
+	void *aux_req_cx;
+	u32 blk_len;
+	u32 max_blk_len;
+	union {
+		struct scsi_cmnd *cmd;
+		u8 *task_management_status_ptr;
+	};
+};
+
+struct esas2r_flash_context {
+	struct esas2r_flash_img *fi;
+	RQCALLBK interrupt_cb;
+	u8 *sgc_offset;
+	u8 *scratch;
+	u32 fi_hdr_len;
+	u8 task;
+	#define     FMTSK_ERASE_BOOT    0
+	#define     FMTSK_WRTBIOS       1
+	#define     FMTSK_READBIOS      2
+	#define     FMTSK_WRTMAC        3
+	#define     FMTSK_READMAC       4
+	#define     FMTSK_WRTEFI        5
+	#define     FMTSK_READEFI       6
+	#define     FMTSK_WRTCFG        7
+	#define     FMTSK_READCFG       8
+	u8 func;
+	u16 num_comps;
+	u32 cmp_len;
+	u32 flsh_addr;
+	u32 curr_len;
+	u8 comp_typ;
+	struct esas2r_sg_context sgc;
+};
+
+struct esas2r_disc_context {
+	u8 disc_evt;
+	#define DCDE_DEV_CHANGE     0x01
+	#define DCDE_DEV_SCAN       0x02
+	u8 state;
+	#define DCS_DEV_RMV         0x00
+	#define DCS_DEV_ADD         0x01
+	#define DCS_BLOCK_DEV_SCAN  0x02
+	#define DCS_RAID_GRP_INFO   0x03
+	#define DCS_PART_INFO       0x04
+	#define DCS_PT_DEV_INFO     0x05
+	#define DCS_PT_DEV_ADDR     0x06
+	#define DCS_DISC_DONE       0xFF
+	u16 flags;
+	#define DCF_DEV_CHANGE      0x0001
+	#define DCF_DEV_SCAN        0x0002
+	#define DCF_POLLED          0x8000
+	u32 interleave;
+	u32 block_size;
+	u16 dev_ix;
+	u8 part_num;
+	u8 raid_grp_ix;
+	char raid_grp_name[16];
+	struct esas2r_target *curr_targ;
+	u16 curr_virt_id;
+	u16 curr_phys_id;
+	u8 scan_gen;
+	u8 dev_addr_type;
+	u64 sas_addr;
+};
+
+struct esas2r_mem_desc {
+	struct list_head next_desc;
+	void *virt_addr;
+	u64 phys_addr;
+	void *pad;
+	void *esas2r_data;
+	u32 esas2r_param;
+	u32 size;
+};
+
+enum fw_event_type {
+	fw_event_null,
+	fw_event_lun_change,
+	fw_event_present,
+	fw_event_not_present,
+	fw_event_vda_ae
+};
+
+struct esas2r_vda_ae {
+	u32 signature;
+#define ESAS2R_VDA_EVENT_SIG  0x4154544F
+	u8 bus_number;
+	u8 devfn;
+	u8 pad[2];
+	union atto_vda_ae vda_ae;
+};
+
+struct esas2r_fw_event_work {
+	struct list_head list;
+	struct delayed_work work;
+	struct esas2r_adapter *a;
+	enum fw_event_type type;
+	u8 data[sizeof(struct esas2r_vda_ae)];
+};
+
+enum state {
+	FW_INVALID_ST,
+	FW_STATUS_ST,
+	FW_COMMAND_ST
+};
+
+struct esas2r_firmware {
+	enum state state;
+	struct esas2r_flash_img header;
+	u8 *data;
+	u64 phys;
+	int orig_len;
+	void *header_buff;
+	u64 header_buff_phys;
+};
+
+struct esas2r_adapter {
+	struct esas2r_target targetdb[ESAS2R_MAX_TARGETS];
+	struct esas2r_target *targetdb_end;
+	unsigned char *regs;
+	unsigned char *data_window;
+	u32 volatile flags;
+	#define AF_PORT_CHANGE      (u32)(0x00000001)
+	#define AF_CHPRST_NEEDED    (u32)(0x00000004)
+	#define AF_CHPRST_PENDING   (u32)(0x00000008)
+	#define AF_CHPRST_DETECTED  (u32)(0x00000010)
+	#define AF_BUSRST_NEEDED    (u32)(0x00000020)
+	#define AF_BUSRST_PENDING   (u32)(0x00000040)
+	#define AF_BUSRST_DETECTED  (u32)(0x00000080)
+	#define AF_DISABLED         (u32)(0x00000100)
+	#define AF_FLASH_LOCK       (u32)(0x00000200)
+	#define AF_OS_RESET         (u32)(0x00002000)
+	#define AF_FLASHING         (u32)(0x00004000)
+	#define AF_POWER_MGT        (u32)(0x00008000)
+	#define AF_NVR_VALID        (u32)(0x00010000)
+	#define AF_DEGRADED_MODE    (u32)(0x00020000)
+	#define AF_DISC_PENDING     (u32)(0x00040000)
+	#define AF_TASKLET_SCHEDULED    (u32)(0x00080000)
+	#define AF_HEARTBEAT        (u32)(0x00200000)
+	#define AF_HEARTBEAT_ENB    (u32)(0x00400000)
+	#define AF_NOT_PRESENT      (u32)(0x00800000)
+	#define AF_CHPRST_STARTED   (u32)(0x01000000)
+	#define AF_FIRST_INIT       (u32)(0x02000000)
+	#define AF_POWER_DOWN       (u32)(0x04000000)
+	#define AF_DISC_IN_PROG     (u32)(0x08000000)
+	#define AF_COMM_LIST_TOGGLE (u32)(0x10000000)
+	#define AF_LEGACY_SGE_MODE  (u32)(0x20000000)
+	#define AF_DISC_POLLED      (u32)(0x40000000)
+	u32 volatile flags2;
+	#define AF2_SERIAL_FLASH    (u32)(0x00000001)
+	#define AF2_DEV_SCAN        (u32)(0x00000002)
+	#define AF2_DEV_CNT_OK      (u32)(0x00000004)
+	#define AF2_COREDUMP_AVAIL  (u32)(0x00000008)
+	#define AF2_COREDUMP_SAVED  (u32)(0x00000010)
+	#define AF2_VDA_POWER_DOWN  (u32)(0x00000100)
+	#define AF2_THUNDERLINK     (u32)(0x00000200)
+	#define AF2_THUNDERBOLT     (u32)(0x00000400)
+	#define AF2_INIT_DONE       (u32)(0x00000800)
+	#define AF2_INT_PENDING     (u32)(0x00001000)
+	#define AF2_TIMER_TICK      (u32)(0x00002000)
+	#define AF2_IRQ_CLAIMED     (u32)(0x00004000)
+	#define AF2_MSI_ENABLED     (u32)(0x00008000)
+	atomic_t disable_cnt;
+	atomic_t dis_ints_cnt;
+	u32 int_stat;
+	u32 int_mask;
+	u32 volatile *outbound_copy;
+	struct list_head avail_request;
+	spinlock_t request_lock;
+	spinlock_t sg_list_lock;
+	spinlock_t queue_lock;
+	spinlock_t mem_lock;
+	struct list_head free_sg_list_head;
+	struct esas2r_mem_desc *sg_list_mds;
+	struct list_head active_list;
+	struct list_head defer_list;
+	struct esas2r_request **req_table;
+	union {
+		u16 prev_dev_cnt;
+		u32 heartbeat_time;
+	#define ESAS2R_HEARTBEAT_TIME       (3000)
+	};
+	u32 chip_uptime;
+	#define ESAS2R_CHP_UPTIME_MAX       (60000)
+	#define ESAS2R_CHP_UPTIME_CNT       (20000)
+	u64 uncached_phys;
+	u8 *uncached;
+	struct esas2r_sas_nvram *nvram;
+	struct esas2r_request general_req;
+	u8 init_msg;
+	#define ESAS2R_INIT_MSG_START       1
+	#define ESAS2R_INIT_MSG_INIT        2
+	#define ESAS2R_INIT_MSG_GET_INIT    3
+	#define ESAS2R_INIT_MSG_REINIT      4
+	u16 cmd_ref_no;
+	u32 fw_version;
+	u32 fw_build;
+	u32 chip_init_time;
+	#define ESAS2R_CHPRST_TIME         (180000)
+	#define ESAS2R_CHPRST_WAIT_TIME    (2000)
+	u32 last_tick_time;
+	u32 window_base;
+	RQBUILDSGL build_sgl;
+	struct esas2r_request *first_ae_req;
+	u32 list_size;
+	u32 last_write;
+	u32 last_read;
+	u16 max_vdareq_size;
+	u16 disc_wait_cnt;
+	struct esas2r_mem_desc inbound_list_md;
+	struct esas2r_mem_desc outbound_list_md;
+	struct esas2r_disc_context disc_ctx;
+	u8 *disc_buffer;
+	u32 disc_start_time;
+	u32 disc_wait_time;
+	u32 flash_ver;
+	char flash_rev[16];
+	char fw_rev[16];
+	char image_type[16];
+	struct esas2r_flash_context flash_context;
+	u32 num_targets_backend;
+	u32 ioctl_tunnel;
+	struct tasklet_struct tasklet;
+	struct pci_dev *pcid;
+	struct Scsi_Host *host;
+	unsigned int index;
+	char name[32];
+	struct timer_list timer;
+	struct esas2r_firmware firmware;
+	wait_queue_head_t nvram_waiter;
+	int nvram_command_done;
+	wait_queue_head_t fm_api_waiter;
+	int fm_api_command_done;
+	wait_queue_head_t vda_waiter;
+	int vda_command_done;
+	u8 *vda_buffer;
+	u64 ppvda_buffer;
+#define VDA_BUFFER_HEADER_SZ (offsetof(struct atto_ioctl_vda, data))
+#define VDA_MAX_BUFFER_SIZE  (0x40000 + VDA_BUFFER_HEADER_SZ)
+	wait_queue_head_t fs_api_waiter;
+	int fs_api_command_done;
+	u64 ppfs_api_buffer;
+	u8 *fs_api_buffer;
+	u32 fs_api_buffer_size;
+	wait_queue_head_t buffered_ioctl_waiter;
+	int buffered_ioctl_done;
+	int uncached_size;
+	struct workqueue_struct *fw_event_q;
+	struct list_head fw_event_list;
+	spinlock_t fw_event_lock;
+	u8 fw_events_off;                       /* if '1', then ignore events */
+	char fw_event_q_name[ESAS2R_KOBJ_NAME_LEN];
+	/*
+	 * intr_mode stores the interrupt mode currently being used by this
+	 * adapter.  It starts out as the interrupt_mode module parameter,
+	 * but may be downgraded if the requested mode cannot be enabled.
+	 */
+	int intr_mode;
+#define INTR_MODE_LEGACY 0
+#define INTR_MODE_MSI    1
+#define INTR_MODE_MSIX   2
+	struct esas2r_sg_context fm_api_sgc;
+	u8 *save_offset;
+	struct list_head vrq_mds_head;
+	struct esas2r_mem_desc *vrq_mds;
+	int num_vrqs;
+	struct semaphore fm_api_semaphore;
+	struct semaphore fs_api_semaphore;
+	struct semaphore nvram_semaphore;
+	struct atto_ioctl *local_atto_ioctl;
+	u8 fw_coredump_buff[ESAS2R_FWCOREDUMP_SZ];
+	unsigned int sysfs_fw_created:1;
+	unsigned int sysfs_fs_created:1;
+	unsigned int sysfs_vda_created:1;
+	unsigned int sysfs_hw_created:1;
+	unsigned int sysfs_live_nvram_created:1;
+	unsigned int sysfs_default_nvram_created:1;
+};
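+
+/*
+ * Editor's sketch (not part of this commit): intr_mode above records the
+ * interrupt mode the adapter actually achieved, which may be a downgrade
+ * from the interrupt_mode module parameter.  A minimal fallback chain for
+ * the 3.12-era PCI API might look like the hypothetical helper below; the
+ * real driver's probe logic may differ in detail.
+ */
+#if 0	/* illustrative only */
+static void example_claim_interrupts(struct esas2r_adapter *a, int requested)
+{
+	a->intr_mode = INTR_MODE_LEGACY;	/* safe default: INTx */
+
+	/* pci_enable_msi() returns 0 on success */
+	if (requested == INTR_MODE_MSI && !pci_enable_msi(a->pcid))
+		a->intr_mode = INTR_MODE_MSI;
+}
+#endif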
+
+/*
+ * Function Declarations
+ * SCSI functions
+ */
+int esas2r_release(struct Scsi_Host *);
+const char *esas2r_info(struct Scsi_Host *);
+int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
+			struct esas2r_sas_nvram *data);
+int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg);
+int esas2r_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
+u8 handle_hba_ioctl(struct esas2r_adapter *a,
+		    struct atto_ioctl *ioctl_hba);
+int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd);
+int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh);
+int esas2r_slave_alloc(struct scsi_device *dev);
+int esas2r_slave_configure(struct scsi_device *dev);
+void esas2r_slave_destroy(struct scsi_device *dev);
+int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason);
+int esas2r_change_queue_type(struct scsi_device *dev, int type);
+long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
+
+/* SCSI error handler (eh) functions */
+int esas2r_eh_abort(struct scsi_cmnd *cmd);
+int esas2r_device_reset(struct scsi_cmnd *cmd);
+int esas2r_host_reset(struct scsi_cmnd *cmd);
+int esas2r_bus_reset(struct scsi_cmnd *cmd);
+int esas2r_target_reset(struct scsi_cmnd *cmd);
+
+/* Internal functions */
+int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
+			int index);
+int esas2r_cleanup(struct Scsi_Host *host);
+int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count);
+int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
+		    int count);
+int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count);
+int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
+		     int count);
+int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count);
+int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
+		    int count);
+void esas2r_adapter_tasklet(unsigned long context);
+irqreturn_t esas2r_interrupt(int irq, void *dev_id);
+irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id);
+void esas2r_kickoff_timer(struct esas2r_adapter *a);
+int esas2r_suspend(struct pci_dev *pcid, pm_message_t state);
+int esas2r_resume(struct pci_dev *pcid);
+void esas2r_fw_event_off(struct esas2r_adapter *a);
+void esas2r_fw_event_on(struct esas2r_adapter *a);
+bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
+			struct esas2r_sas_nvram *nvram);
+void esas2r_nvram_get_defaults(struct esas2r_adapter *a,
+			       struct esas2r_sas_nvram *nvram);
+void esas2r_complete_request_cb(struct esas2r_adapter *a,
+				struct esas2r_request *rq);
+void esas2r_reset_detected(struct esas2r_adapter *a);
+void esas2r_target_state_changed(struct esas2r_adapter *ha, u16 targ_id,
+				 u8 state);
+int esas2r_req_status_to_error(u8 req_stat);
+void esas2r_kill_adapter(int i);
+void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq);
+struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a);
+u32 esas2r_get_uncached_size(struct esas2r_adapter *a);
+bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
+				void **uncached_area);
+bool esas2r_check_adapter(struct esas2r_adapter *a);
+bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll);
+void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq);
+bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
+			   struct esas2r_request *rqaux, u8 task_mgt_func);
+void esas2r_do_tasklet_tasks(struct esas2r_adapter *a);
+void esas2r_adapter_interrupt(struct esas2r_adapter *a);
+void esas2r_do_deferred_processes(struct esas2r_adapter *a);
+void esas2r_reset_bus(struct esas2r_adapter *a);
+void esas2r_reset_adapter(struct esas2r_adapter *a);
+void esas2r_timer_tick(struct esas2r_adapter *a);
+const char *esas2r_get_model_name(struct esas2r_adapter *a);
+const char *esas2r_get_model_name_short(struct esas2r_adapter *a);
+u32 esas2r_stall_execution(struct esas2r_adapter *a, u32 start_time,
+			   u32 *delay);
+void esas2r_build_flash_req(struct esas2r_adapter *a,
+			    struct esas2r_request *rq,
+			    u8 sub_func,
+			    u8 cksum,
+			    u32 addr,
+			    u32 length);
+void esas2r_build_mgt_req(struct esas2r_adapter *a,
+			  struct esas2r_request *rq,
+			  u8 sub_func,
+			  u8 scan_gen,
+			  u16 dev_index,
+			  u32 length,
+			  void *data);
+void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq);
+void esas2r_build_cli_req(struct esas2r_adapter *a,
+			  struct esas2r_request *rq,
+			  u32 length,
+			  u32 cmd_rsp_len);
+void esas2r_build_ioctl_req(struct esas2r_adapter *a,
+			    struct esas2r_request *rq,
+			    u32 length,
+			    u8 sub_func);
+void esas2r_build_cfg_req(struct esas2r_adapter *a,
+			  struct esas2r_request *rq,
+			  u8 sub_func,
+			  u32 length,
+			  void *data);
+void esas2r_power_down(struct esas2r_adapter *a);
+bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll);
+void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq);
+u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo);
+bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
+			     struct esas2r_ioctl_fs *fs,
+			     struct esas2r_request *rq,
+			     struct esas2r_sg_context *sgc);
+bool esas2r_read_flash_block(struct esas2r_adapter *a, void *to, u32 from,
+			     u32 size);
+bool esas2r_read_mem_block(struct esas2r_adapter *a, void *to, u32 from,
+			   u32 size);
+bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
+		   struct esas2r_request *rq, struct esas2r_sg_context *sgc);
+void esas2r_force_interrupt(struct esas2r_adapter *a);
+void esas2r_local_start_request(struct esas2r_adapter *a,
+				struct esas2r_request *rq);
+void esas2r_process_adapter_reset(struct esas2r_adapter *a);
+void esas2r_complete_request(struct esas2r_adapter *a,
+			     struct esas2r_request *rq);
+void esas2r_dummy_complete(struct esas2r_adapter *a,
+			   struct esas2r_request *rq);
+void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq);
+void esas2r_start_vda_request(struct esas2r_adapter *a,
+			      struct esas2r_request *rq);
+bool esas2r_read_flash_rev(struct esas2r_adapter *a);
+bool esas2r_read_image_type(struct esas2r_adapter *a);
+bool esas2r_nvram_read_direct(struct esas2r_adapter *a);
+bool esas2r_nvram_validate(struct esas2r_adapter *a);
+void esas2r_nvram_set_defaults(struct esas2r_adapter *a);
+bool esas2r_print_flash_rev(struct esas2r_adapter *a);
+void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt);
+bool esas2r_init_msgs(struct esas2r_adapter *a);
+bool esas2r_is_adapter_present(struct esas2r_adapter *a);
+void esas2r_nuxi_mgt_data(u8 function, void *data);
+void esas2r_nuxi_cfg_data(u8 function, void *data);
+void esas2r_nuxi_ae_data(union atto_vda_ae *ae);
+void esas2r_reset_chip(struct esas2r_adapter *a);
+void esas2r_log_request_failure(struct esas2r_adapter *a,
+				struct esas2r_request *rq);
+void esas2r_polled_interrupt(struct esas2r_adapter *a);
+bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
+			  u8 status);
+bool esas2r_build_sg_list_sge(struct esas2r_adapter *a,
+			      struct esas2r_sg_context *sgc);
+bool esas2r_build_sg_list_prd(struct esas2r_adapter *a,
+			      struct esas2r_sg_context *sgc);
+void esas2r_targ_db_initialize(struct esas2r_adapter *a);
+void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify);
+void esas2r_targ_db_report_changes(struct esas2r_adapter *a);
+struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a,
+					      struct esas2r_disc_context *dc);
+struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a,
+					       struct esas2r_disc_context *dc,
+					       u8 *ident,
+					       u8 ident_len);
+void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t);
+struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a,
+						      u64 *sas_addr);
+struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a,
+						   void *identifier,
+						   u8 ident_len);
+u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id);
+struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a,
+						     u16 virt_id);
+u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a);
+void esas2r_disc_initialize(struct esas2r_adapter *a);
+void esas2r_disc_start_waiting(struct esas2r_adapter *a);
+void esas2r_disc_check_for_work(struct esas2r_adapter *a);
+void esas2r_disc_check_complete(struct esas2r_adapter *a);
+void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt);
+bool esas2r_disc_start_port(struct esas2r_adapter *a);
+void esas2r_disc_local_start_request(struct esas2r_adapter *a,
+				     struct esas2r_request *rq);
+bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str);
+bool esas2r_process_vda_ioctl(struct esas2r_adapter *a,
+			      struct atto_ioctl_vda *vi,
+			      struct esas2r_request *rq,
+			      struct esas2r_sg_context *sgc);
+void esas2r_queue_fw_event(struct esas2r_adapter *a,
+			   enum fw_event_type type,
+			   void *data,
+			   int data_sz);
+
+/* Inline functions */
+static inline u32 esas2r_lock_set_flags(volatile u32 *flags, u32 bits)
+{
+	return test_and_set_bit(ilog2(bits), (volatile unsigned long *)flags);
+}
+
+static inline u32 esas2r_lock_clear_flags(volatile u32 *flags, u32 bits)
+{
+	return test_and_clear_bit(ilog2(bits),
+				  (volatile unsigned long *)flags);
+}
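+
+/*
+ * Editor's sketch (not part of this commit): test_and_set_bit() and
+ * test_and_clear_bit() take a bit index and return the bit's previous
+ * value (0 or 1), while the AF_ and AF2_ values above are single-bit
+ * masks, so ilog2() converts mask to index (e.g. ilog2(0x00080000) == 19).
+ * The standalone fragment below shows the same conversion in plain C; it
+ * is an illustration, not driver code.
+ */
+#if 0	/* illustrative only; compiles on its own */
+#include <assert.h>
+
+static unsigned int mask_to_index(unsigned int mask)
+{
+	unsigned int ix = 0;
+
+	while (mask >>= 1)	/* shift the lone set bit down to bit 0 */
+		ix++;
+	return ix;
+}
+
+int main(void)
+{
+	assert(mask_to_index(0x00080000) == 19);	/* AF_TASKLET_SCHEDULED */
+	assert(mask_to_index(0x00000001) == 0);		/* AF_PORT_CHANGE */
+	return 0;
+}
+#endif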
+
+/* Allocate a chip scatter/gather list entry */
+static inline struct esas2r_mem_desc *esas2r_alloc_sgl(struct esas2r_adapter *a)
+{
+	unsigned long flags;
+	struct list_head *sgl;
+	struct esas2r_mem_desc *result = NULL;
+
+	spin_lock_irqsave(&a->sg_list_lock, flags);
+	if (likely(!list_empty(&a->free_sg_list_head))) {
+		sgl = a->free_sg_list_head.next;
+		result = list_entry(sgl, struct esas2r_mem_desc, next_desc);
+		list_del_init(sgl);
+	}
+	spin_unlock_irqrestore(&a->sg_list_lock, flags);
+
+	return result;
+}
+
+/* Initialize a scatter/gather context */
+static inline void esas2r_sgc_init(struct esas2r_sg_context *sgc,
+				   struct esas2r_adapter *a,
+				   struct esas2r_request *rq,
+				   struct atto_vda_sge *first)
+{
+	sgc->adapter = a;
+	sgc->first_req = rq;
+
+	/*
+	 * set the limit pointer such that an SGE pointer above this value
+	 * would be the first one to overflow the SGL.
+	 */
+	sgc->sge.a64.limit = (struct atto_vda_sge *)
+			     ((u8 *)rq->vrq
+			      + (sizeof(union atto_vda_req) / 8)
+			      - sizeof(struct atto_vda_sge));
+	if (first) {
+		sgc->sge.a64.last = sgc->sge.a64.curr = first;
+		rq->vrq->scsi.sg_list_offset =
+			(u8)((u8 *)first - (u8 *)rq->vrq);
+	} else {
+		sgc->sge.a64.last =
+			sgc->sge.a64.curr = &rq->vrq->scsi.u.sge[0];
+		rq->vrq->scsi.sg_list_offset =
+			(u8)offsetof(struct atto_vda_scsi_req, u.sge);
+	}
+	sgc->sge.a64.chain = NULL;
+}
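+
+/*
+ * Editor's note (not part of this commit): with the limit pointer set as
+ * above, an SGL builder can bounds-check each append with one pointer
+ * comparison.  The fragment below is a hypothetical illustration of that
+ * check, not the body of esas2r_build_sg_list_sge() (declared earlier).
+ */
+#if 0	/* illustrative only */
+	if (sgc->sge.a64.curr > sgc->sge.a64.limit) {
+		/* inline SGL is full; allocate and chain a new list here */
+	}
+#endif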
+
+static inline void esas2r_rq_init_request(struct esas2r_request *rq,
+					  struct esas2r_adapter *a)
+{
+	union atto_vda_req *vrq = rq->vrq;
+	u32 handle;
+
+	INIT_LIST_HEAD(&rq->sg_table_head);
+	rq->data_buf = (void *)(vrq + 1);
+	rq->interrupt_cb = NULL;
+	rq->comp_cb = esas2r_complete_request_cb;
+	rq->flags = 0;
+	rq->timeout = 0;
+	rq->req_stat = RS_PENDING;
+	rq->req_type = RT_INI_REQ;
+
+	/* clear the outbound response */
+	rq->func_rsp.dwords[0] = 0;
+	rq->func_rsp.dwords[1] = 0;
+
+	/*
+	 * Clear the size of the VDA request.  esas2r_build_sg_list() only
+	 * allows the size of the request to grow, and some management
+	 * requests go through there twice, with the second pass setting a
+	 * smaller request size.  If nothing modifies the size, it is set to
+	 * that of the entire VDA request.
+	 */
+	rq->vda_req_sz = RQ_SIZE_DEFAULT;
+
+	/* req_table entry should be NULL at this point - if not, halt */
+
+	if (a->req_table[LOWORD(vrq->scsi.handle)])
+		esas2r_bugon();
+
+	/* fill in the table for this handle so we can get back to the
+	 * request.
+	 */
+	a->req_table[LOWORD(vrq->scsi.handle)] = rq;
+
+	/*
+	 * add a reference number to the handle to make it unique (until it
+	 * wraps of course) while preserving the upper word
+	 */
+
+	handle = be32_to_cpu(vrq->scsi.handle) & 0xFFFF0000;
+	vrq->scsi.handle = cpu_to_be32(handle + a->cmd_ref_no++);
+
+	/*
+	 * The following formats a SCSI request.  The caller can override it
+	 * as necessary.  clear_vda_request() can be called to clear the VDA
+	 * request for another type of request.
+	 */
+	vrq->scsi.function = VDA_FUNC_SCSI;
+	vrq->scsi.sense_len = SENSE_DATA_SZ;
+
+	/* clear out sg_list_offset and chain_offset */
+	vrq->scsi.sg_list_offset = 0;
+	vrq->scsi.chain_offset = 0;
+	vrq->scsi.flags = 0;
+	vrq->scsi.reserved = 0;
+
+	/* set the sense buffer to be the data payload buffer */
+	vrq->scsi.ppsense_buf
+		= cpu_to_le64(rq->vrq_md->phys_addr +
+			      sizeof(union atto_vda_req));
+}
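+
+/*
+ * Editor's sketch (not part of this commit): the handle update above keeps
+ * the upper word of the logical handle and replaces the lower word with a
+ * rolling reference number.  The standalone fragment below demonstrates
+ * the same masking arithmetic on host-order values; the byte-order
+ * conversions are omitted for clarity.
+ */
+#if 0	/* illustrative only; compiles on its own */
+#include <assert.h>
+#include <stdint.h>
+
+int main(void)
+{
+	uint32_t handle = 0x002A1234;	/* upper word 0x002A, stale ref 0x1234 */
+	uint16_t cmd_ref_no = 0x5678;
+
+	handle = (handle & 0xFFFF0000u) + cmd_ref_no++;
+	assert(handle == 0x002A5678);	/* upper word preserved, ref refreshed */
+	return 0;
+}
+#endif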
+
+static inline void esas2r_rq_free_sg_lists(struct esas2r_request *rq,
+					   struct esas2r_adapter *a)
+{
+	unsigned long flags;
+
+	if (list_empty(&rq->sg_table_head))
+		return;
+
+	spin_lock_irqsave(&a->sg_list_lock, flags);
+	list_splice_tail_init(&rq->sg_table_head, &a->free_sg_list_head);
+	spin_unlock_irqrestore(&a->sg_list_lock, flags);
+}
+
+static inline void esas2r_rq_destroy_request(struct esas2r_request *rq,
+					     struct esas2r_adapter *a)
+{
+	esas2r_rq_free_sg_lists(rq, a);
+	a->req_table[LOWORD(rq->vrq->scsi.handle)] = NULL;
+	rq->data_buf = NULL;
+}
+
+static inline bool esas2r_is_tasklet_pending(struct esas2r_adapter *a)
+{
+	return (a->flags & (AF_BUSRST_NEEDED | AF_BUSRST_DETECTED
+			    | AF_CHPRST_NEEDED | AF_CHPRST_DETECTED
+			    | AF_PORT_CHANGE))
+	       ? true : false;
+}
+
+/*
+ * Build the scatter/gather list for an I/O request according to the
+ * specifications placed in the esas2r_sg_context.  The caller must initialize
+ * struct esas2r_sg_context prior to the initial call by calling
+ * esas2r_sgc_init().
+ */
+static inline bool esas2r_build_sg_list(struct esas2r_adapter *a,
+					struct esas2r_request *rq,
+					struct esas2r_sg_context *sgc)
+{
+	if (unlikely(le32_to_cpu(rq->vrq->scsi.length) == 0))
+		return true;
+
+	return (*a->build_sgl)(a, sgc);
+}
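+
+/*
+ * Editor's note (not part of this commit): build_sgl is a per-adapter
+ * function pointer, so the helper above dispatches to whichever SGL
+ * format the adapter uses -- esas2r_build_sg_list_sge() or
+ * esas2r_build_sg_list_prd(), both declared earlier.  The assignment
+ * below is only a hypothetical example of such a selection; the real
+ * driver makes this choice in its init code.
+ */
+#if 0	/* illustrative only */
+	a->build_sgl = (a->flags & AF_LEGACY_SGE_MODE)
+		       ? esas2r_build_sg_list_sge
+		       : esas2r_build_sg_list_prd;
+#endif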
+
+static inline void esas2r_disable_chip_interrupts(struct esas2r_adapter *a)
+{
+	if (atomic_inc_return(&a->dis_ints_cnt) == 1)
+		esas2r_write_register_dword(a, MU_INT_MASK_OUT,
+					    ESAS2R_INT_DIS_MASK);
+}
+
+static inline void esas2r_enable_chip_interrupts(struct esas2r_adapter *a)
+{
+	if (atomic_dec_return(&a->dis_ints_cnt) == 0)
+		esas2r_write_register_dword(a, MU_INT_MASK_OUT,
+					    ESAS2R_INT_ENB_MASK);
+}
+
+/* Schedule a TASKLET to perform non-interrupt tasks that may require delays
+ * or long completion times.
+ */
+static inline void esas2r_schedule_tasklet(struct esas2r_adapter *a)
+{
+	/* make sure we don't schedule twice */
+	if (!(esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED) &
+	      ilog2(AF_TASKLET_SCHEDULED)))
+		tasklet_hi_schedule(&a->tasklet);
+}
+
+static inline void esas2r_enable_heartbeat(struct esas2r_adapter *a)
+{
+	if (!(a->flags & (AF_DEGRADED_MODE | AF_CHPRST_PENDING))
+	    && (a->nvram->options2 & SASNVR2_HEARTBEAT))
+		esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT_ENB);
+	else
+		esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB);
+}
+
+static inline void esas2r_disable_heartbeat(struct esas2r_adapter *a)
+{
+	esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB);
+	esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT);
+}
+
+/* Set the initial state for resetting the adapter on the next pass through
+ * esas2r_do_deferred.
+ */
+static inline void esas2r_local_reset_adapter(struct esas2r_adapter *a)
+{
+	esas2r_disable_heartbeat(a);
+
+	esas2r_lock_set_flags(&a->flags, AF_CHPRST_NEEDED);
+	esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING);
+	esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING);
+}
+
+/* See if an interrupt is pending on the adapter. */
+static inline bool esas2r_adapter_interrupt_pending(struct esas2r_adapter *a)
+{
+	u32 intstat;
+
+	if (a->int_mask == 0)
+		return false;
+
+	intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
+
+	if ((intstat & a->int_mask) == 0)
+		return false;
+
+	esas2r_disable_chip_interrupts(a);
+
+	a->int_stat = intstat;
+	a->int_mask = 0;
+
+	return true;
+}
+
+static inline u16 esas2r_targ_get_id(struct esas2r_target *t,
+				     struct esas2r_adapter *a)
+{
+	return (u16)(uintptr_t)(t - a->targetdb);
+}
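+
+/*
+ * Editor's note (not part of this commit): pointer subtraction on
+ * struct esas2r_target pointers already yields an element count, so no
+ * division by sizeof(*t) is needed above, and the ID round-trips:
+ * t == a->targetdb + esas2r_targ_get_id(t, a).
+ */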
+
+/*  Build and start an asynchronous event request */
+static inline void esas2r_start_ae_request(struct esas2r_adapter *a,
+					   struct esas2r_request *rq)
+{
+	unsigned long flags;
+
+	esas2r_build_ae_req(a, rq);
+
+	spin_lock_irqsave(&a->queue_lock, flags);
+	esas2r_start_vda_request(a, rq);
+	spin_unlock_irqrestore(&a->queue_lock, flags);
+}
+
+static inline void esas2r_comp_list_drain(struct esas2r_adapter *a,
+					  struct list_head *comp_list)
+{
+	struct esas2r_request *rq;
+	struct list_head *element, *next;
+
+	list_for_each_safe(element, next, comp_list) {
+		rq = list_entry(element, struct esas2r_request, comp_list);
+		list_del_init(element);
+		esas2r_complete_request(a, rq);
+	}
+}
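+
+/*
+ * Editor's note (not part of this commit): the _safe iterator is required
+ * above because list_del_init() unlinks the current node; plain
+ * list_for_each() would then chase a pointer out of the removed entry.
+ * For comparison, the equivalent list_for_each_entry_safe() form:
+ */
+#if 0	/* illustrative only */
+	struct esas2r_request *rq, *next_rq;
+
+	list_for_each_entry_safe(rq, next_rq, comp_list, comp_list) {
+		list_del_init(&rq->comp_list);
+		esas2r_complete_request(a, rq);
+	}
+#endif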
+
+/* sysfs handlers */
+extern struct bin_attribute bin_attr_fw;
+extern struct bin_attribute bin_attr_fs;
+extern struct bin_attribute bin_attr_vda;
+extern struct bin_attribute bin_attr_hw;
+extern struct bin_attribute bin_attr_live_nvram;
+extern struct bin_attribute bin_attr_default_nvram;
+
+#endif /* ESAS2R_H */

+ 1189 - 0
drivers/scsi/esas2r/esas2r_disc.c

@@ -0,0 +1,1189 @@
+/*
+ *  linux/drivers/scsi/esas2r/esas2r_disc.c
+ *      esas2r device discovery routines
+ *
+ *  Copyright (c) 2001-2013 ATTO Technology, Inc.
+ *  (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  NO WARRANTY
+ *  THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ *  CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ *  LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ *  MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ *  solely responsible for determining the appropriateness of using and
+ *  distributing the Program and assumes all risks associated with its
+ *  exercise of rights under this Agreement, including but not limited to
+ *  the risks and costs of program errors, damage to or loss of data,
+ *  programs or equipment, and unavailability or interruption of operations.
+ *
+ *  DISCLAIMER OF LIABILITY
+ *  NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ *  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ *  DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ *  USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ *  HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ *  02110-1301, USA.
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "esas2r.h"
+
+/* Miscellaneous internal discovery routines */
+static void esas2r_disc_abort(struct esas2r_adapter *a,
+			      struct esas2r_request *rq);
+static bool esas2r_disc_continue(struct esas2r_adapter *a,
+				 struct esas2r_request *rq);
+static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a);
+static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr);
+static bool esas2r_disc_start_request(struct esas2r_adapter *a,
+				      struct esas2r_request *rq);
+
+/* Internal discovery routines that process the states */
+static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
+				       struct esas2r_request *rq);
+static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
+					  struct esas2r_request *rq);
+static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
+				struct esas2r_request *rq);
+static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
+				   struct esas2r_request *rq);
+static bool esas2r_disc_part_info(struct esas2r_adapter *a,
+				  struct esas2r_request *rq);
+static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
+				     struct esas2r_request *rq);
+static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
+					  struct esas2r_request *rq);
+static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
+					     struct esas2r_request *rq);
+static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
+					  struct esas2r_request *rq);
+static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
+					     struct esas2r_request *rq);
+static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
+				      struct esas2r_request *rq);
+static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
+					 struct esas2r_request *rq);
+
+void esas2r_disc_initialize(struct esas2r_adapter *a)
+{
+	struct esas2r_sas_nvram *nvr = a->nvram;
+
+	esas2r_trace_enter();
+
+	esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
+	esas2r_lock_clear_flags(&a->flags2, AF2_DEV_SCAN);
+	esas2r_lock_clear_flags(&a->flags2, AF2_DEV_CNT_OK);
+
+	a->disc_start_time = jiffies_to_msecs(jiffies);
+	a->disc_wait_time = nvr->dev_wait_time * 1000;
+	a->disc_wait_cnt = nvr->dev_wait_count;
+
+	if (a->disc_wait_cnt > ESAS2R_MAX_TARGETS)
+		a->disc_wait_cnt = ESAS2R_MAX_TARGETS;
+
+	/*
+	 * If we are doing chip reset or power management processing, always
+	 * wait for devices.  Use the NVRAM device count if it is greater
+	 * than the previously discovered device count.
+	 */
+
+	esas2r_hdebug("starting discovery...");
+
+	a->general_req.interrupt_cx = NULL;
+
+	if (a->flags & (AF_CHPRST_DETECTED | AF_POWER_MGT)) {
+		if (a->prev_dev_cnt == 0) {
+			/* Don't bother waiting if there is nothing to wait
+			 * for.
+			 */
+			a->disc_wait_time = 0;
+		} else {
+			/*
+			 * Set the device wait count to what was previously
+			 * found.  We don't care if the user only configured
+			 * a time because we know the exact count to wait for.
+			 * There is no need to honor the user's wishes to
+			 * always wait the full time.
+			 */
+			a->disc_wait_cnt = a->prev_dev_cnt;
+
+			/*
+			 * Bump the minimum wait time to 15 seconds since the
+			 * default is 3 (a system boot or the boot driver
+			 * usually buys us more time).
+			 */
+			if (a->disc_wait_time < 15000)
+				a->disc_wait_time = 15000;
+		}
+	}
+
+	esas2r_trace("disc wait count: %d", a->disc_wait_cnt);
+	esas2r_trace("disc wait time: %d", a->disc_wait_time);
+
+	if (a->disc_wait_time == 0)
+		esas2r_disc_check_complete(a);
+
+	esas2r_trace_exit();
+}
+
+void esas2r_disc_start_waiting(struct esas2r_adapter *a)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&a->mem_lock, flags);
+
+	if (a->disc_ctx.disc_evt)
+		esas2r_disc_start_port(a);
+
+	spin_unlock_irqrestore(&a->mem_lock, flags);
+}
+
+void esas2r_disc_check_for_work(struct esas2r_adapter *a)
+{
+	struct esas2r_request *rq = &a->general_req;
+
+	/* service any pending interrupts first */
+
+	esas2r_polled_interrupt(a);
+
+	/*
+	 * Interrupt processing may have queued up a discovery event, so go
+	 * see if we have one to start.  We couldn't start it in the ISR
+	 * since polled discovery would cause a deadlock there.
+	 */
+
+	esas2r_disc_start_waiting(a);
+
+	if (rq->interrupt_cx == NULL)
+		return;
+
+	if (rq->req_stat == RS_STARTED
+	    && rq->timeout <= RQ_MAX_TIMEOUT) {
+		/* wait for the current discovery request to complete. */
+		esas2r_wait_request(a, rq);
+
+		if (rq->req_stat == RS_TIMEOUT) {
+			esas2r_disc_abort(a, rq);
+			esas2r_local_reset_adapter(a);
+			return;
+		}
+	}
+
+	if (rq->req_stat == RS_PENDING
+	    || rq->req_stat == RS_STARTED)
+		return;
+
+	esas2r_disc_continue(a, rq);
+}
+
+void esas2r_disc_check_complete(struct esas2r_adapter *a)
+{
+	unsigned long flags;
+
+	esas2r_trace_enter();
+
+	/* check to see if we should be waiting for devices */
+	if (a->disc_wait_time) {
+		u32 currtime = jiffies_to_msecs(jiffies);
+		u32 time = currtime - a->disc_start_time;
+
+		/*
+		 * Wait until the device wait time is exhausted or the device
+		 * wait count is satisfied.
+		 */
+		if (time < a->disc_wait_time
+		    && (esas2r_targ_db_get_tgt_cnt(a) < a->disc_wait_cnt
+			|| a->disc_wait_cnt == 0)) {
+			/* After three seconds of waiting, schedule a scan. */
+			if (time >= 3000
+			    && !(esas2r_lock_set_flags(&a->flags2,
+						       AF2_DEV_SCAN) &
+				 ilog2(AF2_DEV_SCAN))) {
+				spin_lock_irqsave(&a->mem_lock, flags);
+				esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
+				spin_unlock_irqrestore(&a->mem_lock, flags);
+			}
+
+			esas2r_trace_exit();
+			return;
+		}
+
+		/*
+		 * We are done waiting...we think.  Adjust the wait time to
+		 * consume events after the count is met.
+		 */
+		if (!esas2r_lock_set_flags(&a->flags2, AF2_DEV_CNT_OK))
+			a->disc_wait_time = time + 3000;
+
+		/* If we haven't done a full scan yet, do it now. */
+		if (!(esas2r_lock_set_flags(&a->flags2,
+					    AF2_DEV_SCAN) &
+		      ilog2(AF2_DEV_SCAN))) {
+			spin_lock_irqsave(&a->mem_lock, flags);
+			esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
+			spin_unlock_irqrestore(&a->mem_lock, flags);
+
+			esas2r_trace_exit();
+			return;
+		}
+
+		/*
+		 * Now, if there is still time left to consume events, continue
+		 * waiting.
+		 */
+		if (time < a->disc_wait_time) {
+			esas2r_trace_exit();
+			return;
+		}
+	} else {
+		if (!(esas2r_lock_set_flags(&a->flags2,
+					    AF2_DEV_SCAN) &
+		      ilog2(AF2_DEV_SCAN))) {
+			spin_lock_irqsave(&a->mem_lock, flags);
+			esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
+			spin_unlock_irqrestore(&a->mem_lock, flags);
+		}
+	}
+
+	/* We want to stop waiting for devices. */
+	a->disc_wait_time = 0;
+
+	if ((a->flags & AF_DISC_POLLED)
+	    && (a->flags & AF_DISC_IN_PROG)) {
+		/*
+		 * Polled discovery is still pending so continue the active
+		 * discovery until it is done.  At that point, we will stop
+		 * polled discovery and transition to interrupt driven
+		 * discovery.
+		 */
+	} else {
+		/*
+		 * Done waiting for devices.  Note that we get here immediately
+		 * after deferred waiting completes because that is interrupt
+		 * driven; i.e. There is no transition.
+		 */
+		esas2r_disc_fix_curr_requests(a);
+		esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
+
+		/*
+		 * We have deferred target state changes until now because we
+		 * don't want to report any removals (due to the first arrival)
+		 * until the device wait time expires.
+		 */
+		esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE);
+	}
+
+	esas2r_trace_exit();
+}
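+
+/*
+ * Editor's note (not part of this commit): "currtime - a->disc_start_time"
+ * above stays correct across wraparound of the 32-bit millisecond counter,
+ * since unsigned subtraction is modular.  Standalone demonstration:
+ */
+#if 0	/* illustrative only; compiles on its own */
+#include <assert.h>
+#include <stdint.h>
+
+int main(void)
+{
+	uint32_t start = 0xFFFFF000u;	/* just before the counter wraps */
+	uint32_t now = 0x00000800u;	/* just after it wraps           */
+
+	assert(now - start == 0x1800u);	/* elapsed ~6.1 s, still correct */
+	return 0;
+}
+#endif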
+
+void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt)
+{
+	struct esas2r_disc_context *dc = &a->disc_ctx;
+
+	esas2r_trace_enter();
+
+	esas2r_trace("disc_event: %d", disc_evt);
+
+	/* Accumulate the event into the discovery context. */
+	dc->disc_evt |= disc_evt;
+
+	/*
+	 * Don't start discovery before or during polled discovery.  If we
+	 * did, we could deadlock if we are already in the ISR.
+	 */
+	if (!(a->flags & (AF_CHPRST_PENDING | AF_DISC_POLLED)))
+		esas2r_disc_start_port(a);
+
+	esas2r_trace_exit();
+}
+
+bool esas2r_disc_start_port(struct esas2r_adapter *a)
+{
+	struct esas2r_request *rq = &a->general_req;
+	struct esas2r_disc_context *dc = &a->disc_ctx;
+	bool ret;
+
+	esas2r_trace_enter();
+
+	if (a->flags & AF_DISC_IN_PROG) {
+		esas2r_trace_exit();
+
+		return false;
+	}
+
+	/* If there is a discovery waiting, process it. */
+	if (dc->disc_evt) {
+		if ((a->flags & AF_DISC_POLLED)
+		    && a->disc_wait_time == 0) {
+			/*
+			 * We are doing polled discovery, but we no longer want
+			 * to wait for devices.  Stop polled discovery and
+			 * transition to interrupt driven discovery.
+			 */
+
+			esas2r_trace_exit();
+
+			return false;
+		}
+	} else {
+		/* Discovery is complete. */
+
+		esas2r_hdebug("disc done");
+
+		esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE);
+
+		esas2r_trace_exit();
+
+		return false;
+	}
+
+	/* Handle the discovery context */
+	esas2r_trace("disc_evt: %d", dc->disc_evt);
+	esas2r_lock_set_flags(&a->flags, AF_DISC_IN_PROG);
+	dc->flags = 0;
+
+	if (a->flags & AF_DISC_POLLED)
+		dc->flags |= DCF_POLLED;
+
+	rq->interrupt_cx = dc;
+	rq->req_stat = RS_SUCCESS;
+
+	/* Decode the event code */
+	if (dc->disc_evt & DCDE_DEV_SCAN) {
+		dc->disc_evt &= ~DCDE_DEV_SCAN;
+
+		dc->flags |= DCF_DEV_SCAN;
+		dc->state = DCS_BLOCK_DEV_SCAN;
+	} else if (dc->disc_evt & DCDE_DEV_CHANGE) {
+		dc->disc_evt &= ~DCDE_DEV_CHANGE;
+
+		dc->flags |= DCF_DEV_CHANGE;
+		dc->state = DCS_DEV_RMV;
+	}
+
+	/* Continue interrupt driven discovery */
+	if (!(a->flags & AF_DISC_POLLED))
+		ret = esas2r_disc_continue(a, rq);
+	else
+		ret = true;
+
+	esas2r_trace_exit();
+
+	return ret;
+}
+
+static bool esas2r_disc_continue(struct esas2r_adapter *a,
+				 struct esas2r_request *rq)
+{
+	struct esas2r_disc_context *dc =
+		(struct esas2r_disc_context *)rq->interrupt_cx;
+	bool rslt;
+
+	/* Device discovery/removal */
+	while (dc->flags & (DCF_DEV_CHANGE | DCF_DEV_SCAN)) {
+		rslt = false;
+
+		switch (dc->state) {
+		case DCS_DEV_RMV:
+
+			rslt = esas2r_disc_dev_remove(a, rq);
+			break;
+
+		case DCS_DEV_ADD:
+
+			rslt = esas2r_disc_dev_add(a, rq);
+			break;
+
+		case DCS_BLOCK_DEV_SCAN:
+
+			rslt = esas2r_disc_block_dev_scan(a, rq);
+			break;
+
+		case DCS_RAID_GRP_INFO:
+
+			rslt = esas2r_disc_raid_grp_info(a, rq);
+			break;
+
+		case DCS_PART_INFO:
+
+			rslt = esas2r_disc_part_info(a, rq);
+			break;
+
+		case DCS_PT_DEV_INFO:
+
+			rslt = esas2r_disc_passthru_dev_info(a, rq);
+			break;
+
+		case DCS_PT_DEV_ADDR:
+
+			rslt = esas2r_disc_passthru_dev_addr(a, rq);
+			break;
+
+		case DCS_DISC_DONE:
+
+			dc->flags &= ~(DCF_DEV_CHANGE | DCF_DEV_SCAN);
+			break;
+
+		default:
+
+			esas2r_bugon();
+			dc->state = DCS_DISC_DONE;
+			break;
+		}
+
+		if (rslt)
+			return true;
+	}
+
+	/* Discovery is done...for now. */
+	rq->interrupt_cx = NULL;
+
+	if (!(a->flags & AF_DISC_PENDING))
+		esas2r_disc_fix_curr_requests(a);
+
+	esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
+
+	/* Start the next discovery. */
+	return esas2r_disc_start_port(a);
+}
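+
+/*
+ * Editor's note (not part of this commit): for a full scan (DCF_DEV_SCAN),
+ * the callbacks in this file normally drive the DCS_ states forward as
+ *
+ *   DCS_BLOCK_DEV_SCAN -> DCS_RAID_GRP_INFO -> DCS_PART_INFO
+ *     -> DCS_PT_DEV_INFO -> DCS_PT_DEV_ADDR -> DCS_DISC_DONE
+ *
+ * with RS_SCAN_GEN responses rewinding to an earlier state when the
+ * firmware's scan generation changes mid-walk, and device-change events
+ * (DCF_DEV_CHANGE) entering at DCS_DEV_RMV instead.
+ */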
+
+static bool esas2r_disc_start_request(struct esas2r_adapter *a,
+				      struct esas2r_request *rq)
+{
+	unsigned long flags;
+
+	/* Set the timeout to a minimum value. */
+	if (rq->timeout < ESAS2R_DEFAULT_TMO)
+		rq->timeout = ESAS2R_DEFAULT_TMO;
+
+	/*
+	 * Override the request type to distinguish discovery requests.  If we
+	 * end up deferring the request, esas2r_disc_local_start_request()
+	 * will be called to restart it.
+	 */
+	rq->req_type = RT_DISC_REQ;
+
+	spin_lock_irqsave(&a->queue_lock, flags);
+
+	if (!(a->flags & (AF_CHPRST_PENDING | AF_FLASHING)))
+		esas2r_disc_local_start_request(a, rq);
+	else
+		list_add_tail(&rq->req_list, &a->defer_list);
+
+	spin_unlock_irqrestore(&a->queue_lock, flags);
+
+	return true;
+}
+
+void esas2r_disc_local_start_request(struct esas2r_adapter *a,
+				     struct esas2r_request *rq)
+{
+	esas2r_trace_enter();
+
+	list_add_tail(&rq->req_list, &a->active_list);
+
+	esas2r_start_vda_request(a, rq);
+
+	esas2r_trace_exit();
+}
+
+static void esas2r_disc_abort(struct esas2r_adapter *a,
+			      struct esas2r_request *rq)
+{
+	struct esas2r_disc_context *dc =
+		(struct esas2r_disc_context *)rq->interrupt_cx;
+
+	esas2r_trace_enter();
+
+	/* abort the current discovery */
+
+	dc->state = DCS_DISC_DONE;
+
+	esas2r_trace_exit();
+}
+
+static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
+				       struct esas2r_request *rq)
+{
+	struct esas2r_disc_context *dc =
+		(struct esas2r_disc_context *)rq->interrupt_cx;
+	bool rslt;
+
+	esas2r_trace_enter();
+
+	esas2r_rq_init_request(rq, a);
+
+	esas2r_build_mgt_req(a,
+			     rq,
+			     VDAMGT_DEV_SCAN,
+			     0,
+			     0,
+			     0,
+			     NULL);
+
+	rq->comp_cb = esas2r_disc_block_dev_scan_cb;
+
+	rq->timeout = 30000;
+	rq->interrupt_cx = dc;
+
+	rslt = esas2r_disc_start_request(a, rq);
+
+	esas2r_trace_exit();
+
+	return rslt;
+}
+
+static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
+					  struct esas2r_request *rq)
+{
+	struct esas2r_disc_context *dc =
+		(struct esas2r_disc_context *)rq->interrupt_cx;
+	unsigned long flags;
+
+	esas2r_trace_enter();
+
+	spin_lock_irqsave(&a->mem_lock, flags);
+
+	if (rq->req_stat == RS_SUCCESS)
+		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
+
+	dc->state = DCS_RAID_GRP_INFO;
+	dc->raid_grp_ix = 0;
+
+	esas2r_rq_destroy_request(rq, a);
+
+	/* continue discovery if it's interrupt driven */
+
+	if (!(dc->flags & DCF_POLLED))
+		esas2r_disc_continue(a, rq);
+
+	spin_unlock_irqrestore(&a->mem_lock, flags);
+
+	esas2r_trace_exit();
+}
+
+static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
+				      struct esas2r_request *rq)
+{
+	struct esas2r_disc_context *dc =
+		(struct esas2r_disc_context *)rq->interrupt_cx;
+	bool rslt;
+	struct atto_vda_grp_info *grpinfo;
+
+	esas2r_trace_enter();
+
+	esas2r_trace("raid_group_idx: %d", dc->raid_grp_ix);
+
+	if (dc->raid_grp_ix >= VDA_MAX_RAID_GROUPS) {
+		dc->state = DCS_DISC_DONE;
+
+		esas2r_trace_exit();
+
+		return false;
+	}
+
+	esas2r_rq_init_request(rq, a);
+
+	grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;
+
+	memset(grpinfo, 0, sizeof(struct atto_vda_grp_info));
+
+	esas2r_build_mgt_req(a,
+			     rq,
+			     VDAMGT_GRP_INFO,
+			     dc->scan_gen,
+			     0,
+			     sizeof(struct atto_vda_grp_info),
+			     NULL);
+
+	grpinfo->grp_index = dc->raid_grp_ix;
+
+	rq->comp_cb = esas2r_disc_raid_grp_info_cb;
+
+	rq->interrupt_cx = dc;
+
+	rslt = esas2r_disc_start_request(a, rq);
+
+	esas2r_trace_exit();
+
+	return rslt;
+}
+
+static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
+					 struct esas2r_request *rq)
+{
+	struct esas2r_disc_context *dc =
+		(struct esas2r_disc_context *)rq->interrupt_cx;
+	unsigned long flags;
+	struct atto_vda_grp_info *grpinfo;
+
+	esas2r_trace_enter();
+
+	spin_lock_irqsave(&a->mem_lock, flags);
+
+	if (rq->req_stat == RS_SCAN_GEN) {
+		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
+		dc->raid_grp_ix = 0;
+		goto done;
+	}
+
+	if (rq->req_stat == RS_SUCCESS) {
+		grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;
+
+		if (grpinfo->status != VDA_GRP_STAT_ONLINE
+		    && grpinfo->status != VDA_GRP_STAT_DEGRADED) {
+			/* go to the next group. */
+
+			dc->raid_grp_ix++;
+		} else {
+			memcpy(&dc->raid_grp_name[0],
+			       &grpinfo->grp_name[0],
+			       sizeof(grpinfo->grp_name));
+
+			dc->interleave = le32_to_cpu(grpinfo->interleave);
+			dc->block_size = le32_to_cpu(grpinfo->block_size);
+
+			dc->state = DCS_PART_INFO;
+			dc->part_num = 0;
+		}
+	} else {
+		if (rq->req_stat != RS_GRP_INVALID) {
+			esas2r_log(ESAS2R_LOG_WARN,
+				   "A request for RAID group info failed - "
+				   "returned with %x",
+				   rq->req_stat);
+		}
+
+		dc->dev_ix = 0;
+		dc->state = DCS_PT_DEV_INFO;
+	}
+
+done:
+
+	esas2r_rq_destroy_request(rq, a);
+
+	/* continue discovery if it's interrupt driven */
+
+	if (!(dc->flags & DCF_POLLED))
+		esas2r_disc_continue(a, rq);
+
+	spin_unlock_irqrestore(&a->mem_lock, flags);
+
+	esas2r_trace_exit();
+}
+
+static bool esas2r_disc_part_info(struct esas2r_adapter *a,
+				  struct esas2r_request *rq)
+{
+	struct esas2r_disc_context *dc =
+		(struct esas2r_disc_context *)rq->interrupt_cx;
+	bool rslt;
+	struct atto_vdapart_info *partinfo;
+
+	esas2r_trace_enter();
+
+	esas2r_trace("part_num: %d", dc->part_num);
+
+	if (dc->part_num >= VDA_MAX_PARTITIONS) {
+		dc->state = DCS_RAID_GRP_INFO;
+		dc->raid_grp_ix++;
+
+		esas2r_trace_exit();
+
+		return false;
+	}
+
+	esas2r_rq_init_request(rq, a);
+
+	partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;
+
+	memset(partinfo, 0, sizeof(struct atto_vdapart_info));
+
+	esas2r_build_mgt_req(a,
+			     rq,
+			     VDAMGT_PART_INFO,
+			     dc->scan_gen,
+			     0,
+			     sizeof(struct atto_vdapart_info),
+			     NULL);
+
+	partinfo->part_no = dc->part_num;
+
+	memcpy(&partinfo->grp_name[0],
+	       &dc->raid_grp_name[0],
+	       sizeof(partinfo->grp_name));
+
+	rq->comp_cb = esas2r_disc_part_info_cb;
+
+	rq->interrupt_cx = dc;
+
+	rslt = esas2r_disc_start_request(a, rq);
+
+	esas2r_trace_exit();
+
+	return rslt;
+}
+
+static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
+				     struct esas2r_request *rq)
+{
+	struct esas2r_disc_context *dc =
+		(struct esas2r_disc_context *)rq->interrupt_cx;
+	unsigned long flags;
+	struct atto_vdapart_info *partinfo;
+
+	esas2r_trace_enter();
+
+	spin_lock_irqsave(&a->mem_lock, flags);
+
+	if (rq->req_stat == RS_SCAN_GEN) {
+		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
+		dc->raid_grp_ix = 0;
+		dc->state = DCS_RAID_GRP_INFO;
+	} else if (rq->req_stat == RS_SUCCESS) {
+		partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;
+
+		dc->part_num = partinfo->part_no;
+
+		dc->curr_virt_id = le16_to_cpu(partinfo->target_id);
+
+		esas2r_targ_db_add_raid(a, dc);
+
+		dc->part_num++;
+	} else {
+		if (rq->req_stat != RS_PART_LAST) {
+			esas2r_log(ESAS2R_LOG_WARN,
+				   "A request for RAID group partition info "
+				   "failed - status:%d", rq->req_stat);
+		}
+
+		dc->state = DCS_RAID_GRP_INFO;
+		dc->raid_grp_ix++;
+	}
+
+	esas2r_rq_destroy_request(rq, a);
+
+	/* continue discovery if it's interrupt driven */
+
+	if (!(dc->flags & DCF_POLLED))
+		esas2r_disc_continue(a, rq);
+
+	spin_unlock_irqrestore(&a->mem_lock, flags);
+
+	esas2r_trace_exit();
+}
+
+static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
+					  struct esas2r_request *rq)
+{
+	struct esas2r_disc_context *dc =
+		(struct esas2r_disc_context *)rq->interrupt_cx;
+	bool rslt;
+	struct atto_vda_devinfo *devinfo;
+
+	esas2r_trace_enter();
+
+	esas2r_trace("dev_ix: %d", dc->dev_ix);
+
+	esas2r_rq_init_request(rq, a);
+
+	devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;
+
+	memset(devinfo, 0, sizeof(struct atto_vda_devinfo));
+
+	esas2r_build_mgt_req(a,
+			     rq,
+			     VDAMGT_DEV_PT_INFO,
+			     dc->scan_gen,
+			     dc->dev_ix,
+			     sizeof(struct atto_vda_devinfo),
+			     NULL);
+
+	rq->comp_cb = esas2r_disc_passthru_dev_info_cb;
+
+	rq->interrupt_cx = dc;
+
+	rslt = esas2r_disc_start_request(a, rq);
+
+	esas2r_trace_exit();
+
+	return rslt;
+}
+
+static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
+					     struct esas2r_request *rq)
+{
+	struct esas2r_disc_context *dc =
+		(struct esas2r_disc_context *)rq->interrupt_cx;
+	unsigned long flags;
+	struct atto_vda_devinfo *devinfo;
+
+	esas2r_trace_enter();
+
+	spin_lock_irqsave(&a->mem_lock, flags);
+
+	if (rq->req_stat == RS_SCAN_GEN) {
+		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
+		dc->dev_ix = 0;
+		dc->state = DCS_PT_DEV_INFO;
+	} else if (rq->req_stat == RS_SUCCESS) {
+		devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;
+
+		dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index);
+
+		dc->curr_virt_id = le16_to_cpu(devinfo->target_id);
+
+		if (le16_to_cpu(devinfo->features) & VDADEVFEAT_PHYS_ID) {
+			dc->curr_phys_id =
+				le16_to_cpu(devinfo->phys_target_id);
+			dc->dev_addr_type = ATTO_GDA_AT_PORT;
+			dc->state = DCS_PT_DEV_ADDR;
+
+			esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
+			esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
+		} else {
+			dc->dev_ix++;
+		}
+	} else {
+		if (rq->req_stat != RS_DEV_INVALID) {
+			esas2r_log(ESAS2R_LOG_WARN,
+				   "A request for device information failed - "
+				   "status:%d", rq->req_stat);
+		}
+
+		dc->state = DCS_DISC_DONE;
+	}
+
+	esas2r_rq_destroy_request(rq, a);
+
+	/* continue discovery if it's interrupt driven */
+
+	if (!(dc->flags & DCF_POLLED))
+		esas2r_disc_continue(a, rq);
+
+	spin_unlock_irqrestore(&a->mem_lock, flags);
+
+	esas2r_trace_exit();
+}
+
+static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
+					  struct esas2r_request *rq)
+{
+	struct esas2r_disc_context *dc =
+		(struct esas2r_disc_context *)rq->interrupt_cx;
+	bool rslt;
+	struct atto_ioctl *hi;
+	struct esas2r_sg_context sgc;
+
+	esas2r_trace_enter();
+
+	esas2r_rq_init_request(rq, a);
+
+	/* format the request. */
+
+	sgc.cur_offset = NULL;
+	sgc.get_phys_addr = (PGETPHYSADDR)esas2r_disc_get_phys_addr;
+	sgc.length = offsetof(struct atto_ioctl, data)
+		     + sizeof(struct atto_hba_get_device_address);
+
+	esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge);
+
+	esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA);
+
+	if (!esas2r_build_sg_list(a, rq, &sgc)) {
+		esas2r_rq_destroy_request(rq, a);
+
+		esas2r_trace_exit();
+
+		return false;
+	}
+
+	rq->comp_cb = esas2r_disc_passthru_dev_addr_cb;
+
+	rq->interrupt_cx = dc;
+
+	/* format the IOCTL data. */
+
+	hi = (struct atto_ioctl *)a->disc_buffer;
+
+	memset(a->disc_buffer, 0, ESAS2R_DISC_BUF_LEN);
+
+	hi->version = ATTO_VER_GET_DEV_ADDR0;
+	hi->function = ATTO_FUNC_GET_DEV_ADDR;
+	hi->flags = HBAF_TUNNEL;
+
+	hi->data.get_dev_addr.target_id = cpu_to_le32(dc->curr_phys_id);
+	hi->data.get_dev_addr.addr_type = dc->dev_addr_type;
+
+	/* start it up. */
+
+	rslt = esas2r_disc_start_request(a, rq);
+
+	esas2r_trace_exit();
+
+	return rslt;
+}
+
+static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
+					     struct esas2r_request *rq)
+{
+	struct esas2r_disc_context *dc =
+		(struct esas2r_disc_context *)rq->interrupt_cx;
+	struct esas2r_target *t = NULL;
+	unsigned long flags;
+	struct atto_ioctl *hi;
+	u16 addrlen;
+
+	esas2r_trace_enter();
+
+	spin_lock_irqsave(&a->mem_lock, flags);
+
+	hi = (struct atto_ioctl *)a->disc_buffer;
+
+	if (rq->req_stat == RS_SUCCESS
+	    && hi->status == ATTO_STS_SUCCESS) {
+		addrlen = le16_to_cpu(hi->data.get_dev_addr.addr_len);
+
+		if (dc->dev_addr_type == ATTO_GDA_AT_PORT) {
+			if (addrlen == sizeof(u64))
+				memcpy(&dc->sas_addr,
+				       &hi->data.get_dev_addr.address[0],
+				       addrlen);
+			else
+				memset(&dc->sas_addr, 0, sizeof(dc->sas_addr));
+
+			/* Get the unique identifier. */
+			dc->dev_addr_type = ATTO_GDA_AT_UNIQUE;
+
+			goto next_dev_addr;
+		} else {
+			/* Add the pass through target. */
+			if (HIBYTE(addrlen) == 0) {
+				t = esas2r_targ_db_add_pthru(a, dc,
+					&hi->data.get_dev_addr.address[0],
+					(u8)addrlen);
+
+				if (t)
+					memcpy(&t->sas_addr, &dc->sas_addr,
+					       sizeof(t->sas_addr));
+			} else {
+				/* getting the back end data failed */
+
+				esas2r_log(ESAS2R_LOG_WARN,
+					   "an error occurred retrieving the "
+					   "back end data (%s:%d)",
+					   __func__,
+					   __LINE__);
+			}
+		}
+	} else {
+		/* getting the back end data failed */
+
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "an error occurred retrieving the back end data - "
+			   "rq->req_stat:%d hi->status:%d",
+			   rq->req_stat, hi->status);
+	}
+
+	/* proceed to the next device. */
+
+	if (dc->flags & DCF_DEV_SCAN) {
+		dc->dev_ix++;
+		dc->state = DCS_PT_DEV_INFO;
+	} else if (dc->flags & DCF_DEV_CHANGE) {
+		dc->curr_targ++;
+		dc->state = DCS_DEV_ADD;
+	} else {
+		esas2r_bugon();
+	}
+
+next_dev_addr:
+	esas2r_rq_destroy_request(rq, a);
+
+	/* continue discovery if it's interrupt driven */
+
+	if (!(dc->flags & DCF_POLLED))
+		esas2r_disc_continue(a, rq);
+
+	spin_unlock_irqrestore(&a->mem_lock, flags);
+
+	esas2r_trace_exit();
+}
+
+static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr)
+{
+	struct esas2r_adapter *a = sgc->adapter;
+
+	if (sgc->length > ESAS2R_DISC_BUF_LEN)
+		esas2r_bugon();
+
+	*addr = a->uncached_phys
+		+ (u64)((u8 *)a->disc_buffer - a->uncached);
+
+	return sgc->length;
+}
+
+static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
+				   struct esas2r_request *rq)
+{
+	struct esas2r_disc_context *dc =
+		(struct esas2r_disc_context *)rq->interrupt_cx;
+	struct esas2r_target *t;
+	struct esas2r_target *t2;
+
+	esas2r_trace_enter();
+
+	/* process removals. */
+
+	for (t = a->targetdb; t < a->targetdb_end; t++) {
+		if (t->new_target_state != TS_NOT_PRESENT)
+			continue;
+
+		t->new_target_state = TS_INVALID;
+
+		/* remove the right target! */
+
+		t2 = esas2r_targ_db_find_by_virt_id(a,
+						    esas2r_targ_get_id(t, a));
+
+		if (t2)
+			esas2r_targ_db_remove(a, t2);
+	}
+
+	/* removals complete.  process arrivals. */
+
+	dc->state = DCS_DEV_ADD;
+	dc->curr_targ = a->targetdb;
+
+	esas2r_trace_exit();
+
+	return false;
+}
+
+static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
+				struct esas2r_request *rq)
+{
+	struct esas2r_disc_context *dc =
+		(struct esas2r_disc_context *)rq->interrupt_cx;
+	struct esas2r_target *t = dc->curr_targ;
+
+	if (t >= a->targetdb_end) {
+		/* done processing state changes. */
+
+		dc->state = DCS_DISC_DONE;
+	} else if (t->new_target_state == TS_PRESENT) {
+		struct atto_vda_ae_lu *luevt = &t->lu_event;
+
+		esas2r_trace_enter();
+
+		/* clear this now in case more events come in. */
+
+		t->new_target_state = TS_INVALID;
+
+		/* setup the discovery context for adding this device. */
+
+		dc->curr_virt_id = esas2r_targ_get_id(t, a);
+
+		if ((luevt->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
+		     + sizeof(struct atto_vda_ae_lu_tgt_lun_raid))
+		    && !(luevt->dwevent & VDAAE_LU_PASSTHROUGH)) {
+			dc->block_size = luevt->id.tgtlun_raid.dwblock_size;
+			dc->interleave = luevt->id.tgtlun_raid.dwinterleave;
+		} else {
+			dc->block_size = 0;
+			dc->interleave = 0;
+		}
+
+		/* determine the device type being added. */
+
+		if (luevt->dwevent & VDAAE_LU_PASSTHROUGH) {
+			if (luevt->dwevent & VDAAE_LU_PHYS_ID) {
+				dc->state = DCS_PT_DEV_ADDR;
+				dc->dev_addr_type = ATTO_GDA_AT_PORT;
+				dc->curr_phys_id = luevt->wphys_target_id;
+			} else {
+				esas2r_log(ESAS2R_LOG_WARN,
+					   "luevt->dwevent does not have the "
+					   "VDAAE_LU_PHYS_ID bit set (%s:%d)",
+					   __func__, __LINE__);
+			}
+		} else {
+			dc->raid_grp_name[0] = 0;
+
+			esas2r_targ_db_add_raid(a, dc);
+		}
+
+		esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
+		esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
+		esas2r_trace("dwevent: %d", luevt->dwevent);
+
+		esas2r_trace_exit();
+	}
+
+	if (dc->state == DCS_DEV_ADD) {
+		/* go to the next device. */
+
+		dc->curr_targ++;
+	}
+
+	return false;
+}
+
+/*
+ * When discovery is done, find all requests on defer queue and
+ * test if they need to be modified. If a target is no longer present
+ * then complete the request with RS_SEL. Otherwise, update the
+ * target_id since after a hibernate it can be a different value.
+ * VDA does not make passthrough target IDs persistent.
+ */
+static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a)
+{
+	unsigned long flags;
+	struct esas2r_target *t;
+	struct esas2r_request *rq;
+	struct list_head *element;
+
+	/* update virt_targ_id in any outstanding esas2r_requests */
+
+	spin_lock_irqsave(&a->queue_lock, flags);
+
+	list_for_each(element, &a->defer_list) {
+		rq = list_entry(element, struct esas2r_request, req_list);
+		if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
+			t = a->targetdb + rq->target_id;
+
+			if (t->target_state == TS_PRESENT)
+				rq->vrq->scsi.target_id =
+					cpu_to_le16(t->virt_targ_id);
+			else
+				rq->req_stat = RS_SEL;
+		}
+	}
+
+	spin_unlock_irqrestore(&a->queue_lock, flags);
+}

+ 1512 - 0
drivers/scsi/esas2r/esas2r_flash.c

@@ -0,0 +1,1512 @@
+/*
+ *  linux/drivers/scsi/esas2r/esas2r_flash.c
+ *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ *  Copyright (c) 2001-2013 ATTO Technology, Inc.
+ *  (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+/* local macro defs */
+#define esas2r_nvramcalc_cksum(n)     \
+	(esas2r_calc_byte_cksum((u8 *)(n), sizeof(struct esas2r_sas_nvram), \
+				SASNVR_CKSUM_SEED))
+#define esas2r_nvramcalc_xor_cksum(n)  \
+	(esas2r_calc_byte_xor_cksum((u8 *)(n), \
+				    sizeof(struct esas2r_sas_nvram), 0))
+
+#define ESAS2R_FS_DRVR_VER 2
+
+static struct esas2r_sas_nvram default_sas_nvram = {
+	{ 'E',	'S',  'A',  'S'			     }, /* signature          */
+	SASNVR_VERSION,                                 /* version            */
+	0,                                              /* checksum           */
+	31,                                             /* max_lun_for_target */
+	SASNVR_PCILAT_MAX,                              /* pci_latency        */
+	SASNVR1_BOOT_DRVR,                              /* options1           */
+	SASNVR2_HEARTBEAT   | SASNVR2_SINGLE_BUS        /* options2           */
+	| SASNVR2_SW_MUX_CTRL,
+	SASNVR_COAL_DIS,                                /* int_coalescing     */
+	SASNVR_CMDTHR_NONE,                             /* cmd_throttle       */
+	3,                                              /* dev_wait_time      */
+	1,                                              /* dev_wait_count     */
+	0,                                              /* spin_up_delay      */
+	0,                                              /* ssp_align_rate     */
+	{ 0x50, 0x01, 0x08, 0x60,                       /* sas_addr           */
+	  0x00, 0x00, 0x00, 0x00 },
+	{ SASNVR_SPEED_AUTO },                          /* phy_speed          */
+	{ SASNVR_MUX_DISABLED },                        /* SAS multiplexing   */
+	{ 0 },                                          /* phy_flags          */
+	SASNVR_SORT_SAS_ADDR,                           /* sort_type          */
+	3,                                              /* dpm_reqcmd_lmt     */
+	3,                                              /* dpm_stndby_time    */
+	0,                                              /* dpm_active_time    */
+	{ 0 },                                          /* phy_target_id      */
+	SASNVR_VSMH_DISABLED,                           /* virt_ses_mode      */
+	SASNVR_RWM_DEFAULT,                             /* read_write_mode    */
+	0,                                              /* link down timeout  */
+	{ 0 }                                           /* reserved           */
+};
+
+static u8 cmd_to_fls_func[] = {
+	0xFF,
+	VDA_FLASH_READ,
+	VDA_FLASH_BEGINW,
+	VDA_FLASH_WRITE,
+	VDA_FLASH_COMMIT,
+	VDA_FLASH_CANCEL
+};
+
+static u8 esas2r_calc_byte_xor_cksum(u8 *addr, u32 len, u8 seed)
+{
+	u32 cksum = seed;
+	u8 *p = (u8 *)&cksum;
+
+	while (len) {
+		if (((uintptr_t)addr & 3) == 0)
+			break;
+
+		cksum = cksum ^ *addr;
+		addr++;
+		len--;
+	}
+	while (len >= sizeof(u32)) {
+		cksum = cksum ^ *(u32 *)addr;
+		addr += 4;
+		len -= 4;
+	}
+	while (len--) {
+		cksum = cksum ^ *addr;
+		addr++;
+	}
+	return p[0] ^ p[1] ^ p[2] ^ p[3];
+}
+
+static u8 esas2r_calc_byte_cksum(void *addr, u32 len, u8 seed)
+{
+	u8 *p = (u8 *)addr;
+	u8 cksum = seed;
+
+	while (len--)
+		cksum = cksum + p[len];
+	return cksum;
+}
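
The two checksum helpers above differ in strategy: esas2r_calc_byte_cksum() is a plain byte sum, while esas2r_calc_byte_xor_cksum() XORs a dword at a time once the pointer is aligned and then folds the four accumulator bytes down to one, which yields the same result as XOR-ing every byte individually. A standalone userspace sketch of that equivalence (hypothetical names, not driver code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t xor_cksum_bytewise(const uint8_t *p, size_t len, uint8_t seed)
{
	uint8_t ck = seed;

	while (len--)
		ck ^= *p++;
	return ck;
}

static uint8_t xor_cksum_folded(const uint8_t *p, size_t len, uint8_t seed)
{
	uint32_t ck = seed;
	size_t i = 0;

	/* bulk: XOR four bytes at a time into the 32-bit accumulator */
	for (; i + 4 <= len; i += 4) {
		uint32_t w;

		memcpy(&w, p + i, 4);	/* alignment-safe load */
		ck ^= w;
	}
	for (; i < len; i++)		/* tail bytes */
		ck ^= p[i];

	/* fold the four accumulator bytes down to one */
	return (uint8_t)(ck ^ (ck >> 8) ^ (ck >> 16) ^ (ck >> 24));
}

int main(void)
{
	uint8_t buf[37];
	size_t i;

	for (i = 0; i < sizeof(buf); i++)
		buf[i] = (uint8_t)(i * 7 + 3);

	printf("bytewise=%02x folded=%02x\n",
	       xor_cksum_bytewise(buf, sizeof(buf), 0),
	       xor_cksum_folded(buf, sizeof(buf), 0));
	return 0;
}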
+
+/* Interrupt callback to process FM API write requests. */
+static void esas2r_fmapi_callback(struct esas2r_adapter *a,
+				  struct esas2r_request *rq)
+{
+	struct atto_vda_flash_req *vrq = &rq->vrq->flash;
+	struct esas2r_flash_context *fc =
+		(struct esas2r_flash_context *)rq->interrupt_cx;
+
+	if (rq->req_stat == RS_SUCCESS) {
+		/* Last request was successful.  See what to do now. */
+		switch (vrq->sub_func) {
+		case VDA_FLASH_BEGINW:
+			if (fc->sgc.cur_offset == NULL)
+				goto commit;
+
+			vrq->sub_func = VDA_FLASH_WRITE;
+			rq->req_stat = RS_PENDING;
+			break;
+
+		case VDA_FLASH_WRITE:
+commit:
+			vrq->sub_func = VDA_FLASH_COMMIT;
+			rq->req_stat = RS_PENDING;
+			rq->interrupt_cb = fc->interrupt_cb;
+			break;
+
+		default:
+			break;
+		}
+	}
+
+	if (rq->req_stat != RS_PENDING)
+		/*
+		 * All done.  Call the real callback to complete the FM API
+		 * request.  We should only get here if a BEGINW or WRITE
+		 * operation failed.
+		 */
+		(*fc->interrupt_cb)(a, rq);
+}
+
+/*
+ * Build a flash request based on the flash context.  The request status
+ * is filled in on an error.
+ */
+static void build_flash_msg(struct esas2r_adapter *a,
+			    struct esas2r_request *rq)
+{
+	struct esas2r_flash_context *fc =
+		(struct esas2r_flash_context *)rq->interrupt_cx;
+	struct esas2r_sg_context *sgc = &fc->sgc;
+	u8 cksum = 0;
+
+	/* calculate the checksum */
+	if (fc->func == VDA_FLASH_BEGINW) {
+		if (sgc->cur_offset)
+			cksum = esas2r_calc_byte_xor_cksum(sgc->cur_offset,
+							   sgc->length,
+							   0);
+		rq->interrupt_cb = esas2r_fmapi_callback;
+	} else {
+		rq->interrupt_cb = fc->interrupt_cb;
+	}
+	esas2r_build_flash_req(a,
+			       rq,
+			       fc->func,
+			       cksum,
+			       fc->flsh_addr,
+			       sgc->length);
+
+	esas2r_rq_free_sg_lists(rq, a);
+
+	/*
+	 * Remember the length we asked for.  We have to keep track of
+	 * the current amount done so we know how much to compare when
+	 * doing the verification phase.
+	 */
+	fc->curr_len = fc->sgc.length;
+
+	if (sgc->cur_offset) {
+		/* setup the S/G context to build the S/G table  */
+		esas2r_sgc_init(sgc, a, rq, &rq->vrq->flash.data.sge[0]);
+
+		if (!esas2r_build_sg_list(a, rq, sgc)) {
+			rq->req_stat = RS_BUSY;
+			return;
+		}
+	} else {
+		fc->sgc.length = 0;
+	}
+
+	/* update the flsh_addr to the next one to write to  */
+	fc->flsh_addr += fc->curr_len;
+}
+
+/* determine the method to process the flash request */
+static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+	/*
+	 * assume we have more to do.  if we return with the status set to
+	 * RS_PENDING, FM API tasks will continue.
+	 */
+	rq->req_stat = RS_PENDING;
+	if (a->flags & AF_DEGRADED_MODE)
+		/* not supported for now */;
+	else
+		build_flash_msg(a, rq);
+
+	return rq->req_stat == RS_PENDING;
+}
+
+/* Boot image fixer-uppers, called before downloading the image. */
+static void fix_bios(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
+{
+	struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_BIOS];
+	struct esas2r_pc_image *pi;
+	struct esas2r_boot_header *bh;
+
+	pi = (struct esas2r_pc_image *)((u8 *)fi + ch->image_offset);
+	bh =
+		(struct esas2r_boot_header *)((u8 *)pi +
+					      le16_to_cpu(pi->header_offset));
+	bh->device_id = cpu_to_le16(a->pcid->device);
+
+	/* Recalculate the checksum in the PnP header, if there is one */
+	if (pi->pnp_offset) {
+		u8 *pnp_header_bytes =
+			((u8 *)pi + le16_to_cpu(pi->pnp_offset));
+
+		/* Identifier - dword that starts at byte 10 */
+		*((u32 *)&pnp_header_bytes[10]) =
+			cpu_to_le32(MAKEDWORD(a->pcid->subsystem_vendor,
+					      a->pcid->subsystem_device));
+
+		/* Checksum - byte 9 */
+		pnp_header_bytes[9] -= esas2r_calc_byte_cksum(pnp_header_bytes,
+							      32, 0);
+	}
+
+	/* Recalculate the checksum needed by the PC */
+	pi->checksum = pi->checksum -
+		       esas2r_calc_byte_cksum((u8 *)pi, ch->length, 0);
+}
+
+static void fix_efi(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
+{
+	struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_EFI];
+	u32 len = ch->length;
+	u32 offset = ch->image_offset;
+	struct esas2r_efi_image *ei;
+	struct esas2r_boot_header *bh;
+
+	while (len) {
+		u32 thislen;
+
+		ei = (struct esas2r_efi_image *)((u8 *)fi + offset);
+		bh = (struct esas2r_boot_header *)((u8 *)ei +
+						   le16_to_cpu(
+							   ei->header_offset));
+		bh->device_id = cpu_to_le16(a->pcid->device);
+		thislen = (u32)le16_to_cpu(bh->image_length) * 512;
+
+		if (thislen > len)
+			break;
+
+		len -= thislen;
+		offset += thislen;
+	}
+}
+
+/* Complete a FM API request with the specified status. */
+static bool complete_fmapi_req(struct esas2r_adapter *a,
+			       struct esas2r_request *rq, u8 fi_stat)
+{
+	struct esas2r_flash_context *fc =
+		(struct esas2r_flash_context *)rq->interrupt_cx;
+	struct esas2r_flash_img *fi = fc->fi;
+
+	fi->status = fi_stat;
+	fi->driver_error = rq->req_stat;
+	rq->interrupt_cb = NULL;
+	rq->req_stat = RS_SUCCESS;
+
+	if (fi_stat != FI_STAT_IMG_VER)
+		memset(fc->scratch, 0, FM_BUF_SZ);
+
+	esas2r_enable_heartbeat(a);
+	esas2r_lock_clear_flags(&a->flags, AF_FLASH_LOCK);
+	return false;
+}
+
+/* Process each phase of the flash download process. */
+static void fw_download_proc(struct esas2r_adapter *a,
+			     struct esas2r_request *rq)
+{
+	struct esas2r_flash_context *fc =
+		(struct esas2r_flash_context *)rq->interrupt_cx;
+	struct esas2r_flash_img *fi = fc->fi;
+	struct esas2r_component_header *ch;
+	u32 len;
+	u8 *p, *q;
+
+	/* If the previous operation failed, just return. */
+	if (rq->req_stat != RS_SUCCESS)
+		goto error;
+
+	/*
+	 * If an upload just completed and the compare length is non-zero,
+	 * then we just read back part of the image we just wrote.  verify the
+	 * section and continue reading until the entire image is verified.
+	 */
+	if (fc->func == VDA_FLASH_READ
+	    && fc->cmp_len) {
+		ch = &fi->cmp_hdr[fc->comp_typ];
+
+		p = fc->scratch;
+		q = (u8 *)fi                    /* start of the whole gob     */
+		    + ch->image_offset          /* start of the current image */
+		    + ch->length                /* end of the current image   */
+		    - fc->cmp_len;              /* where we are now           */
+
+		/*
+		 * NOTE - curr_len is the exact count of bytes for the read
+		 *        even when the end is read and it's not a full buffer
+		 */
+		for (len = fc->curr_len; len; len--)
+			if (*p++ != *q++)
+				goto error;
+
+		fc->cmp_len -= fc->curr_len; /* # left to compare    */
+
+		/* Update fc and determine the length for the next upload */
+		if (fc->cmp_len > FM_BUF_SZ)
+			fc->sgc.length = FM_BUF_SZ;
+		else
+			fc->sgc.length = fc->cmp_len;
+
+		fc->sgc.cur_offset = fc->sgc_offset +
+				     ((u8 *)fc->scratch - (u8 *)fi);
+	}
+
+	/*
+	 * This code uses a 'while' statement since the next component may
+	 * have a length of zero.  This can happen since some components are
+	 * not required.  At the end of this 'while' we set up the length
+	 * for the next request, and therefore sgc.length can be 0.
+	 */
+	while (fc->sgc.length == 0) {
+		ch = &fi->cmp_hdr[fc->comp_typ];
+
+		switch (fc->task) {
+		case FMTSK_ERASE_BOOT:
+			/* the BIOS image is written next */
+			ch = &fi->cmp_hdr[CH_IT_BIOS];
+			if (ch->length == 0)
+				goto no_bios;
+
+			fc->task = FMTSK_WRTBIOS;
+			fc->func = VDA_FLASH_BEGINW;
+			fc->comp_typ = CH_IT_BIOS;
+			fc->flsh_addr = FLS_OFFSET_BOOT;
+			fc->sgc.length = ch->length;
+			fc->sgc.cur_offset = fc->sgc_offset +
+					     ch->image_offset;
+			break;
+
+		case FMTSK_WRTBIOS:
+			/*
+			 * The BIOS image has been written - read it and
+			 * verify it
+			 */
+			fc->task = FMTSK_READBIOS;
+			fc->func = VDA_FLASH_READ;
+			fc->flsh_addr = FLS_OFFSET_BOOT;
+			fc->cmp_len = ch->length;
+			fc->sgc.length = FM_BUF_SZ;
+			fc->sgc.cur_offset = fc->sgc_offset
+					     + ((u8 *)fc->scratch -
+						(u8 *)fi);
+			break;
+
+		case FMTSK_READBIOS:
+no_bios:
+			/*
+			 * Mark the component header status for the image
+			 * completed
+			 */
+			ch->status = CH_STAT_SUCCESS;
+
+			/* The MAC image is written next */
+			ch = &fi->cmp_hdr[CH_IT_MAC];
+			if (ch->length == 0)
+				goto no_mac;
+
+			fc->task = FMTSK_WRTMAC;
+			fc->func = VDA_FLASH_BEGINW;
+			fc->comp_typ = CH_IT_MAC;
+			fc->flsh_addr = FLS_OFFSET_BOOT
+					+ fi->cmp_hdr[CH_IT_BIOS].length;
+			fc->sgc.length = ch->length;
+			fc->sgc.cur_offset = fc->sgc_offset +
+					     ch->image_offset;
+			break;
+
+		case FMTSK_WRTMAC:
+			/* The MAC image has been written - read and verify */
+			fc->task = FMTSK_READMAC;
+			fc->func = VDA_FLASH_READ;
+			fc->flsh_addr -= ch->length;
+			fc->cmp_len = ch->length;
+			fc->sgc.length = FM_BUF_SZ;
+			fc->sgc.cur_offset = fc->sgc_offset
+					     + ((u8 *)fc->scratch -
+						(u8 *)fi);
+			break;
+
+		case FMTSK_READMAC:
+no_mac:
+			/*
+			 * Mark the component header status for the image
+			 * completed
+			 */
+			ch->status = CH_STAT_SUCCESS;
+
+			/* The EFI image is written next */
+			ch = &fi->cmp_hdr[CH_IT_EFI];
+			if (ch->length == 0)
+				goto no_efi;
+
+			fc->task = FMTSK_WRTEFI;
+			fc->func = VDA_FLASH_BEGINW;
+			fc->comp_typ = CH_IT_EFI;
+			fc->flsh_addr = FLS_OFFSET_BOOT
+					+ fi->cmp_hdr[CH_IT_BIOS].length
+					+ fi->cmp_hdr[CH_IT_MAC].length;
+			fc->sgc.length = ch->length;
+			fc->sgc.cur_offset = fc->sgc_offset +
+					     ch->image_offset;
+			break;
+
+		case FMTSK_WRTEFI:
+			/* The EFI image has been written - read and verify */
+			fc->task = FMTSK_READEFI;
+			fc->func = VDA_FLASH_READ;
+			fc->flsh_addr -= ch->length;
+			fc->cmp_len = ch->length;
+			fc->sgc.length = FM_BUF_SZ;
+			fc->sgc.cur_offset = fc->sgc_offset
+					     + ((u8 *)fc->scratch -
+						(u8 *)fi);
+			break;
+
+		case FMTSK_READEFI:
+no_efi:
+			/*
+			 * Mark the component header status for the image
+			 * completed
+			 */
+			ch->status = CH_STAT_SUCCESS;
+
+			/* The CFG image is written next */
+			ch = &fi->cmp_hdr[CH_IT_CFG];
+
+			if (ch->length == 0)
+				goto no_cfg;
+			fc->task = FMTSK_WRTCFG;
+			fc->func = VDA_FLASH_BEGINW;
+			fc->comp_typ = CH_IT_CFG;
+			fc->flsh_addr = FLS_OFFSET_CPYR - ch->length;
+			fc->sgc.length = ch->length;
+			fc->sgc.cur_offset = fc->sgc_offset +
+					     ch->image_offset;
+			break;
+
+		case FMTSK_WRTCFG:
+			/* The CFG image has been written - read and verify */
+			fc->task = FMTSK_READCFG;
+			fc->func = VDA_FLASH_READ;
+			fc->flsh_addr = FLS_OFFSET_CPYR - ch->length;
+			fc->cmp_len = ch->length;
+			fc->sgc.length = FM_BUF_SZ;
+			fc->sgc.cur_offset = fc->sgc_offset
+					     + ((u8 *)fc->scratch -
+						(u8 *)fi);
+			break;
+
+		case FMTSK_READCFG:
+no_cfg:
+			/*
+			 * Mark the component header status for the image
+			 * completed
+			 */
+			ch->status = CH_STAT_SUCCESS;
+
+			/*
+			 * The download is complete.  If in degraded mode,
+			 * attempt a chip reset.
+			 */
+			if (a->flags & AF_DEGRADED_MODE)
+				esas2r_local_reset_adapter(a);
+
+			a->flash_ver = fi->cmp_hdr[CH_IT_BIOS].version;
+			esas2r_print_flash_rev(a);
+
+			/* Update the type of boot image on the card */
+			memcpy(a->image_type, fi->rel_version,
+			       sizeof(fi->rel_version));
+			complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
+			return;
+		}
+
+		/* If verifying, don't try reading more than what's there */
+		if (fc->func == VDA_FLASH_READ
+		    && fc->sgc.length > fc->cmp_len)
+			fc->sgc.length = fc->cmp_len;
+	}
+
+	/* Build the request to perform the next action */
+	if (!load_image(a, rq)) {
+error:
+		if (fc->comp_typ < fi->num_comps) {
+			ch = &fi->cmp_hdr[fc->comp_typ];
+			ch->status = CH_STAT_FAILED;
+		}
+
+		complete_fmapi_req(a, rq, FI_STAT_FAILED);
+	}
+}
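
fw_download_proc() above drives the task sequence FMTSK_ERASE_BOOT, WRTBIOS, READBIOS, WRTMAC, READMAC, WRTEFI, READEFI, WRTCFG, READCFG: each component is written and then read back FM_BUF_SZ bytes at a time for comparison. A minimal userspace sketch of that chunked read-back verify (read_back() stands in for a VDA_FLASH_READ request; all names here are illustrative, not driver API):

#include <stdio.h>
#include <string.h>

#define BUF_SZ 8	/* stands in for FM_BUF_SZ */

/* stands in for issuing a flash read request and waiting for completion */
static void read_back(const unsigned char *flash, unsigned int off,
		      unsigned char *dst, unsigned int len)
{
	memcpy(dst, flash + off, len);
}

static int verify_image(const unsigned char *flash,
			const unsigned char *image, unsigned int len)
{
	unsigned char scratch[BUF_SZ];
	unsigned int cmp_len = len;	/* bytes still to compare (fc->cmp_len) */
	unsigned int off = 0;

	while (cmp_len) {
		unsigned int curr = cmp_len > BUF_SZ ? BUF_SZ : cmp_len;

		read_back(flash, off, scratch, curr);
		if (memcmp(scratch, image + off, curr))
			return 0;	/* mismatch: fail the download */
		off += curr;
		cmp_len -= curr;
	}
	return 1;
}

int main(void)
{
	unsigned char img[20] = "esas2r image data!!";

	printf("verified: %d\n", verify_image(img, img, sizeof(img)));
	return 0;
}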
+
+/* Determine the flash image adap_typ for this adapter */
+static u8 get_fi_adap_type(struct esas2r_adapter *a)
+{
+	u8 type;
+
+	/* use the device ID to get the correct adap_typ for this HBA */
+	switch (a->pcid->device) {
+	case ATTO_DID_INTEL_IOP348:
+		type = FI_AT_SUN_LAKE;
+		break;
+
+	case ATTO_DID_MV_88RC9580:
+	case ATTO_DID_MV_88RC9580TS:
+	case ATTO_DID_MV_88RC9580TSE:
+	case ATTO_DID_MV_88RC9580TL:
+		type = FI_AT_MV_9580;
+		break;
+
+	default:
+		type = FI_AT_UNKNWN;
+		break;
+	}
+
+	return type;
+}
+
+/*
+ * Return the combined size of the config, copyright and flash_ver
+ * images, or 0 on failure.
+ */
+static u32 chk_cfg(u8 *cfg, u32 length, u32 *flash_ver)
+{
+	u16 *pw = (u16 *)cfg - 1;
+	u32 sz = 0;
+	u32 len = length;
+
+	if (len == 0)
+		len = FM_BUF_SZ;
+
+	if (flash_ver)
+		*flash_ver = 0;
+
+	while (true) {
+		u16 type;
+		u16 size;
+
+		type = le16_to_cpu(*pw--);
+		size = le16_to_cpu(*pw--);
+
+		if (type != FBT_CPYR
+		    && type != FBT_SETUP
+		    && type != FBT_FLASH_VER)
+			break;
+
+		if (type == FBT_FLASH_VER
+		    && flash_ver)
+			*flash_ver = le32_to_cpu(*(u32 *)(pw - 1));
+
+		sz += size + (2 * sizeof(u16));
+		pw -= size / sizeof(u16);
+
+		if (sz > len - (2 * sizeof(u16)))
+			break;
+	}
+
+	/* See if we are comparing the size to the specified length */
+	if (length && sz != length)
+		return 0;
+
+	return sz;
+}
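
chk_cfg() walks the config region backwards from its end: each record is laid out as [data][size][type] with the 16-bit type word last, so the scan reads type, then size, then skips size bytes of data downward. A small endian-naive sketch of building and re-walking one such record (illustrative only; the real code also applies le16_to_cpu() and checks the FBT_* type codes):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t region[64];
	uint8_t *end = region + sizeof(region);
	uint16_t size = 4, type = 0x1234;
	uint32_t payload = 0xdeadbeef;
	uint16_t *pw, t, s;
	uint32_t got;

	/* append one record, working backwards from the end of the region */
	end -= sizeof(type);	memcpy(end, &type, sizeof(type));
	end -= sizeof(size);	memcpy(end, &size, sizeof(size));
	end -= size;		memcpy(end, &payload, size);

	/* walk it back out the way chk_cfg() does */
	pw = (uint16_t *)(region + sizeof(region));
	memcpy(&t, --pw, sizeof(t));	/* type is the last word */
	memcpy(&s, --pw, sizeof(s));	/* then the size word */
	pw -= s / sizeof(*pw);		/* then skip down past the data */
	memcpy(&got, pw, sizeof(got));

	printf("type=0x%04x size=%u data=0x%08x\n", t, s, got);
	return 0;
}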
+
+/* Verify that the boot image is valid */
+static u8 chk_boot(u8 *boot_img, u32 length)
+{
+	struct esas2r_boot_image *bi = (struct esas2r_boot_image *)boot_img;
+	u16 hdroffset = le16_to_cpu(bi->header_offset);
+	struct esas2r_boot_header *bh;
+
+	if (bi->signature != cpu_to_le16(0xAA55)
+	    || (long)hdroffset >
+	    (long)(65536L - sizeof(struct esas2r_boot_header))
+	    || (hdroffset & 3)
+	    || (hdroffset < sizeof(struct esas2r_boot_image))
+	    || ((u32)hdroffset + sizeof(struct esas2r_boot_header) > length))
+		return 0xff;
+
+	bh = (struct esas2r_boot_header *)((char *)bi + hdroffset);
+
+	if (bh->signature[0] != 'P'
+	    || bh->signature[1] != 'C'
+	    || bh->signature[2] != 'I'
+	    || bh->signature[3] != 'R'
+	    || le16_to_cpu(bh->struct_length) <
+	    (u16)sizeof(struct esas2r_boot_header)
+	    || bh->class_code[2] != 0x01
+	    || bh->class_code[1] != 0x04
+	    || bh->class_code[0] != 0x00
+	    || (bh->code_type != CODE_TYPE_PC
+		&& bh->code_type != CODE_TYPE_OPEN
+		&& bh->code_type != CODE_TYPE_EFI))
+		return 0xff;
+
+	return bh->code_type;
+}
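
chk_boot() applies the standard PCI expansion ROM sanity checks: the image must start with the 0xAA55 signature, the header offset must land inside the image, and the header itself must carry the "PCIR" signature, a storage/RAID class code, and a known code type. A stripped-down sketch of the same shape of check (the header offset below is invented for the sketch; the driver takes its layout from the esas2r_boot_image/esas2r_boot_header structs):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HDR_OFF 0x1c	/* where this sketch places the PCIR block */

static int looks_like_option_rom(const uint8_t *img, size_t len)
{
	if (len < HDR_OFF + 4)
		return 0;
	if (img[0] != 0x55 || img[1] != 0xaa)	/* 0xAA55, little-endian */
		return 0;
	return !memcmp(img + HDR_OFF, "PCIR", 4);
}

int main(void)
{
	uint8_t rom[64] = { 0x55, 0xaa };

	memcpy(rom + HDR_OFF, "PCIR", 4);
	printf("valid: %d\n", looks_like_option_rom(rom, sizeof(rom)));
	return 0;
}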
+
+/* The sum of all the WORDS of the image */
+static u16 calc_fi_checksum(struct esas2r_flash_context *fc)
+{
+	struct esas2r_flash_img *fi = fc->fi;
+	u16 cksum;
+	u32 len;
+	u16 *pw;
+
+	for (len = (fi->length - fc->fi_hdr_len) / 2,
+	     pw = (u16 *)((u8 *)fi + fc->fi_hdr_len),
+	     cksum = 0;
+	     len;
+	     len--, pw++)
+		cksum = cksum + le16_to_cpu(*pw);
+
+	return cksum;
+}
+
+/*
+ * Verify the flash image structure.  The following verifications will
+ * be performed:
+ *              1)  verify the fi_version is correct
+ *              2)  verify the checksum of the entire image.
+ *              3)  validate the adap_typ, action and length fields.
+ *              4)  validate each component header; check the img_type and
+ *                  length fields
+ *              5)  validate each component image; validate signatures and
+ *                  local checksums
+ */
+static bool verify_fi(struct esas2r_adapter *a,
+		      struct esas2r_flash_context *fc)
+{
+	struct esas2r_flash_img *fi = fc->fi;
+	u8 type;
+	bool imgerr;
+	u16 i;
+	u32 len;
+	struct esas2r_component_header *ch;
+
+	/* Verify the length - it must be even since we do a word checksum */
+	len = fi->length;
+
+	if ((len & 1)
+	    || len < fc->fi_hdr_len) {
+		fi->status = FI_STAT_LENGTH;
+		return false;
+	}
+
+	/* Get adapter type and verify type in flash image */
+	type = get_fi_adap_type(a);
+	if ((type == FI_AT_UNKNWN) || (fi->adap_typ != type)) {
+		fi->status = FI_STAT_ADAPTYP;
+		return false;
+	}
+
+	/*
+	 * Loop through each component and verify the img_type and length
+	 * fields.  Keep a running count of the sizes so we can verify the
+	 * total size against the sum of the component sizes.
+	 */
+	imgerr = false;
+
+	for (i = 0, len = 0, ch = fi->cmp_hdr;
+	     i < fi->num_comps;
+	     i++, ch++) {
+		bool cmperr = false;
+
+		/*
+		 * Verify that the component header has the same index as the
+		 * image type.  The headers must be ordered correctly
+		 */
+		if (i != ch->img_type) {
+			imgerr = true;
+			ch->status = CH_STAT_INVALID;
+			continue;
+		}
+
+		switch (ch->img_type) {
+		case CH_IT_BIOS:
+			type = CODE_TYPE_PC;
+			break;
+
+		case CH_IT_MAC:
+			type = CODE_TYPE_OPEN;
+			break;
+
+		case CH_IT_EFI:
+			type = CODE_TYPE_EFI;
+			break;
+		}
+
+		switch (ch->img_type) {
+		case CH_IT_FW:
+		case CH_IT_NVR:
+			break;
+
+		case CH_IT_BIOS:
+		case CH_IT_MAC:
+		case CH_IT_EFI:
+			if (ch->length & 0x1ff)
+				cmperr = true;
+
+			/* Test if component image is present  */
+			if (ch->length == 0)
+				break;
+
+			/* Image is present - verify the image */
+			if (chk_boot((u8 *)fi + ch->image_offset, ch->length)
+			    != type)
+				cmperr = true;
+
+			break;
+
+		case CH_IT_CFG:
+
+			/* Test if component image is present */
+			if (ch->length == 0) {
+				cmperr = true;
+				break;
+			}
+
+			/* Image is present - verify the image */
+			if (!chk_cfg((u8 *)fi + ch->image_offset + ch->length,
+				     ch->length, NULL))
+				cmperr = true;
+
+			break;
+
+		default:
+
+			fi->status = FI_STAT_UNKNOWN;
+			return false;
+		}
+
+		if (cmperr) {
+			imgerr = true;
+			ch->status = CH_STAT_INVALID;
+		} else {
+			ch->status = CH_STAT_PENDING;
+			len += ch->length;
+		}
+	}
+
+	if (imgerr) {
+		fi->status = FI_STAT_MISSING;
+		return false;
+	}
+
+	/* Compare fi->length to the sum of ch->length fields */
+	if (len != fi->length - fc->fi_hdr_len) {
+		fi->status = FI_STAT_LENGTH;
+		return false;
+	}
+
+	/* Compare the computed checksum with the one stored in the image */
+	if (fi->checksum != calc_fi_checksum(fc)) {
+		fi->status = FI_STAT_CHKSUM;
+		return false;
+	}
+
+	return true;
+}
+
+/* Fill in the FS IOCTL response data from a completed request. */
+static void esas2r_complete_fs_ioctl(struct esas2r_adapter *a,
+				     struct esas2r_request *rq)
+{
+	struct esas2r_ioctl_fs *fs =
+		(struct esas2r_ioctl_fs *)rq->interrupt_cx;
+
+	if (rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
+		esas2r_enable_heartbeat(a);
+
+	fs->driver_error = rq->req_stat;
+
+	if (fs->driver_error == RS_SUCCESS)
+		fs->status = ATTO_STS_SUCCESS;
+	else
+		fs->status = ATTO_STS_FAILED;
+}
+
+/* Prepare an FS IOCTL request to be sent to the firmware. */
+bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
+			     struct esas2r_ioctl_fs *fs,
+			     struct esas2r_request *rq,
+			     struct esas2r_sg_context *sgc)
+{
+	u8 cmdcnt = (u8)ARRAY_SIZE(cmd_to_fls_func);
+	struct esas2r_ioctlfs_command *fsc = &fs->command;
+	u8 func = 0;
+	u32 datalen;
+
+	fs->status = ATTO_STS_FAILED;
+	fs->driver_error = RS_PENDING;
+
+	if (fs->version > ESAS2R_FS_VER) {
+		fs->status = ATTO_STS_INV_VERSION;
+		return false;
+	}
+
+	if (fsc->command >= cmdcnt) {
+		fs->status = ATTO_STS_INV_FUNC;
+		return false;
+	}
+
+	func = cmd_to_fls_func[fsc->command];
+	if (func == 0xFF) {
+		fs->status = ATTO_STS_INV_FUNC;
+		return false;
+	}
+
+	if (fsc->command != ESAS2R_FS_CMD_CANCEL) {
+		if ((a->pcid->device != ATTO_DID_MV_88RC9580
+		     || fs->adap_type != ESAS2R_FS_AT_ESASRAID2)
+		    && (a->pcid->device != ATTO_DID_MV_88RC9580TS
+			|| fs->adap_type != ESAS2R_FS_AT_TSSASRAID2)
+		    && (a->pcid->device != ATTO_DID_MV_88RC9580TSE
+			|| fs->adap_type != ESAS2R_FS_AT_TSSASRAID2E)
+		    && (a->pcid->device != ATTO_DID_MV_88RC9580TL
+			|| fs->adap_type != ESAS2R_FS_AT_TLSASHBA)) {
+			fs->status = ATTO_STS_INV_ADAPTER;
+			return false;
+		}
+
+		if (fs->driver_ver > ESAS2R_FS_DRVR_VER) {
+			fs->status = ATTO_STS_INV_DRVR_VER;
+			return false;
+		}
+	}
+
+	if (a->flags & AF_DEGRADED_MODE) {
+		fs->status = ATTO_STS_DEGRADED;
+		return false;
+	}
+
+	rq->interrupt_cb = esas2r_complete_fs_ioctl;
+	rq->interrupt_cx = fs;
+	datalen = le32_to_cpu(fsc->length);
+	esas2r_build_flash_req(a,
+			       rq,
+			       func,
+			       fsc->checksum,
+			       le32_to_cpu(fsc->flash_addr),
+			       datalen);
+
+	if (func == VDA_FLASH_WRITE
+	    || func == VDA_FLASH_READ) {
+		if (datalen == 0) {
+			fs->status = ATTO_STS_INV_FUNC;
+			return false;
+		}
+
+		esas2r_sgc_init(sgc, a, rq, rq->vrq->flash.data.sge);
+		sgc->length = datalen;
+
+		if (!esas2r_build_sg_list(a, rq, sgc)) {
+			fs->status = ATTO_STS_OUT_OF_RSRC;
+			return false;
+		}
+	}
+
+	if (func == VDA_FLASH_COMMIT)
+		esas2r_disable_heartbeat(a);
+
+	esas2r_start_request(a, rq);
+
+	return true;
+}
+
+static bool esas2r_flash_access(struct esas2r_adapter *a, u32 function)
+{
+	u32 starttime;
+	u32 timeout;
+	u32 intstat;
+	u32 doorbell;
+
+	/* Disable chip interrupts for a while */
+	if (function == DRBL_FLASH_REQ)
+		esas2r_disable_chip_interrupts(a);
+
+	/* Issue the request to the firmware */
+	esas2r_write_register_dword(a, MU_DOORBELL_IN, function);
+
+	/* Now wait for the firmware to process it */
+	starttime = jiffies_to_msecs(jiffies);
+	timeout = a->flags &
+		  (AF_CHPRST_PENDING | AF_DISC_PENDING) ? 40000 : 5000;
+
+	while (true) {
+		intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
+
+		if (intstat & MU_INTSTAT_DRBL) {
+			/* Got a doorbell interrupt.  Check for the function */
+			doorbell =
+				esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+						    doorbell);
+			if (doorbell & function)
+				break;
+		}
+
+		schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+		if ((jiffies_to_msecs(jiffies) - starttime) > timeout) {
+			/*
+			 * Timeout.  If we were requesting flash access,
+			 * indicate we are done so the firmware knows we gave
+			 * up.  If this was a REQ, we also need to re-enable
+			 * chip interrupts.
+			 */
+			if (function == DRBL_FLASH_REQ) {
+				esas2r_hdebug("flash access timeout");
+				esas2r_write_register_dword(a, MU_DOORBELL_IN,
+							    DRBL_FLASH_DONE);
+				esas2r_enable_chip_interrupts(a);
+			} else {
+				esas2r_hdebug("flash release timeout");
+			}
+
+			return false;
+		}
+	}
+
+	/* if we're done, re-enable chip interrupts */
+	if (function == DRBL_FLASH_DONE)
+		esas2r_enable_chip_interrupts(a);
+
+	return true;
+}
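
esas2r_flash_access() implements a polled doorbell handshake: write the function code to the inbound doorbell, then poll the outbound doorbell (acknowledging whatever bits appear) until the firmware echoes the function back or the timeout expires, backing out cleanly on failure. A toy model of the polling loop (the fake register below simulates what the hardware does; nothing here is driver API):

#include <stdint.h>
#include <stdio.h>

static uint32_t doorbell_out;	/* toy stand-in for MU_DOORBELL_OUT */

static void firmware_acks(uint32_t fn)
{
	doorbell_out |= fn;	/* in hardware, firmware sets the echoed bit */
}

static int handshake(uint32_t fn, int max_polls)
{
	firmware_acks(fn);	/* pretend the firmware already responded */

	while (max_polls--) {
		uint32_t db = doorbell_out;

		doorbell_out = 0;	/* writing back acks the bits */
		if (db & fn)
			return 1;	/* firmware echoed our function */
		/* the driver sleeps ~100 ms per iteration here */
	}
	return 0;	/* timeout: caller must release and clean up */
}

int main(void)
{
	printf("handshake ok: %d\n", handshake(0x1, 50));
	return 0;
}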
+
+#define WINDOW_SIZE ((signed int)MW_DATA_WINDOW_SIZE)
+
+bool esas2r_read_flash_block(struct esas2r_adapter *a,
+			     void *to,
+			     u32 from,
+			     u32 size)
+{
+	u8 *end = (u8 *)to;
+
+	/* Try to acquire access to the flash */
+	if (!esas2r_flash_access(a, DRBL_FLASH_REQ))
+		return false;
+
+	while (size) {
+		u32 len;
+		u32 offset;
+		u32 iatvr;
+
+		if (a->flags2 & AF2_SERIAL_FLASH)
+			iatvr = MW_DATA_ADDR_SER_FLASH + (from & -WINDOW_SIZE);
+		else
+			iatvr = MW_DATA_ADDR_PAR_FLASH + (from & -WINDOW_SIZE);
+
+		esas2r_map_data_window(a, iatvr);
+		offset = from & (WINDOW_SIZE - 1);
+		len = size;
+
+		if (len > WINDOW_SIZE - offset)
+			len = WINDOW_SIZE - offset;
+
+		from += len;
+		size -= len;
+
+		while (len--) {
+			*end++ = esas2r_read_data_byte(a, offset);
+			offset++;
+		}
+	}
+
+	/* Release flash access */
+	esas2r_flash_access(a, DRBL_FLASH_DONE);
+	return true;
+}
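
esas2r_read_flash_block() reads through a sliding MMIO window: the window base is the flash offset rounded down to WINDOW_SIZE (from & -WINDOW_SIZE, which for a power-of-two size equals from & ~(WINDOW_SIZE - 1)), the byte is addressed by its offset inside the window, and the window is re-mapped whenever a read crosses a boundary. The arithmetic in isolation (values invented for the sketch):

#include <stdio.h>

#define WINDOW_SIZE 0x1000u	/* stands in for MW_DATA_WINDOW_SIZE */

int main(void)
{
	unsigned int from = 0x1ffa;	/* flash offset to read */
	unsigned int size = 16;		/* crosses a window boundary */

	while (size) {
		unsigned int base = from & ~(WINDOW_SIZE - 1); /* map this */
		unsigned int off = from & (WINDOW_SIZE - 1);
		unsigned int len = size;

		if (len > WINDOW_SIZE - off)
			len = WINDOW_SIZE - off;

		printf("map window @0x%05x, read 0x%x bytes at offset 0x%03x\n",
		       base, len, off);
		from += len;
		size -= len;
	}
	return 0;
}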
+
+bool esas2r_read_flash_rev(struct esas2r_adapter *a)
+{
+	u8 bytes[256];
+	u16 *pw;
+	u16 *pwstart;
+	u16 type;
+	u16 size;
+	u32 sz;
+
+	sz = sizeof(bytes);
+	pw = (u16 *)(bytes + sz);
+	pwstart = (u16 *)bytes + 2;
+
+	if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_CPYR - sz, sz))
+		goto invalid_rev;
+
+	while (pw >= pwstart) {
+		pw--;
+		type = le16_to_cpu(*pw);
+		pw--;
+		size = le16_to_cpu(*pw);
+		pw -= size / 2;
+
+		if (type == FBT_CPYR
+		    || type == FBT_SETUP
+		    || pw < pwstart)
+			continue;
+
+		if (type == FBT_FLASH_VER)
+			a->flash_ver = le32_to_cpu(*(u32 *)pw);
+
+		break;
+	}
+
+invalid_rev:
+	return esas2r_print_flash_rev(a);
+}
+
+bool esas2r_print_flash_rev(struct esas2r_adapter *a)
+{
+	u16 year = LOWORD(a->flash_ver);
+	u8 day = LOBYTE(HIWORD(a->flash_ver));
+	u8 month = HIBYTE(HIWORD(a->flash_ver));
+
+	if (day == 0
+	    || month == 0
+	    || day > 31
+	    || month > 12
+	    || year < 2006
+	    || year > 9999) {
+		strcpy(a->flash_rev, "not found");
+		a->flash_ver = 0;
+		return false;
+	}
+
+	sprintf(a->flash_rev, "%02d/%02d/%04d", month, day, year);
+	esas2r_hdebug("flash version: %s", a->flash_rev);
+	return true;
+}
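
The version word decoded above packs a date: month in the top byte, day in the next byte, and a 16-bit year in the low word (hence the LOBYTE/HIBYTE/LOWORD/HIWORD accesses). The same unpacking with plain shifts (a sketch; the sample value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t flash_ver = (9u << 24) | (17u << 16) | 2013u; /* 09/17/2013 */
	uint16_t year  = flash_ver & 0xffff;		/* LOWORD         */
	uint8_t  day   = (flash_ver >> 16) & 0xff;	/* LOBYTE(HIWORD) */
	uint8_t  month = (flash_ver >> 24) & 0xff;	/* HIBYTE(HIWORD) */

	printf("%02u/%02u/%04u\n", month, day, year);
	return 0;
}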
+
+/*
+ * Find the type of boot image that is currently in the flash.
+ * The chip only has a 64 KB PCI-e expansion ROM
+ * size so only one image can be flashed at a time.
+ */
+bool esas2r_read_image_type(struct esas2r_adapter *a)
+{
+	u8 bytes[256];
+	struct esas2r_boot_image *bi;
+	struct esas2r_boot_header *bh;
+	u32 sz;
+	u32 len;
+	u32 offset;
+
+	/* Start at the base of the boot images and look for a valid image */
+	sz = sizeof(bytes);
+	len = FLS_LENGTH_BOOT;
+	offset = 0;
+
+	while (true) {
+		if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_BOOT +
+					     offset,
+					     sz))
+			goto invalid_rev;
+
+		bi = (struct esas2r_boot_image *)bytes;
+		bh = (struct esas2r_boot_header *)((u8 *)bi +
+						   le16_to_cpu(
+							   bi->header_offset));
+		if (bi->signature != cpu_to_le16(0xAA55))
+			goto invalid_rev;
+
+		if (bh->code_type == CODE_TYPE_PC) {
+			strcpy(a->image_type, "BIOS");
+
+			return true;
+		} else if (bh->code_type == CODE_TYPE_EFI) {
+			struct esas2r_efi_image *ei;
+
+			/*
+			 * So we have an EFI image.  There are several machine
+			 * types, so check which architecture we have.
+			 */
+			ei = (struct esas2r_efi_image *)bytes;
+
+			switch (le16_to_cpu(ei->machine_type)) {
+			case EFI_MACHINE_IA32:
+				strcpy(a->image_type, "EFI 32-bit");
+				return true;
+
+			case EFI_MACHINE_IA64:
+				strcpy(a->image_type, "EFI itanium");
+				return true;
+
+			case EFI_MACHINE_X64:
+				strcpy(a->image_type, "EFI 64-bit");
+				return true;
+
+			case EFI_MACHINE_EBC:
+				strcpy(a->image_type, "EFI EBC");
+				return true;
+
+			default:
+				goto invalid_rev;
+			}
+		} else {
+			u32 thislen;
+
+			/* jump to the next image */
+			thislen = (u32)le16_to_cpu(bh->image_length) * 512;
+			if (thislen == 0
+			    || thislen + offset > len
+			    || bh->indicator == INDICATOR_LAST)
+				break;
+
+			offset += thislen;
+		}
+	}
+
+invalid_rev:
+	strcpy(a->image_type, "no boot images");
+	return false;
+}
+
+/*
+ *  Read and validate current NVRAM parameters by accessing
+ *  physical NVRAM directly.  If the currently stored parameters are
+ *  invalid, use the defaults.
+ */
+bool esas2r_nvram_read_direct(struct esas2r_adapter *a)
+{
+	bool result;
+
+	if (down_interruptible(&a->nvram_semaphore))
+		return false;
+
+	if (!esas2r_read_flash_block(a, a->nvram, FLS_OFFSET_NVR,
+				     sizeof(struct esas2r_sas_nvram))) {
+		esas2r_hdebug("NVRAM read failed, using defaults");
+		return false;
+	}
+
+	result = esas2r_nvram_validate(a);
+
+	up(&a->nvram_semaphore);
+
+	return result;
+}
+
+/* Interrupt callback to process NVRAM completions. */
+static void esas2r_nvram_callback(struct esas2r_adapter *a,
+				  struct esas2r_request *rq)
+{
+	struct atto_vda_flash_req *vrq = &rq->vrq->flash;
+
+	if (rq->req_stat == RS_SUCCESS) {
+		/* Last request was successful.  See what to do now. */
+
+		switch (vrq->sub_func) {
+		case VDA_FLASH_BEGINW:
+			vrq->sub_func = VDA_FLASH_WRITE;
+			rq->req_stat = RS_PENDING;
+			break;
+
+		case VDA_FLASH_WRITE:
+			vrq->sub_func = VDA_FLASH_COMMIT;
+			rq->req_stat = RS_PENDING;
+			break;
+
+		case VDA_FLASH_READ:
+			esas2r_nvram_validate(a);
+			break;
+
+		case VDA_FLASH_COMMIT:
+		default:
+			break;
+		}
+	}
+
+	if (rq->req_stat != RS_PENDING) {
+		/* update the NVRAM state */
+		if (rq->req_stat == RS_SUCCESS)
+			esas2r_lock_set_flags(&a->flags, AF_NVR_VALID);
+		else
+			esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
+
+		esas2r_enable_heartbeat(a);
+
+		up(&a->nvram_semaphore);
+	}
+}
+
+/*
+ * Write the contents of nvram to the adapter's physical NVRAM.
+ * The cached copy of the NVRAM is also updated.
+ */
+bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
+			struct esas2r_sas_nvram *nvram)
+{
+	struct esas2r_sas_nvram *n = nvram;
+	u8 sas_address_bytes[8];
+	u32 *sas_address_dwords = (u32 *)&sas_address_bytes[0];
+	struct atto_vda_flash_req *vrq = &rq->vrq->flash;
+
+	if (a->flags & AF_DEGRADED_MODE)
+		return false;
+
+	if (down_interruptible(&a->nvram_semaphore))
+		return false;
+
+	if (n == NULL)
+		n = a->nvram;
+
+	/* check the validity of the settings */
+	if (n->version > SASNVR_VERSION) {
+		up(&a->nvram_semaphore);
+		return false;
+	}
+
+	memcpy(&sas_address_bytes[0], n->sas_addr, 8);
+
+	if (sas_address_bytes[0] != 0x50
+	    || sas_address_bytes[1] != 0x01
+	    || sas_address_bytes[2] != 0x08
+	    || (sas_address_bytes[3] & 0xF0) != 0x60
+	    || ((sas_address_bytes[3] & 0x0F) | sas_address_dwords[1]) == 0) {
+		up(&a->nvram_semaphore);
+		return false;
+	}
+
+	if (n->spin_up_delay > SASNVR_SPINUP_MAX)
+		n->spin_up_delay = SASNVR_SPINUP_MAX;
+
+	n->version = SASNVR_VERSION;
+	n->checksum = n->checksum - esas2r_nvramcalc_cksum(n);
+	memcpy(a->nvram, n, sizeof(struct esas2r_sas_nvram));
+
+	/* write the NVRAM */
+	n = a->nvram;
+	esas2r_disable_heartbeat(a);
+
+	esas2r_build_flash_req(a,
+			       rq,
+			       VDA_FLASH_BEGINW,
+			       esas2r_nvramcalc_xor_cksum(n),
+			       FLS_OFFSET_NVR,
+			       sizeof(struct esas2r_sas_nvram));
+
+	if (a->flags & AF_LEGACY_SGE_MODE) {
+
+		vrq->data.sge[0].length =
+			cpu_to_le32(SGE_LAST |
+				    sizeof(struct esas2r_sas_nvram));
+		vrq->data.sge[0].address = cpu_to_le64(
+			a->uncached_phys + (u64)((u8 *)n - a->uncached));
+	} else {
+		vrq->data.prde[0].ctl_len =
+			cpu_to_le32(sizeof(struct esas2r_sas_nvram));
+		vrq->data.prde[0].address = cpu_to_le64(
+			a->uncached_phys
+			+ (u64)((u8 *)n - a->uncached));
+	}
+	rq->interrupt_cb = esas2r_nvram_callback;
+	esas2r_start_request(a, rq);
+	return true;
+}
+
+/* Validate the cached NVRAM.  If the NVRAM is invalid, load the defaults. */
+bool esas2r_nvram_validate(struct esas2r_adapter *a)
+{
+	struct esas2r_sas_nvram *n = a->nvram;
+	bool rslt = false;
+
+	if (n->signature[0] != 'E'
+	    || n->signature[1] != 'S'
+	    || n->signature[2] != 'A'
+	    || n->signature[3] != 'S') {
+		esas2r_hdebug("invalid NVRAM signature");
+	} else if (esas2r_nvramcalc_cksum(n)) {
+		esas2r_hdebug("invalid NVRAM checksum");
+	} else if (n->version > SASNVR_VERSION) {
+		esas2r_hdebug("invalid NVRAM version");
+	} else {
+		esas2r_lock_set_flags(&a->flags, AF_NVR_VALID);
+		rslt = true;
+	}
+
+	if (!rslt) {
+		esas2r_hdebug("using defaults");
+		esas2r_nvram_set_defaults(a);
+	}
+
+	return rslt;
+}
+
+/*
+ * Set the cached NVRAM to defaults.  Note that this function sets the default
+ * NVRAM when it has been determined that the physical NVRAM is invalid.
+ * In this case, the SAS address is fabricated.
+ */
+void esas2r_nvram_set_defaults(struct esas2r_adapter *a)
+{
+	struct esas2r_sas_nvram *n = a->nvram;
+	u32 time = jiffies_to_msecs(jiffies);
+
+	esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
+	memcpy(n, &default_sas_nvram, sizeof(struct esas2r_sas_nvram));
+	n->sas_addr[3] |= 0x0F;
+	n->sas_addr[4] = HIBYTE(LOWORD(time));
+	n->sas_addr[5] = LOBYTE(LOWORD(time));
+	n->sas_addr[6] = a->pcid->bus->number;
+	n->sas_addr[7] = a->pcid->devfn;
+}
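
When the stored NVRAM is bad, the fabricated SAS address above keeps the ATTO 50:01:08:6x prefix from the defaults and fills the low bytes from the millisecond clock plus the PCI bus/devfn, so two adapters in the same system get distinct addresses. A sketch of the composition with made-up inputs:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t sas_addr[8] = { 0x50, 0x01, 0x08, 0x60, 0, 0, 0, 0 };
	uint32_t msecs = 0x12345678;	/* jiffies_to_msecs(jiffies) */
	uint8_t bus = 3, devfn = 0x28;	/* pcid->bus->number, pcid->devfn */
	int i;

	sas_addr[3] |= 0x0F;
	sas_addr[4] = (msecs >> 8) & 0xff;	/* HIBYTE(LOWORD(time)) */
	sas_addr[5] = msecs & 0xff;		/* LOBYTE(LOWORD(time)) */
	sas_addr[6] = bus;
	sas_addr[7] = devfn;

	for (i = 0; i < 8; i++)
		printf("%02x", sas_addr[i]);
	printf("\n");
	return 0;
}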
+
+void esas2r_nvram_get_defaults(struct esas2r_adapter *a,
+			       struct esas2r_sas_nvram *nvram)
+{
+	u8 sas_addr[8];
+
+	/*
+	 * In case we are copying the defaults into the adapter, copy the SAS
+	 * address out first.
+	 */
+	memcpy(&sas_addr[0], a->nvram->sas_addr, 8);
+	memcpy(nvram, &default_sas_nvram, sizeof(struct esas2r_sas_nvram));
+	memcpy(&nvram->sas_addr[0], &sas_addr[0], 8);
+}
+
+bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
+		   struct esas2r_request *rq, struct esas2r_sg_context *sgc)
+{
+	struct esas2r_flash_context *fc = &a->flash_context;
+	u8 j;
+	struct esas2r_component_header *ch;
+
+	if (esas2r_lock_set_flags(&a->flags, AF_FLASH_LOCK) & AF_FLASH_LOCK) {
+		/* flag was already set */
+		fi->status = FI_STAT_BUSY;
+		return false;
+	}
+
+	memcpy(&fc->sgc, sgc, sizeof(struct esas2r_sg_context));
+	sgc = &fc->sgc;
+	fc->fi = fi;
+	fc->sgc_offset = sgc->cur_offset;
+	rq->req_stat = RS_SUCCESS;
+	rq->interrupt_cx = fc;
+
+	switch (fi->fi_version) {
+	case FI_VERSION_1:
+		fc->scratch = ((struct esas2r_flash_img *)fi)->scratch_buf;
+		fc->num_comps = FI_NUM_COMPS_V1;
+		fc->fi_hdr_len = sizeof(struct esas2r_flash_img);
+		break;
+
+	default:
+		return complete_fmapi_req(a, rq, FI_STAT_IMG_VER);
+	}
+
+	if (a->flags & AF_DEGRADED_MODE)
+		return complete_fmapi_req(a, rq, FI_STAT_DEGRADED);
+
+	switch (fi->action) {
+	case FI_ACT_DOWN: /* Download the components */
+		/* Verify the format of the flash image */
+		if (!verify_fi(a, fc))
+			return complete_fmapi_req(a, rq, fi->status);
+
+		/* Adjust the BIOS fields that are dependent on the HBA */
+		ch = &fi->cmp_hdr[CH_IT_BIOS];
+
+		if (ch->length)
+			fix_bios(a, fi);
+
+		/* Adjust the EFI fields that are dependent on the HBA */
+		ch = &fi->cmp_hdr[CH_IT_EFI];
+
+		if (ch->length)
+			fix_efi(a, fi);
+
+		/*
+		 * Since the image was just modified, compute the checksum on
+		 * the modified image.  First update the CRC for the composite
+		 * expansion ROM image.
+		 */
+		fi->checksum = calc_fi_checksum(fc);
+
+		/* Disable the heartbeat */
+		esas2r_disable_heartbeat(a);
+
+		/* Now start up the download sequence */
+		fc->task = FMTSK_ERASE_BOOT;
+		fc->func = VDA_FLASH_BEGINW;
+		fc->comp_typ = CH_IT_CFG;
+		fc->flsh_addr = FLS_OFFSET_BOOT;
+		fc->sgc.length = FLS_LENGTH_BOOT;
+		fc->sgc.cur_offset = NULL;
+
+		/* Setup the callback address */
+		fc->interrupt_cb = fw_download_proc;
+		break;
+
+	case FI_ACT_UPSZ: /* Get upload sizes */
+		fi->adap_typ = get_fi_adap_type(a);
+		fi->flags = 0;
+		fi->num_comps = fc->num_comps;
+		fi->length = fc->fi_hdr_len;
+
+		/* Report the type of boot image in the rel_version string */
+		memcpy(fi->rel_version, a->image_type,
+		       sizeof(fi->rel_version));
+
+		/* Build the component headers */
+		for (j = 0, ch = fi->cmp_hdr;
+		     j < fi->num_comps;
+		     j++, ch++) {
+			ch->img_type = j;
+			ch->status = CH_STAT_PENDING;
+			ch->length = 0;
+			ch->version = 0xffffffff;
+			ch->image_offset = 0;
+			ch->pad[0] = 0;
+			ch->pad[1] = 0;
+		}
+
+		if (a->flash_ver != 0) {
+			fi->cmp_hdr[CH_IT_BIOS].version =
+				fi->cmp_hdr[CH_IT_MAC].version =
+					fi->cmp_hdr[CH_IT_EFI].version =
+						fi->cmp_hdr[CH_IT_CFG].version
+							= a->flash_ver;
+
+			fi->cmp_hdr[CH_IT_BIOS].status =
+				fi->cmp_hdr[CH_IT_MAC].status =
+					fi->cmp_hdr[CH_IT_EFI].status =
+						fi->cmp_hdr[CH_IT_CFG].status =
+							CH_STAT_SUCCESS;
+
+			return complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
+		}
+
+	/* fall through */
+
+	case FI_ACT_UP: /* Upload the components */
+	default:
+		return complete_fmapi_req(a, rq, FI_STAT_INVALID);
+	}
+
+	/*
+	 * If we make it here, fc has been setup to do the first task.  Call
+	 * load_image to format the request, start it, and get out.  The
+	 * interrupt code will call the callback when the first message is
+	 * complete.
+	 */
+	if (!load_image(a, rq))
+		return complete_fmapi_req(a, rq, FI_STAT_FAILED);
+
+	esas2r_start_request(a, rq);
+
+	return true;
+}

+ 1773 - 0
drivers/scsi/esas2r/esas2r_init.c

@@ -0,0 +1,1773 @@
+/*
+ *  linux/drivers/scsi/esas2r/esas2r_init.c
+ *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ *  Copyright (c) 2001-2013 ATTO Technology, Inc.
+ *  (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+static bool esas2r_initmem_alloc(struct esas2r_adapter *a,
+				 struct esas2r_mem_desc *mem_desc,
+				 u32 align)
+{
+	mem_desc->esas2r_param = mem_desc->size + align;
+	mem_desc->virt_addr = NULL;
+	mem_desc->phys_addr = 0;
+	mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev,
+						   (size_t)mem_desc->
+						   esas2r_param,
+						   (dma_addr_t *)&mem_desc->
+						   phys_addr,
+						   GFP_KERNEL);
+
+	if (mem_desc->esas2r_data == NULL) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "failed to allocate %lu bytes of consistent memory!",
+			   (long
+			    unsigned
+			    int)mem_desc->esas2r_param);
+		return false;
+	}
+
+	mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align);
+	mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align);
+	memset(mem_desc->virt_addr, 0, mem_desc->size);
+	return true;
+}
+
+static void esas2r_initmem_free(struct esas2r_adapter *a,
+				struct esas2r_mem_desc *mem_desc)
+{
+	if (mem_desc->virt_addr == NULL)
+		return;
+
+	/*
+	 * Careful!  phys_addr and virt_addr may have been adjusted from the
+	 * original allocation in order to return the desired alignment.  That
+	 * means we have to use the original address (in esas2r_data) and size
+	 * (esas2r_param) and calculate the original physical address based on
+	 * the difference between the requested and actual allocation size.
+	 */
+	if (mem_desc->phys_addr) {
+		int unalign = ((u8 *)mem_desc->virt_addr) -
+			      ((u8 *)mem_desc->esas2r_data);
+
+		dma_free_coherent(&a->pcid->dev,
+				  (size_t)mem_desc->esas2r_param,
+				  mem_desc->esas2r_data,
+				  (dma_addr_t)(mem_desc->phys_addr - unalign));
+	} else {
+		kfree(mem_desc->esas2r_data);
+	}
+
+	mem_desc->virt_addr = NULL;
+}
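
esas2r_initmem_alloc()/esas2r_initmem_free() use the classic over-allocate-and-align pattern: request size + align, round the returned pointer up, and remember the original pointer and size so the exact allocation can be handed back later. A userspace analogue with malloc() standing in for dma_alloc_coherent() (names are illustrative; align must be a power of two):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct mem_desc {
	void *raw;	/* as returned by the allocator (esas2r_data)     */
	void *aligned;	/* rounded up for the caller (virt_addr)          */
	size_t raw_size;/* size + align actually requested (esas2r_param) */
};

static int aligned_alloc_desc(struct mem_desc *d, size_t size, size_t align)
{
	d->raw_size = size + align;
	d->raw = malloc(d->raw_size);	/* dma_alloc_coherent() in the driver */
	if (!d->raw)
		return 0;
	d->aligned = (void *)(((uintptr_t)d->raw + align - 1) & ~(align - 1));
	return 1;
}

static void aligned_free_desc(struct mem_desc *d)
{
	free(d->raw);	/* free the original pointer, not the aligned one */
	d->raw = d->aligned = NULL;
}

int main(void)
{
	struct mem_desc d;

	if (aligned_alloc_desc(&d, 1000, 256)) {
		printf("raw=%p aligned=%p\n", d.raw, d.aligned);
		aligned_free_desc(&d);
	}
	return 0;
}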
+
+static bool alloc_vda_req(struct esas2r_adapter *a,
+			  struct esas2r_request *rq)
+{
+	struct esas2r_mem_desc *memdesc = kzalloc(
+		sizeof(struct esas2r_mem_desc), GFP_KERNEL);
+
+	if (memdesc == NULL) {
+		esas2r_hdebug("could not alloc mem for vda request memdesc\n");
+		return false;
+	}
+
+	memdesc->size = sizeof(union atto_vda_req) +
+			ESAS2R_DATA_BUF_LEN;
+
+	if (!esas2r_initmem_alloc(a, memdesc, 256)) {
+		esas2r_hdebug("could not alloc mem for vda request\n");
+		kfree(memdesc);
+		return false;
+	}
+
+	a->num_vrqs++;
+	list_add(&memdesc->next_desc, &a->vrq_mds_head);
+
+	rq->vrq_md = memdesc;
+	rq->vrq = (union atto_vda_req *)memdesc->virt_addr;
+	rq->vrq->scsi.handle = a->num_vrqs;
+
+	return true;
+}
+
+static void esas2r_unmap_regions(struct esas2r_adapter *a)
+{
+	if (a->regs)
+		iounmap((void __iomem *)a->regs);
+
+	a->regs = NULL;
+
+	pci_release_region(a->pcid, 2);
+
+	if (a->data_window)
+		iounmap((void __iomem *)a->data_window);
+
+	a->data_window = NULL;
+
+	pci_release_region(a->pcid, 0);
+}
+
+static int esas2r_map_regions(struct esas2r_adapter *a)
+{
+	int error;
+
+	a->regs = NULL;
+	a->data_window = NULL;
+
+	error = pci_request_region(a->pcid, 2, a->name);
+	if (error != 0) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "pci_request_region(2) failed, error %d",
+			   error);
+
+		return error;
+	}
+
+	a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2),
+					  pci_resource_len(a->pcid, 2));
+	if (a->regs == NULL) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "ioremap failed for regs mem region\n");
+		pci_release_region(a->pcid, 2);
+		return -EFAULT;
+	}
+
+	error = pci_request_region(a->pcid, 0, a->name);
+	if (error != 0) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "pci_request_region(2) failed, error %d",
+			   error);
+		esas2r_unmap_regions(a);
+		return error;
+	}
+
+	a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid,
+								    0),
+						 pci_resource_len(a->pcid, 0));
+	if (a->data_window == NULL) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "ioremap failed for data_window mem region\n");
+		esas2r_unmap_regions(a);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode)
+{
+	int i;
+
+	/* Set up interrupt mode based on the requested value */
+	switch (intr_mode) {
+	case INTR_MODE_LEGACY:
+use_legacy_interrupts:
+		a->intr_mode = INTR_MODE_LEGACY;
+		break;
+
+	case INTR_MODE_MSI:
+		i = pci_enable_msi(a->pcid);
+		if (i != 0) {
+			esas2r_log(ESAS2R_LOG_WARN,
+				   "failed to enable MSI for adapter %d, "
+				   "falling back to legacy interrupts "
+				   "(err=%d)", a->index,
+				   i);
+			goto use_legacy_interrupts;
+		}
+		a->intr_mode = INTR_MODE_MSI;
+		esas2r_lock_set_flags(&a->flags2, AF2_MSI_ENABLED);
+		break;
+
+
+	default:
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "unknown interrupt_mode %d requested, "
+			   "falling back to legacy interrupt",
+			   intr_mode);
+		goto use_legacy_interrupts;
+	}
+}
+
+static void esas2r_claim_interrupts(struct esas2r_adapter *a)
+{
+	unsigned long flags = IRQF_DISABLED;
+
+	if (a->intr_mode == INTR_MODE_LEGACY)
+		flags |= IRQF_SHARED;
+
+	esas2r_log(ESAS2R_LOG_INFO,
+		   "esas2r_claim_interrupts irq=%d (%p, %s, %x)",
+		   a->pcid->irq, a, a->name, flags);
+
+	if (request_irq(a->pcid->irq,
+			(a->intr_mode ==
+			 INTR_MODE_LEGACY) ? esas2r_interrupt :
+			esas2r_msi_interrupt,
+			flags,
+			a->name,
+			a)) {
+		esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X",
+			   a->pcid->irq);
+		return;
+	}
+
+	esas2r_lock_set_flags(&a->flags2, AF2_IRQ_CLAIMED);
+	esas2r_log(ESAS2R_LOG_INFO,
+		   "claimed IRQ %d flags: 0x%lx",
+		   a->pcid->irq, flags);
+}
+
+int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
+			int index)
+{
+	struct esas2r_adapter *a;
+	u64 bus_addr = 0;
+	int i;
+	void *next_uncached;
+	struct esas2r_request *first_request, *last_request;
+
+	if (index >= MAX_ADAPTERS) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "tried to init invalid adapter index %u!",
+			   index);
+		return 0;
+	}
+
+	if (esas2r_adapters[index]) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "tried to init existing adapter index %u!",
+			   index);
+		return 0;
+	}
+
+	a = (struct esas2r_adapter *)host->hostdata;
+	memset(a, 0, sizeof(struct esas2r_adapter));
+	a->pcid = pcid;
+	a->host = host;
+
+	if (sizeof(dma_addr_t) > 4) {
+		const uint64_t required_mask =
+			dma_get_required_mask(&pcid->dev);
+		if (required_mask > DMA_BIT_MASK(32)
+		    && !pci_set_dma_mask(pcid, DMA_BIT_MASK(64))
+		    && !pci_set_consistent_dma_mask(pcid,
+						    DMA_BIT_MASK(64))) {
+			esas2r_log_dev(ESAS2R_LOG_INFO,
+				       &(a->pcid->dev),
+				       "64-bit PCI addressing enabled\n");
+		} else if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
+			   && !pci_set_consistent_dma_mask(pcid,
+							   DMA_BIT_MASK(32))) {
+			esas2r_log_dev(ESAS2R_LOG_INFO,
+				       &(a->pcid->dev),
+				       "32-bit PCI addressing enabled\n");
+		} else {
+			esas2r_log(ESAS2R_LOG_CRIT,
+				   "failed to set DMA mask");
+			esas2r_kill_adapter(index);
+			return 0;
+		}
+	} else {
+		if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
+		    && !pci_set_consistent_dma_mask(pcid,
+						    DMA_BIT_MASK(32))) {
+			esas2r_log_dev(ESAS2R_LOG_INFO,
+				       &(a->pcid->dev),
+				       "32-bit PCI addressing enabled\n");
+		} else {
+			esas2r_log(ESAS2R_LOG_CRIT,
+				   "failed to set DMA mask");
+			esas2r_kill_adapter(index);
+			return 0;
+		}
+	}
+	esas2r_adapters[index] = a;
+	sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index);
+	esas2r_debug("new adapter %p, name %s", a, a->name);
+	spin_lock_init(&a->request_lock);
+	spin_lock_init(&a->fw_event_lock);
+	sema_init(&a->fm_api_semaphore, 1);
+	sema_init(&a->fs_api_semaphore, 1);
+	sema_init(&a->nvram_semaphore, 1);
+
+	esas2r_fw_event_off(a);
+	snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d",
+		 a->index);
+	a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name);
+
+	init_waitqueue_head(&a->buffered_ioctl_waiter);
+	init_waitqueue_head(&a->nvram_waiter);
+	init_waitqueue_head(&a->fm_api_waiter);
+	init_waitqueue_head(&a->fs_api_waiter);
+	init_waitqueue_head(&a->vda_waiter);
+
+	INIT_LIST_HEAD(&a->general_req.req_list);
+	INIT_LIST_HEAD(&a->active_list);
+	INIT_LIST_HEAD(&a->defer_list);
+	INIT_LIST_HEAD(&a->free_sg_list_head);
+	INIT_LIST_HEAD(&a->avail_request);
+	INIT_LIST_HEAD(&a->vrq_mds_head);
+	INIT_LIST_HEAD(&a->fw_event_list);
+
+	first_request = (struct esas2r_request *)((u8 *)(a + 1));
+
+	for (last_request = first_request, i = 1; i < num_requests;
+	     last_request++, i++) {
+		INIT_LIST_HEAD(&last_request->req_list);
+		list_add_tail(&last_request->comp_list, &a->avail_request);
+		if (!alloc_vda_req(a, last_request)) {
+			esas2r_log(ESAS2R_LOG_CRIT,
+				   "failed to allocate a VDA request!");
+			esas2r_kill_adapter(index);
+			return 0;
+		}
+	}
+
+	esas2r_debug("requests: %p to %p (%d, %d)", first_request,
+		     last_request,
+		     sizeof(*first_request),
+		     num_requests);
+
+	if (esas2r_map_regions(a) != 0) {
+		esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!");
+		esas2r_kill_adapter(index);
+		return 0;
+	}
+
+	a->index = index;
+
+	/* interrupts will be disabled until we are done with init */
+	atomic_inc(&a->dis_ints_cnt);
+	atomic_inc(&a->disable_cnt);
+	a->flags |= AF_CHPRST_PENDING
+		    | AF_DISC_PENDING
+		    | AF_FIRST_INIT
+		    | AF_LEGACY_SGE_MODE;
+
+	a->init_msg = ESAS2R_INIT_MSG_START;
+	a->max_vdareq_size = 128;
+	a->build_sgl = esas2r_build_sg_list_sge;
+
+	esas2r_setup_interrupts(a, interrupt_mode);
+
+	a->uncached_size = esas2r_get_uncached_size(a);
+	a->uncached = dma_alloc_coherent(&pcid->dev,
+					 (size_t)a->uncached_size,
+					 (dma_addr_t *)&bus_addr,
+					 GFP_KERNEL);
+	if (a->uncached == NULL) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "failed to allocate %d bytes of consistent memory!",
+			   a->uncached_size);
+		esas2r_kill_adapter(index);
+		return 0;
+	}
+
+	a->uncached_phys = bus_addr;
+
+	esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)",
+		     a->uncached_size,
+		     a->uncached,
+		     upper_32_bits(bus_addr),
+		     lower_32_bits(bus_addr));
+	memset(a->uncached, 0, a->uncached_size);
+	next_uncached = a->uncached;
+
+	if (!esas2r_init_adapter_struct(a,
+					&next_uncached)) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "failed to initialize adapter structure (2)!");
+		esas2r_kill_adapter(index);
+		return 0;
+	}
+
+	tasklet_init(&a->tasklet,
+		     esas2r_adapter_tasklet,
+		     (unsigned long)a);
+
+	/*
+	 * Disable chip interrupts to prevent spurious interrupts
+	 * until we claim the IRQ.
+	 */
+	esas2r_disable_chip_interrupts(a);
+	esas2r_check_adapter(a);
+
+	if (!esas2r_init_adapter_hw(a, true))
+		esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!");
+	else
+		esas2r_debug("esas2r_init_adapter ok");
+
+	esas2r_claim_interrupts(a);
+
+	if (a->flags2 & AF2_IRQ_CLAIMED)
+		esas2r_enable_chip_interrupts(a);
+
+	esas2r_lock_set_flags(&a->flags2, AF2_INIT_DONE);
+	if (!(a->flags & AF_DEGRADED_MODE))
+		esas2r_kickoff_timer(a);
+	esas2r_debug("esas2r_init_adapter done for %p (%d)",
+		     a, a->disable_cnt);
+
+	return 1;
+}
+
+static void esas2r_adapter_power_down(struct esas2r_adapter *a,
+				      int power_management)
+{
+	struct esas2r_mem_desc *memdesc, *next;
+
+	if ((a->flags2 & AF2_INIT_DONE)
+	    &&  (!(a->flags & AF_DEGRADED_MODE))) {
+		if (!power_management) {
+			del_timer_sync(&a->timer);
+			tasklet_kill(&a->tasklet);
+		}
+		esas2r_power_down(a);
+
+		/*
+		 * There are versions of firmware that do not handle the sync
+		 * cache command correctly.  Stall here to ensure that the
+		 * cache is lazily flushed.
+		 */
+		mdelay(500);
+		esas2r_debug("chip halted");
+	}
+
+	/* Remove sysfs binary files */
+	if (a->sysfs_fw_created) {
+		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw);
+		a->sysfs_fw_created = 0;
+	}
+
+	if (a->sysfs_fs_created) {
+		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs);
+		a->sysfs_fs_created = 0;
+	}
+
+	if (a->sysfs_vda_created) {
+		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda);
+		a->sysfs_vda_created = 0;
+	}
+
+	if (a->sysfs_hw_created) {
+		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw);
+		a->sysfs_hw_created = 0;
+	}
+
+	if (a->sysfs_live_nvram_created) {
+		sysfs_remove_bin_file(&a->host->shost_dev.kobj,
+				      &bin_attr_live_nvram);
+		a->sysfs_live_nvram_created = 0;
+	}
+
+	if (a->sysfs_default_nvram_created) {
+		sysfs_remove_bin_file(&a->host->shost_dev.kobj,
+				      &bin_attr_default_nvram);
+		a->sysfs_default_nvram_created = 0;
+	}
+
+	/* Clean up interrupts */
+	if (a->flags2 & AF2_IRQ_CLAIMED) {
+		esas2r_log_dev(ESAS2R_LOG_INFO,
+			       &(a->pcid->dev),
+			       "free_irq(%d) called", a->pcid->irq);
+
+		free_irq(a->pcid->irq, a);
+		esas2r_debug("IRQ released");
+		esas2r_lock_clear_flags(&a->flags2, AF2_IRQ_CLAIMED);
+	}
+
+	if (a->flags2 & AF2_MSI_ENABLED) {
+		pci_disable_msi(a->pcid);
+		esas2r_lock_clear_flags(&a->flags2, AF2_MSI_ENABLED);
+		esas2r_debug("MSI disabled");
+	}
+
+	if (a->inbound_list_md.virt_addr)
+		esas2r_initmem_free(a, &a->inbound_list_md);
+
+	if (a->outbound_list_md.virt_addr)
+		esas2r_initmem_free(a, &a->outbound_list_md);
+
+	list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head,
+				 next_desc) {
+		esas2r_initmem_free(a, memdesc);
+	}
+
+	/* The following frees everything allocated via alloc_vda_req() */
+	list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) {
+		esas2r_initmem_free(a, memdesc);
+		list_del(&memdesc->next_desc);
+		kfree(memdesc);
+	}
+
+	kfree(a->first_ae_req);
+	a->first_ae_req = NULL;
+
+	kfree(a->sg_list_mds);
+	a->sg_list_mds = NULL;
+
+	kfree(a->req_table);
+	a->req_table = NULL;
+
+	if (a->regs) {
+		esas2r_unmap_regions(a);
+		a->regs = NULL;
+		a->data_window = NULL;
+		esas2r_debug("regions unmapped");
+	}
+}
+
+/* Release/free allocated resources for the specified adapter. */
+void esas2r_kill_adapter(int i)
+{
+	struct esas2r_adapter *a = esas2r_adapters[i];
+
+	if (a) {
+		unsigned long flags;
+		struct workqueue_struct *wq;
+		esas2r_debug("killing adapter %p [%d] ", a, i);
+		esas2r_fw_event_off(a);
+		esas2r_adapter_power_down(a, 0);
+		if (esas2r_buffered_ioctl &&
+		    (a->pcid == esas2r_buffered_ioctl_pcid)) {
+			dma_free_coherent(&a->pcid->dev,
+					  (size_t)esas2r_buffered_ioctl_size,
+					  esas2r_buffered_ioctl,
+					  esas2r_buffered_ioctl_addr);
+			esas2r_buffered_ioctl = NULL;
+		}
+
+		if (a->vda_buffer) {
+			dma_free_coherent(&a->pcid->dev,
+					  (size_t)VDA_MAX_BUFFER_SIZE,
+					  a->vda_buffer,
+					  (dma_addr_t)a->ppvda_buffer);
+			a->vda_buffer = NULL;
+		}
+		if (a->fs_api_buffer) {
+			dma_free_coherent(&a->pcid->dev,
+					  (size_t)a->fs_api_buffer_size,
+					  a->fs_api_buffer,
+					  (dma_addr_t)a->ppfs_api_buffer);
+			a->fs_api_buffer = NULL;
+		}
+
+		kfree(a->local_atto_ioctl);
+		a->local_atto_ioctl = NULL;
+
+		spin_lock_irqsave(&a->fw_event_lock, flags);
+		wq = a->fw_event_q;
+		a->fw_event_q = NULL;
+		spin_unlock_irqrestore(&a->fw_event_lock, flags);
+		if (wq)
+			destroy_workqueue(wq);
+
+		if (a->uncached) {
+			dma_free_coherent(&a->pcid->dev,
+					  (size_t)a->uncached_size,
+					  a->uncached,
+					  (dma_addr_t)a->uncached_phys);
+			a->uncached = NULL;
+			esas2r_debug("uncached area freed");
+		}
+
+		esas2r_log_dev(ESAS2R_LOG_INFO,
+			       &(a->pcid->dev),
+			       "pci_disable_device() called.  msix_enabled: %d "
+			       "msi_enabled: %d irq: %d pin: %d",
+			       a->pcid->msix_enabled,
+			       a->pcid->msi_enabled,
+			       a->pcid->irq,
+			       a->pcid->pin);
+
+		esas2r_log_dev(ESAS2R_LOG_INFO,
+			       &(a->pcid->dev),
+			       "before pci_disable_device() enable_cnt: %d",
+			       a->pcid->enable_cnt.counter);
+
+		pci_disable_device(a->pcid);
+		esas2r_log_dev(ESAS2R_LOG_INFO,
+			       &(a->pcid->dev),
+			       "after pci_disable_device() enable_cnt: %d",
+			       a->pcid->enable_cnt.counter);
+
+		esas2r_log_dev(ESAS2R_LOG_INFO,
+			       &(a->pcid->dev),
+			       "pci_set_drv_data(%p, NULL) called",
+			       a->pcid);
+
+		pci_set_drvdata(a->pcid, NULL);
+		esas2r_adapters[i] = NULL;
+
+		if (a->flags2 & AF2_INIT_DONE) {
+			esas2r_lock_clear_flags(&a->flags2,
+						AF2_INIT_DONE);
+
+			esas2r_lock_set_flags(&a->flags,
+					      AF_DEGRADED_MODE);
+
+			esas2r_log_dev(ESAS2R_LOG_INFO,
+				       &(a->host->shost_gendev),
+				       "scsi_remove_host() called");
+
+			scsi_remove_host(a->host);
+
+			esas2r_log_dev(ESAS2R_LOG_INFO,
+				       &(a->host->shost_gendev),
+				       "scsi_host_put() called");
+
+			scsi_host_put(a->host);
+		}
+	}
+}
+
+int esas2r_cleanup(struct Scsi_Host *host)
+{
+	struct esas2r_adapter *a;
+	int index;
+
+	if (host == NULL) {
+		int i;
+
+		esas2r_debug("esas2r_cleanup everything");
+		for (i = 0; i < MAX_ADAPTERS; i++)
+			esas2r_kill_adapter(i);
+		return -1;
+	}
+
+	esas2r_debug("esas2r_cleanup called for host %p", host);
+	a = (struct esas2r_adapter *)host->hostdata;
+	index = a->index;
+	esas2r_kill_adapter(index);
+	return index;
+}
+
+int esas2r_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct Scsi_Host *host = pci_get_drvdata(pdev);
+	u32 device_state;
+	struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
+
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "suspending adapter()");
+	if (!a)
+		return -ENODEV;
+
+	esas2r_adapter_power_down(a, 1);
+	device_state = pci_choose_state(pdev, state);
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+		       "pci_save_state() called");
+	pci_save_state(pdev);
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+		       "pci_disable_device() called");
+	pci_disable_device(pdev);
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+		       "pci_set_power_state() called");
+	pci_set_power_state(pdev, device_state);
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "esas2r_suspend(): 0");
+	return 0;
+}
+
+int esas2r_resume(struct pci_dev *pdev)
+{
+	struct Scsi_Host *host = pci_get_drvdata(pdev);
+	struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
+	int rez;
+
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "resuming adapter()");
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+		       "pci_set_power_state(PCI_D0) "
+		       "called");
+	pci_set_power_state(pdev, PCI_D0);
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+		       "pci_enable_wake(PCI_D0, 0) "
+		       "called");
+	pci_enable_wake(pdev, PCI_D0, 0);
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+		       "pci_restore_state() called");
+	pci_restore_state(pdev);
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+		       "pci_enable_device() called");
+	rez = pci_enable_device(pdev);
+	pci_set_master(pdev);
+
+	if (!a) {
+		rez = -ENODEV;
+		goto error_exit;
+	}
+
+	if (esas2r_map_regions(a) != 0) {
+		esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!");
+		rez = -ENOMEM;
+		goto error_exit;
+	}
+
+	/* Set up interrupt mode */
+	esas2r_setup_interrupts(a, a->intr_mode);
+
+	/*
+	 * Disable chip interrupts to prevent spurious interrupts until we
+	 * claim the IRQ.
+	 */
+	esas2r_disable_chip_interrupts(a);
+	if (!esas2r_power_up(a, true)) {
+		esas2r_debug("yikes, esas2r_power_up failed");
+		rez = -ENOMEM;
+		goto error_exit;
+	}
+
+	esas2r_claim_interrupts(a);
+
+	if (a->flags2 & AF2_IRQ_CLAIMED) {
+		/*
+		 * Now that system interrupt(s) are claimed, we can enable
+		 * chip interrupts.
+		 */
+		esas2r_enable_chip_interrupts(a);
+		esas2r_kickoff_timer(a);
+	} else {
+		esas2r_debug("yikes, unable to claim IRQ");
+		esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!");
+		rez = -ENOMEM;
+		goto error_exit;
+	}
+
+error_exit:
+	esas2r_log_dev(ESAS2R_LOG_CRIT, &(pdev->dev), "esas2r_resume(): %d",
+		       rez);
+	return rez;
+}
+
+bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
+{
+	esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
+	esas2r_log(ESAS2R_LOG_CRIT,
+		   "setting adapter to degraded mode: %s\n", error_str);
+	return false;
+}
+
+u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
+{
+	return sizeof(struct esas2r_sas_nvram)
+	       + ALIGN(ESAS2R_DISC_BUF_LEN, 8)
+	       + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */
+	       + 8
+	       + (num_sg_lists * (u16)sgl_page_size)
+	       + ALIGN((num_requests + num_ae_requests + 1 +
+			ESAS2R_LIST_EXTRA) *
+		       sizeof(struct esas2r_inbound_list_source_entry),
+		       8)
+	       + ALIGN((num_requests + num_ae_requests + 1 +
+			ESAS2R_LIST_EXTRA) *
+		       sizeof(struct atto_vda_ob_rsp), 8)
+	       + 256; /* VDA request and buffer align */
+}
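+
+/*
+ * A usage sketch for the size computed above.  This particular caller is
+ * hypothetical (the real allocation happens elsewhere in the driver, and
+ * the exact types of the a->uncached* fields are not shown here), but
+ * these are the same fields freed in esas2r_kill_adapter():
+ *
+ *	a->uncached_size = esas2r_get_uncached_size(a);
+ *	a->uncached = dma_alloc_coherent(&a->pcid->dev,
+ *					 (size_t)a->uncached_size,
+ *					 (dma_addr_t *)&a->uncached_phys,
+ *					 GFP_KERNEL);
+ *
+ * esas2r_init_adapter_struct() below then carves the NVRAM image, the
+ * discovery buffer, and the outbound list copy pointer out of that block.
+ */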
+
+static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
+{
+	int pcie_cap_reg;
+
+	pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
+	if (pcie_cap_reg) {
+		u16 devcontrol;
+
+		pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
+				     &devcontrol);
+
+		if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > 0x2000) {
+			esas2r_log(ESAS2R_LOG_INFO,
+				   "max read request size > 512B");
+
+			devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
+			devcontrol |= 0x2000;
+			pci_write_config_word(a->pcid,
+					      pcie_cap_reg + PCI_EXP_DEVCTL,
+					      devcontrol);
+		}
+	}
+}
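+
+/*
+ * A worked example of the encoding used above: PCI_EXP_DEVCTL_READRQ is
+ * bits 14:12 of the PCIe Device Control register, and the maximum read
+ * request size is 128 << field.  0x2000 is field value 2, i.e.
+ * 128 << 2 = 512 bytes, so any larger setting is clamped back down to a
+ * 512-byte maximum read request size.
+ */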
+
+/*
+ * Determine the organization of the uncached data area and
+ * finish initializing the adapter structure
+ */
+bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
+				void **uncached_area)
+{
+	u32 i;
+	u8 *high;
+	struct esas2r_inbound_list_source_entry *element;
+	struct esas2r_request *rq;
+	struct esas2r_mem_desc *sgl;
+
+	spin_lock_init(&a->sg_list_lock);
+	spin_lock_init(&a->mem_lock);
+	spin_lock_init(&a->queue_lock);
+
+	a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS];
+
+	if (!alloc_vda_req(a, &a->general_req)) {
+		esas2r_hdebug(
+			"failed to allocate a VDA request for the general req!");
+		return false;
+	}
+
+	/* allocate requests for asynchronous events */
+	a->first_ae_req =
+		kzalloc(num_ae_requests * sizeof(struct esas2r_request),
+			GFP_KERNEL);
+
+	if (a->first_ae_req == NULL) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "failed to allocate memory for asynchronous events");
+		return false;
+	}
+
+	/* allocate the S/G list memory descriptors */
+	a->sg_list_mds = kzalloc(
+		num_sg_lists * sizeof(struct esas2r_mem_desc), GFP_KERNEL);
+
+	if (a->sg_list_mds == NULL) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "failed to allocate memory for s/g list descriptors");
+		return false;
+	}
+
+	/* allocate the request table */
+	a->req_table =
+		kzalloc((num_requests + num_ae_requests +
+			 1) * sizeof(struct esas2r_request *), GFP_KERNEL);
+
+	if (a->req_table == NULL) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "failed to allocate memory for the request table");
+		return false;
+	}
+
+	/* initialize PCI configuration space */
+	esas2r_init_pci_cfg_space(a);
+
+	/*
+	 * the thunder_stream boards all have a serial flash part that has a
+	 * different base address on the AHB bus.
+	 */
+	if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID)
+	    && (a->pcid->subsystem_device & ATTO_SSDID_TBT))
+		a->flags2 |= AF2_THUNDERBOLT;
+
+	if (a->flags2 & AF2_THUNDERBOLT)
+		a->flags2 |= AF2_SERIAL_FLASH;
+
+	if (a->pcid->subsystem_device == ATTO_TLSH_1068)
+		a->flags2 |= AF2_THUNDERLINK;
+
+	/* Uncached Area */
+	high = (u8 *)*uncached_area;
+
+	/* initialize the scatter/gather table pages */
+
+	for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) {
+		sgl->size = sgl_page_size;
+
+		list_add_tail(&sgl->next_desc, &a->free_sg_list_head);
+
+		if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) {
+			/* Allow the driver to load if the minimum count met. */
+			if (i < NUM_SGL_MIN)
+				return false;
+			break;
+		}
+	}
+
+	/* compute the size of the lists */
+	a->list_size = num_requests + ESAS2R_LIST_EXTRA;
+
+	/* allocate the inbound list */
+	a->inbound_list_md.size = a->list_size *
+				  sizeof(struct
+					 esas2r_inbound_list_source_entry);
+
+	if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) {
+		esas2r_hdebug("failed to allocate IB list");
+		return false;
+	}
+
+	/* allocate the outbound list */
+	a->outbound_list_md.size = a->list_size *
+				   sizeof(struct atto_vda_ob_rsp);
+
+	if (!esas2r_initmem_alloc(a, &a->outbound_list_md,
+				  ESAS2R_LIST_ALIGN)) {
+		esas2r_hdebug("failed to allocate IB list");
+		return false;
+	}
+
+	/* allocate the NVRAM structure */
+	a->nvram = (struct esas2r_sas_nvram *)high;
+	high += sizeof(struct esas2r_sas_nvram);
+
+	/* allocate the discovery buffer */
+	a->disc_buffer = high;
+	high += ESAS2R_DISC_BUF_LEN;
+	high = PTR_ALIGN(high, 8);
+
+	/* allocate the outbound list copy pointer */
+	a->outbound_copy = (u32 volatile *)high;
+	high += sizeof(u32);
+
+	if (!(a->flags & AF_NVR_VALID))
+		esas2r_nvram_set_defaults(a);
+
+	/* update the caller's uncached memory area pointer */
+	*uncached_area = (void *)high;
+
+	/* initialize the allocated memory */
+	if (a->flags & AF_FIRST_INIT) {
+		memset(a->req_table, 0,
+		       (num_requests + num_ae_requests +
+			1) * sizeof(struct esas2r_request *));
+
+		esas2r_targ_db_initialize(a);
+
+		/* prime parts of the inbound list */
+		element =
+			(struct esas2r_inbound_list_source_entry *)a->
+			inbound_list_md.
+			virt_addr;
+
+		for (i = 0; i < a->list_size; i++) {
+			element->address = 0;
+			element->reserved = 0;
+			element->length = cpu_to_le32(HWILSE_INTERFACE_F0
+						      | (sizeof(union
+								atto_vda_req)
+							 /
+							 sizeof(u32)));
+			element++;
+		}
+
+		/* init the AE requests */
+		for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
+		     i++) {
+			INIT_LIST_HEAD(&rq->req_list);
+			if (!alloc_vda_req(a, rq)) {
+				esas2r_hdebug(
+					"failed to allocate a VDA request!");
+				return false;
+			}
+
+			esas2r_rq_init_request(rq, a);
+
+			/* override the completion function */
+			rq->comp_cb = esas2r_ae_complete;
+		}
+	}
+
+	return true;
+}
+
+/* This code will verify that the chip is operational. */
+bool esas2r_check_adapter(struct esas2r_adapter *a)
+{
+	u32 starttime;
+	u32 doorbell;
+	u64 ppaddr;
+	u32 dw;
+
+	/*
+	 * if the chip reset detected flag is set, we can bypass a bunch of
+	 * stuff.
+	 */
+	if (a->flags & AF_CHPRST_DETECTED)
+		goto skip_chip_reset;
+
+	/*
+	 * BEFORE WE DO ANYTHING, disable the chip interrupts!  the boot driver
+	 * may have left them enabled or we may be recovering from a fault.
+	 */
+	esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK);
+	esas2r_flush_register_dword(a, MU_INT_MASK_OUT);
+
+	/*
+	 * wait for the firmware to become ready by forcing an interrupt and
+	 * waiting for a response.
+	 */
+	starttime = jiffies_to_msecs(jiffies);
+
+	while (true) {
+		esas2r_force_interrupt(a);
+		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+		if (doorbell == 0xFFFFFFFF) {
+			/*
+			 * Give the firmware up to two seconds to enable
+			 * register access after a reset.
+			 */
+			if ((jiffies_to_msecs(jiffies) - starttime) > 2000)
+				return esas2r_set_degraded_mode(a,
+								"unable to access registers");
+		} else if (doorbell & DRBL_FORCE_INT) {
+			u32 ver = (doorbell & DRBL_FW_VER_MSK);
+
+			/*
+			 * This driver supports version 0 and version 1 of
+			 * the API
+			 */
+			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+						    doorbell);
+
+			if (ver == DRBL_FW_VER_0) {
+				esas2r_lock_set_flags(&a->flags,
+						      AF_LEGACY_SGE_MODE);
+
+				a->max_vdareq_size = 128;
+				a->build_sgl = esas2r_build_sg_list_sge;
+			} else if (ver == DRBL_FW_VER_1) {
+				esas2r_lock_clear_flags(&a->flags,
+							AF_LEGACY_SGE_MODE);
+
+				a->max_vdareq_size = 1024;
+				a->build_sgl = esas2r_build_sg_list_prd;
+			} else {
+				return esas2r_set_degraded_mode(a,
+								"unknown firmware version");
+			}
+			break;
+		}
+
+		schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+		if ((jiffies_to_msecs(jiffies) - starttime) > 180000) {
+			esas2r_hdebug("FW ready TMO");
+			esas2r_bugon();
+
+			return esas2r_set_degraded_mode(a,
+							"firmware start has timed out");
+		}
+	}
+
+	/* purge any asynchronous events since we will repost them later */
+	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN);
+	starttime = jiffies_to_msecs(jiffies);
+
+	while (true) {
+		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+		if (doorbell & DRBL_MSG_IFC_DOWN) {
+			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+						    doorbell);
+			break;
+		}
+
+		schedule_timeout_interruptible(msecs_to_jiffies(50));
+
+		if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
+			esas2r_hdebug("timeout waiting for interface down");
+			break;
+		}
+	}
+skip_chip_reset:
+	/*
+	 * first things first, before we go changing any of these registers
+	 * disable the communication lists.
+	 */
+	dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
+	dw &= ~MU_ILC_ENABLE;
+	esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
+	dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
+	dw &= ~MU_OLC_ENABLE;
+	esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
+
+	/* configure the communication list addresses */
+	ppaddr = a->inbound_list_md.phys_addr;
+	esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO,
+				    lower_32_bits(ppaddr));
+	esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI,
+				    upper_32_bits(ppaddr));
+	ppaddr = a->outbound_list_md.phys_addr;
+	esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO,
+				    lower_32_bits(ppaddr));
+	esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI,
+				    upper_32_bits(ppaddr));
+	ppaddr = a->uncached_phys +
+		 ((u8 *)a->outbound_copy - a->uncached);
+	esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO,
+				    lower_32_bits(ppaddr));
+	esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI,
+				    upper_32_bits(ppaddr));
+
+	/* reset the read and write pointers */
+	*a->outbound_copy =
+		a->last_write =
+			a->last_read = a->list_size - 1;
+	esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
+	esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |
+				    a->last_write);
+	esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE |
+				    a->last_write);
+	esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE |
+				    a->last_write);
+	esas2r_write_register_dword(a, MU_OUT_LIST_WRITE,
+				    MU_OLW_TOGGLE | a->last_write);
+
+	/* configure the interface select fields */
+	dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG);
+	dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST);
+	esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG,
+				    (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR));
+	dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG);
+	dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE);
+	esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG,
+				    (dw | MU_OLIC_LIST_F0 |
+				     MU_OLIC_SOURCE_DDR));
+
+	/* finish configuring the communication lists */
+	dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
+	dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK);
+	dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC
+	      | (a->list_size << MU_ILC_NUMBER_SHIFT);
+	esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
+	dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
+	dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK);
+	dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT);
+	esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
+
+	/*
+	 * notify the firmware that we're done setting up the communication
+	 * list registers.  wait here until the firmware is done configuring
+	 * its lists.  it will signal that it is done by enabling the lists.
+	 */
+	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT);
+	starttime = jiffies_to_msecs(jiffies);
+
+	while (true) {
+		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+		if (doorbell & DRBL_MSG_IFC_INIT) {
+			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+						    doorbell);
+			break;
+		}
+
+		schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+		if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
+			esas2r_hdebug(
+				"timeout waiting for communication list init");
+			esas2r_bugon();
+			return esas2r_set_degraded_mode(a,
+							"timeout waiting for communication list init");
+		}
+	}
+
+	/*
+	 * flag whether the firmware supports the power down doorbell.  we
+	 * determine this by reading the inbound doorbell enable mask.
+	 */
+	doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
+	if (doorbell & DRBL_POWER_DOWN)
+		esas2r_lock_set_flags(&a->flags2, AF2_VDA_POWER_DOWN);
+	else
+		esas2r_lock_clear_flags(&a->flags2, AF2_VDA_POWER_DOWN);
+
+	/*
+	 * enable assertion of outbound queue and doorbell interrupts in the
+	 * main interrupt cause register.
+	 */
+	esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK);
+	esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK);
+	return true;
+}
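+
+/*
+ * The doorbell handshakes above (and again in esas2r_power_down() later
+ * in this file) all share the same write/poll/ack shape.  A sketch of
+ * that pattern as one helper -- the helper itself is hypothetical and is
+ * not part of this driver:
+ *
+ *	static bool esas2r_doorbell_handshake(struct esas2r_adapter *a,
+ *					      u32 bit, u32 timeout_ms)
+ *	{
+ *		u32 start = jiffies_to_msecs(jiffies);
+ *		u32 doorbell;
+ *
+ *		esas2r_write_register_dword(a, MU_DOORBELL_IN, bit);
+ *
+ *		while ((jiffies_to_msecs(jiffies) - start) <= timeout_ms) {
+ *			doorbell = esas2r_read_register_dword(a,
+ *							      MU_DOORBELL_OUT);
+ *			if (doorbell & bit) {
+ *				esas2r_write_register_dword(a,
+ *							    MU_DOORBELL_OUT,
+ *							    doorbell);
+ *				return true;
+ *			}
+ *			schedule_timeout_interruptible(msecs_to_jiffies(100));
+ *		}
+ *
+ *		return false;
+ *	}
+ */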
+
+/* Process the initialization message just completed and format the next one. */
+static bool esas2r_format_init_msg(struct esas2r_adapter *a,
+				   struct esas2r_request *rq)
+{
+	u32 msg = a->init_msg;
+	struct atto_vda_cfg_init *ci;
+
+	a->init_msg = 0;
+
+	switch (msg) {
+	case ESAS2R_INIT_MSG_START:
+	case ESAS2R_INIT_MSG_REINIT:
+	{
+		struct timeval now;
+
+		do_gettimeofday(&now);
+		esas2r_hdebug("CFG init");
+		esas2r_build_cfg_req(a,
+				     rq,
+				     VDA_CFG_INIT,
+				     0,
+				     NULL);
+		ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
+		ci->sgl_page_size = sgl_page_size;
+		ci->epoch_time = now.tv_sec;
+		rq->flags |= RF_FAILURE_OK;
+		a->init_msg = ESAS2R_INIT_MSG_INIT;
+		break;
+	}
+
+	case ESAS2R_INIT_MSG_INIT:
+		if (rq->req_stat == RS_SUCCESS) {
+			u32 major;
+			u32 minor;
+
+			a->fw_version = le16_to_cpu(
+				rq->func_rsp.cfg_rsp.vda_version);
+			a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
+			major = LOBYTE(rq->func_rsp.cfg_rsp.fw_release);
+			minor = HIBYTE(rq->func_rsp.cfg_rsp.fw_release);
+			a->fw_version += (major << 16) + (minor << 24);
+		} else {
+			esas2r_hdebug("FAILED");
+		}
+
+		/*
+		 * the 2.71 and earlier releases of R6xx firmware did not error
+		 * unsupported config requests correctly.
+		 */
+
+		if ((a->flags2 & AF2_THUNDERBOLT)
+		    || (be32_to_cpu(a->fw_version) >
+			be32_to_cpu(0x47020052))) {
+			esas2r_hdebug("CFG get init");
+			esas2r_build_cfg_req(a,
+					     rq,
+					     VDA_CFG_GET_INIT2,
+					     sizeof(struct atto_vda_cfg_init),
+					     NULL);
+
+			rq->vrq->cfg.sg_list_offset = offsetof(
+				struct atto_vda_cfg_req,
+				data.sge);
+			rq->vrq->cfg.data.prde.ctl_len =
+				cpu_to_le32(sizeof(struct atto_vda_cfg_init));
+			rq->vrq->cfg.data.prde.address = cpu_to_le64(
+				rq->vrq_md->phys_addr +
+				sizeof(union atto_vda_req));
+			rq->flags |= RF_FAILURE_OK;
+			a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
+			break;
+		}
+
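+	/* fall through */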
+	case ESAS2R_INIT_MSG_GET_INIT:
+		if (msg == ESAS2R_INIT_MSG_GET_INIT) {
+			ci = (struct atto_vda_cfg_init *)rq->data_buf;
+			if (rq->req_stat == RS_SUCCESS) {
+				a->num_targets_backend =
+					le32_to_cpu(ci->num_targets_backend);
+				a->ioctl_tunnel =
+					le32_to_cpu(ci->ioctl_tunnel);
+			} else {
+				esas2r_hdebug("FAILED");
+			}
+		}
+	/* fall through */
+
+	default:
+		rq->req_stat = RS_SUCCESS;
+		return false;
+	}
+	return true;
+}
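+
+/*
+ * The resulting init message flow, as driven by esas2r_init_msgs() below:
+ *
+ *	START/REINIT -> INIT -> GET_INIT -> 0 (done)
+ *
+ * On the 2.71-and-earlier R6xx firmware noted above, the GET_INIT step is
+ * skipped and the sequence finishes straight from INIT.
+ */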
+
+/*
+ * Perform initialization messages via the request queue.  Messages are
+ * performed with interrupts disabled.
+ */
+bool esas2r_init_msgs(struct esas2r_adapter *a)
+{
+	bool success = true;
+	struct esas2r_request *rq = &a->general_req;
+
+	esas2r_rq_init_request(rq, a);
+	rq->comp_cb = esas2r_dummy_complete;
+
+	if (a->init_msg == 0)
+		a->init_msg = ESAS2R_INIT_MSG_REINIT;
+
+	while (a->init_msg) {
+		if (esas2r_format_init_msg(a, rq)) {
+			unsigned long flags;
+
+			while (true) {
+				spin_lock_irqsave(&a->queue_lock, flags);
+				esas2r_start_vda_request(a, rq);
+				spin_unlock_irqrestore(&a->queue_lock, flags);
+				esas2r_wait_request(a, rq);
+				if (rq->req_stat != RS_PENDING)
+					break;
+			}
+		}
+
+		if (rq->req_stat == RS_SUCCESS
+		    || ((rq->flags & RF_FAILURE_OK)
+			&& rq->req_stat != RS_TIMEOUT))
+			continue;
+
+		esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)",
+			   a->init_msg, rq->req_stat, rq->flags);
+		a->init_msg = ESAS2R_INIT_MSG_START;
+		success = false;
+		break;
+	}
+
+	esas2r_rq_destroy_request(rq, a);
+	return success;
+}
+
+/* Initialize the adapter chip */
+bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
+{
+	bool rslt = false;
+	struct esas2r_request *rq;
+	u32 i;
+
+	if (a->flags & AF_DEGRADED_MODE)
+		goto exit;
+
+	if (!(a->flags & AF_NVR_VALID)) {
+		if (!esas2r_nvram_read_direct(a))
+			esas2r_log(ESAS2R_LOG_WARN,
+				   "invalid/missing NVRAM parameters");
+	}
+
+	if (!esas2r_init_msgs(a)) {
+		esas2r_set_degraded_mode(a, "init messages failed");
+		goto exit;
+	}
+
+	/* The firmware is ready. */
+	esas2r_lock_clear_flags(&a->flags, AF_DEGRADED_MODE);
+	esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
+
+	/* Post all the async event requests */
+	for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
+		esas2r_start_ae_request(a, rq);
+
+	if (!a->flash_rev[0])
+		esas2r_read_flash_rev(a);
+
+	if (!a->image_type[0])
+		esas2r_read_image_type(a);
+
+	if (a->fw_version == 0)
+		a->fw_rev[0] = 0;
+	else
+		sprintf(a->fw_rev, "%1d.%02d",
+			(int)LOBYTE(HIWORD(a->fw_version)),
+			(int)HIBYTE(HIWORD(a->fw_version)));
+
+	esas2r_hdebug("firmware revision: %s", a->fw_rev);
+
+	if ((a->flags & AF_CHPRST_DETECTED)
+	    && (a->flags & AF_FIRST_INIT)) {
+		esas2r_enable_chip_interrupts(a);
+		return true;
+	}
+
+	/* initialize discovery */
+	esas2r_disc_initialize(a);
+
+	/*
+	 * wait for the device wait time to expire here if requested.  this is
+	 * usually requested during initial driver load and possibly when
+	 * resuming from a low power state.  deferred device waiting will use
+	 * interrupts.  chip reset recovery always defers device waiting to
+	 * avoid being in a TASKLET too long.
+	 */
+	if (init_poll) {
+		u32 currtime = a->disc_start_time;
+		u32 nexttick = 100;
+		u32 deltatime;
+
+		/*
+		 * Block Tasklets from getting scheduled and indicate this is
+		 * polled discovery.
+		 */
+		esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED);
+		esas2r_lock_set_flags(&a->flags, AF_DISC_POLLED);
+
+		/*
+		 * Temporarily bring the disable count to zero to enable
+		 * deferred processing.  Note that the count is already zero
+		 * after the first initialization.
+		 */
+		if (a->flags & AF_FIRST_INIT)
+			atomic_dec(&a->disable_cnt);
+
+		while (a->flags & AF_DISC_PENDING) {
+			schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+			/*
+			 * Determine the need for a timer tick based on the
+			 * delta time between this and the last iteration of
+			 * this loop.  We don't use the absolute time because
+			 * then we would have to worry about when nexttick
+			 * wraps and currtime hasn't yet.
+			 */
+			deltatime = jiffies_to_msecs(jiffies) - currtime;
+			currtime += deltatime;
+
+			/*
+			 * Process any waiting discovery as long as the chip is
+			 * up.  If a chip reset happens during initial polling,
+			 * we have to make sure the timer tick processes the
+			 * doorbell indicating the firmware is ready.
+			 */
+			if (!(a->flags & AF_CHPRST_PENDING))
+				esas2r_disc_check_for_work(a);
+
+			/* Simulate a timer tick. */
+			if (nexttick <= deltatime) {
+
+				/* Time for a timer tick */
+				nexttick += 100;
+				esas2r_timer_tick(a);
+			}
+
+			if (nexttick > deltatime)
+				nexttick -= deltatime;
+
+			/* Do any deferred processing */
+			if (esas2r_is_tasklet_pending(a))
+				esas2r_do_tasklet_tasks(a);
+
+		}
+
+		if (a->flags & AF_FIRST_INIT)
+			atomic_inc(&a->disable_cnt);
+
+		esas2r_lock_clear_flags(&a->flags, AF_DISC_POLLED);
+		esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
+	}
+
+	esas2r_targ_db_report_changes(a);
+
+	/*
+	 * For cases where (a) the initialization messages processing may
+	 * handle an interrupt for a port event and a discovery is waiting, but
+	 * we are not waiting for devices, or (b) the device wait time has been
+	 * exhausted but there is still discovery pending, start any leftover
+	 * discovery in interrupt driven mode.
+	 */
+	esas2r_disc_start_waiting(a);
+
+	/* Enable chip interrupts */
+	a->int_mask = ESAS2R_INT_STS_MASK;
+	esas2r_enable_chip_interrupts(a);
+	esas2r_enable_heartbeat(a);
+	rslt = true;
+
+exit:
+	/*
+	 * Regardless of whether initialization was successful, certain things
+	 * need to get done before we exit.
+	 */
+
+	if ((a->flags & AF_CHPRST_DETECTED)
+	    && (a->flags & AF_FIRST_INIT)) {
+		/*
+		 * Reinitialization was performed during the first
+		 * initialization.  Only clear the chip reset flag so the
+		 * original device polling is not cancelled.
+		 */
+		if (!rslt)
+			esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
+	} else {
+		/* First initialization or a subsequent re-init is complete. */
+		if (!rslt) {
+			esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
+			esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
+		}
+
+		/* Enable deferred processing after the first initialization. */
+		if (a->flags & AF_FIRST_INIT) {
+			esas2r_lock_clear_flags(&a->flags, AF_FIRST_INIT);
+
+			if (atomic_dec_return(&a->disable_cnt) == 0)
+				esas2r_do_deferred_processes(a);
+		}
+	}
+
+	return rslt;
+}
+
+void esas2r_reset_adapter(struct esas2r_adapter *a)
+{
+	esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
+	esas2r_local_reset_adapter(a);
+	esas2r_schedule_tasklet(a);
+}
+
+void esas2r_reset_chip(struct esas2r_adapter *a)
+{
+	if (!esas2r_is_adapter_present(a))
+		return;
+
+	/*
+	 * Before we reset the chip, save off the VDA core dump.  The VDA core
+	 * dump is located in the upper 512KB of the onchip SRAM.  Make sure
+	 * to not overwrite a previous crash that was saved.
+	 */
+	if ((a->flags2 & AF2_COREDUMP_AVAIL)
+	    && !(a->flags2 & AF2_COREDUMP_SAVED)
+	    && a->fw_coredump_buff) {
+		esas2r_read_mem_block(a,
+				      a->fw_coredump_buff,
+				      MW_DATA_ADDR_SRAM + 0x80000,
+				      ESAS2R_FWCOREDUMP_SZ);
+
+		esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_SAVED);
+	}
+
+	esas2r_lock_clear_flags(&a->flags2, AF2_COREDUMP_AVAIL);
+
+	/* Reset the chip */
+	if (a->pcid->revision == MVR_FREY_B2)
+		esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2,
+					    MU_CTL_IN_FULL_RST2);
+	else
+		esas2r_write_register_dword(a, MU_CTL_STATUS_IN,
+					    MU_CTL_IN_FULL_RST);
+
+	/* Stall a little while to let the reset condition clear */
+	mdelay(10);
+}
+
+static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a)
+{
+	u32 starttime;
+	u32 doorbell;
+
+	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN);
+	starttime = jiffies_to_msecs(jiffies);
+
+	while (true) {
+		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+		if (doorbell & DRBL_POWER_DOWN) {
+			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+						    doorbell);
+			break;
+		}
+
+		schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+		if ((jiffies_to_msecs(jiffies) - starttime) > 30000) {
+			esas2r_hdebug("Timeout waiting for power down");
+			break;
+		}
+	}
+}
+
+/*
+ * Perform power management processing including managing device states, adapter
+ * states, interrupts, and I/O.
+ */
+void esas2r_power_down(struct esas2r_adapter *a)
+{
+	esas2r_lock_set_flags(&a->flags, AF_POWER_MGT);
+	esas2r_lock_set_flags(&a->flags, AF_POWER_DOWN);
+
+	if (!(a->flags & AF_DEGRADED_MODE)) {
+		u32 starttime;
+		u32 doorbell;
+
+		/*
+		 * We are currently running OK and will be reinitializing later.
+		 * Increment the disable count to coordinate with
+		 * esas2r_init_adapter.  We don't have to do this in degraded
+		 * mode since we never enabled interrupts in the first place.
+		 */
+		esas2r_disable_chip_interrupts(a);
+		esas2r_disable_heartbeat(a);
+
+		/* wait for any VDA activity to clear before continuing */
+		esas2r_write_register_dword(a, MU_DOORBELL_IN,
+					    DRBL_MSG_IFC_DOWN);
+		starttime = jiffies_to_msecs(jiffies);
+
+		while (true) {
+			doorbell =
+				esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+			if (doorbell & DRBL_MSG_IFC_DOWN) {
+				esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+							    doorbell);
+				break;
+			}
+
+			schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+			if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
+				esas2r_hdebug(
+					"timeout waiting for interface down");
+				break;
+			}
+		}
+
+		/*
+		 * For versions of firmware that support it tell them the driver
+		 * is powering down.
+		 */
+		if (a->flags2 & AF2_VDA_POWER_DOWN)
+			esas2r_power_down_notify_firmware(a);
+	}
+
+	/* Suspend I/O processing. */
+	esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
+	esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING);
+	esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING);
+
+	esas2r_process_adapter_reset(a);
+
+	/* Remove devices now that I/O is cleaned up. */
+	a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a);
+	esas2r_targ_db_remove_all(a, false);
+}
+
+/*
+ * Perform power management processing including managing device states, adapter
+ * states, interrupts, and I/O.
+ */
+bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
+{
+	bool ret;
+
+	esas2r_lock_clear_flags(&a->flags, AF_POWER_DOWN);
+	esas2r_init_pci_cfg_space(a);
+	esas2r_lock_set_flags(&a->flags, AF_FIRST_INIT);
+	atomic_inc(&a->disable_cnt);
+
+	/* reinitialize the adapter */
+	ret = esas2r_check_adapter(a);
+	if (!esas2r_init_adapter_hw(a, init_poll))
+		ret = false;
+
+	/* send the reset asynchronous event */
+	esas2r_send_reset_ae(a, true);
+
+	/* clear this flag after initialization. */
+	esas2r_lock_clear_flags(&a->flags, AF_POWER_MGT);
+	return ret;
+}
+
+bool esas2r_is_adapter_present(struct esas2r_adapter *a)
+{
+	if (a->flags & AF_NOT_PRESENT)
+		return false;
+
+	if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) {
+		esas2r_lock_set_flags(&a->flags, AF_NOT_PRESENT);
+
+		return false;
+	}
+	return true;
+}
+
+const char *esas2r_get_model_name(struct esas2r_adapter *a)
+{
+	switch (a->pcid->subsystem_device) {
+	case ATTO_ESAS_R680:
+		return "ATTO ExpressSAS R680";
+
+	case ATTO_ESAS_R608:
+		return "ATTO ExpressSAS R608";
+
+	case ATTO_ESAS_R60F:
+		return "ATTO ExpressSAS R60F";
+
+	case ATTO_ESAS_R6F0:
+		return "ATTO ExpressSAS R6F0";
+
+	case ATTO_ESAS_R644:
+		return "ATTO ExpressSAS R644";
+
+	case ATTO_ESAS_R648:
+		return "ATTO ExpressSAS R648";
+
+	case ATTO_TSSC_3808:
+		return "ATTO ThunderStream SC 3808D";
+
+	case ATTO_TSSC_3808E:
+		return "ATTO ThunderStream SC 3808E";
+
+	case ATTO_TLSH_1068:
+		return "ATTO ThunderLink SH 1068";
+	}
+
+	return "ATTO SAS Controller";
+}
+
+const char *esas2r_get_model_name_short(struct esas2r_adapter *a)
+{
+	switch (a->pcid->subsystem_device) {
+	case ATTO_ESAS_R680:
+		return "R680";
+
+	case ATTO_ESAS_R608:
+		return "R608";
+
+	case ATTO_ESAS_R60F:
+		return "R60F";
+
+	case ATTO_ESAS_R6F0:
+		return "R6F0";
+
+	case ATTO_ESAS_R644:
+		return "R644";
+
+	case ATTO_ESAS_R648:
+		return "R648";
+
+	case ATTO_TSSC_3808:
+		return "SC 3808D";
+
+	case ATTO_TSSC_3808E:
+		return "SC 3808E";
+
+	case ATTO_TLSH_1068:
+		return "SH 1068";
+	}
+
+	return "unknown";
+}

+ 941 - 0
drivers/scsi/esas2r/esas2r_int.c

@@ -0,0 +1,941 @@
+/*
+ *  linux/drivers/scsi/esas2r/esas2r_int.c
+ *      esas2r interrupt handling
+ *
+ *  Copyright (c) 2001-2013 ATTO Technology, Inc.
+ *  (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  NO WARRANTY
+ *  THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ *  CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ *  LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ *  MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ *  solely responsible for determining the appropriateness of using and
+ *  distributing the Program and assumes all risks associated with its
+ *  exercise of rights under this Agreement, including but not limited to
+ *  the risks and costs of program errors, damage to or loss of data,
+ *  programs or equipment, and unavailability or interruption of operations.
+ *
+ *  DISCLAIMER OF LIABILITY
+ *  NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ *  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ *  DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ *  USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ *  HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "esas2r.h"
+
+/* Local function prototypes */
+static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell);
+static void esas2r_get_outbound_responses(struct esas2r_adapter *a);
+static void esas2r_process_bus_reset(struct esas2r_adapter *a);
+
+/*
+ * Poll the adapter for interrupts and service them.
+ * This function handles both legacy interrupts and MSI.
+ */
+void esas2r_polled_interrupt(struct esas2r_adapter *a)
+{
+	u32 intstat;
+	u32 doorbell;
+
+	esas2r_disable_chip_interrupts(a);
+
+	intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
+
+	if (intstat & MU_INTSTAT_POST_OUT) {
+		/* clear the interrupt */
+
+		esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
+					    MU_OLIS_INT);
+		esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
+
+		esas2r_get_outbound_responses(a);
+	}
+
+	if (intstat & MU_INTSTAT_DRBL) {
+		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+		if (doorbell != 0)
+			esas2r_doorbell_interrupt(a, doorbell);
+	}
+
+	esas2r_enable_chip_interrupts(a);
+
+	if (atomic_read(&a->disable_cnt) == 0)
+		esas2r_do_deferred_processes(a);
+}
+
+/*
+ * Legacy and MSI interrupt handlers.  Note that the legacy interrupt handler
+ * schedules a TASKLET to process events, whereas the MSI handler just
+ * processes interrupt events directly.
+ */
+irqreturn_t esas2r_interrupt(int irq, void *dev_id)
+{
+	struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
+
+	if (!esas2r_adapter_interrupt_pending(a))
+		return IRQ_NONE;
+
+	esas2r_lock_set_flags(&a->flags2, AF2_INT_PENDING);
+	esas2r_schedule_tasklet(a);
+
+	return IRQ_HANDLED;
+}
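+
+/*
+ * A sketch of how these two handlers would typically be wired up.  The
+ * real registration lives in esas2r_claim_interrupts(), defined elsewhere
+ * in the driver; INTR_MODE_MSI is assumed here purely for illustration:
+ *
+ *	int err;
+ *
+ *	if (a->intr_mode == INTR_MODE_MSI)
+ *		err = request_irq(a->pcid->irq, esas2r_msi_interrupt,
+ *				  0, "esas2r", a);
+ *	else
+ *		err = request_irq(a->pcid->irq, esas2r_interrupt,
+ *				  IRQF_SHARED, "esas2r", a);
+ */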
+
+void esas2r_adapter_interrupt(struct esas2r_adapter *a)
+{
+	u32 doorbell;
+
+	if (likely(a->int_stat & MU_INTSTAT_POST_OUT)) {
+		/* clear the interrupt */
+		esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
+					    MU_OLIS_INT);
+		esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
+		esas2r_get_outbound_responses(a);
+	}
+
+	if (unlikely(a->int_stat & MU_INTSTAT_DRBL)) {
+		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+		if (doorbell != 0)
+			esas2r_doorbell_interrupt(a, doorbell);
+	}
+
+	a->int_mask = ESAS2R_INT_STS_MASK;
+
+	esas2r_enable_chip_interrupts(a);
+
+	if (likely(atomic_read(&a->disable_cnt) == 0))
+		esas2r_do_deferred_processes(a);
+}
+
+irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id)
+{
+	struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
+	u32 intstat;
+	u32 doorbell;
+
+	intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
+
+	if (likely(intstat & MU_INTSTAT_POST_OUT)) {
+		/* clear the interrupt */
+
+		esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
+					    MU_OLIS_INT);
+		esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
+
+		esas2r_get_outbound_responses(a);
+	}
+
+	if (unlikely(intstat & MU_INTSTAT_DRBL)) {
+		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+		if (doorbell != 0)
+			esas2r_doorbell_interrupt(a, doorbell);
+	}
+
+	/*
+	 * Work around a chip bug and force a new MSI to be sent if one is
+	 * still pending.
+	 */
+	esas2r_disable_chip_interrupts(a);
+	esas2r_enable_chip_interrupts(a);
+
+	if (likely(atomic_read(&a->disable_cnt) == 0))
+		esas2r_do_deferred_processes(a);
+
+	esas2r_do_tasklet_tasks(a);
+
+	return IRQ_HANDLED;
+}
+
+
+static void esas2r_handle_outbound_rsp_err(struct esas2r_adapter *a,
+					   struct esas2r_request *rq,
+					   struct atto_vda_ob_rsp *rsp)
+{
+
+	/*
+	 * For I/O requests, only copy the response if an error
+	 * occurred and setup a callback to do error processing.
+	 */
+	if (unlikely(rq->req_stat != RS_SUCCESS)) {
+		memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp));
+
+		if (rq->req_stat == RS_ABORTED) {
+			if (rq->timeout > RQ_MAX_TIMEOUT)
+				rq->req_stat = RS_TIMEOUT;
+		} else if (rq->req_stat == RS_SCSI_ERROR) {
+			u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat;
+
+			esas2r_trace("scsistatus: %x", scsistatus);
+
+			/* Any of these are a good result. */
+			if (scsistatus == SAM_STAT_GOOD || scsistatus ==
+			    SAM_STAT_CONDITION_MET || scsistatus ==
+			    SAM_STAT_INTERMEDIATE || scsistatus ==
+			    SAM_STAT_INTERMEDIATE_CONDITION_MET) {
+				rq->req_stat = RS_SUCCESS;
+				rq->func_rsp.scsi_rsp.scsi_stat =
+					SAM_STAT_GOOD;
+			}
+		}
+	}
+}
+
+static void esas2r_get_outbound_responses(struct esas2r_adapter *a)
+{
+	struct atto_vda_ob_rsp *rsp;
+	u32 rspput_ptr;
+	u32 rspget_ptr;
+	struct esas2r_request *rq;
+	u32 handle;
+	unsigned long flags;
+
+	LIST_HEAD(comp_list);
+
+	esas2r_trace_enter();
+
+	spin_lock_irqsave(&a->queue_lock, flags);
+
+	/* Get the outbound limit and pointers */
+	rspput_ptr = le32_to_cpu(*a->outbound_copy) & MU_OLC_WRT_PTR;
+	rspget_ptr = a->last_read;
+
+	esas2r_trace("rspput_ptr: %x, rspget_ptr: %x", rspput_ptr, rspget_ptr);
+
+	/* If we don't have anything to process, get out */
+	if (unlikely(rspget_ptr == rspput_ptr)) {
+		spin_unlock_irqrestore(&a->queue_lock, flags);
+		esas2r_trace_exit();
+		return;
+	}
+
+	/* Make sure the firmware is healthy */
+	if (unlikely(rspput_ptr >= a->list_size)) {
+		spin_unlock_irqrestore(&a->queue_lock, flags);
+		esas2r_bugon();
+		esas2r_local_reset_adapter(a);
+		esas2r_trace_exit();
+		return;
+	}
+
+	do {
+		rspget_ptr++;
+
+		if (rspget_ptr >= a->list_size)
+			rspget_ptr = 0;
+
+		rsp = (struct atto_vda_ob_rsp *)a->outbound_list_md.virt_addr
+		      + rspget_ptr;
+
+		handle = rsp->handle;
+
+		/* Verify the handle range */
+		if (unlikely(LOWORD(handle) == 0
+			     || LOWORD(handle) > num_requests +
+			     num_ae_requests + 1)) {
+			esas2r_bugon();
+			continue;
+		}
+
+		/* Get the request for this handle */
+		rq = a->req_table[LOWORD(handle)];
+
+		if (unlikely(rq == NULL || rq->vrq->scsi.handle != handle)) {
+			esas2r_bugon();
+			continue;
+		}
+
+		list_del(&rq->req_list);
+
+		/* Get the completion status */
+		rq->req_stat = rsp->req_stat;
+
+		esas2r_trace("handle: %x", handle);
+		esas2r_trace("rq: %p", rq);
+		esas2r_trace("req_status: %x", rq->req_stat);
+
+		if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
+			esas2r_handle_outbound_rsp_err(a, rq, rsp);
+		} else {
+			/*
+			 * Copy the outbound completion struct for non-I/O
+			 * requests.
+			 */
+			memcpy(&rq->func_rsp, &rsp->func_rsp,
+			       sizeof(rsp->func_rsp));
+		}
+
+		/* Queue the request for completion. */
+		list_add_tail(&rq->comp_list, &comp_list);
+
+	} while (rspget_ptr != rspput_ptr);
+
+	a->last_read = rspget_ptr;
+	spin_unlock_irqrestore(&a->queue_lock, flags);
+
+	esas2r_comp_list_drain(a, &comp_list);
+	esas2r_trace_exit();
+}
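+
+/*
+ * Note the locking pattern above, which recurs throughout this file:
+ * finished requests are collected on a local comp_list while queue_lock
+ * is held, and esas2r_comp_list_drain() runs the completion callbacks
+ * only after the lock has been dropped.
+ */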
+
+/*
+ * Perform all deferred processes for the adapter.  Deferred
+ * processes can only be done while the current interrupt
+ * disable_cnt for the adapter is zero.
+ */
+void esas2r_do_deferred_processes(struct esas2r_adapter *a)
+{
+	int startreqs = 2;
+	struct esas2r_request *rq;
+	unsigned long flags;
+
+	/*
+	 * startreqs is used to control starting requests
+	 * that are on the deferred queue
+	 *  = 0 - do not start any requests
+	 *  = 1 - can start discovery requests
+	 *  = 2 - can start any request
+	 */
+
+	if (a->flags & (AF_CHPRST_PENDING | AF_FLASHING))
+		startreqs = 0;
+	else if (a->flags & AF_DISC_PENDING)
+		startreqs = 1;
+
+	atomic_inc(&a->disable_cnt);
+
+	/* If the tasklet has work pending, reschedule it; don't start requests. */
+
+	if (esas2r_is_tasklet_pending(a)) {
+		esas2r_schedule_tasklet(a);
+
+		startreqs = 0;
+	}
+
+	/*
+	 * If we can start requests then traverse the defer queue
+	 * looking for requests to start or complete
+	 */
+	if (startreqs && !list_empty(&a->defer_list)) {
+		LIST_HEAD(comp_list);
+		struct list_head *element, *next;
+
+		spin_lock_irqsave(&a->queue_lock, flags);
+
+		list_for_each_safe(element, next, &a->defer_list) {
+			rq = list_entry(element, struct esas2r_request,
+					req_list);
+
+			if (rq->req_stat != RS_PENDING) {
+				list_del(element);
+				list_add_tail(&rq->comp_list, &comp_list);
+			}
+			/*
+			 * Process discovery and OS requests separately.  We
+			 * can't hold up discovery requests when discovery is
+			 * pending.  In general, there may be different sets of
+			 * conditions for starting different types of requests.
+			 */
+			else if (rq->req_type == RT_DISC_REQ) {
+				list_del(element);
+				esas2r_disc_local_start_request(a, rq);
+			} else if (startreqs == 2) {
+				list_del(element);
+				esas2r_local_start_request(a, rq);
+
+				/*
+				 * Flashing could have been set by last local
+				 * start
+				 */
+				if (a->flags & AF_FLASHING)
+					break;
+			}
+		}
+
+		spin_unlock_irqrestore(&a->queue_lock, flags);
+		esas2r_comp_list_drain(a, &comp_list);
+	}
+
+	atomic_dec(&a->disable_cnt);
+}
+
+/*
+ * Process an adapter reset (or one that is about to happen)
+ * by making sure all outstanding requests are completed that
+ * haven't been already.
+ */
+void esas2r_process_adapter_reset(struct esas2r_adapter *a)
+{
+	struct esas2r_request *rq = &a->general_req;
+	unsigned long flags;
+	struct esas2r_disc_context *dc;
+
+	LIST_HEAD(comp_list);
+	struct list_head *element;
+
+	esas2r_trace_enter();
+
+	spin_lock_irqsave(&a->queue_lock, flags);
+
+	/* abort the active discovery, if any.   */
+
+	if (rq->interrupt_cx) {
+		dc = (struct esas2r_disc_context *)rq->interrupt_cx;
+
+		dc->disc_evt = 0;
+
+		esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
+	}
+
+	/*
+	 * just clear the interrupt callback for now.  it will be dequeued if
+	 * and when we find it on the active queue and we don't want the
+	 * callback called.  also set the dummy completion callback in case we
+	 * were doing an I/O request.
+	 */
+
+	rq->interrupt_cx = NULL;
+	rq->interrupt_cb = NULL;
+
+	rq->comp_cb = esas2r_dummy_complete;
+
+	/* Reset the read and write pointers */
+
+	*a->outbound_copy =
+		a->last_write =
+			a->last_read = a->list_size - 1;
+
+	esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
+
+	/* Kill all the started requests sitting on the defer queue */
+	list_for_each(element, &a->defer_list) {
+		rq = list_entry(element, struct esas2r_request, req_list);
+
+		if (rq->req_stat == RS_STARTED)
+			if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
+				list_add_tail(&rq->comp_list, &comp_list);
+	}
+
+	spin_unlock_irqrestore(&a->queue_lock, flags);
+	esas2r_comp_list_drain(a, &comp_list);
+	esas2r_process_bus_reset(a);
+	esas2r_trace_exit();
+}
+
+static void esas2r_process_bus_reset(struct esas2r_adapter *a)
+{
+	struct esas2r_request *rq;
+	struct list_head *element;
+	unsigned long flags;
+
+	LIST_HEAD(comp_list);
+
+	esas2r_trace_enter();
+
+	esas2r_hdebug("reset detected");
+
+	spin_lock_irqsave(&a->queue_lock, flags);
+
+	/* kill all the requests on the deferred queue */
+	list_for_each(element, &a->defer_list) {
+		rq = list_entry(element, struct esas2r_request, req_list);
+		if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
+			list_add_tail(&rq->comp_list, &comp_list);
+	}
+
+	spin_unlock_irqrestore(&a->queue_lock, flags);
+
+	esas2r_comp_list_drain(a, &comp_list);
+
+	if (atomic_read(&a->disable_cnt) == 0)
+		esas2r_do_deferred_processes(a);
+
+	esas2r_lock_clear_flags(&a->flags, AF_OS_RESET);
+
+	esas2r_trace_exit();
+}
+
+static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
+{
+
+	esas2r_lock_clear_flags(&a->flags, AF_CHPRST_NEEDED);
+	esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED);
+	esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED);
+	esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING);
+	/*
+	 * Make sure we don't attempt more than 3 resets
+	 * when the uptime between resets does not exceed one
+	 * minute.  This will stop any situation where there is
+	 * really something wrong with the hardware.  The way
+	 * this works is that we start with uptime ticks at 0.
+	 * Each time we do a reset, we add 20 seconds worth to
+	 * the count.  Each time a timer tick occurs, as long
+	 * as a chip reset is not pending, we decrement the
+	 * tick count.  If the uptime ticks ever reach 60
+	 * seconds worth, we disable the adapter from that
+	 * point forward.  Three strikes, you're out.
+	 */
+	if (!esas2r_is_adapter_present(a) || (a->chip_uptime >=
+					      ESAS2R_CHP_UPTIME_MAX)) {
+		esas2r_hdebug("*** adapter disabled ***");
+
+		/*
+		 * Ok, some kind of hard failure.  Make sure we
+		 * exit this loop with chip interrupts
+		 * permanently disabled so we don't lock up the
+		 * entire system.  Also flag degraded mode to
+		 * prevent the heartbeat from trying to recover.
+		 */
+
+		esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
+		esas2r_lock_set_flags(&a->flags, AF_DISABLED);
+		esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
+		esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
+
+		esas2r_disable_chip_interrupts(a);
+		a->int_mask = 0;
+		esas2r_process_adapter_reset(a);
+
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "Adapter disabled because of hardware failure");
+	} else {
+		u32 flags =
+			esas2r_lock_set_flags(&a->flags, AF_CHPRST_STARTED);
+
+		if (!(flags & AF_CHPRST_STARTED))
+			/*
+			 * Only disable interrupts if this is
+			 * the first reset attempt.
+			 */
+			esas2r_disable_chip_interrupts(a);
+
+		if ((a->flags & AF_POWER_MGT) && !(a->flags & AF_FIRST_INIT) &&
+		    !(flags & AF_CHPRST_STARTED)) {
+			/*
+			 * Don't reset the chip on the first
+			 * deferred power up attempt.
+			 */
+		} else {
+			esas2r_hdebug("*** resetting chip ***");
+			esas2r_reset_chip(a);
+		}
+
+		/* Kick off the reinitialization */
+		a->chip_uptime += ESAS2R_CHP_UPTIME_CNT;
+		a->chip_init_time = jiffies_to_msecs(jiffies);
+		if (!(a->flags & AF_POWER_MGT)) {
+			esas2r_process_adapter_reset(a);
+
+			if (!(flags & AF_CHPRST_STARTED)) {
+				/* Remove devices now that I/O is cleaned up. */
+				a->prev_dev_cnt =
+					esas2r_targ_db_get_tgt_cnt(a);
+				esas2r_targ_db_remove_all(a, false);
+			}
+		}
+
+		a->int_mask = 0;
+	}
+}
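+
+/*
+ * A worked example of the accounting above, using the figures from the
+ * comment (20 seconds charged per reset, a 60-second cap, timer ticks
+ * paying the charge back while no reset is pending): three back-to-back
+ * resets charge 3 * 20s = 60s and hit ESAS2R_CHP_UPTIME_MAX, disabling
+ * the adapter, while resets spaced far enough apart let each 20s charge
+ * drain away before the next one lands.
+ */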
+
+static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
+{
+	while (a->flags & AF_CHPRST_DETECTED) {
+		/*
+		 * Balance the enable in esas2r_init_adapter_hw().
+		 * esas2r_power_down() already took care of it for power
+		 * management.
+		 */
+		if (!(a->flags & AF_DEGRADED_MODE) && !(a->flags &
+							AF_POWER_MGT))
+			esas2r_disable_chip_interrupts(a);
+
+		/* Reinitialize the chip. */
+		esas2r_check_adapter(a);
+		esas2r_init_adapter_hw(a, 0);
+
+		if (a->flags & AF_CHPRST_NEEDED)
+			break;
+
+		if (a->flags & AF_POWER_MGT) {
+			/* Recovery from power management. */
+			if (a->flags & AF_FIRST_INIT) {
+				/* Chip reset during normal power up */
+				esas2r_log(ESAS2R_LOG_CRIT,
+					   "The firmware was reset during a normal power-up sequence");
+			} else {
+				/* Deferred power up complete. */
+				esas2r_lock_clear_flags(&a->flags,
+							AF_POWER_MGT);
+				esas2r_send_reset_ae(a, true);
+			}
+		} else {
+			/* Recovery from online chip reset. */
+			if (a->flags & AF_FIRST_INIT) {
+				/* Chip reset during driver load */
+			} else {
+				/* Chip reset after driver load */
+				esas2r_send_reset_ae(a, false);
+			}
+
+			esas2r_log(ESAS2R_LOG_CRIT,
+				   "Recovering from a chip reset while the chip was online");
+		}
+
+		esas2r_lock_clear_flags(&a->flags, AF_CHPRST_STARTED);
+		esas2r_enable_chip_interrupts(a);
+
+		/*
+		 * Clear this flag last!  this indicates that the chip has been
+		 * reset already during initialization.
+		 */
+		esas2r_lock_clear_flags(&a->flags, AF_CHPRST_DETECTED);
+	}
+}
+
+/* Perform deferred tasks when chip interrupts are disabled */
+void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)
+{
+	if (a->flags & (AF_CHPRST_NEEDED | AF_CHPRST_DETECTED)) {
+		if (a->flags & AF_CHPRST_NEEDED)
+			esas2r_chip_rst_needed_during_tasklet(a);
+
+		esas2r_handle_chip_rst_during_tasklet(a);
+	}
+
+	if (a->flags & AF_BUSRST_NEEDED) {
+		esas2r_hdebug("hard resetting bus");
+
+		esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED);
+
+		if (a->flags & AF_FLASHING)
+			esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED);
+		else
+			esas2r_write_register_dword(a, MU_DOORBELL_IN,
+						    DRBL_RESET_BUS);
+	}
+
+	if (a->flags & AF_BUSRST_DETECTED) {
+		esas2r_process_bus_reset(a);
+
+		esas2r_log_dev(ESAS2R_LOG_WARN,
+			       &(a->host->shost_gendev),
+			       "scsi_report_bus_reset() called");
+
+		scsi_report_bus_reset(a->host, 0);
+
+		esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED);
+		esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING);
+
+		esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete");
+	}
+
+	if (a->flags & AF_PORT_CHANGE) {
+		esas2r_lock_clear_flags(&a->flags, AF_PORT_CHANGE);
+
+		esas2r_targ_db_report_changes(a);
+	}
+
+	if (atomic_read(&a->disable_cnt) == 0)
+		esas2r_do_deferred_processes(a);
+}
+
+static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
+{
+	if (!(doorbell & DRBL_FORCE_INT)) {
+		esas2r_trace_enter();
+		esas2r_trace("doorbell: %x", doorbell);
+	}
+
+	/* First clear the doorbell bits */
+	esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell);
+
+	if (doorbell & DRBL_RESET_BUS)
+		esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED);
+
+	if (doorbell & DRBL_FORCE_INT)
+		esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT);
+
+	if (doorbell & DRBL_PANIC_REASON_MASK) {
+		esas2r_hdebug("*** Firmware Panic ***");
+		esas2r_log(ESAS2R_LOG_CRIT, "The firmware has panicked");
+	}
+
+	if (doorbell & DRBL_FW_RESET) {
+		esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_AVAIL);
+		esas2r_local_reset_adapter(a);
+	}
+
+	if (!(doorbell & DRBL_FORCE_INT))
+		esas2r_trace_exit();
+}
+
+void esas2r_force_interrupt(struct esas2r_adapter *a)
+{
+	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_FORCE_INT |
+				    DRBL_DRV_VER);
+}
+
+static void esas2r_lun_event(struct esas2r_adapter *a, union atto_vda_ae *ae,
+			     u16 target, u32 length)
+{
+	struct esas2r_target *t = a->targetdb + target;
+	u32 cplen = length;
+	unsigned long flags;
+
+	if (cplen > sizeof(t->lu_event))
+		cplen = sizeof(t->lu_event);
+
+	esas2r_trace("ae->lu.dwevent: %x", ae->lu.dwevent);
+	esas2r_trace("ae->lu.bystate: %x", ae->lu.bystate);
+
+	spin_lock_irqsave(&a->mem_lock, flags);
+
+	t->new_target_state = TS_INVALID;
+
+	if (ae->lu.dwevent  & VDAAE_LU_LOST) {
+		t->new_target_state = TS_NOT_PRESENT;
+	} else {
+		switch (ae->lu.bystate) {
+		case VDAAE_LU_NOT_PRESENT:
+		case VDAAE_LU_OFFLINE:
+		case VDAAE_LU_DELETED:
+		case VDAAE_LU_FACTORY_DISABLED:
+			t->new_target_state = TS_NOT_PRESENT;
+			break;
+
+		case VDAAE_LU_ONLINE:
+		case VDAAE_LU_DEGRADED:
+			t->new_target_state = TS_PRESENT;
+			break;
+		}
+	}
+
+	if (t->new_target_state != TS_INVALID) {
+		memcpy(&t->lu_event, &ae->lu, cplen);
+
+		esas2r_disc_queue_event(a, DCDE_DEV_CHANGE);
+	}
+
+	spin_unlock_irqrestore(&a->mem_lock, flags);
+}
+
+void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+	union atto_vda_ae *ae =
+		(union atto_vda_ae *)rq->vda_rsp_data->ae_data.event_data;
+	u32 length = le32_to_cpu(rq->func_rsp.ae_rsp.length);
+	union atto_vda_ae *last =
+		(union atto_vda_ae *)(rq->vda_rsp_data->ae_data.event_data
+				      + length);
+
+	esas2r_trace_enter();
+	esas2r_trace("length: %d", length);
+
+	if (length > sizeof(struct atto_vda_ae_data)
+	    || (length & 3) != 0
+	    || length == 0) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "the AE response length for request %p is invalid: %d",
+			   rq, length);
+
+		esas2r_hdebug("aereq->length (0x%x) too long", length);
+		esas2r_bugon();
+
+		last = ae;
+	}
+
+	while (ae < last) {
+		u16 target;
+
+		esas2r_trace("ae: %p", ae);
+		esas2r_trace("ae->hdr: %p", &(ae->hdr));
+
+		length = ae->hdr.bylength;
+
+		if (length > (u32)((u8 *)last - (u8 *)ae)
+		    || (length & 3) != 0
+		    || length == 0) {
+			esas2r_log(ESAS2R_LOG_CRIT,
+				   "the async event length is invalid (%p): %d",
+				   ae, length);
+
+			esas2r_hdebug("ae->hdr.length (0x%x) invalid", length);
+			esas2r_bugon();
+
+			break;
+		}
+
+		esas2r_nuxi_ae_data(ae);
+
+		esas2r_queue_fw_event(a, fw_event_vda_ae, ae,
+				      sizeof(union atto_vda_ae));
+
+		switch (ae->hdr.bytype) {
+		case VDAAE_HDR_TYPE_RAID:
+
+			if (ae->raid.dwflags & (VDAAE_GROUP_STATE
+						| VDAAE_RBLD_STATE
+						| VDAAE_MEMBER_CHG
+						| VDAAE_PART_CHG)) {
+				esas2r_log(ESAS2R_LOG_INFO,
+					   "RAID event received - name:%s rebuild_state:%d group_state:%d",
+					   ae->raid.acname,
+					   ae->raid.byrebuild_state,
+					   ae->raid.bygroup_state);
+			}
+
+			break;
+
+		case VDAAE_HDR_TYPE_LU:
+			esas2r_log(ESAS2R_LOG_INFO,
+				   "LUN event received: event:%d target_id:%d LUN:%d state:%d",
+				   ae->lu.dwevent,
+				   ae->lu.id.tgtlun.wtarget_id,
+				   ae->lu.id.tgtlun.bylun,
+				   ae->lu.bystate);
+
+			target = ae->lu.id.tgtlun.wtarget_id;
+
+			if (target < ESAS2R_MAX_TARGETS)
+				esas2r_lun_event(a, ae, target, length);
+
+			break;
+
+		case VDAAE_HDR_TYPE_DISK:
+			esas2r_log(ESAS2R_LOG_INFO, "Disk event received");
+			break;
+
+		default:
+
+			/* Silently ignore the rest and let the apps deal with
+			 * them.
+			 */
+
+			break;
+		}
+
+		ae = (union atto_vda_ae *)((u8 *)ae + length);
+	}
+
+	/* Now requeue it. */
+	esas2r_start_ae_request(a, rq);
+	esas2r_trace_exit();
+}
+
+/* Send an asynchronous event for a chip reset or power management. */
+void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt)
+{
+	struct atto_vda_ae_hdr ae;
+
+	if (pwr_mgt)
+		ae.bytype = VDAAE_HDR_TYPE_PWRMGT;
+	else
+		ae.bytype = VDAAE_HDR_TYPE_RESET;
+
+	ae.byversion = VDAAE_HDR_VER_0;
+	ae.byflags = 0;
+	ae.bylength = (u8)sizeof(struct atto_vda_ae_hdr);
+
+	if (pwr_mgt)
+		esas2r_hdebug("*** sending power management AE ***");
+	else
+		esas2r_hdebug("*** sending reset AE ***");
+
+	esas2r_queue_fw_event(a, fw_event_vda_ae, &ae,
+			      sizeof(union atto_vda_ae));
+}
+
+void esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
+{}
+
+static void esas2r_check_req_rsp_sense(struct esas2r_adapter *a,
+				       struct esas2r_request *rq)
+{
+	u8 snslen, snslen2;
+
+	snslen = snslen2 = rq->func_rsp.scsi_rsp.sense_len;
+
+	if (snslen > rq->sense_len)
+		snslen = rq->sense_len;
+
+	if (snslen) {
+		if (rq->sense_buf)
+			memcpy(rq->sense_buf, rq->data_buf, snslen);
+		else
+			rq->sense_buf = (u8 *)rq->data_buf;
+
+		/* See about possible sense data */
+		if (snslen2 > 0x0c) {
+			u8 *s = (u8 *)rq->data_buf;
+
+			esas2r_trace_enter();
+
+			/* REPORTED LUNS DATA HAS CHANGED (ASC 0x3f, ASCQ 0x0e) */
+			if (s[0x0c] == 0x3f && s[0x0d] == 0x0E) {
+				esas2r_trace("rq->target_id: %d",
+					     rq->target_id);
+				esas2r_target_state_changed(a, rq->target_id,
+							    TS_LUN_CHANGE);
+			}
+
+			esas2r_trace("add_sense_key=%x", s[0x0c]);
+			esas2r_trace("add_sense_qual=%x", s[0x0d]);
+			esas2r_trace_exit();
+		}
+	}
+
+	rq->sense_len = snslen;
+}
+
+void esas2r_complete_request(struct esas2r_adapter *a,
+			     struct esas2r_request *rq)
+{
+	if (rq->vrq->scsi.function == VDA_FUNC_FLASH
+	    && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
+		esas2r_lock_clear_flags(&a->flags, AF_FLASHING);
+
+	/* See if we setup a callback to do special processing */
+
+	if (rq->interrupt_cb) {
+		(*rq->interrupt_cb)(a, rq);
+
+		if (rq->req_stat == RS_PENDING) {
+			esas2r_start_request(a, rq);
+			return;
+		}
+	}
+
+	if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)
+	    && unlikely(rq->req_stat != RS_SUCCESS)) {
+		esas2r_check_req_rsp_sense(a, rq);
+		esas2r_log_request_failure(a, rq);
+	}
+
+	(*rq->comp_cb)(a, rq);
+}

+ 880 - 0
drivers/scsi/esas2r/esas2r_io.c

@@ -0,0 +1,880 @@
+/*
+ *  linux/drivers/scsi/esas2r/esas2r_io.c
+ *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ *  Copyright (c) 2001-2013 ATTO Technology, Inc.
+ *  (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+	struct esas2r_target *t = NULL;
+	struct esas2r_request *startrq = rq;
+	unsigned long flags;
+
+	if (unlikely(a->flags & (AF_DEGRADED_MODE | AF_POWER_DOWN))) {
+		if (rq->vrq->scsi.function == VDA_FUNC_SCSI)
+			rq->req_stat = RS_SEL2;
+		else
+			rq->req_stat = RS_DEGRADED;
+	} else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
+		t = a->targetdb + rq->target_id;
+
+		if (unlikely(t >= a->targetdb_end
+			     || !(t->flags & TF_USED))) {
+			rq->req_stat = RS_SEL;
+		} else {
+			/* copy in the target ID. */
+			rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id);
+
+			/*
+			 * Test if we want to report RS_SEL for missing target.
+			 * Note that if AF_DISC_PENDING is set then this will
+			 * go on the defer queue.
+			 */
+			if (unlikely(t->target_state != TS_PRESENT
+				     && !(a->flags & AF_DISC_PENDING)))
+				rq->req_stat = RS_SEL;
+		}
+	}
+
+	if (unlikely(rq->req_stat != RS_PENDING)) {
+		esas2r_complete_request(a, rq);
+		return;
+	}
+
+	esas2r_trace("rq=%p", rq);
+	esas2r_trace("rq->vrq->scsi.handle=%x", rq->vrq->scsi.handle);
+
+	if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
+		esas2r_trace("rq->target_id=%d", rq->target_id);
+		esas2r_trace("rq->vrq->scsi.flags=%x", rq->vrq->scsi.flags);
+	}
+
+	spin_lock_irqsave(&a->queue_lock, flags);
+
+	if (likely(list_empty(&a->defer_list) &&
+		   !(a->flags &
+		     (AF_CHPRST_PENDING | AF_FLASHING | AF_DISC_PENDING))))
+		esas2r_local_start_request(a, startrq);
+	else
+		list_add_tail(&startrq->req_list, &a->defer_list);
+
+	spin_unlock_irqrestore(&a->queue_lock, flags);
+}
+
+/*
+ * Starts the specified request.  All requests have RS_PENDING set when this
+ * routine is called.  The caller is usually esas2r_start_request, but
+ * esas2r_do_deferred_processes will start requests that were deferred.
+ *
+ * The caller must ensure that requests can be started.
+ *
+ * esas2r_start_request will defer a request if there are already requests
+ * waiting or there is a chip reset pending.  Once the reset condition
+ * clears, esas2r_do_deferred_processes will call this function to start the
+ * request.
+ *
+ * When a request is started, it is placed on the active list and queued to
+ * the controller.
+ */
+void esas2r_local_start_request(struct esas2r_adapter *a,
+				struct esas2r_request *rq)
+{
+	esas2r_trace_enter();
+	esas2r_trace("rq=%p", rq);
+	esas2r_trace("rq->vrq:%p", rq->vrq);
+	esas2r_trace("rq->vrq_md->phys_addr:%x", rq->vrq_md->phys_addr);
+
+	if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH
+		     && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT))
+		esas2r_lock_set_flags(&a->flags, AF_FLASHING);
+
+	list_add_tail(&rq->req_list, &a->active_list);
+	esas2r_start_vda_request(a, rq);
+	esas2r_trace_exit();
+	return;
+}
+
+void esas2r_start_vda_request(struct esas2r_adapter *a,
+			      struct esas2r_request *rq)
+{
+	struct esas2r_inbound_list_source_entry *element;
+	u32 dw;
+
+	rq->req_stat = RS_STARTED;
+	/*
+	 * Calculate the inbound list entry location and the current state of
+	 * toggle bit.
+	 */
+	a->last_write++;
+	if (a->last_write >= a->list_size) {
+		a->last_write = 0;
+		/* update the toggle bit */
+		if (a->flags & AF_COMM_LIST_TOGGLE)
+			esas2r_lock_clear_flags(&a->flags,
+						AF_COMM_LIST_TOGGLE);
+		else
+			esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
+	}
+
+	element =
+		(struct esas2r_inbound_list_source_entry *)a->inbound_list_md.
+		virt_addr
+		+ a->last_write;
+
+	/* Set the VDA request size if it was never modified */
+	if (rq->vda_req_sz == RQ_SIZE_DEFAULT)
+		rq->vda_req_sz = (u16)(a->max_vdareq_size / sizeof(u32));
+
+	element->address = cpu_to_le64(rq->vrq_md->phys_addr);
+	element->length = cpu_to_le32(rq->vda_req_sz);
+
+	/* Update the write pointer */
+	dw = a->last_write;
+
+	if (a->flags & AF_COMM_LIST_TOGGLE)
+		dw |= MU_ILW_TOGGLE;
+
+	esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle);
+	esas2r_trace("dw:%x", dw);
+	esas2r_trace("rq->vda_req_sz:%x", rq->vda_req_sz);
+	esas2r_write_register_dword(a, MU_IN_LIST_WRITE, dw);
+}
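+
+/*
+ * Worked example of the inbound-list write pointer above, assuming (for
+ * illustration only) a->list_size == 4:
+ *
+ *   writes 0,1,2,3 use entries 0..3 with the toggle bit clear;
+ *   write 4 wraps last_write back to 0 and flips AF_COMM_LIST_TOGGLE, so
+ *   the MU_ILW_TOGGLE bit in the doorbell value alternates on every pass.
+ *
+ * The alternating toggle presumably lets the firmware tell a freshly
+ * written entry from a stale one left over from the previous pass around
+ * the ring.
+ */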
+
+/*
+ * Build the scatter/gather list for an I/O request according to the
+ * specifications placed in the s/g context.  The caller must initialize
+ * context prior to the initial call by calling esas2r_sgc_init().
+ */
+bool esas2r_build_sg_list_sge(struct esas2r_adapter *a,
+			      struct esas2r_sg_context *sgc)
+{
+	struct esas2r_request *rq = sgc->first_req;
+	union atto_vda_req *vrq = rq->vrq;
+
+	while (sgc->length) {
+		u32 rem = 0;
+		u64 addr;
+		u32 len;
+
+		len = (*sgc->get_phys_addr)(sgc, &addr);
+
+		if (unlikely(len == 0))
+			return false;
+
+		/* if current length is more than what's left, stop there */
+		if (unlikely(len > sgc->length))
+			len = sgc->length;
+
+another_entry:
+		/* limit to a round number less than the maximum length */
+		if (len > SGE_LEN_MAX) {
+			/*
+			 * Save the remainder of the split.  Whenever we limit
+			 * an entry we come back around to build entries out
+			 * of the leftover.  We do this to prevent multiple
+			 * calls to the get_phys_addr() function for an SGE
+			 * that is too large.
+			 */
+			rem = len - SGE_LEN_MAX;
+			len = SGE_LEN_MAX;
+		}
+
+		/* See if we need to allocate a new SGL */
+		if (unlikely(sgc->sge.a64.curr > sgc->sge.a64.limit)) {
+			u8 sgelen;
+			struct esas2r_mem_desc *sgl;
+
+			/*
+			 * If no SGls are available, return failure.  The
+			 * caller can call us later with the current context
+			 * to pick up here.
+			 */
+			sgl = esas2r_alloc_sgl(a);
+
+			if (unlikely(sgl == NULL))
+				return false;
+
+			/* Calculate the length of the last SGE filled in */
+			sgelen = (u8)((u8 *)sgc->sge.a64.curr
+				      - (u8 *)sgc->sge.a64.last);
+
+			/*
+			 * Copy the last SGE filled in to the first entry of
+			 * the new SGL to make room for the chain entry.
+			 */
+			memcpy(sgl->virt_addr, sgc->sge.a64.last, sgelen);
+
+			/* Figure out the new curr pointer in the new segment */
+			sgc->sge.a64.curr =
+				(struct atto_vda_sge *)((u8 *)sgl->virt_addr +
+							sgelen);
+
+			/* Set the limit pointer and build the chain entry */
+			sgc->sge.a64.limit =
+				(struct atto_vda_sge *)((u8 *)sgl->virt_addr
+							+ sgl_page_size
+							- sizeof(struct
+								 atto_vda_sge));
+			sgc->sge.a64.last->length = cpu_to_le32(
+				SGE_CHAIN | SGE_ADDR_64);
+			sgc->sge.a64.last->address =
+				cpu_to_le64(sgl->phys_addr);
+
+			/*
+			 * Now, if there was a previous chain entry, update it
+			 * to contain the length of this segment and the size
+			 * of this chain.  Otherwise this is the first SGL, so
+			 * set the chain_offset in the request.
+			 */
+			if (sgc->sge.a64.chain) {
+				sgc->sge.a64.chain->length |= cpu_to_le32(
+					((u8 *)(sgc->sge.a64.last + 1)
+					 - (u8 *)rq->sg_table->virt_addr)
+					+ sizeof(struct atto_vda_sge) *
+					LOBIT(SGE_CHAIN_SZ));
+			} else {
+				vrq->scsi.chain_offset = (u8)
+					((u8 *)sgc->sge.a64.last - (u8 *)vrq);
+
+				/*
+				 * This is the first SGL, so set the
+				 * chain_offset and the VDA request size in
+				 * the request.
+				 */
+				rq->vda_req_sz =
+					(vrq->scsi.chain_offset +
+					 sizeof(struct atto_vda_sge) +
+					 3)
+					/ sizeof(u32);
+			}
+
+			/*
+			 * Remember this so when we get a new SGL filled in we
+			 * can update the length of this chain entry.
+			 */
+			sgc->sge.a64.chain = sgc->sge.a64.last;
+
+			/* Now link the new SGL onto the primary request. */
+			list_add(&sgl->next_desc, &rq->sg_table_head);
+		}
+
+		/* Update last one filled in */
+		sgc->sge.a64.last = sgc->sge.a64.curr;
+
+		/* Build the new SGE and update the S/G context */
+		sgc->sge.a64.curr->length = cpu_to_le32(SGE_ADDR_64 | len);
+		/* the SGE address field is 64 bits wide (cf. the chain entry
+		 * above), so the full address must be converted */
+		sgc->sge.a64.curr->address = cpu_to_le64(addr);
+		sgc->sge.a64.curr++;
+		sgc->cur_offset += len;
+		sgc->length -= len;
+
+		/*
+		 * Check if we previously split an entry.  If so we have to
+		 * pick up where we left off.
+		 */
+		if (rem) {
+			addr += len;
+			len = rem;
+			rem = 0;
+			goto another_entry;
+		}
+	}
+
+	/* Mark the end of the SGL */
+	sgc->sge.a64.last->length |= cpu_to_le32(SGE_LAST);
+
+	/*
+	 * If there was a previous chain entry, update the length to indicate
+	 * the length of this last segment.
+	 */
+	if (sgc->sge.a64.chain) {
+		sgc->sge.a64.chain->length |= cpu_to_le32(
+			((u8 *)(sgc->sge.a64.curr) -
+			 (u8 *)rq->sg_table->virt_addr));
+	} else {
+		u16 reqsize;
+
+		/*
+		 * The entire VDA request was not used so lets
+		 * set the size of the VDA request to be DMA'd
+		 */
+		reqsize =
+			((u16)((u8 *)sgc->sge.a64.last - (u8 *)vrq)
+			 + sizeof(struct atto_vda_sge) + 3) / sizeof(u32);
+
+		/*
+		 * Only update the request size if it is bigger than what is
+		 * already there.  We can come in here twice for some management
+		 * commands.
+		 */
+		if (reqsize > rq->vda_req_sz)
+			rq->vda_req_sz = reqsize;
+	}
+	return true;
+}
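+
+/*
+ * Shape of the chained SGL built above: when a page of SGEs fills up, the
+ * last SGE written is copied to the head of a newly allocated page and the
+ * slot it occupied is rewritten as a chain entry (SGE_CHAIN | SGE_ADDR_64)
+ * pointing at the new page.  sgc->sge.a64.chain remembers that chain entry
+ * so its length field can be patched once the size of the following segment
+ * is known, either when yet another page is chained or at SGE_LAST time.
+ */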
+
+
+/*
+ * Create a PRD list for each I-block consumed by the command.  This routine
+ * determines how much data is required from each I-block being consumed
+ * by the command.  The first and last I-blocks can be partial and all of
+ * the I-blocks in between are for a full I-block of data.
+ *
+ * The interleave size is used to determine the number of bytes in the first
+ * I-block; the remaining I-blocks consume whatever remains.
+ */
+static bool esas2r_build_prd_iblk(struct esas2r_adapter *a,
+				  struct esas2r_sg_context *sgc)
+{
+	struct esas2r_request *rq = sgc->first_req;
+	u64 addr;
+	u32 len;
+	struct esas2r_mem_desc *sgl;
+	u32 numchain = 1;
+	u32 rem = 0;
+
+	while (sgc->length) {
+		/* Get the next address/length pair */
+
+		len = (*sgc->get_phys_addr)(sgc, &addr);
+
+		if (unlikely(len == 0))
+			return false;
+
+		/* If current length is more than what's left, stop there */
+
+		if (unlikely(len > sgc->length))
+			len = sgc->length;
+
+another_entry:
+		/* Limit to a round number less than the maximum length */
+
+		if (len > PRD_LEN_MAX) {
+			/*
+			 * Save the remainder of the split.  Whenever we limit
+			 * an entry we come back around to build entries out
+			 * of the leftover.  We do this to prevent multiple
+			 * calls to the get_phys_addr() function for an SGE
+			 * that is too large.
+			 */
+			rem = len - PRD_LEN_MAX;
+			len = PRD_LEN_MAX;
+		}
+
+		/* See if we need to allocate a new SGL */
+		if (sgc->sge.prd.sge_cnt == 0) {
+			if (len == sgc->length) {
+				/*
+				 * We only have 1 PRD entry left.
+				 * It can be placed where the chain
+				 * entry would have gone
+				 */
+
+				/* Build the simple SGE */
+				sgc->sge.prd.curr->ctl_len = cpu_to_le32(
+					PRD_DATA | len);
+				sgc->sge.prd.curr->address = cpu_to_le64(addr);
+
+				/* Adjust length related fields */
+				sgc->cur_offset += len;
+				sgc->length -= len;
+
+				/* We use the reserved chain entry for data */
+				numchain = 0;
+
+				break;
+			}
+
+			if (sgc->sge.prd.chain) {
+				/*
+				 * Fill in the number of entries of the
+				 * current SGL in the previous chain entry;
+				 * the current SGL may not be full.
+				 */
+
+				sgc->sge.prd.chain->ctl_len |= cpu_to_le32(
+					sgc->sge.prd.sgl_max_cnt);
+			}
+
+			/*
+			 * If no SGls are available, return failure.  The
+			 * caller can call us later with the current context
+			 * to pick up here.
+			 */
+
+			sgl = esas2r_alloc_sgl(a);
+
+			if (unlikely(sgl == NULL))
+				return false;
+
+			/*
+			 * Link the new SGL onto the chain
+			 * They are in reverse order
+			 */
+			list_add(&sgl->next_desc, &rq->sg_table_head);
+
+			/*
+			 * An SGL was just filled in and we are starting
+			 * a new SGL. Prime the chain of the ending SGL with
+			 * info that points to the new SGL. The length gets
+			 * filled in when the new SGL is filled or ended
+			 */
+
+			sgc->sge.prd.chain = sgc->sge.prd.curr;
+
+			sgc->sge.prd.chain->ctl_len = cpu_to_le32(PRD_CHAIN);
+			sgc->sge.prd.chain->address =
+				cpu_to_le64(sgl->phys_addr);
+
+			/*
+			 * Start a new segment.
+			 * Take one away and save for chain SGE
+			 */
+
+			sgc->sge.prd.curr =
+				(struct atto_physical_region_description *)
+				sgl->virt_addr;
+			sgc->sge.prd.sge_cnt = sgc->sge.prd.sgl_max_cnt - 1;
+		}
+
+		sgc->sge.prd.sge_cnt--;
+		/* Build the simple SGE */
+		sgc->sge.prd.curr->ctl_len = cpu_to_le32(PRD_DATA | len);
+		sgc->sge.prd.curr->address = cpu_to_le64(addr);
+
+		/* Used another element.  Point to the next one */
+
+		sgc->sge.prd.curr++;
+
+		/* Adjust length related fields */
+
+		sgc->cur_offset += len;
+		sgc->length -= len;
+
+		/*
+		 * Check if we previously split an entry.  If so we have to
+		 * pick up where we left off.
+		 */
+
+		if (rem) {
+			addr += len;
+			len = rem;
+			rem = 0;
+			goto another_entry;
+		}
+	}
+
+	if (!list_empty(&rq->sg_table_head)) {
+		if (sgc->sge.prd.chain) {
+			sgc->sge.prd.chain->ctl_len |=
+				cpu_to_le32(sgc->sge.prd.sgl_max_cnt
+					    - sgc->sge.prd.sge_cnt
+					    - numchain);
+		}
+	}
+
+	return true;
+}
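+
+/*
+ * Bookkeeping note for the routine above: each PRD page reserves its final
+ * slot for a chain entry (sge_cnt is primed with sgl_max_cnt - 1).  When
+ * exactly one data PRD remains, it is written into that reserved slot
+ * instead and numchain drops to 0, which is reflected in the entry count
+ * patched into the previous chain entry on exit.
+ */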
+
+bool esas2r_build_sg_list_prd(struct esas2r_adapter *a,
+			      struct esas2r_sg_context *sgc)
+{
+	struct esas2r_request *rq = sgc->first_req;
+	u32 len = sgc->length;
+	struct esas2r_target *t = a->targetdb + rq->target_id;
+	u8 is_i_o = 0;
+	u16 reqsize;
+	struct atto_physical_region_description *curr_iblk_chn;
+	u8 *cdb = (u8 *)&rq->vrq->scsi.cdb[0];
+
+	/*
+	 * extract LBA from command so we can determine
+	 * the I-Block boundary
+	 */
+
+	if (rq->vrq->scsi.function == VDA_FUNC_SCSI
+	    && t->target_state == TS_PRESENT
+	    && !(t->flags & TF_PASS_THRU)) {
+		u32 lbalo = 0;
+
+		switch (rq->vrq->scsi.cdb[0]) {
+		case READ_16:
+		case WRITE_16:
+			lbalo = MAKEDWORD(MAKEWORD(cdb[9], cdb[8]),
+					  MAKEWORD(cdb[7], cdb[6]));
+			is_i_o = 1;
+			break;
+
+		case READ_12:
+		case WRITE_12:
+		case READ_10:
+		case WRITE_10:
+			lbalo = MAKEDWORD(MAKEWORD(cdb[5], cdb[4]),
+					  MAKEWORD(cdb[3], cdb[2]));
+			is_i_o = 1;
+			break;
+
+		case READ_6:
+		case WRITE_6:
+			lbalo = MAKEDWORD(MAKEWORD(cdb[3], cdb[2]),
+					  MAKEWORD(cdb[1] & 0x1F, 0));
+			is_i_o = 1;
+			break;
+
+		default:
+			break;
+		}
+
+		if (is_i_o) {
+			u32 startlba;
+
+			rq->vrq->scsi.iblk_cnt_prd = 0;
+
+			/* Determine size of 1st I-block PRD list       */
+			startlba = t->inter_block - (lbalo & (t->inter_block -
+							      1));
+			sgc->length = startlba * t->block_size;
+
+			/* Chk if the 1st iblk chain starts at base of Iblock */
+			if ((lbalo & (t->inter_block - 1)) == 0)
+				rq->flags |= RF_1ST_IBLK_BASE;
+
+			if (sgc->length > len)
+				sgc->length = len;
+		} else {
+			sgc->length = len;
+		}
+	} else {
+		sgc->length = len;
+	}
+
+	/* get our starting chain address */
+
+	curr_iblk_chn =
+		(struct atto_physical_region_description *)sgc->sge.a64.curr;
+
+	sgc->sge.prd.sgl_max_cnt = sgl_page_size /
+		sizeof(struct atto_physical_region_description);
+
+	/* create all of the I-block PRD lists          */
+
+	while (len) {
+		sgc->sge.prd.sge_cnt = 0;
+		sgc->sge.prd.chain = NULL;
+		sgc->sge.prd.curr = curr_iblk_chn;
+
+		/* increment to next I-Block    */
+
+		len -= sgc->length;
+
+		/* go build the next I-Block PRD list   */
+
+		if (unlikely(!esas2r_build_prd_iblk(a, sgc)))
+			return false;
+
+		curr_iblk_chn++;
+
+		if (is_i_o) {
+			rq->vrq->scsi.iblk_cnt_prd++;
+
+			if (len > t->inter_byte)
+				sgc->length = t->inter_byte;
+			else
+				sgc->length = len;
+		}
+	}
+
+	/* figure out the size used of the VDA request */
+
+	reqsize = ((u16)((u8 *)curr_iblk_chn - (u8 *)rq->vrq))
+		  / sizeof(u32);
+
+	/*
+	 * Only update the request size if it is bigger than what is
+	 * already there.  We can come in here twice for some management
+	 * commands.
+	 */
+
+	if (reqsize > rq->vda_req_sz)
+		rq->vda_req_sz = reqsize;
+
+	return true;
+}
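+
+/*
+ * Worked example of the first I-block split above (illustrative values
+ * only; the mask arithmetic assumes t->inter_block is a power of two):
+ *
+ *   inter_block = 256 sectors, block_size = 512 bytes, lbalo = 1000
+ *   lbalo & (inter_block - 1) = 232       (offset into the I-block)
+ *   startlba = 256 - 232 = 24             (sectors left in the I-block)
+ *   sgc->length = 24 * 512 = 12288 bytes  (first, partial PRD list)
+ *
+ * Subsequent iterations then consume t->inter_byte bytes per full I-block
+ * until the transfer length is exhausted.
+ */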
+
+static void esas2r_handle_pending_reset(struct esas2r_adapter *a, u32 currtime)
+{
+	u32 delta = currtime - a->chip_init_time;
+
+	if (delta <= ESAS2R_CHPRST_WAIT_TIME) {
+		/* Wait before accessing registers */
+	} else if (delta >= ESAS2R_CHPRST_TIME) {
+		/*
+		 * The last reset failed so try again. Reset
+		 * processing will give up after three tries.
+		 */
+		esas2r_local_reset_adapter(a);
+	} else {
+		/* We can now see if the firmware is ready */
+		u32 doorbell;
+
+		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+		if (doorbell == 0xFFFFFFFF || !(doorbell & DRBL_FORCE_INT)) {
+			esas2r_force_interrupt(a);
+		} else {
+			u32 ver = (doorbell & DRBL_FW_VER_MSK);
+
+			/* Driver supports API version 0 and 1 */
+			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+						    doorbell);
+			if (ver == DRBL_FW_VER_0) {
+				esas2r_lock_set_flags(&a->flags,
+						      AF_CHPRST_DETECTED);
+				esas2r_lock_set_flags(&a->flags,
+						      AF_LEGACY_SGE_MODE);
+
+				a->max_vdareq_size = 128;
+				a->build_sgl = esas2r_build_sg_list_sge;
+			} else if (ver == DRBL_FW_VER_1) {
+				esas2r_lock_set_flags(&a->flags,
+						      AF_CHPRST_DETECTED);
+				esas2r_lock_clear_flags(&a->flags,
+							AF_LEGACY_SGE_MODE);
+
+				a->max_vdareq_size = 1024;
+				a->build_sgl = esas2r_build_sg_list_prd;
+			} else {
+				esas2r_local_reset_adapter(a);
+			}
+		}
+	}
+}
+
+
+/* This function must be called once per timer tick */
+void esas2r_timer_tick(struct esas2r_adapter *a)
+{
+	u32 currtime = jiffies_to_msecs(jiffies);
+	u32 deltatime = currtime - a->last_tick_time;
+
+	a->last_tick_time = currtime;
+
+	/* count down the uptime */
+	if (a->chip_uptime
+	    && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) {
+		if (deltatime >= a->chip_uptime)
+			a->chip_uptime = 0;
+		else
+			a->chip_uptime -= deltatime;
+	}
+
+	if (a->flags & AF_CHPRST_PENDING) {
+		if (!(a->flags & AF_CHPRST_NEEDED)
+		    && !(a->flags & AF_CHPRST_DETECTED))
+			esas2r_handle_pending_reset(a, currtime);
+	} else {
+		if (a->flags & AF_DISC_PENDING)
+			esas2r_disc_check_complete(a);
+
+		if (a->flags & AF_HEARTBEAT_ENB) {
+			if (a->flags & AF_HEARTBEAT) {
+				if ((currtime - a->heartbeat_time) >=
+				    ESAS2R_HEARTBEAT_TIME) {
+					esas2r_lock_clear_flags(&a->flags,
+								AF_HEARTBEAT);
+					esas2r_hdebug("heartbeat failed");
+					esas2r_log(ESAS2R_LOG_CRIT,
+						   "heartbeat failed");
+					esas2r_bugon();
+					esas2r_local_reset_adapter(a);
+				}
+			} else {
+				esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT);
+				a->heartbeat_time = currtime;
+				esas2r_force_interrupt(a);
+			}
+		}
+	}
+
+	if (atomic_read(&a->disable_cnt) == 0)
+		esas2r_do_deferred_processes(a);
+}
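+
+/*
+ * The heartbeat above is a two-tick handshake: one tick sets AF_HEARTBEAT,
+ * records the time and forces an interrupt; the interrupt handler is
+ * expected to clear AF_HEARTBEAT again (that side is not shown here).  If
+ * the flag is still set ESAS2R_HEARTBEAT_TIME later, the firmware is
+ * presumed dead and the adapter is reset.
+ */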
+
+/*
+ * Send the specified task management function to the target and LUN
+ * specified in rqaux.  In addition, immediately abort any commands that
+ * are queued but not sent to the device according to the rules specified
+ * by the task management function.
+ */
+bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
+			   struct esas2r_request *rqaux, u8 task_mgt_func)
+{
+	u16 targetid = rqaux->target_id;
+	u8 lun = (u8)le32_to_cpu(rqaux->vrq->scsi.flags);
+	bool ret = false;
+	struct esas2r_request *rq;
+	struct list_head *next, *element;
+	unsigned long flags;
+
+	LIST_HEAD(comp_list);
+
+	esas2r_trace_enter();
+	esas2r_trace("rqaux:%p", rqaux);
+	esas2r_trace("task_mgt_func:%x", task_mgt_func);
+	spin_lock_irqsave(&a->queue_lock, flags);
+
+	/* search the defer queue looking for requests for the device */
+	list_for_each_safe(element, next, &a->defer_list) {
+		rq = list_entry(element, struct esas2r_request, req_list);
+
+		if (rq->vrq->scsi.function == VDA_FUNC_SCSI
+		    && rq->target_id == targetid
+		    && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
+			|| task_mgt_func == 0x20)) { /* target reset */
+			/* Found a request affected by the task management */
+			if (rq->req_stat == RS_PENDING) {
+				/*
+				 * The request is pending or waiting.  We can
+				 * safely complete the request now.
+				 */
+				if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
+					list_add_tail(&rq->comp_list,
+						      &comp_list);
+			}
+		}
+	}
+
+	/* Send the task management request to the firmware */
+	rqaux->sense_len = 0;
+	rqaux->vrq->scsi.length = 0;
+	rqaux->target_id = targetid;
+	rqaux->vrq->scsi.flags |= cpu_to_le32(lun);
+	memset(rqaux->vrq->scsi.cdb, 0, sizeof(rqaux->vrq->scsi.cdb));
+	rqaux->vrq->scsi.flags |=
+		cpu_to_le32(task_mgt_func * LOBIT(FCP_CMND_TM_MASK));
+
+	if (a->flags & AF_FLASHING) {
+		/* Assume success.  If there are active requests, return busy */
+		rqaux->req_stat = RS_SUCCESS;
+
+		list_for_each_safe(element, next, &a->active_list) {
+			rq = list_entry(element, struct esas2r_request,
+					req_list);
+			if (rq->vrq->scsi.function == VDA_FUNC_SCSI
+			    && rq->target_id == targetid
+			    && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
+				|| task_mgt_func == 0x20))  /* target reset */
+				rqaux->req_stat = RS_BUSY;
+		}
+
+		ret = true;
+	}
+
+	spin_unlock_irqrestore(&a->queue_lock, flags);
+
+	if (!(a->flags & AF_FLASHING))
+		esas2r_start_request(a, rqaux);
+
+	esas2r_comp_list_drain(a, &comp_list);
+
+	if (atomic_read(&a->disable_cnt) == 0)
+		esas2r_do_deferred_processes(a);
+
+	esas2r_trace_exit();
+
+	return ret;
+}
+
+void esas2r_reset_bus(struct esas2r_adapter *a)
+{
+	esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset");
+
+	if (!(a->flags & AF_DEGRADED_MODE)
+	    && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) {
+		esas2r_lock_set_flags(&a->flags, AF_BUSRST_NEEDED);
+		esas2r_lock_set_flags(&a->flags, AF_BUSRST_PENDING);
+		esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
+
+		esas2r_schedule_tasklet(a);
+	}
+}
+
+bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
+			  u8 status)
+{
+	esas2r_trace_enter();
+	esas2r_trace("rq:%p", rq);
+	list_del_init(&rq->req_list);
+	if (rq->timeout > RQ_MAX_TIMEOUT) {
+		/*
+		 * The request timed out, but we could not abort it because a
+		 * chip reset occurred.  Return busy status.
+		 */
+		rq->req_stat = RS_BUSY;
+		esas2r_trace_exit();
+		return true;
+	}
+
+	rq->req_stat = status;
+	esas2r_trace_exit();
+	return true;
+}

+ 2110 - 0
drivers/scsi/esas2r/esas2r_ioctl.c

@@ -0,0 +1,2110 @@
+/*
+ *  linux/drivers/scsi/esas2r/esas2r_ioctl.c
+ *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ *  Copyright (c) 2001-2013 ATTO Technology, Inc.
+ *  (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+/*
+ * Buffered ioctl handlers.  A buffered ioctl is one which requires that we
+ * allocate a DMA-able memory area to communicate with the firmware.  In
+ * order to prevent continually allocating and freeing consistent memory,
+ * we will allocate a global buffer the first time we need it and re-use
+ * it for subsequent ioctl calls that require it.
+ */
+
+u8 *esas2r_buffered_ioctl;
+dma_addr_t esas2r_buffered_ioctl_addr;
+u32 esas2r_buffered_ioctl_size;
+struct pci_dev *esas2r_buffered_ioctl_pcid;
+
+static DEFINE_SEMAPHORE(buffered_ioctl_semaphore);
+typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *,
+				       struct esas2r_request *,
+				       struct esas2r_sg_context *,
+				       void *);
+typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *,
+					     struct esas2r_request *, void *);
+
+struct esas2r_buffered_ioctl {
+	struct esas2r_adapter *a;
+	void *ioctl;
+	u32 length;
+	u32 control_code;
+	u32 offset;
+	BUFFERED_IOCTL_CALLBACK callback;
+	void *context;
+	BUFFERED_IOCTL_DONE_CALLBACK done_callback;
+	void *done_context;
+};
+
+static void complete_fm_api_req(struct esas2r_adapter *a,
+				struct esas2r_request *rq)
+{
+	a->fm_api_command_done = 1;
+	wake_up_interruptible(&a->fm_api_waiter);
+}
+
+/* Callbacks for building scatter/gather lists for FM API requests */
+static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr)
+{
+	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
+	int offset = sgc->cur_offset - a->save_offset;
+
+	(*addr) = a->firmware.phys + offset;
+	return a->firmware.orig_len - offset;
+}
+
+static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr)
+{
+	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
+	int offset = sgc->cur_offset - a->save_offset;
+
+	(*addr) = a->firmware.header_buff_phys + offset;
+	return sizeof(struct esas2r_flash_img) - offset;
+}
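+
+/*
+ * Contract implied by the two callbacks above: a get_phys_addr routine
+ * receives the s/g context, stores the physical address corresponding to
+ * sgc->cur_offset in *addr, and returns how many contiguous bytes are
+ * available at that address.  The s/g list builders then clamp the returned
+ * length to sgc->length and advance cur_offset themselves.
+ */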
+
+/* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. */
+static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
+{
+	struct esas2r_request *rq;
+
+	if (down_interruptible(&a->fm_api_semaphore)) {
+		fi->status = FI_STAT_BUSY;
+		return;
+	}
+
+	rq = esas2r_alloc_request(a);
+	if (rq == NULL) {
+		up(&a->fm_api_semaphore);
+		fi->status = FI_STAT_BUSY;
+		return;
+	}
+
+	if (fi == &a->firmware.header) {
+		a->firmware.header_buff =
+			dma_alloc_coherent(&a->pcid->dev,
+					   sizeof(struct esas2r_flash_img),
+					   (dma_addr_t *)&a->firmware.
+					   header_buff_phys,
+					   GFP_KERNEL);
+
+		if (a->firmware.header_buff == NULL) {
+			esas2r_debug("failed to allocate header buffer!");
+			fi->status = FI_STAT_BUSY;
+			/* release the request and semaphore taken above so
+			 * the error path does not leak them */
+			esas2r_free_request(a, rq);
+			up(&a->fm_api_semaphore);
+			return;
+		}
+
+		memcpy(a->firmware.header_buff, fi,
+		       sizeof(struct esas2r_flash_img));
+		a->save_offset = a->firmware.header_buff;
+		a->fm_api_sgc.get_phys_addr =
+			(PGETPHYSADDR)get_physaddr_fm_api_header;
+	} else {
+		a->save_offset = (u8 *)fi;
+		a->fm_api_sgc.get_phys_addr =
+			(PGETPHYSADDR)get_physaddr_fm_api;
+	}
+
+	rq->comp_cb = complete_fm_api_req;
+	a->fm_api_command_done = 0;
+	a->fm_api_sgc.cur_offset = a->save_offset;
+
+	if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
+			   &a->fm_api_sgc))
+		goto all_done;
+
+	/* Now wait around for it to complete. */
+	while (!a->fm_api_command_done)
+		wait_event_interruptible(a->fm_api_waiter,
+					 a->fm_api_command_done);
+all_done:
+	if (fi == &a->firmware.header) {
+		memcpy(fi, a->firmware.header_buff,
+		       sizeof(struct esas2r_flash_img));
+
+		dma_free_coherent(&a->pcid->dev,
+				  (size_t)sizeof(struct esas2r_flash_img),
+				  a->firmware.header_buff,
+				  (dma_addr_t)a->firmware.header_buff_phys);
+	}
+
+	up(&a->fm_api_semaphore);
+	esas2r_free_request(a, (struct esas2r_request *)rq);
+	return;
+
+}
+
+static void complete_nvr_req(struct esas2r_adapter *a,
+			     struct esas2r_request *rq)
+{
+	a->nvram_command_done = 1;
+	wake_up_interruptible(&a->nvram_waiter);
+}
+
+/* Callback for building scatter/gather lists for buffered ioctls */
+static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc,
+				       u64 *addr)
+{
+	int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl;
+
+	(*addr) = esas2r_buffered_ioctl_addr + offset;
+	return esas2r_buffered_ioctl_size - offset;
+}
+
+static void complete_buffered_ioctl_req(struct esas2r_adapter *a,
+					struct esas2r_request *rq)
+{
+	a->buffered_ioctl_done = 1;
+	wake_up_interruptible(&a->buffered_ioctl_waiter);
+}
+
+static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi)
+{
+	struct esas2r_adapter *a = bi->a;
+	struct esas2r_request *rq;
+	struct esas2r_sg_context sgc;
+	u8 result = IOCTL_SUCCESS;
+
+	if (down_interruptible(&buffered_ioctl_semaphore))
+		return IOCTL_OUT_OF_RESOURCES;
+
+	/* allocate a buffer or use the existing buffer. */
+	if (esas2r_buffered_ioctl) {
+		if (esas2r_buffered_ioctl_size < bi->length) {
+			/* free the too-small buffer and get a new one */
+			dma_free_coherent(&a->pcid->dev,
+					  (size_t)esas2r_buffered_ioctl_size,
+					  esas2r_buffered_ioctl,
+					  esas2r_buffered_ioctl_addr);
+
+			goto allocate_buffer;
+		}
+	} else {
+allocate_buffer:
+		esas2r_buffered_ioctl_size = bi->length;
+		esas2r_buffered_ioctl_pcid = a->pcid;
+		esas2r_buffered_ioctl =
+			dma_alloc_coherent(&a->pcid->dev,
+					   (size_t)esas2r_buffered_ioctl_size,
+					   &esas2r_buffered_ioctl_addr,
+					   GFP_KERNEL);
+	}
+
+	if (!esas2r_buffered_ioctl) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "could not allocate %d bytes of consistent memory "
+			   "for a buffered ioctl!",
+			   bi->length);
+
+		esas2r_debug("buffered ioctl alloc failure");
+		result = IOCTL_OUT_OF_RESOURCES;
+		goto exit_cleanly;
+	}
+
+	memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length);
+
+	rq = esas2r_alloc_request(a);
+	if (rq == NULL) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "could not allocate an internal request");
+
+		result = IOCTL_OUT_OF_RESOURCES;
+		esas2r_debug("buffered ioctl - no requests");
+		goto exit_cleanly;
+	}
+
+	a->buffered_ioctl_done = 0;
+	rq->comp_cb = complete_buffered_ioctl_req;
+	sgc.cur_offset = esas2r_buffered_ioctl + bi->offset;
+	sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl;
+	sgc.length = esas2r_buffered_ioctl_size;
+
+	if (!(*bi->callback)(a, rq, &sgc, bi->context)) {
+		/* completed immediately, no need to wait */
+		a->buffered_ioctl_done = 0;
+		goto free_and_exit_cleanly;
+	}
+
+	/* now wait around for it to complete. */
+	while (!a->buffered_ioctl_done)
+		wait_event_interruptible(a->buffered_ioctl_waiter,
+					 a->buffered_ioctl_done);
+
+free_and_exit_cleanly:
+	if (result == IOCTL_SUCCESS && bi->done_callback)
+		(*bi->done_callback)(a, rq, bi->done_context);
+
+	esas2r_free_request(a, rq);
+
+exit_cleanly:
+	if (result == IOCTL_SUCCESS)
+		memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length);
+
+	up(&buffered_ioctl_semaphore);
+	return result;
+}
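+
+/*
+ * Lifecycle of a buffered ioctl, as implemented above: copy the user ioctl
+ * into the (lazily grown) global DMA buffer, issue the request through the
+ * per-ioctl callback, sleep until the completion callback sets
+ * buffered_ioctl_done, run the optional done_callback, and finally copy the
+ * buffer back out on success.  The single global buffer is why the whole
+ * path is serialized behind buffered_ioctl_semaphore.
+ */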
+
+/* SMP ioctl support */
+static int smp_ioctl_callback(struct esas2r_adapter *a,
+			      struct esas2r_request *rq,
+			      struct esas2r_sg_context *sgc, void *context)
+{
+	struct atto_ioctl_smp *si =
+		(struct atto_ioctl_smp *)esas2r_buffered_ioctl;
+
+	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
+	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP);
+
+	if (!esas2r_build_sg_list(a, rq, sgc)) {
+		si->status = ATTO_STS_OUT_OF_RSRC;
+		return false;
+	}
+
+	esas2r_start_request(a, rq);
+	return true;
+}
+
+static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si)
+{
+	struct esas2r_buffered_ioctl bi;
+
+	memset(&bi, 0, sizeof(bi));
+
+	bi.a = a;
+	bi.ioctl = si;
+	bi.length = sizeof(struct atto_ioctl_smp)
+		    + le32_to_cpu(si->req_length)
+		    + le32_to_cpu(si->rsp_length);
+	bi.offset = 0;
+	bi.callback = smp_ioctl_callback;
+	return handle_buffered_ioctl(&bi);
+}
+
+
+/* CSMI ioctl support */
+static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a,
+					     struct esas2r_request *rq)
+{
+	rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id);
+	rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun);
+
+	/* Now call the original completion callback. */
+	(*rq->aux_req_cb)(a, rq);
+}
+
+/* Tunnel a CSMI IOCTL to the back end driver for processing. */
+static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,
+			      union atto_ioctl_csmi *ci,
+			      struct esas2r_request *rq,
+			      struct esas2r_sg_context *sgc,
+			      u32 ctrl_code,
+			      u16 target_id)
+{
+	struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;
+
+	if (a->flags & AF_DEGRADED_MODE)
+		return false;
+
+	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
+	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI);
+	ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code);
+	ioctl->csmi.target_id = cpu_to_le16(target_id);
+	ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags);
+
+	/*
+	 * Always usurp the completion callback since the interrupt callback
+	 * mechanism may be used.
+	 */
+	rq->aux_req_cx = ci;
+	rq->aux_req_cb = rq->comp_cb;
+	rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb;
+
+	if (!esas2r_build_sg_list(a, rq, sgc))
+		return false;
+
+	esas2r_start_request(a, rq);
+	return true;
+}
+
+static bool check_lun(struct scsi_lun lun)
+{
+	bool result;
+
+	result = ((lun.scsi_lun[7] == 0) &&
+		  (lun.scsi_lun[6] == 0) &&
+		  (lun.scsi_lun[5] == 0) &&
+		  (lun.scsi_lun[4] == 0) &&
+		  (lun.scsi_lun[3] == 0) &&
+		  (lun.scsi_lun[2] == 0) &&
+/* Byte 1 is intentionally skipped */
+		  (lun.scsi_lun[0] == 0));
+
+	return result;
+}
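+
+/*
+ * check_lun() accepts only single-level LUNs: in the 8-byte SCSI LUN
+ * structure every byte except byte 1 must be zero, and byte 1 (skipped
+ * above) carries the actual LUN number, which is why callers use
+ * sas_lun[1] / lun[1] when filling in requests.
+ */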
+
+static int csmi_ioctl_callback(struct esas2r_adapter *a,
+			       struct esas2r_request *rq,
+			       struct esas2r_sg_context *sgc, void *context)
+{
+	struct atto_csmi *ci = (struct atto_csmi *)context;
+	union atto_ioctl_csmi *ioctl_csmi =
+		(union atto_ioctl_csmi *)esas2r_buffered_ioctl;
+	u8 path = 0;
+	u8 tid = 0;
+	u8 lun = 0;
+	u32 sts = CSMI_STS_SUCCESS;
+	struct esas2r_target *t;
+	unsigned long flags;
+
+	if (ci->control_code == CSMI_CC_GET_DEV_ADDR) {
+		struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr;
+
+		path = gda->path_id;
+		tid = gda->target_id;
+		lun = gda->lun;
+	} else if (ci->control_code == CSMI_CC_TASK_MGT) {
+		struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt;
+
+		path = tm->path_id;
+		tid = tm->target_id;
+		lun = tm->lun;
+	}
+
+	if (path > 0 || tid > ESAS2R_MAX_ID) {
+		rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(
+			CSMI_STS_INV_PARAM);
+		return false;
+	}
+
+	rq->target_id = tid;
+	rq->vrq->scsi.flags |= cpu_to_le32(lun);
+
+	switch (ci->control_code) {
+	case CSMI_CC_GET_DRVR_INFO:
+	{
+		struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info;
+
+		strcpy(gdi->description, esas2r_get_model_name(a));
+		gdi->csmi_major_rev = CSMI_MAJOR_REV;
+		gdi->csmi_minor_rev = CSMI_MINOR_REV;
+		break;
+	}
+
+	case CSMI_CC_GET_CNTLR_CFG:
+	{
+		struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg;
+
+		gcc->base_io_addr = 0;
+		pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2,
+				      &gcc->base_memaddr_lo);
+		pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3,
+				      &gcc->base_memaddr_hi);
+		gcc->board_id = MAKEDWORD(a->pcid->subsystem_device,
+					  a->pcid->subsystem_vendor);
+		gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN;
+		gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA;
+		gcc->io_bus_type = CSMI_BUS_TYPE_PCI;
+		gcc->pci_addr.bus_num = a->pcid->bus->number;
+		gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn);
+		gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn);
+
+		memset(gcc->serial_num, 0, sizeof(gcc->serial_num));
+
+		gcc->major_rev = LOBYTE(LOWORD(a->fw_version));
+		gcc->minor_rev = HIBYTE(LOWORD(a->fw_version));
+		gcc->build_rev = LOBYTE(HIWORD(a->fw_version));
+		gcc->release_rev = HIBYTE(HIWORD(a->fw_version));
+		gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver));
+		gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
+		gcc->bios_build_rev = LOWORD(a->flash_ver);
+
+		if (a->flags2 & AF2_THUNDERLINK)
+			gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
+					   | CSMI_CNTLRF_SATA_HBA;
+		else
+			gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID
+					   | CSMI_CNTLRF_SATA_RAID;
+
+		gcc->rrom_major_rev = 0;
+		gcc->rrom_minor_rev = 0;
+		gcc->rrom_build_rev = 0;
+		gcc->rrom_release_rev = 0;
+		gcc->rrom_biosmajor_rev = 0;
+		gcc->rrom_biosminor_rev = 0;
+		gcc->rrom_biosbuild_rev = 0;
+		gcc->rrom_biosrelease_rev = 0;
+		break;
+	}
+
+	case CSMI_CC_GET_CNTLR_STS:
+	{
+		struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;
+
+		if (a->flags & AF_DEGRADED_MODE)
+			gcs->status = CSMI_CNTLR_STS_FAILED;
+		else
+			gcs->status = CSMI_CNTLR_STS_GOOD;
+
+		gcs->offline_reason = CSMI_OFFLINE_NO_REASON;
+		break;
+	}
+
+	case CSMI_CC_FW_DOWNLOAD:
+	case CSMI_CC_GET_RAID_INFO:
+	case CSMI_CC_GET_RAID_CFG:
+
+		sts = CSMI_STS_BAD_CTRL_CODE;
+		break;
+
+	case CSMI_CC_SMP_PASSTHRU:
+	case CSMI_CC_SSP_PASSTHRU:
+	case CSMI_CC_STP_PASSTHRU:
+	case CSMI_CC_GET_PHY_INFO:
+	case CSMI_CC_SET_PHY_INFO:
+	case CSMI_CC_GET_LINK_ERRORS:
+	case CSMI_CC_GET_SATA_SIG:
+	case CSMI_CC_GET_CONN_INFO:
+	case CSMI_CC_PHY_CTRL:
+
+		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
+				       ci->control_code,
+				       ESAS2R_TARG_ID_INV)) {
+			sts = CSMI_STS_FAILED;
+			break;
+		}
+
+		return true;
+
+	case CSMI_CC_GET_SCSI_ADDR:
+	{
+		struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;
+
+		struct scsi_lun lun;
+
+		memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun));
+
+		if (!check_lun(lun)) {
+			sts = CSMI_STS_NO_SCSI_ADDR;
+			break;
+		}
+
+		/* make sure the device is present */
+		spin_lock_irqsave(&a->mem_lock, flags);
+		t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr);
+		spin_unlock_irqrestore(&a->mem_lock, flags);
+
+		if (t == NULL) {
+			sts = CSMI_STS_NO_SCSI_ADDR;
+			break;
+		}
+
+		gsa->host_index = 0xFF;
+		gsa->lun = gsa->sas_lun[1];
+		rq->target_id = esas2r_targ_get_id(t, a);
+		break;
+	}
+
+	case CSMI_CC_GET_DEV_ADDR:
+	{
+		struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr;
+
+		/* make sure the target is present */
+		t = a->targetdb + rq->target_id;
+
+		if (t >= a->targetdb_end
+		    || t->target_state != TS_PRESENT
+		    || t->sas_addr == 0) {
+			sts = CSMI_STS_NO_DEV_ADDR;
+			break;
+		}
+
+		/* fill in the result */
+		*(u64 *)gda->sas_addr = t->sas_addr;
+		memset(gda->sas_lun, 0, sizeof(gda->sas_lun));
+		gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags);
+		break;
+	}
+
+	case CSMI_CC_TASK_MGT:
+
+		/* make sure the target is present */
+		t = a->targetdb + rq->target_id;
+
+		if (t >= a->targetdb_end
+		    || t->target_state != TS_PRESENT
+		    || !(t->flags & TF_PASS_THRU)) {
+			sts = CSMI_STS_NO_DEV_ADDR;
+			break;
+		}
+
+		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
+				       ci->control_code,
+				       t->phys_targ_id)) {
+			sts = CSMI_STS_FAILED;
+			break;
+		}
+
+		return true;
+
+	default:
+
+		sts = CSMI_STS_BAD_CTRL_CODE;
+		break;
+	}
+
+	rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts);
+
+	return false;
+}
+
+
+static void csmi_ioctl_done_callback(struct esas2r_adapter *a,
+				     struct esas2r_request *rq, void *context)
+{
+	struct atto_csmi *ci = (struct atto_csmi *)context;
+	union atto_ioctl_csmi *ioctl_csmi =
+		(union atto_ioctl_csmi *)esas2r_buffered_ioctl;
+
+	switch (ci->control_code) {
+	case CSMI_CC_GET_DRVR_INFO:
+	{
+		struct atto_csmi_get_driver_info *gdi =
+			&ioctl_csmi->drvr_info;
+
+		strcpy(gdi->name, ESAS2R_VERSION_STR);
+
+		gdi->major_rev = ESAS2R_MAJOR_REV;
+		gdi->minor_rev = ESAS2R_MINOR_REV;
+		gdi->build_rev = 0;
+		gdi->release_rev = 0;
+		break;
+	}
+
+	case CSMI_CC_GET_SCSI_ADDR:
+	{
+		struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;
+
+		if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) ==
+		    CSMI_STS_SUCCESS) {
+			gsa->target_id = rq->target_id;
+			gsa->path_id = 0;
+		}
+
+		break;
+	}
+	}
+
+	ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status);
+}
+
+
+static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci)
+{
+	struct esas2r_buffered_ioctl bi;
+
+	memset(&bi, 0, sizeof(bi));
+
+	bi.a = a;
+	bi.ioctl = &ci->data;
+	bi.length = sizeof(union atto_ioctl_csmi);
+	bi.offset = 0;
+	bi.callback = csmi_ioctl_callback;
+	bi.context = ci;
+	bi.done_callback = csmi_ioctl_done_callback;
+	bi.done_context = ci;
+
+	return handle_buffered_ioctl(&bi);
+}
+
+/* ATTO HBA ioctl support */
+
+/* Tunnel an ATTO HBA IOCTL to the back end driver for processing. */
+static bool hba_ioctl_tunnel(struct esas2r_adapter *a,
+			     struct atto_ioctl *hi,
+			     struct esas2r_request *rq,
+			     struct esas2r_sg_context *sgc)
+{
+	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
+
+	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA);
+
+	if (!esas2r_build_sg_list(a, rq, sgc)) {
+		hi->status = ATTO_STS_OUT_OF_RSRC;
+
+		return false;
+	}
+
+	esas2r_start_request(a, rq);
+
+	return true;
+}
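+
+/*
+ * Tunneling, as used above and in csmi_ioctl_tunnel(): instead of handling
+ * the ioctl locally, the buffered payload is wrapped in a VDA_IOCTL request
+ * and handed to the firmware ("back end") for processing; the caller's
+ * buffered-ioctl wait/complete machinery is reused unchanged.
+ */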
+
+static void scsi_passthru_comp_cb(struct esas2r_adapter *a,
+				  struct esas2r_request *rq)
+{
+	struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx;
+	struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
+	u8 sts = ATTO_SPT_RS_FAILED;
+
+	spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat;
+	spt->sense_length = rq->sense_len;
+	spt->residual_length =
+		le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length);
+
+	switch (rq->req_stat) {
+	case RS_SUCCESS:
+	case RS_SCSI_ERROR:
+		sts = ATTO_SPT_RS_SUCCESS;
+		break;
+	case RS_UNDERRUN:
+		sts = ATTO_SPT_RS_UNDERRUN;
+		break;
+	case RS_OVERRUN:
+		sts = ATTO_SPT_RS_OVERRUN;
+		break;
+	case RS_SEL:
+	case RS_SEL2:
+		sts = ATTO_SPT_RS_NO_DEVICE;
+		break;
+	case RS_NO_LUN:
+		sts = ATTO_SPT_RS_NO_LUN;
+		break;
+	case RS_TIMEOUT:
+		sts = ATTO_SPT_RS_TIMEOUT;
+		break;
+	case RS_DEGRADED:
+		sts = ATTO_SPT_RS_DEGRADED;
+		break;
+	case RS_BUSY:
+		sts = ATTO_SPT_RS_BUSY;
+		break;
+	case RS_ABORTED:
+		sts = ATTO_SPT_RS_ABORTED;
+		break;
+	case RS_RESET:
+		sts = ATTO_SPT_RS_BUS_RESET;
+		break;
+	}
+
+	spt->req_status = sts;
+
+	/* Update the target ID to the next one present. */
+	spt->target_id =
+		esas2r_targ_db_find_next_present(a, (u16)spt->target_id);
+
+	/* Done, call the completion callback. */
+	(*rq->aux_req_cb)(a, rq);
+}
+
+static int hba_ioctl_callback(struct esas2r_adapter *a,
+			      struct esas2r_request *rq,
+			      struct esas2r_sg_context *sgc,
+			      void *context)
+{
+	struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl;
+
+	hi->status = ATTO_STS_SUCCESS;
+
+	switch (hi->function) {
+	case ATTO_FUNC_GET_ADAP_INFO:
+	{
+		u8 *class_code = (u8 *)&a->pcid->class;
+
+		struct atto_hba_get_adapter_info *gai =
+			&hi->data.get_adap_info;
+		int pcie_cap_reg;
+
+		if (hi->flags & HBAF_TUNNEL) {
+			hi->status = ATTO_STS_UNSUPPORTED;
+			break;
+		}
+
+		if (hi->version > ATTO_VER_GET_ADAP_INFO0) {
+			hi->status = ATTO_STS_INV_VERSION;
+			hi->version = ATTO_VER_GET_ADAP_INFO0;
+			break;
+		}
+
+		memset(gai, 0, sizeof(*gai));
+
+		gai->pci.vendor_id = a->pcid->vendor;
+		gai->pci.device_id = a->pcid->device;
+		gai->pci.ss_vendor_id = a->pcid->subsystem_vendor;
+		gai->pci.ss_device_id = a->pcid->subsystem_device;
+		gai->pci.class_code[0] = class_code[0];
+		gai->pci.class_code[1] = class_code[1];
+		gai->pci.class_code[2] = class_code[2];
+		gai->pci.rev_id = a->pcid->revision;
+		gai->pci.bus_num = a->pcid->bus->number;
+		gai->pci.dev_num = PCI_SLOT(a->pcid->devfn);
+		gai->pci.func_num = PCI_FUNC(a->pcid->devfn);
+
+		pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
+		if (pcie_cap_reg) {
+			u16 stat;
+			u32 caps;
+
+			pci_read_config_word(a->pcid,
+					     pcie_cap_reg + PCI_EXP_LNKSTA,
+					     &stat);
+			pci_read_config_dword(a->pcid,
+					      pcie_cap_reg + PCI_EXP_LNKCAP,
+					      &caps);
+
+			gai->pci.link_speed_curr =
+				(u8)(stat & PCI_EXP_LNKSTA_CLS);
+			gai->pci.link_speed_max =
+				(u8)(caps & PCI_EXP_LNKCAP_SLS);
+			gai->pci.link_width_curr =
+				(u8)((stat & PCI_EXP_LNKSTA_NLW)
+				     >> PCI_EXP_LNKSTA_NLW_SHIFT);
+			gai->pci.link_width_max =
+				(u8)((caps & PCI_EXP_LNKCAP_MLW)
+				     >> 4);
+		}
+
+		gai->pci.msi_vector_cnt = 1;
+
+		if (a->pcid->msix_enabled)
+			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX;
+		else if (a->pcid->msi_enabled)
+			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI;
+		else
+			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY;
+
+		gai->adap_type = ATTO_GAI_AT_ESASRAID2;
+
+		if (a->flags2 & AF2_THUNDERLINK)
+			gai->adap_type = ATTO_GAI_AT_TLSASHBA;
+
+		if (a->flags & AF_DEGRADED_MODE)
+			gai->adap_flags |= ATTO_GAI_AF_DEGRADED;
+
+		gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
+				   ATTO_GAI_AF_DEVADDR_SUPP;
+
+		if (a->pcid->subsystem_device == ATTO_ESAS_R60F
+		    || a->pcid->subsystem_device == ATTO_ESAS_R608
+		    || a->pcid->subsystem_device == ATTO_ESAS_R644
+		    || a->pcid->subsystem_device == ATTO_TSSC_3808E)
+			gai->adap_flags |= ATTO_GAI_AF_VIRT_SES;
+
+		gai->num_ports = ESAS2R_NUM_PHYS;
+		gai->num_phys = ESAS2R_NUM_PHYS;
+
+		strcpy(gai->firmware_rev, a->fw_rev);
+		strcpy(gai->flash_rev, a->flash_rev);
+		strcpy(gai->model_name_short, esas2r_get_model_name_short(a));
+		strcpy(gai->model_name, esas2r_get_model_name(a));
+
+		gai->num_targets = ESAS2R_MAX_TARGETS;
+
+		gai->num_busses = 1;
+		gai->num_targsper_bus = gai->num_targets;
+		gai->num_lunsper_targ = 256;
+
+		if (a->pcid->subsystem_device == ATTO_ESAS_R6F0
+		    || a->pcid->subsystem_device == ATTO_ESAS_R60F)
+			gai->num_connectors = 4;
+		else
+			gai->num_connectors = 2;
+
+		gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP;
+
+		gai->num_targets_backend = a->num_targets_backend;
+
+		gai->tunnel_flags = a->ioctl_tunnel
+				    & (ATTO_GAI_TF_MEM_RW
+				       | ATTO_GAI_TF_TRACE
+				       | ATTO_GAI_TF_SCSI_PASS_THRU
+				       | ATTO_GAI_TF_GET_DEV_ADDR
+				       | ATTO_GAI_TF_PHY_CTRL
+				       | ATTO_GAI_TF_CONN_CTRL
+				       | ATTO_GAI_TF_GET_DEV_INFO);
+		break;
+	}
+
+	case ATTO_FUNC_GET_ADAP_ADDR:
+	{
+		struct atto_hba_get_adapter_address *gaa =
+			&hi->data.get_adap_addr;
+
+		if (hi->flags & HBAF_TUNNEL) {
+			hi->status = ATTO_STS_UNSUPPORTED;
+			break;
+		}
+
+		if (hi->version > ATTO_VER_GET_ADAP_ADDR0) {
+			hi->status = ATTO_STS_INV_VERSION;
+			hi->version = ATTO_VER_GET_ADAP_ADDR0;
+		} else if (gaa->addr_type == ATTO_GAA_AT_PORT
+			   || gaa->addr_type == ATTO_GAA_AT_NODE) {
+			if (gaa->addr_type == ATTO_GAA_AT_PORT
+			    && gaa->port_id >= ESAS2R_NUM_PHYS) {
+				hi->status = ATTO_STS_NOT_APPL;
+			} else {
+				memcpy((u64 *)gaa->address,
+				       &a->nvram->sas_addr[0], sizeof(u64));
+				gaa->addr_len = sizeof(u64);
+			}
+		} else {
+			hi->status = ATTO_STS_INV_PARAM;
+		}
+
+		break;
+	}
+
+	case ATTO_FUNC_MEM_RW:
+	{
+		if (hi->flags & HBAF_TUNNEL) {
+			if (hba_ioctl_tunnel(a, hi, rq, sgc))
+				return true;
+
+			break;
+		}
+
+		hi->status = ATTO_STS_UNSUPPORTED;
+
+		break;
+	}
+
+	case ATTO_FUNC_TRACE:
+	{
+		struct atto_hba_trace *trc = &hi->data.trace;
+
+		if (hi->flags & HBAF_TUNNEL) {
+			if (hba_ioctl_tunnel(a, hi, rq, sgc))
+				return true;
+
+			break;
+		}
+
+		if (hi->version > ATTO_VER_TRACE1) {
+			hi->status = ATTO_STS_INV_VERSION;
+			hi->version = ATTO_VER_TRACE1;
+			break;
+		}
+
+		if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP
+		    && hi->version >= ATTO_VER_TRACE1) {
+			if (trc->trace_func == ATTO_TRC_TF_UPLOAD) {
+				u32 len = hi->data_length;
+				u32 offset = trc->current_offset;
+				u32 total_len = ESAS2R_FWCOREDUMP_SZ;
+
+				/* Size is zero if a core dump isn't present */
+				if (!(a->flags2 & AF2_COREDUMP_SAVED))
+					total_len = 0;
+
+				if (len > total_len)
+					len = total_len;
+
+				if (offset >= total_len
+				    || offset + len > total_len
+				    || len == 0) {
+					hi->status = ATTO_STS_INV_PARAM;
+					break;
+				}
+
+				memcpy(trc + 1,
+				       a->fw_coredump_buff + offset,
+				       len);
+
+				hi->data_length = len;
+			} else if (trc->trace_func == ATTO_TRC_TF_RESET) {
+				memset(a->fw_coredump_buff, 0,
+				       ESAS2R_FWCOREDUMP_SZ);
+
+				esas2r_lock_clear_flags(&a->flags2,
+							AF2_COREDUMP_SAVED);
+			} else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
+				hi->status = ATTO_STS_UNSUPPORTED;
+				break;
+			}
+
+			/* Always return all the info we can. */
+			trc->trace_mask = 0;
+			trc->current_offset = 0;
+			trc->total_length = ESAS2R_FWCOREDUMP_SZ;
+
+			/* Return zero length buffer if core dump not present */
+			if (!(a->flags2 & AF2_COREDUMP_SAVED))
+				trc->total_length = 0;
+		} else {
+			hi->status = ATTO_STS_UNSUPPORTED;
+		}
+
+		break;
+	}
+
+	case ATTO_FUNC_SCSI_PASS_THRU:
+	{
+		struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
+		struct scsi_lun lun;
+
+		memcpy(&lun, spt->lun, sizeof(struct scsi_lun));
+
+		if (hi->flags & HBAF_TUNNEL) {
+			if (hba_ioctl_tunnel(a, hi, rq, sgc))
+				return true;
+
+			break;
+		}
+
+		if (hi->version > ATTO_VER_SCSI_PASS_THRU0) {
+			hi->status = ATTO_STS_INV_VERSION;
+			hi->version = ATTO_VER_SCSI_PASS_THRU0;
+			break;
+		}
+
+		if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) {
+			hi->status = ATTO_STS_INV_PARAM;
+			break;
+		}
+
+		esas2r_sgc_init(sgc, a, rq, NULL);
+
+		sgc->length = hi->data_length;
+		sgc->cur_offset += offsetof(struct atto_ioctl, data.byte)
+				   + sizeof(struct atto_hba_scsi_pass_thru);
+
+		/* Finish request initialization */
+		rq->target_id = (u16)spt->target_id;
+		rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]);
+		memcpy(rq->vrq->scsi.cdb, spt->cdb, 16);
+		rq->vrq->scsi.length = cpu_to_le32(hi->data_length);
+		rq->sense_len = spt->sense_length;
+		rq->sense_buf = (u8 *)spt->sense_data;
+		/* NOTE: we ignore spt->timeout */
+
+		/*
+		 * always usurp the completion callback since the interrupt
+		 * callback mechanism may be used.
+		 */
+
+		rq->aux_req_cx = hi;
+		rq->aux_req_cb = rq->comp_cb;
+		rq->comp_cb = scsi_passthru_comp_cb;
+
+		if (spt->flags & ATTO_SPTF_DATA_IN) {
+			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
+		} else if (spt->flags & ATTO_SPTF_DATA_OUT) {
+			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
+		} else {
+			if (sgc->length) {
+				hi->status = ATTO_STS_INV_PARAM;
+				break;
+			}
+		}
+
+		if (spt->flags & ATTO_SPTF_ORDERED_Q)
+			rq->vrq->scsi.flags |=
+				cpu_to_le32(FCP_CMND_TA_ORDRD_Q);
+		else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
+			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);
+
+		if (!esas2r_build_sg_list(a, rq, sgc)) {
+			hi->status = ATTO_STS_OUT_OF_RSRC;
+			break;
+		}
+
+		esas2r_start_request(a, rq);
+
+		return true;
+	}
+
+	case ATTO_FUNC_GET_DEV_ADDR:
+	{
+		struct atto_hba_get_device_address *gda =
+			&hi->data.get_dev_addr;
+		struct esas2r_target *t;
+
+		if (hi->flags & HBAF_TUNNEL) {
+			if (hba_ioctl_tunnel(a, hi, rq, sgc))
+				return true;
+
+			break;
+		}
+
+		if (hi->version > ATTO_VER_GET_DEV_ADDR0) {
+			hi->status = ATTO_STS_INV_VERSION;
+			hi->version = ATTO_VER_GET_DEV_ADDR0;
+			break;
+		}
+
+		if (gda->target_id >= ESAS2R_MAX_TARGETS) {
+			hi->status = ATTO_STS_INV_PARAM;
+			break;
+		}
+
+		t = a->targetdb + (u16)gda->target_id;
+
+		if (t->target_state != TS_PRESENT) {
+			hi->status = ATTO_STS_FAILED;
+		} else if (gda->addr_type == ATTO_GDA_AT_PORT) {
+			if (t->sas_addr == 0) {
+				hi->status = ATTO_STS_UNSUPPORTED;
+			} else {
+				*(u64 *)gda->address = t->sas_addr;
+
+				gda->addr_len = sizeof(u64);
+			}
+		} else if (gda->addr_type == ATTO_GDA_AT_NODE) {
+			hi->status = ATTO_STS_NOT_APPL;
+		} else {
+			hi->status = ATTO_STS_INV_PARAM;
+		}
+
+		/* update the target ID to the next one present. */
+
+		gda->target_id =
+			esas2r_targ_db_find_next_present(a,
+							 (u16)gda->target_id);
+		break;
+	}
+
+	case ATTO_FUNC_PHY_CTRL:
+	case ATTO_FUNC_CONN_CTRL:
+	{
+		if (hba_ioctl_tunnel(a, hi, rq, sgc))
+			return true;
+
+		break;
+	}
+
+	case ATTO_FUNC_ADAP_CTRL:
+	{
+		struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl;
+
+		if (hi->flags & HBAF_TUNNEL) {
+			hi->status = ATTO_STS_UNSUPPORTED;
+			break;
+		}
+
+		if (hi->version > ATTO_VER_ADAP_CTRL0) {
+			hi->status = ATTO_STS_INV_VERSION;
+			hi->version = ATTO_VER_ADAP_CTRL0;
+			break;
+		}
+
+		if (ac->adap_func == ATTO_AC_AF_HARD_RST) {
+			esas2r_reset_adapter(a);
+		} else if (ac->adap_func != ATTO_AC_AF_GET_STATE) {
+			hi->status = ATTO_STS_UNSUPPORTED;
+			break;
+		}
+
+		if (a->flags & AF_CHPRST_NEEDED)
+			ac->adap_state = ATTO_AC_AS_RST_SCHED;
+		else if (a->flags & AF_CHPRST_PENDING)
+			ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
+		else if (a->flags & AF_DISC_PENDING)
+			ac->adap_state = ATTO_AC_AS_RST_DISC;
+		else if (a->flags & AF_DISABLED)
+			ac->adap_state = ATTO_AC_AS_DISABLED;
+		else if (a->flags & AF_DEGRADED_MODE)
+			ac->adap_state = ATTO_AC_AS_DEGRADED;
+		else
+			ac->adap_state = ATTO_AC_AS_OK;
+
+		break;
+	}
+
+	case ATTO_FUNC_GET_DEV_INFO:
+	{
+		struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info;
+		struct esas2r_target *t;
+
+		if (hi->flags & HBAF_TUNNEL) {
+			if (hba_ioctl_tunnel(a, hi, rq, sgc))
+				return true;
+
+			break;
+		}
+
+		if (hi->version > ATTO_VER_GET_DEV_INFO0) {
+			hi->status = ATTO_STS_INV_VERSION;
+			hi->version = ATTO_VER_GET_DEV_INFO0;
+			break;
+		}
+
+		if (gdi->target_id >= ESAS2R_MAX_TARGETS) {
+			hi->status = ATTO_STS_INV_PARAM;
+			break;
+		}
+
+		t = a->targetdb + (u16)gdi->target_id;
+
+		/* update the target ID to the next one present. */
+
+		gdi->target_id =
+			esas2r_targ_db_find_next_present(a,
+							 (u16)gdi->target_id);
+
+		if (t->target_state != TS_PRESENT) {
+			hi->status = ATTO_STS_FAILED;
+			break;
+		}
+
+		hi->status = ATTO_STS_UNSUPPORTED;
+		break;
+	}
+
+	default:
+
+		hi->status = ATTO_STS_INV_FUNC;
+		break;
+	}
+
+	return false;
+}
+
+static void hba_ioctl_done_callback(struct esas2r_adapter *a,
+				    struct esas2r_request *rq, void *context)
+{
+	struct atto_ioctl *ioctl_hba =
+		(struct atto_ioctl *)esas2r_buffered_ioctl;
+
+	esas2r_debug("hba_ioctl_done_callback %d", a->index);
+
+	if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) {
+		struct atto_hba_get_adapter_info *gai =
+			&ioctl_hba->data.get_adap_info;
+
+		esas2r_debug("ATTO_FUNC_GET_ADAP_INFO");
+
+		gai->drvr_rev_major = ESAS2R_MAJOR_REV;
+		gai->drvr_rev_minor = ESAS2R_MINOR_REV;
+
+		strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR);
+		strcpy(gai->drvr_name, ESAS2R_DRVR_NAME);
+
+		gai->num_busses = 1;
+		gai->num_targsper_bus = ESAS2R_MAX_ID + 1;
+		gai->num_lunsper_targ = 1;
+	}
+}
+
+u8 handle_hba_ioctl(struct esas2r_adapter *a,
+		    struct atto_ioctl *ioctl_hba)
+{
+	struct esas2r_buffered_ioctl bi;
+
+	memset(&bi, 0, sizeof(bi));
+
+	bi.a = a;
+	bi.ioctl = ioctl_hba;
+	bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length;
+	bi.callback = hba_ioctl_callback;
+	bi.context = NULL;
+	bi.done_callback = hba_ioctl_done_callback;
+	bi.done_context = NULL;
+	bi.offset = 0;
+
+	return handle_buffered_ioctl(&bi);
+}
+
+
+int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
+			struct esas2r_sas_nvram *data)
+{
+	int result = 0;
+
+	a->nvram_command_done = 0;
+	rq->comp_cb = complete_nvr_req;
+
+	if (esas2r_nvram_write(a, rq, data)) {
+		/* now wait around for it to complete. */
+		while (!a->nvram_command_done)
+			wait_event_interruptible(a->nvram_waiter,
+						 a->nvram_command_done);
+
+		/* done, check the status. */
+		if (rq->req_stat == RS_SUCCESS)
+			result = 1;
+	}
+	return result;
+}
+
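+/*
+ * The flag-plus-wait_event_interruptible() pattern above is an open-coded
+ * form of the kernel's completion idiom.  A minimal sketch of the
+ * equivalent, assuming a hypothetical "struct completion nvram_done"
+ * field (not part of this driver) set up with init_completion():
+ *
+ *	rq->comp_cb = complete_nvr_req;
+ *	if (esas2r_nvram_write(a, rq, data)) {
+ *		wait_for_completion(&a->nvram_done);
+ *		if (rq->req_stat == RS_SUCCESS)
+ *			result = 1;
+ *	}
+ *
+ * with complete_nvr_req() calling complete(&a->nvram_done) instead of
+ * setting nvram_command_done and waking a->nvram_waiter.
+ */
+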
+
+/* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */
+int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
+{
+	struct atto_express_ioctl *ioctl = NULL;
+	struct esas2r_adapter *a;
+	struct esas2r_request *rq;
+	u16 code;
+	int err;
+
+	esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg);
+
+	if ((arg == NULL)
+	    || (cmd < EXPRESS_IOCTL_MIN)
+	    || (cmd > EXPRESS_IOCTL_MAX))
+		return -ENOTSUPP;
+
+	if (!access_ok(VERIFY_WRITE, arg, sizeof(struct atto_express_ioctl))) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "ioctl_handler access_ok failed for cmd %d, "
+			   "address %p", cmd,
+			   arg);
+		return -EFAULT;
+	}
+
+	/* allocate a kernel memory buffer for the IOCTL data */
+	ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL);
+	if (ioctl == NULL) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "ioctl_handler kzalloc failed for %zu bytes",
+			   sizeof(struct atto_express_ioctl));
+		return -ENOMEM;
+	}
+
+	err = __copy_from_user(ioctl, arg, sizeof(struct atto_express_ioctl));
+	if (err != 0) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "copy_from_user didn't copy everything (err %d, cmd %d)",
+			   err,
+			   cmd);
+		kfree(ioctl);
+
+		return -EFAULT;
+	}
+
+	/* verify the signature */
+
+	if (memcmp(ioctl->header.signature,
+		   EXPRESS_IOCTL_SIGNATURE,
+		   EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) {
+		esas2r_log(ESAS2R_LOG_WARN, "invalid signature");
+		kfree(ioctl);
+
+		return -ENOTSUPP;
+	}
+
+	/* assume success */
+
+	ioctl->header.return_code = IOCTL_SUCCESS;
+	err = 0;
+
+	/*
+	 * handle EXPRESS_IOCTL_GET_CHANNELS
+	 * without paying attention to channel
+	 */
+
+	if (cmd == EXPRESS_IOCTL_GET_CHANNELS) {
+		int i = 0, k = 0;
+
+		ioctl->data.chanlist.num_channels = 0;
+
+		while (i < MAX_ADAPTERS) {
+			if (esas2r_adapters[i]) {
+				ioctl->data.chanlist.num_channels++;
+				ioctl->data.chanlist.channel[k] = i;
+				k++;
+			}
+			i++;
+		}
+
+		goto ioctl_done;
+	}
+
+	/* get the channel */
+
+	if (ioctl->header.channel == 0xFF) {
+		a = (struct esas2r_adapter *)hostdata;
+	} else {
+		if (ioctl->header.channel >= MAX_ADAPTERS
+		    || esas2r_adapters[ioctl->header.channel] == NULL) {
+			ioctl->header.return_code = IOCTL_BAD_CHANNEL;
+			esas2r_log(ESAS2R_LOG_WARN, "bad channel value");
+			kfree(ioctl);
+
+			return -ENOTSUPP;
+		}
+
+		a = esas2r_adapters[ioctl->header.channel];
+	}
+
+	switch (cmd) {
+	case EXPRESS_IOCTL_RW_FIRMWARE:
+
+		if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) {
+			err = esas2r_write_fw(a,
+					      (char *)ioctl->data.fwrw.image,
+					      0,
+					      sizeof(struct
+						     atto_express_ioctl));
+
+			if (err >= 0) {
+				err = esas2r_read_fw(a,
+						     (char *)ioctl->data.fwrw.
+						     image,
+						     0,
+						     sizeof(struct
+							    atto_express_ioctl));
+			}
+		} else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) {
+			err = esas2r_write_fs(a,
+					      (char *)ioctl->data.fwrw.image,
+					      0,
+					      sizeof(struct
+						     atto_express_ioctl));
+
+			if (err >= 0) {
+				err = esas2r_read_fs(a,
+						     (char *)ioctl->data.fwrw.
+						     image,
+						     0,
+						     sizeof(struct
+							    atto_express_ioctl));
+			}
+		} else {
+			ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE;
+		}
+
+		break;
+
+	case EXPRESS_IOCTL_READ_PARAMS:
+
+		memcpy(ioctl->data.prw.data_buffer, a->nvram,
+		       sizeof(struct esas2r_sas_nvram));
+		ioctl->data.prw.code = 1;
+		break;
+
+	case EXPRESS_IOCTL_WRITE_PARAMS:
+
+		rq = esas2r_alloc_request(a);
+		if (rq == NULL) {
+			ioctl->data.prw.code = 0;
+			break;
+		}
+
+		code = esas2r_write_params(a, rq,
+					   (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
+		ioctl->data.prw.code = code;
+
+		esas2r_free_request(a, rq);
+
+		break;
+
+	case EXPRESS_IOCTL_DEFAULT_PARAMS:
+
+		esas2r_nvram_get_defaults(a,
+					  (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
+		ioctl->data.prw.code = 1;
+		break;
+
+	case EXPRESS_IOCTL_CHAN_INFO:
+
+		ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV;
+		ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV;
+		ioctl->data.chaninfo.IRQ = a->pcid->irq;
+		ioctl->data.chaninfo.device_id = a->pcid->device;
+		ioctl->data.chaninfo.vendor_id = a->pcid->vendor;
+		ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device;
+		ioctl->data.chaninfo.revision_id = a->pcid->revision;
+		ioctl->data.chaninfo.pci_bus = a->pcid->bus->number;
+		ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn;
+		ioctl->data.chaninfo.core_rev = 0;
+		ioctl->data.chaninfo.host_no = a->host->host_no;
+		ioctl->data.chaninfo.hbaapi_rev = 0;
+		break;
+
+	case EXPRESS_IOCTL_SMP:
+		ioctl->header.return_code = handle_smp_ioctl(a,
+							     &ioctl->data.
+							     ioctl_smp);
+		break;
+
+	case EXPRESS_CSMI:
+		ioctl->header.return_code =
+			handle_csmi_ioctl(a, &ioctl->data.csmi);
+		break;
+
+	case EXPRESS_IOCTL_HBA:
+		ioctl->header.return_code = handle_hba_ioctl(a,
+							     &ioctl->data.
+							     ioctl_hba);
+		break;
+
+	case EXPRESS_IOCTL_VDA:
+		err = esas2r_write_vda(a,
+				       (char *)&ioctl->data.ioctl_vda,
+				       0,
+				       sizeof(struct atto_ioctl_vda) +
+				       ioctl->data.ioctl_vda.data_length);
+
+		if (err >= 0) {
+			err = esas2r_read_vda(a,
+					      (char *)&ioctl->data.ioctl_vda,
+					      0,
+					      sizeof(struct atto_ioctl_vda) +
+					      ioctl->data.ioctl_vda.data_length);
+		}
+
+		break;
+
+	case EXPRESS_IOCTL_GET_MOD_INFO:
+
+		ioctl->data.modinfo.adapter = a;
+		ioctl->data.modinfo.pci_dev = a->pcid;
+		ioctl->data.modinfo.scsi_host = a->host;
+		ioctl->data.modinfo.host_no = a->host->host_no;
+
+		break;
+
+	default:
+		esas2r_debug("esas2r_ioctl invalid cmd %d!", cmd);
+		ioctl->header.return_code = IOCTL_ERR_INVCMD;
+	}
+
+ioctl_done:
+
+	if (err < 0) {
+		esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %d", err,
+			   cmd);
+
+		switch (err) {
+		case -ENOMEM:
+		case -EBUSY:
+			ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES;
+			break;
+
+		case -ENOSYS:
+		case -EINVAL:
+			ioctl->header.return_code = IOCTL_INVALID_PARAM;
+			break;
+
+		default:
+			ioctl->header.return_code = IOCTL_GENERAL_ERROR;
+			break;
+		}
+	}
+
+	/* Always copy the buffer back, if only to pick up the status */
+	err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
+	if (err != 0) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "ioctl_handler copy_to_user didn't copy "
+			   "everything (err %d, cmd %d)", err,
+			   cmd);
+		kfree(ioctl);
+
+		return -EFAULT;
+	}
+
+	kfree(ioctl);
+
+	return 0;
+}
+
+int esas2r_ioctl(struct scsi_device *sd, int cmd, void __user *arg)
+{
+	return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg);
+}
+
+static void free_fw_buffers(struct esas2r_adapter *a)
+{
+	if (a->firmware.data) {
+		dma_free_coherent(&a->pcid->dev,
+				  (size_t)a->firmware.orig_len,
+				  a->firmware.data,
+				  (dma_addr_t)a->firmware.phys);
+
+		a->firmware.data = NULL;
+	}
+}
+
+static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length)
+{
+	free_fw_buffers(a);
+
+	a->firmware.orig_len = length;
+
+	a->firmware.data = (u8 *)dma_alloc_coherent(&a->pcid->dev,
+						    (size_t)length,
+						    (dma_addr_t *)&a->firmware.
+						    phys,
+						    GFP_KERNEL);
+
+	if (!a->firmware.data) {
+		esas2r_debug("buffer alloc failed!");
+		return 0;
+	}
+
+	return 1;
+}
+
+/* Handle a call to read firmware. */
+int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count)
+{
+	esas2r_trace_enter();
+	/* if the cached header is a status, simply copy it over and return. */
+	if (a->firmware.state == FW_STATUS_ST) {
+		int size = min_t(int, count, sizeof(a->firmware.header));
+		esas2r_trace_exit();
+		memcpy(buf, &a->firmware.header, size);
+		esas2r_debug("esas2r_read_fw: STATUS size %d", size);
+		return size;
+	}
+
+	/*
+	 * if the cached header is a command, do it if at
+	 * offset 0, otherwise copy the pieces.
+	 */
+
+	if (a->firmware.state == FW_COMMAND_ST) {
+		u32 length = a->firmware.header.length;
+		esas2r_trace_exit();
+
+		esas2r_debug("esas2r_read_fw: COMMAND length %d off %d",
+			     length,
+			     off);
+
+		if (off == 0) {
+			if (a->firmware.header.action == FI_ACT_UP) {
+				if (!allocate_fw_buffers(a, length))
+					return -ENOMEM;
+
+				/* copy header over */
+
+				memcpy(a->firmware.data,
+				       &a->firmware.header,
+				       sizeof(a->firmware.header));
+
+				do_fm_api(a,
+					  (struct esas2r_flash_img *)a->firmware.data);
+			} else if (a->firmware.header.action == FI_ACT_UPSZ) {
+				int size = min_t(int, count,
+						 sizeof(a->firmware.header));
+				do_fm_api(a, &a->firmware.header);
+				memcpy(buf, &a->firmware.header, size);
+				esas2r_debug("FI_ACT_UPSZ size %d", size);
+				return size;
+			} else {
+				esas2r_debug("invalid action %d",
+					     a->firmware.header.action);
+				return -ENOSYS;
+			}
+		}
+
+		if (count + off > length)
+			count = length - off;
+
+		if (count < 0)
+			return 0;
+
+		if (!a->firmware.data) {
+			esas2r_debug(
+				"read: nonzero offset but no buffer available!");
+			return -ENOMEM;
+		}
+
+		esas2r_debug("esas2r_read_fw: off %d count %d length %d ", off,
+			     count,
+			     length);
+
+		memcpy(buf, &a->firmware.data[off], count);
+
+		/* when done, release the buffer */
+
+		if (length <= off + count) {
+			esas2r_debug("esas2r_read_fw: freeing buffer!");
+
+			free_fw_buffers(a);
+		}
+
+		return count;
+	}
+
+	esas2r_trace_exit();
+	esas2r_debug("esas2r_read_fw: invalid firmware state %d",
+		     a->firmware.state);
+
+	return -EINVAL;
+}
+
+/* Handle a call to write firmware. */
+int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
+		    int count)
+{
+	u32 length;
+
+	if (off == 0) {
+		struct esas2r_flash_img *header =
+			(struct esas2r_flash_img *)buf;
+
+		/* assume version 0 flash image */
+
+		int min_size = sizeof(struct esas2r_flash_img_v0);
+
+		a->firmware.state = FW_INVALID_ST;
+
+		/* validate the version field first */
+
+		if (count < 4
+		    ||  header->fi_version > FI_VERSION_1) {
+			esas2r_debug(
+				"esas2r_write_fw: short header or invalid version");
+			return -EINVAL;
+		}
+
+		/* See if it's a version 1 flash image */
+
+		if (header->fi_version == FI_VERSION_1)
+			min_size = sizeof(struct esas2r_flash_img);
+
+		/* If this is the start, the header must be full and valid. */
+		if (count < min_size) {
+			esas2r_debug("esas2r_write_fw: short header, aborting");
+			return -EINVAL;
+		}
+
+		/* Make sure the size is reasonable. */
+		length = header->length;
+
+		if (length > 1024 * 1024) {
+			esas2r_debug(
+				"esas2r_write_fw: hosed, length %d  fi_version %d",
+				length, header->fi_version);
+			return -EINVAL;
+		}
+
+		/*
+		 * If this is a write command, allocate memory because
+		 * we have to cache everything. otherwise, just cache
+		 * the header, because the read op will do the command.
+		 */
+
+		if (header->action == FI_ACT_DOWN) {
+			if (!allocate_fw_buffers(a, length))
+				return -ENOMEM;
+
+			/*
+			 * Store the command, so there is context on subsequent
+			 * calls.
+			 */
+			memcpy(&a->firmware.header,
+			       buf,
+			       sizeof(*header));
+		} else if (header->action == FI_ACT_UP
+			   ||  header->action == FI_ACT_UPSZ) {
+			/* Save the command, result will be picked up on read */
+			memcpy(&a->firmware.header,
+			       buf,
+			       sizeof(*header));
+
+			a->firmware.state = FW_COMMAND_ST;
+
+			esas2r_debug(
+				"esas2r_write_fw: COMMAND, count %d, action %d ",
+				count, header->action);
+
+			/*
+			 * Pretend we took the whole buffer,
+			 * so we don't get bothered again.
+			 */
+
+			return count;
+		} else {
+			esas2r_debug("esas2r_write_fw: invalid action %d ",
+				     a->firmware.header.action);
+			return -ENOSYS;
+		}
+	} else {
+		length = a->firmware.header.length;
+	}
+
+	/*
+	 * We only get here on a download command, regardless of offset.
+	 * the chunks written by the system need to be cached, and when
+	 * the final one arrives, issue the fmapi command.
+	 */
+
+	if (off + count > length)
+		count = length - off;
+
+	if (count > 0) {
+		esas2r_debug("esas2r_write_fw: off %d count %d length %d", off,
+			     count,
+			     length);
+
+		/*
+		 * On a full upload, the system tries sending the whole buffer.
+		 * there's nothing to do with it, so just drop it here, before
+		 * trying to copy over into unallocated memory!
+		 */
+		if (a->firmware.header.action == FI_ACT_UP)
+			return count;
+
+		if (!a->firmware.data) {
+			esas2r_debug(
+				"write: nonzero offset but no buffer available!");
+			return -ENOMEM;
+		}
+
+		memcpy(&a->firmware.data[off], buf, count);
+
+		if (length == off + count) {
+			do_fm_api(a,
+				  (struct esas2r_flash_img *)a->firmware.data);
+
+			/*
+			 * Now copy the header result to be picked up by the
+			 * next read
+			 */
+			memcpy(&a->firmware.header,
+			       a->firmware.data,
+			       sizeof(a->firmware.header));
+
+			a->firmware.state = FW_STATUS_ST;
+
+			esas2r_debug("write completed");
+
+			/*
+			 * Since the system has the data buffered, the only way
+			 * this can leak is if a root user writes a program
+			 * that writes a shorter buffer than it claims, and the
+			 * copyin fails.
+			 */
+			free_fw_buffers(a);
+		}
+	}
+
+	return count;
+}
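+
+/*
+ * The fw interface implemented by the two functions above is a small
+ * stateful protocol driven from user space: a download (FI_ACT_DOWN)
+ * writes the image, and a subsequent read returns the cached status
+ * header.  A minimal user-space sketch, assuming an image that starts
+ * with a valid struct esas2r_flash_img header (the sysfs path shown is
+ * illustrative and depends on the host number):
+ *
+ *	int fd = open("/sys/class/scsi_host/host0/fw", O_RDWR);
+ *	struct esas2r_flash_img status;
+ *
+ *	pwrite(fd, image, image_len, 0);        // header first, then data
+ *	pread(fd, &status, sizeof(status), 0);  // pick up the result
+ *
+ * When the final chunk arrives, esas2r_write_fw() issues the flash
+ * manager command via do_fm_api() and caches the resulting header for
+ * the read.
+ */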
+
+/* Callback for the completion of a VDA request. */
+static void vda_complete_req(struct esas2r_adapter *a,
+			     struct esas2r_request *rq)
+{
+	a->vda_command_done = 1;
+	wake_up_interruptible(&a->vda_waiter);
+}
+
+/* Scatter/gather callback for VDA requests */
+static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr)
+{
+	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
+	int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer;
+
+	(*addr) = a->ppvda_buffer + offset;
+	return VDA_MAX_BUFFER_SIZE - offset;
+}
+
+/* Handle a call to read a VDA command. */
+int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count)
+{
+	if (!a->vda_buffer)
+		return -ENOMEM;
+
+	if (off == 0) {
+		struct esas2r_request *rq;
+		struct atto_ioctl_vda *vi =
+			(struct atto_ioctl_vda *)a->vda_buffer;
+		struct esas2r_sg_context sgc;
+		bool wait_for_completion;
+
+		/*
+		 * Presumably, someone has already written to the vda_buffer,
+		 * and now they are reading back the response, so now we will
+		 * actually issue the request to the chip and reply.
+		 */
+
+		/* allocate a request */
+		rq = esas2r_alloc_request(a);
+		if (rq == NULL) {
+			esas2r_debug("esas2r_read_vda: out of requests");
+			return -EBUSY;
+		}
+
+		rq->comp_cb = vda_complete_req;
+
+		sgc.first_req = rq;
+		sgc.adapter = a;
+		sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ;
+		sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda;
+
+		a->vda_command_done = 0;
+
+		wait_for_completion =
+			esas2r_process_vda_ioctl(a, vi, rq, &sgc);
+
+		if (wait_for_completion) {
+			/* now wait around for it to complete. */
+
+			while (!a->vda_command_done)
+				wait_event_interruptible(a->vda_waiter,
+							 a->vda_command_done);
+		}
+
+		esas2r_free_request(a, rq);
+	}
+
+	if (off > VDA_MAX_BUFFER_SIZE)
+		return 0;
+
+	if (count + off > VDA_MAX_BUFFER_SIZE)
+		count = VDA_MAX_BUFFER_SIZE - off;
+
+	if (count < 0)
+		return 0;
+
+	memcpy(buf, a->vda_buffer + off, count);
+
+	return count;
+}
+
+/* Handle a call to write a VDA command. */
+int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
+		     int count)
+{
+	/*
+	 * allocate memory for it, if not already done.  once allocated,
+	 * we will keep it around until the driver is unloaded.
+	 */
+
+	if (!a->vda_buffer) {
+		dma_addr_t dma_addr;
+		a->vda_buffer = (u8 *)dma_alloc_coherent(&a->pcid->dev,
+							 (size_t)
+							 VDA_MAX_BUFFER_SIZE,
+							 &dma_addr,
+							 GFP_KERNEL);
+
+		a->ppvda_buffer = dma_addr;
+	}
+
+	if (!a->vda_buffer)
+		return -ENOMEM;
+
+	if (off > VDA_MAX_BUFFER_SIZE)
+		return 0;
+
+	if (count + off > VDA_MAX_BUFFER_SIZE)
+		count = VDA_MAX_BUFFER_SIZE - off;
+
+	if (count < 1)
+		return 0;
+
+	memcpy(a->vda_buffer + off, buf, count);
+
+	return count;
+}
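+
+/*
+ * Like the firmware interface, the VDA interface is a two-step protocol:
+ * user space first writes an atto_ioctl_vda command into the buffer, then
+ * reads it back, and it is the read at offset 0 that actually submits the
+ * request to the chip.  A minimal sketch (file descriptor and sizes
+ * illustrative):
+ *
+ *	pwrite(fd, &vda_cmd, sizeof(vda_cmd), 0);  // stage the command
+ *	pread(fd, &vda_cmd, sizeof(vda_cmd), 0);   // submit, get response
+ */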
+
+/* Callback for the completion of an FS_API request.*/
+static void fs_api_complete_req(struct esas2r_adapter *a,
+				struct esas2r_request *rq)
+{
+	a->fs_api_command_done = 1;
+
+	wake_up_interruptible(&a->fs_api_waiter);
+}
+
+/* Scatter/gather callback for VDA requests */
+static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr)
+{
+	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
+	struct esas2r_ioctl_fs *fs =
+		(struct esas2r_ioctl_fs *)a->fs_api_buffer;
+	u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs;
+
+	(*addr) = a->ppfs_api_buffer + offset;
+
+	return a->fs_api_buffer_size - offset;
+}
+
+/* Handle a call to read firmware via FS_API. */
+int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
+{
+	if (!a->fs_api_buffer)
+		return -ENOMEM;
+
+	if (off == 0) {
+		struct esas2r_request *rq;
+		struct esas2r_sg_context sgc;
+		struct esas2r_ioctl_fs *fs =
+			(struct esas2r_ioctl_fs *)a->fs_api_buffer;
+
+		/* If another flash request is already in progress, return. */
+		if (down_interruptible(&a->fs_api_semaphore)) {
+busy:
+			fs->status = ATTO_STS_OUT_OF_RSRC;
+			return -EBUSY;
+		}
+
+		/*
+		 * Presumably, someone has already written to the
+		 * fs_api_buffer, and now they are reading back the
+		 * response, so now we will actually issue the request
+		 * to the chip and reply.  Allocate a request.
+		 */
+
+		rq = esas2r_alloc_request(a);
+		if (rq == NULL) {
+			esas2r_debug("esas2r_read_fs: out of requests");
+			up(&a->fs_api_semaphore);
+			goto busy;
+		}
+
+		rq->comp_cb = fs_api_complete_req;
+
+		/* Set up the SGCONTEXT to build the s/g table */
+
+		sgc.cur_offset = fs->data;
+		sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fs_api;
+
+		a->fs_api_command_done = 0;
+
+		if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) {
+			if (fs->status == ATTO_STS_OUT_OF_RSRC)
+				count = -EBUSY;
+
+			goto dont_wait;
+		}
+
+		/* Now wait around for it to complete. */
+
+		while (!a->fs_api_command_done)
+			wait_event_interruptible(a->fs_api_waiter,
+						 a->fs_api_command_done);
+dont_wait:
+		/* Free the request and keep going */
+		up(&a->fs_api_semaphore);
+		esas2r_free_request(a, rq);
+
+		/* Pick up possible error code from above */
+		if (count < 0)
+			return count;
+	}
+
+	if (off > a->fs_api_buffer_size)
+		return 0;
+
+	if (count + off > a->fs_api_buffer_size)
+		count = a->fs_api_buffer_size - off;
+
+	if (count < 0)
+		return 0;
+
+	memcpy(buf, a->fs_api_buffer + off, count);
+
+	return count;
+}
+
+/* Handle a call to write firmware via FS_API. */
+int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
+		    int count)
+{
+	if (off == 0) {
+		struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)buf;
+		u32 length;
+
+		/*
+		 * Beginning a command.  We assume we'll get at least
+		 * enough in the first write so we can look at the
+		 * header and see how much we need to alloc.  Validate
+		 * the size before touching the header fields.
+		 */
+
+		if (count < offsetof(struct esas2r_ioctl_fs, data))
+			return -EINVAL;
+
+		length = fs->command.length +
+			 offsetof(struct esas2r_ioctl_fs, data);
+
+		/*
+		 * Special case, for BEGIN commands, the length field
+		 * is lying to us, so just get enough for the header.
+		 */
+
+		if (fs->command.command == ESAS2R_FS_CMD_BEGINW)
+			length = offsetof(struct esas2r_ioctl_fs, data);
+
+		/* Allocate a buffer or use the existing buffer. */
+		if (a->fs_api_buffer) {
+			if (a->fs_api_buffer_size < length) {
+				/* Free too-small buffer and get a new one */
+				dma_free_coherent(&a->pcid->dev,
+						  (size_t)a->fs_api_buffer_size,
+						  a->fs_api_buffer,
+						  (dma_addr_t)a->ppfs_api_buffer);
+
+				goto re_allocate_buffer;
+			}
+		} else {
+re_allocate_buffer:
+			a->fs_api_buffer_size = length;
+
+			a->fs_api_buffer = (u8 *)dma_alloc_coherent(
+				&a->pcid->dev,
+				(size_t)a->fs_api_buffer_size,
+				(dma_addr_t *)&a->ppfs_api_buffer,
+				GFP_KERNEL);
+		}
+	}
+
+	if (!a->fs_api_buffer)
+		return -ENOMEM;
+
+	if (off > a->fs_api_buffer_size)
+		return 0;
+
+	if (count + off > a->fs_api_buffer_size)
+		count = a->fs_api_buffer_size - off;
+
+	if (count < 1)
+		return 0;
+
+	memcpy(a->fs_api_buffer + off, buf, count);
+
+	return count;
+}

+ 254 - 0
drivers/scsi/esas2r/esas2r_log.c

@@ -0,0 +1,254 @@
+/*
+ *  linux/drivers/scsi/esas2r/esas2r_log.c
+ *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ *  Copyright (c) 2001-2013 ATTO Technology, Inc.
+ *  (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+/*
+ * this module within the driver is tasked with providing logging functionality.
+ * the event_log_level module parameter controls the level of messages that are
+ * written to the system log.  the default level of messages that are written
+ * are critical and warning messages.  if other types of messages are desired,
+ * one simply needs to load the module with the correct value for the
+ * event_log_level module parameter.  for example:
+ *
+ * insmod <module> event_log_level=1
+ *
+ * will load the module and only critical events will be written by this module
+ * to the system log.  if critical, warning, and information-level messages are
+ * desired, the correct value for the event_log_level module parameter
+ * would be as follows:
+ *
+ * insmod <module> event_log_level=3
+ */
+
+#define EVENT_LOG_BUFF_SIZE 1024
+
+static long event_log_level = ESAS2R_LOG_DFLT;
+
+module_param(event_log_level, long, S_IRUGO);
+MODULE_PARM_DESC(event_log_level,
+		 "Specifies the level of events to report to the system log.  Critical and warning level events are logged by default.");
+
+/* A shared buffer to use for formatting messages. */
+static char event_buffer[EVENT_LOG_BUFF_SIZE];
+
+/* A lock to protect the shared buffer used for formatting messages. */
+static DEFINE_SPINLOCK(event_buffer_lock);
+
+/**
+ * translates an esas2r-defined logging event level to a kernel logging level.
+ *
+ * @param [in] level the esas2r-defined logging event level to translate
+ *
+ * @return the corresponding kernel logging level.
+ */
+static const char *translate_esas2r_event_level_to_kernel(const long level)
+{
+	switch (level) {
+	case ESAS2R_LOG_CRIT:
+		return KERN_CRIT;
+
+	case ESAS2R_LOG_WARN:
+		return KERN_WARNING;
+
+	case ESAS2R_LOG_INFO:
+		return KERN_INFO;
+
+	case ESAS2R_LOG_DEBG:
+	case ESAS2R_LOG_TRCE:
+	default:
+		return KERN_DEBUG;
+	}
+}
+
+/**
+ * the master logging function.  this function will format the message as
+ * outlined by the formatting string, the input device information and the
+ * substitution arguments and output the resulting string to the system log.
+ *
+ * @param [in] level  the event log level of the message
+ * @param [in] dev    the device information
+ * @param [in] format the formatting string for the message
+ * @param [in] args   the substitution arguments to the formatting string
+ *
+ * @return 0 on success, or -1 if an error occurred.
+ */
+static int esas2r_log_master(const long level,
+			     const struct device *dev,
+			     const char *format,
+			     va_list args)
+{
+	if (level <= event_log_level) {
+		unsigned long flags = 0;
+		int retval = 0;
+		char *buffer = event_buffer;
+		size_t buflen = EVENT_LOG_BUFF_SIZE;
+		const char *fmt_nodev = "%s%s: ";
+		const char *fmt_dev = "%s%s [%s, %s, %s]";
+		const char *slevel =
+			translate_esas2r_event_level_to_kernel(level);
+
+		spin_lock_irqsave(&event_buffer_lock, flags);
+
+		if (buffer == NULL) {
+			spin_unlock_irqrestore(&event_buffer_lock, flags);
+			return -1;
+		}
+
+		memset(buffer, 0, buflen);
+
+		/*
+		 * format the level onto the beginning of the string and do
+		 * some pointer arithmetic to move the pointer to the point
+		 * where the actual message can be inserted.
+		 */
+
+		if (dev == NULL) {
+			snprintf(buffer, buflen, fmt_nodev, slevel,
+				 ESAS2R_DRVR_NAME);
+		} else {
+			snprintf(buffer, buflen, fmt_dev, slevel,
+				 ESAS2R_DRVR_NAME,
+				 (dev->driver ? dev->driver->name : "unknown"),
+				 (dev->bus ? dev->bus->name : "unknown"),
+				 dev_name(dev));
+		}
+
+		buffer += strlen(event_buffer);
+		buflen -= strlen(event_buffer);
+
+		retval = vsnprintf(buffer, buflen, format, args);
+		if (retval < 0) {
+			spin_unlock_irqrestore(&event_buffer_lock, flags);
+			return -1;
+		}
+
+		/*
+		 * Put a line break at the end of the formatted string so that
+		 * we don't wind up with run-on messages.  only append if there
+		 * is enough space in the buffer.
+		 */
+		if (strlen(event_buffer) < buflen)
+			strcat(buffer, "\n");
+
+		printk("%s", event_buffer);
+
+		spin_unlock_irqrestore(&event_buffer_lock, flags);
+	}
+
+	return 0;
+}
+
+/**
+ * formats and logs a message to the system log.
+ *
+ * @param [in] level  the event level of the message
+ * @param [in] format the formatting string for the message
+ * @param [in] ...    the substitution arguments to the formatting string
+ *
+ * @return 0 on success, or -1 if an error occurred.
+ */
+int esas2r_log(const long level, const char *format, ...)
+{
+	int retval = 0;
+	va_list args;
+
+	va_start(args, format);
+
+	retval = esas2r_log_master(level, NULL, format, args);
+
+	va_end(args);
+
+	return retval;
+}
+
+/**
+ * formats and logs a message to the system log.  this message will include
+ * device information.
+ *
+ * @param [in] level   the event level of the message
+ * @param [in] dev     the device information
+ * @param [in] format  the formatting string for the message
+ * @param [in] ...     the substitution arguments to the formatting string
+ *
+ * @return 0 on success, or -1 if an error occurred.
+ */
+int esas2r_log_dev(const long level,
+		   const struct device *dev,
+		   const char *format,
+		   ...)
+{
+	int retval = 0;
+	va_list args;
+
+	va_start(args, format);
+
+	retval = esas2r_log_master(level, dev, format, args);
+
+	va_end(args);
+
+	return retval;
+}
+
+/**
+ * dumps a buffer to the system log in hexadecimal form.
+ *
+ * @param [in] level   the event level of the message
+ * @param [in] buf     the buffer to dump
+ * @param [in] len     the length of the buffer, in bytes
+ *
+ * @return 1 (always).
+ */
+int esas2r_log_hexdump(const long level,
+		       const void *buf,
+		       size_t len)
+{
+	if (level <= event_log_level) {
+		print_hex_dump(translate_esas2r_event_level_to_kernel(level),
+			       "", DUMP_PREFIX_OFFSET, 16, 1, buf,
+			       len, true);
+	}
+
+	return 1;
+}

+ 118 - 0
drivers/scsi/esas2r/esas2r_log.h

@@ -0,0 +1,118 @@
+/*
+ *  linux/drivers/scsi/esas2r/esas2r_log.h
+ *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ *  Copyright (c) 2001-2013 ATTO Technology, Inc.
+ *  (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA.
+ */
+
+#ifndef __esas2r_log_h__
+#define __esas2r_log_h__
+
+struct device;
+
+enum {
+	ESAS2R_LOG_NONE = 0,    /* no events logged */
+	ESAS2R_LOG_CRIT = 1,    /* critical events  */
+	ESAS2R_LOG_WARN = 2,    /* warning events   */
+	ESAS2R_LOG_INFO = 3,    /* info events      */
+	ESAS2R_LOG_DEBG = 4,    /* debugging events */
+	ESAS2R_LOG_TRCE = 5,    /* tracing events   */
+
+#ifdef ESAS2R_TRACE
+	ESAS2R_LOG_DFLT = ESAS2R_LOG_TRCE
+#else
+	ESAS2R_LOG_DFLT = ESAS2R_LOG_WARN
+#endif
+};
+
+int esas2r_log(const long level, const char *format, ...);
+int esas2r_log_dev(const long level,
+		   const struct device *dev,
+		   const char *format,
+		   ...);
+int esas2r_log_hexdump(const long level,
+		       const void *buf,
+		       size_t len);
+
+/*
+ * the following macros are provided specifically for debugging and tracing
+ * messages.  esas2r_debug() is provided for generic non-hardware layer
+ * debugging and tracing events.  esas2r_hdebug is provided specifically for
+ * hardware layer debugging and tracing events.
+ */
+
+#ifdef ESAS2R_DEBUG
+#define esas2r_debug(f, args ...) esas2r_log(ESAS2R_LOG_DEBG, f, ## args)
+#define esas2r_hdebug(f, args ...) esas2r_log(ESAS2R_LOG_DEBG, f, ## args)
+#else
+#define esas2r_debug(f, args ...)
+#define esas2r_hdebug(f, args ...)
+#endif  /* ESAS2R_DEBUG */
+
+/*
+ * the following macros are provided in order to trace the driver and catch
+ * some more serious bugs.  be warned, enabling these macros may *severely*
+ * impact performance.
+ */
+
+#ifdef ESAS2R_TRACE
+#define esas2r_bugon() \
+	do { \
+		esas2r_log(ESAS2R_LOG_TRCE, "esas2r_bugon() called in %s:%d" \
+			   " - dumping stack and stopping kernel", __func__, \
+			   __LINE__); \
+		dump_stack(); \
+		BUG(); \
+	} while (0)
+
+#define esas2r_trace_enter() esas2r_log(ESAS2R_LOG_TRCE, "entered %s (%s:%d)", \
+					__func__, __FILE__, __LINE__)
+#define esas2r_trace_exit() esas2r_log(ESAS2R_LOG_TRCE, "exited %s (%s:%d)", \
+				       __func__, __FILE__, __LINE__)
+#define esas2r_trace(f, args ...) esas2r_log(ESAS2R_LOG_TRCE, "(%s:%s:%d): " \
+					     f, __func__, __FILE__, __LINE__, \
+					     ## args)
+#else
+#define esas2r_bugon()
+#define esas2r_trace_enter()
+#define esas2r_trace_exit()
+#define esas2r_trace(f, args ...)
+#endif  /* ESAS2R_TRACE */
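+
+/*
+ * typical usage of the tracing macros (illustrative):
+ *
+ *	void example(struct esas2r_adapter *a)
+ *	{
+ *		esas2r_trace_enter();
+ *		esas2r_trace("adapter index %d", a->index);
+ *		esas2r_trace_exit();
+ *	}
+ *
+ * when ESAS2R_TRACE is not defined, all of these expand to nothing and
+ * cost nothing at runtime.
+ */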
+
+#endif  /* __esas2r_log_h__ */

+ 2032 - 0
drivers/scsi/esas2r/esas2r_main.c

@@ -0,0 +1,2032 @@
+/*
+ *  linux/drivers/scsi/esas2r/esas2r_main.c
+ *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ *  Copyright (c) 2001-2013 ATTO Technology, Inc.
+ *  (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+MODULE_DESCRIPTION(ESAS2R_DRVR_NAME ": " ESAS2R_LONGNAME " driver");
+MODULE_AUTHOR("ATTO Technology, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ESAS2R_VERSION_STR);
+
+/* global definitions */
+
+static int found_adapters;
+struct esas2r_adapter *esas2r_adapters[MAX_ADAPTERS];
+
+#define ESAS2R_VDA_EVENT_PORT1       54414
+#define ESAS2R_VDA_EVENT_PORT2       54415
+#define ESAS2R_VDA_EVENT_SOCK_COUNT  2
+
+static struct esas2r_adapter *esas2r_adapter_from_kobj(struct kobject *kobj)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct Scsi_Host *host = class_to_shost(dev);
+
+	return (struct esas2r_adapter *)host->hostdata;
+}
+
+static ssize_t read_fw(struct file *file, struct kobject *kobj,
+		       struct bin_attribute *attr,
+		       char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+	return esas2r_read_fw(a, buf, off, count);
+}
+
+static ssize_t write_fw(struct file *file, struct kobject *kobj,
+			struct bin_attribute *attr,
+			char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+	return esas2r_write_fw(a, buf, off, count);
+}
+
+static ssize_t read_fs(struct file *file, struct kobject *kobj,
+		       struct bin_attribute *attr,
+		       char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+	return esas2r_read_fs(a, buf, off, count);
+}
+
+static ssize_t write_fs(struct file *file, struct kobject *kobj,
+			struct bin_attribute *attr,
+			char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+	int length = min(sizeof(struct esas2r_ioctl_fs), count);
+	int result = 0;
+
+	result = esas2r_write_fs(a, buf, off, count);
+
+	if (result < 0)
+		result = 0;
+
+	return length;
+}
+
+static ssize_t read_vda(struct file *file, struct kobject *kobj,
+			struct bin_attribute *attr,
+			char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+	return esas2r_read_vda(a, buf, off, count);
+}
+
+static ssize_t write_vda(struct file *file, struct kobject *kobj,
+			 struct bin_attribute *attr,
+			 char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+	return esas2r_write_vda(a, buf, off, count);
+}
+
+static ssize_t read_live_nvram(struct file *file, struct kobject *kobj,
+			       struct bin_attribute *attr,
+			       char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+	int length = min_t(size_t, sizeof(struct esas2r_sas_nvram), PAGE_SIZE);
+
+	memcpy(buf, a->nvram, length);
+	return length;
+}
+
+static ssize_t write_live_nvram(struct file *file, struct kobject *kobj,
+				struct bin_attribute *attr,
+				char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+	struct esas2r_request *rq;
+	int result = -EFAULT;
+
+	rq = esas2r_alloc_request(a);
+	if (rq == NULL)
+		return -ENOMEM;
+
+	if (esas2r_write_params(a, rq, (struct esas2r_sas_nvram *)buf))
+		result = count;
+
+	esas2r_free_request(a, rq);
+
+	return result;
+}
+
+static ssize_t read_default_nvram(struct file *file, struct kobject *kobj,
+				  struct bin_attribute *attr,
+				  char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+	esas2r_nvram_get_defaults(a, (struct esas2r_sas_nvram *)buf);
+
+	return sizeof(struct esas2r_sas_nvram);
+}
+
+static ssize_t read_hw(struct file *file, struct kobject *kobj,
+		       struct bin_attribute *attr,
+		       char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+	int length = min_t(size_t, sizeof(struct atto_ioctl), PAGE_SIZE);
+
+	if (!a->local_atto_ioctl)
+		return -ENOMEM;
+
+	if (handle_hba_ioctl(a, a->local_atto_ioctl) != IOCTL_SUCCESS)
+		return -ENOMEM;
+
+	memcpy(buf, a->local_atto_ioctl, length);
+
+	return length;
+}
+
+static ssize_t write_hw(struct file *file, struct kobject *kobj,
+			struct bin_attribute *attr,
+			char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+	int length = min(sizeof(struct atto_ioctl), count);
+
+	if (!a->local_atto_ioctl) {
+		a->local_atto_ioctl = kzalloc(sizeof(struct atto_ioctl),
+					      GFP_KERNEL);
+		if (a->local_atto_ioctl == NULL) {
+			esas2r_log(ESAS2R_LOG_WARN,
+				   "write_hw kzalloc failed for %zu bytes",
+				   sizeof(struct atto_ioctl));
+			return -ENOMEM;
+		}
+	}
+
+	memset(a->local_atto_ioctl, 0, sizeof(struct atto_ioctl));
+	memcpy(a->local_atto_ioctl, buf, length);
+
+	return length;
+}
+
+#define ESAS2R_RW_BIN_ATTR(_name) \
+	struct bin_attribute bin_attr_ ## _name = { \
+		.attr	= \
+		{ .name = __stringify(_name), .mode  = S_IRUSR | S_IWUSR }, \
+		.size	= 0, \
+		.read	= read_ ## _name, \
+		.write	= write_ ## _name }
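+
+/*
+ * As an illustration, ESAS2R_RW_BIN_ATTR(fw) expands to:
+ *
+ *	struct bin_attribute bin_attr_fw = {
+ *		.attr	= { .name = "fw", .mode = S_IRUSR | S_IWUSR },
+ *		.size	= 0,
+ *		.read	= read_fw,
+ *		.write	= write_fw
+ *	};
+ *
+ * The instances below are registered against the SCSI host's device in
+ * esas2r_probe(), so they appear as binary files under the host's sysfs
+ * directory.
+ */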
+
+ESAS2R_RW_BIN_ATTR(fw);
+ESAS2R_RW_BIN_ATTR(fs);
+ESAS2R_RW_BIN_ATTR(vda);
+ESAS2R_RW_BIN_ATTR(hw);
+ESAS2R_RW_BIN_ATTR(live_nvram);
+
+struct bin_attribute bin_attr_default_nvram = {
+	.attr	= { .name = "default_nvram", .mode = S_IRUGO },
+	.size	= 0,
+	.read	= read_default_nvram,
+	.write	= NULL
+};
+
+static struct scsi_host_template driver_template = {
+	.module				= THIS_MODULE,
+	.show_info			= esas2r_show_info,
+	.name				= ESAS2R_LONGNAME,
+	.release			= esas2r_release,
+	.info				= esas2r_info,
+	.ioctl				= esas2r_ioctl,
+	.queuecommand			= esas2r_queuecommand,
+	.eh_abort_handler		= esas2r_eh_abort,
+	.eh_device_reset_handler	= esas2r_device_reset,
+	.eh_bus_reset_handler		= esas2r_bus_reset,
+	.eh_host_reset_handler		= esas2r_host_reset,
+	.eh_target_reset_handler	= esas2r_target_reset,
+	.can_queue			= 128,
+	.this_id			= -1,
+	.sg_tablesize			= SCSI_MAX_SG_SEGMENTS,
+	.cmd_per_lun			= ESAS2R_DEFAULT_CMD_PER_LUN,
+	.present			= 0,
+	.unchecked_isa_dma		= 0,
+	.use_clustering			= ENABLE_CLUSTERING,
+	.emulated			= 0,
+	.proc_name			= ESAS2R_DRVR_NAME,
+	.slave_configure		= esas2r_slave_configure,
+	.slave_alloc			= esas2r_slave_alloc,
+	.slave_destroy			= esas2r_slave_destroy,
+	.change_queue_depth		= esas2r_change_queue_depth,
+	.change_queue_type		= esas2r_change_queue_type,
+	.max_sectors			= 0xFFFF,
+};
+
+int sgl_page_size = 512;
+module_param(sgl_page_size, int, 0);
+MODULE_PARM_DESC(sgl_page_size,
+		 "Scatter/gather list (SGL) page size in number of S/G "
+		 "entries.  If your application is doing a lot of very large "
+		 "transfers, you may want to increase the SGL page size.  "
+		 "Default 512.");
+
+int num_sg_lists = 1024;
+module_param(num_sg_lists, int, 0);
+MODULE_PARM_DESC(num_sg_lists,
+		 "Number of scatter/gather lists.  Default 1024.");
+
+int sg_tablesize = SCSI_MAX_SG_SEGMENTS;
+module_param(sg_tablesize, int, 0);
+MODULE_PARM_DESC(sg_tablesize,
+		 "Maximum number of entries in a scatter/gather table.");
+
+int num_requests = 256;
+module_param(num_requests, int, 0);
+MODULE_PARM_DESC(num_requests,
+		 "Number of requests.  Default 256.");
+
+int num_ae_requests = 4;
+module_param(num_ae_requests, int, 0);
+MODULE_PARM_DESC(num_ae_requests,
+		 "Number of VDA asynchronous event requests.  Default 4.");
+
+int cmd_per_lun = ESAS2R_DEFAULT_CMD_PER_LUN;
+module_param(cmd_per_lun, int, 0);
+MODULE_PARM_DESC(cmd_per_lun,
+		 "Maximum number of commands per LUN.  Default "
+		 DEFINED_NUM_TO_STR(ESAS2R_DEFAULT_CMD_PER_LUN) ".");
+
+int can_queue = 128;
+module_param(can_queue, int, 0);
+MODULE_PARM_DESC(can_queue,
+		 "Maximum number of commands per adapter.  Default 128.");
+
+int esas2r_max_sectors = 0xFFFF;
+module_param(esas2r_max_sectors, int, 0);
+MODULE_PARM_DESC(esas2r_max_sectors,
+		 "Maximum number of disk sectors in a single data transfer.  "
+		 "Default 65535 (largest possible setting).");
+
+int interrupt_mode = 1;
+module_param(interrupt_mode, int, 0);
+MODULE_PARM_DESC(interrupt_mode,
+		 "Defines the interrupt mode to use.  0 for legacy, "
+		 "1 for MSI.  Default is MSI (1).");
+
+static struct pci_device_id
+	esas2r_pci_table[] = {
+	{ ATTO_VENDOR_ID, 0x0049,	  ATTO_VENDOR_ID, 0x0049,
+	  0,
+	  0, 0 },
+	{ ATTO_VENDOR_ID, 0x0049,	  ATTO_VENDOR_ID, 0x004A,
+	  0,
+	  0, 0 },
+	{ ATTO_VENDOR_ID, 0x0049,	  ATTO_VENDOR_ID, 0x004B,
+	  0,
+	  0, 0 },
+	{ ATTO_VENDOR_ID, 0x0049,	  ATTO_VENDOR_ID, 0x004C,
+	  0,
+	  0, 0 },
+	{ ATTO_VENDOR_ID, 0x0049,	  ATTO_VENDOR_ID, 0x004D,
+	  0,
+	  0, 0 },
+	{ ATTO_VENDOR_ID, 0x0049,	  ATTO_VENDOR_ID, 0x004E,
+	  0,
+	  0, 0 },
+	{ 0,		  0,		  0,		  0,
+	  0,
+	  0, 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, esas2r_pci_table);
+
+static int
+esas2r_probe(struct pci_dev *pcid, const struct pci_device_id *id);
+
+static void
+esas2r_remove(struct pci_dev *pcid);
+
+static struct pci_driver
+	esas2r_pci_driver = {
+	.name		= ESAS2R_DRVR_NAME,
+	.id_table	= esas2r_pci_table,
+	.probe		= esas2r_probe,
+	.remove		= esas2r_remove,
+	.suspend	= esas2r_suspend,
+	.resume		= esas2r_resume,
+};
+
+static int esas2r_probe(struct pci_dev *pcid,
+			const struct pci_device_id *id)
+{
+	struct Scsi_Host *host = NULL;
+	struct esas2r_adapter *a;
+	int err;
+
+	size_t host_alloc_size = sizeof(struct esas2r_adapter)
+				 + ((num_requests) +
+				    1) * sizeof(struct esas2r_request);
+
+	esas2r_log_dev(ESAS2R_LOG_DEBG, &(pcid->dev),
+		       "esas2r_probe() 0x%02x 0x%02x 0x%02x 0x%02x",
+		       pcid->vendor,
+		       pcid->device,
+		       pcid->subsystem_vendor,
+		       pcid->subsystem_device);
+
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
+		       "before pci_enable_device() "
+		       "enable_cnt: %d",
+		       pcid->enable_cnt.counter);
+
+	err = pci_enable_device(pcid);
+	if (err != 0) {
+		esas2r_log_dev(ESAS2R_LOG_CRIT, &(pcid->dev),
+			       "pci_enable_device() FAIL (%d)",
+			       err);
+		return -ENODEV;
+	}
+
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
+		       "pci_enable_device() OK");
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
+		       "after pci_enable_device() enable_cnt: %d",
+		       pcid->enable_cnt.counter);
+
+	host = scsi_host_alloc(&driver_template, host_alloc_size);
+	if (host == NULL) {
+		esas2r_log(ESAS2R_LOG_CRIT, "scsi_host_alloc() FAIL");
+		return -ENODEV;
+	}
+
+	memset(host->hostdata, 0, host_alloc_size);
+
+	a = (struct esas2r_adapter *)host->hostdata;
+
+	esas2r_log(ESAS2R_LOG_INFO, "scsi_host_alloc() OK host: %p", host);
+
+	/* override max LUN and max target id */
+
+	host->max_id = ESAS2R_MAX_ID + 1;
+	host->max_lun = 255;
+
+	/* we can handle 16-byte CDBs */
+
+	host->max_cmd_len = 16;
+
+	host->can_queue = can_queue;
+	host->cmd_per_lun = cmd_per_lun;
+	host->this_id = host->max_id + 1;
+	host->max_channel = 0;
+	host->unique_id = found_adapters;
+	host->sg_tablesize = sg_tablesize;
+	host->max_sectors = esas2r_max_sectors;
+
+	/* set to bus master for BIOSes that don't do it for us */
+
+	esas2r_log(ESAS2R_LOG_INFO, "pci_set_master() called");
+
+	pci_set_master(pcid);
+
+	if (!esas2r_init_adapter(host, pcid, found_adapters)) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "unable to initialize device at PCI bus %x:%x",
+			   pcid->bus->number,
+			   pcid->devfn);
+
+		esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
+			       "scsi_host_put() called");
+
+		scsi_host_put(host);
+
+		return -ENODEV;
+	}
+
+	esas2r_log(ESAS2R_LOG_INFO, "pci_set_drvdata(%p, %p) called", pcid,
+		   host->hostdata);
+
+	pci_set_drvdata(pcid, host);
+
+	esas2r_log(ESAS2R_LOG_INFO, "scsi_add_host() called");
+
+	err = scsi_add_host(host, &pcid->dev);
+
+	if (err) {
+		esas2r_log(ESAS2R_LOG_CRIT, "scsi_add_host returned %d", err);
+		esas2r_log_dev(ESAS2R_LOG_CRIT, &(host->shost_gendev),
+			       "scsi_add_host() FAIL");
+
+		esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
+			       "scsi_host_put() called");
+
+		scsi_host_put(host);
+
+		esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
+			       "pci_set_drvdata(%p, NULL) called",
+			       pcid);
+
+		pci_set_drvdata(pcid, NULL);
+
+		return -ENODEV;
+	}
+
+
+	esas2r_fw_event_on(a);
+
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
+		       "scsi_scan_host() called");
+
+	scsi_scan_host(host);
+
+	/* Add sysfs binary files */
+	if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fw))
+		esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+			       "Failed to create sysfs binary file: fw");
+	else
+		a->sysfs_fw_created = 1;
+
+	if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fs))
+		esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+			       "Failed to create sysfs binary file: fs");
+	else
+		a->sysfs_fs_created = 1;
+
+	if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_vda))
+		esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+			       "Failed to create sysfs binary file: vda");
+	else
+		a->sysfs_vda_created = 1;
+
+	if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_hw))
+		esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+			       "Failed to create sysfs binary file: hw");
+	else
+		a->sysfs_hw_created = 1;
+
+	if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_live_nvram))
+		esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+			       "Failed to create sysfs binary file: live_nvram");
+	else
+		a->sysfs_live_nvram_created = 1;
+
+	if (sysfs_create_bin_file(&host->shost_dev.kobj,
+				  &bin_attr_default_nvram))
+		esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+			       "Failed to create sysfs binary file: default_nvram");
+	else
+		a->sysfs_default_nvram_created = 1;
+
+	found_adapters++;
+
+	return 0;
+}
+
+static void esas2r_remove(struct pci_dev *pdev)
+{
+	struct Scsi_Host *host;
+	int index;
+
+	if (pdev == NULL) {
+		esas2r_log(ESAS2R_LOG_WARN, "esas2r_remove pdev==NULL");
+		return;
+	}
+
+	host = pci_get_drvdata(pdev);
+
+	if (host == NULL) {
+		/*
+		 * this can happen if pci_set_drvdata was already called
+		 * to clear the host pointer.  if this is the case, we
+		 * are okay; this channel has already been cleaned up.
+		 */
+
+		return;
+	}
+
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+		       "esas2r_remove(%p) called; "
+		       "host:%p", pdev,
+		       host);
+
+	index = esas2r_cleanup(host);
+
+	if (index < 0)
+		esas2r_log_dev(ESAS2R_LOG_WARN, &(pdev->dev),
+			       "unknown host in %s",
+			       __func__);
+
+	found_adapters--;
+
+	/* if this was the last adapter, clean up the rest of the driver */
+
+	if (found_adapters == 0)
+		esas2r_cleanup(NULL);
+}
+
+static int __init esas2r_init(void)
+{
+	int i;
+
+	esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__);
+
+	/* verify valid parameters */
+
+	if (can_queue < 1) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "warning: can_queue must be at least 1, value "
+			   "forced.");
+		can_queue = 1;
+	} else if (can_queue > 2048) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "warning: can_queue must be no larger than 2048, "
+			   "value forced.");
+		can_queue = 2048;
+	}
+
+	if (cmd_per_lun < 1) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "warning: cmd_per_lun must be at least 1, value "
+			   "forced.");
+		cmd_per_lun = 1;
+	} else if (cmd_per_lun > 2048) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "warning: cmd_per_lun must be no larger than "
+			   "2048, value forced.");
+		cmd_per_lun = 2048;
+	}
+
+	if (sg_tablesize < 32) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "warning: sg_tablesize must be at least 32, "
+			   "value forced.");
+		sg_tablesize = 32;
+	}
+
+	if (esas2r_max_sectors < 1) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "warning: esas2r_max_sectors must be at least "
+			   "1, value forced.");
+		esas2r_max_sectors = 1;
+	} else if (esas2r_max_sectors > 0xffff) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "warning: esas2r_max_sectors must be no larger "
+			   "than 0xffff, value forced.");
+		esas2r_max_sectors = 0xffff;
+	}
+
+	sgl_page_size &= ~(ESAS2R_SGL_ALIGN - 1);
+
+	if (sgl_page_size < SGL_PG_SZ_MIN)
+		sgl_page_size = SGL_PG_SZ_MIN;
+	else if (sgl_page_size > SGL_PG_SZ_MAX)
+		sgl_page_size = SGL_PG_SZ_MAX;
+
+	if (num_sg_lists < NUM_SGL_MIN)
+		num_sg_lists = NUM_SGL_MIN;
+	else if (num_sg_lists > NUM_SGL_MAX)
+		num_sg_lists = NUM_SGL_MAX;
+
+	if (num_requests < NUM_REQ_MIN)
+		num_requests = NUM_REQ_MIN;
+	else if (num_requests > NUM_REQ_MAX)
+		num_requests = NUM_REQ_MAX;
+
+	if (num_ae_requests < NUM_AE_MIN)
+		num_ae_requests = NUM_AE_MIN;
+	else if (num_ae_requests > NUM_AE_MAX)
+		num_ae_requests = NUM_AE_MAX;
+
+	/* set up other globals */
+
+	for (i = 0; i < MAX_ADAPTERS; i++)
+		esas2r_adapters[i] = NULL;
+
+	/* initialize */
+
+	driver_template.module = THIS_MODULE;
+
+	i = pci_register_driver(&esas2r_pci_driver);
+	if (i != 0) {
+		esas2r_log(ESAS2R_LOG_CRIT, "pci_register_driver FAILED (%d)",
+			   i);
+		return i;
+	}
+
+	esas2r_log(ESAS2R_LOG_INFO, "pci_register_driver() OK");
+
+	if (!found_adapters) {
+		pci_unregister_driver(&esas2r_pci_driver);
+		esas2r_cleanup(NULL);
+
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "driver will not be loaded because no ATTO "
+			   "%s devices were found",
+			   ESAS2R_DRVR_NAME);
+		return -ENODEV;
+	}
+
+	esas2r_log(ESAS2R_LOG_INFO, "found %d adapters", found_adapters);
+
+	return 0;
+}
+
+/* Handle ioctl calls to "/proc/scsi/esas2r/ATTOnode" */
+static const struct file_operations esas2r_proc_fops = {
+	.compat_ioctl	= esas2r_proc_ioctl,
+	.unlocked_ioctl = esas2r_proc_ioctl,
+};
+
+static struct Scsi_Host *esas2r_proc_host;
+static int esas2r_proc_major;
+
+long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+	return esas2r_ioctl_handler(esas2r_proc_host->hostdata,
+				    (int)cmd, (void __user *)arg);
+}
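+
+/*
+ * A minimal sketch of driving this node from user space (illustrative;
+ * the structure layout comes from atioctl.h, and the signature must
+ * match or the handler returns -ENOTSUPP):
+ *
+ *	struct atto_express_ioctl req;
+ *	int fd = open("/proc/scsi/esas2r/ATTOnode", O_RDWR);
+ *
+ *	memset(&req, 0, sizeof(req));
+ *	memcpy(req.header.signature, EXPRESS_IOCTL_SIGNATURE,
+ *	       EXPRESS_IOCTL_SIGNATURE_SIZE);
+ *	req.header.channel = 0xFF;	// 0xFF selects the node's adapter
+ *	ioctl(fd, EXPRESS_IOCTL_GET_CHANNELS, &req);
+ */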
+
+static void __exit esas2r_exit(void)
+{
+	esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__);
+
+	if (esas2r_proc_major > 0) {
+		esas2r_log(ESAS2R_LOG_INFO, "unregister proc");
+
+		remove_proc_entry(ATTONODE_NAME,
+				  esas2r_proc_host->hostt->proc_dir);
+		unregister_chrdev(esas2r_proc_major, ESAS2R_DRVR_NAME);
+
+		esas2r_proc_major = 0;
+	}
+
+	esas2r_log(ESAS2R_LOG_INFO, "pci_unregister_driver() called");
+
+	pci_unregister_driver(&esas2r_pci_driver);
+}
+
+int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh)
+{
+	struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata;
+
+	struct esas2r_target *t;
+	int dev_count = 0;
+
+	esas2r_log(ESAS2R_LOG_DEBG, "esas2r_show_info (%p,%d)", m, sh->host_no);
+
+	seq_printf(m, ESAS2R_LONGNAME "\n"
+		   "Driver version: "ESAS2R_VERSION_STR "\n"
+		   "Flash version: %s\n"
+		   "Firmware version: %s\n"
+		   "Copyright "ESAS2R_COPYRIGHT_YEARS "\n"
+		   "http://www.attotech.com\n"
+		   "\n",
+		   a->flash_rev,
+		   a->fw_rev[0] ? a->fw_rev : "(none)");
+
+
+	seq_printf(m, "Adapter information:\n"
+		   "--------------------\n"
+		   "Model: %s\n"
+		   "SAS address: %02X%02X%02X%02X:%02X%02X%02X%02X\n",
+		   esas2r_get_model_name(a),
+		   a->nvram->sas_addr[0],
+		   a->nvram->sas_addr[1],
+		   a->nvram->sas_addr[2],
+		   a->nvram->sas_addr[3],
+		   a->nvram->sas_addr[4],
+		   a->nvram->sas_addr[5],
+		   a->nvram->sas_addr[6],
+		   a->nvram->sas_addr[7]);
+
+	seq_puts(m, "\n"
+		   "Discovered devices:\n"
+		   "\n"
+		   "   #  Target ID\n"
+		   "---------------\n");
+
+	for (t = a->targetdb; t < a->targetdb_end; t++)
+		if (t->buffered_target_state == TS_PRESENT) {
+			seq_printf(m, " %3d   %3d\n",
+				   ++dev_count,
+				   (u16)(uintptr_t)(t - a->targetdb));
+		}
+
+	if (dev_count == 0)
+		seq_puts(m, "none\n");
+
+	seq_puts(m, "\n");
+	return 0;
+}
+
+int esas2r_release(struct Scsi_Host *sh)
+{
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev),
+		       "esas2r_release() called");
+
+	esas2r_cleanup(sh);
+	if (sh->irq)
+		free_irq(sh->irq, NULL);
+	scsi_unregister(sh);
+	return 0;
+}
+
+const char *esas2r_info(struct Scsi_Host *sh)
+{
+	struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata;
+	static char esas2r_info_str[512];
+
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev),
+		       "esas2r_info() called");
+
+	/*
+	 * if we haven't done so already, register as a char driver
+	 * and stick a node under "/proc/scsi/esas2r/ATTOnode"
+	 */
+
+	if (esas2r_proc_major <= 0) {
+		esas2r_proc_host = sh;
+
+		esas2r_proc_major = register_chrdev(0, ESAS2R_DRVR_NAME,
+						    &esas2r_proc_fops);
+
+		esas2r_log_dev(ESAS2R_LOG_DEBG, &(sh->shost_gendev),
+			       "register_chrdev (major %d)",
+			       esas2r_proc_major);
+
+		if (esas2r_proc_major > 0) {
+			struct proc_dir_entry *pde;
+
+			pde = proc_create(ATTONODE_NAME, 0,
+					  sh->hostt->proc_dir,
+					  &esas2r_proc_fops);
+
+			if (!pde) {
+				esas2r_log_dev(ESAS2R_LOG_WARN,
+					       &(sh->shost_gendev),
+					       "failed to create_proc_entry");
+				esas2r_proc_major = -1;
+			}
+		}
+	}
+
+	sprintf(esas2r_info_str,
+		ESAS2R_LONGNAME " (bus 0x%02X, device 0x%02X, IRQ 0x%02X)"
+		" driver version: "ESAS2R_VERSION_STR "  firmware version: "
+		"%s\n",
+		a->pcid->bus->number, a->pcid->devfn, a->pcid->irq,
+		a->fw_rev[0] ? a->fw_rev : "(none)");
+
+	return esas2r_info_str;
+}
+
+/* Callback for building a request scatter/gather list */
+static u32 get_physaddr_from_sgc(struct esas2r_sg_context *sgc, u64 *addr)
+{
+	u32 len;
+
+	if (likely(sgc->cur_offset == sgc->exp_offset)) {
+		/*
+		 * the normal case: caller used all bytes from previous call, so
+		 * expected offset is the same as the current offset.
+		 */
+
+		if (sgc->sgel_count < sgc->num_sgel) {
+			/* retrieve next segment, except for first time */
+			if (sgc->exp_offset > (u8 *)0) {
+				/* advance current segment */
+				sgc->cur_sgel = sg_next(sgc->cur_sgel);
+				++(sgc->sgel_count);
+			}
+
+			len = sg_dma_len(sgc->cur_sgel);
+			(*addr) = sg_dma_address(sgc->cur_sgel);
+
+			/* save the total # bytes returned to caller so far */
+			sgc->exp_offset += len;
+
+		} else {
+			len = 0;
+		}
+	} else if (sgc->cur_offset < sgc->exp_offset) {
+		/*
+		 * caller did not use all bytes from previous call. need to
+		 * compute the address based on current segment.
+		 */
+
+		len = sg_dma_len(sgc->cur_sgel);
+		(*addr) = sg_dma_address(sgc->cur_sgel);
+
+		sgc->exp_offset -= len;
+
+		/* calculate PA based on prev segment address and offsets */
+		*addr = *addr +
+			(sgc->cur_offset - sgc->exp_offset);
+
+		sgc->exp_offset += len;
+
+		/* re-calculate length based on offset */
+		len = lower_32_bits(
+			sgc->exp_offset - sgc->cur_offset);
+	} else {   /* if ( sgc->cur_offset > sgc->exp_offset ) */
+		   /*
+		    * we don't expect the caller to skip ahead.
+		    * cur_offset will never exceed the len we return
+		    */
+		len = 0;
+	}
+
+	return len;
+}
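
The callback above hands back one (dma address, length) pair per call, tracking its position with cur_offset/exp_offset. For reference, the same scatterlist walk in its plain form; a sketch, assuming the list was mapped beforehand (as scsi_dma_map() does in esas2r_queuecommand() below):

#include <linux/printk.h>
#include <linux/scatterlist.h>

/* Sketch: enumerate (dma address, length) pairs from a mapped list. */
static void walk_mapped_sglist(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 len = sg_dma_len(sg);

		pr_debug("sge %d: addr 0x%llx len %u\n",
			 i, (unsigned long long)addr, len);
	}
}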
+
+int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+	struct esas2r_adapter *a =
+		(struct esas2r_adapter *)cmd->device->host->hostdata;
+	struct esas2r_request *rq;
+	struct esas2r_sg_context sgc;
+	unsigned bufflen;
+
+	/* Assume success; if it fails, we will fix the result later. */
+	cmd->result = DID_OK << 16;
+
+	if (unlikely(a->flags & AF_DEGRADED_MODE)) {
+		cmd->result = DID_NO_CONNECT << 16;
+		cmd->scsi_done(cmd);
+		return 0;
+	}
+
+	rq = esas2r_alloc_request(a);
+	if (unlikely(rq == NULL)) {
+		esas2r_debug("esas2r_alloc_request failed");
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	rq->cmd = cmd;
+	bufflen = scsi_bufflen(cmd);
+
+	if (likely(bufflen != 0)) {
+		if (cmd->sc_data_direction == DMA_TO_DEVICE)
+			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
+		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
+	}
+
+	memcpy(rq->vrq->scsi.cdb, cmd->cmnd, cmd->cmd_len);
+	rq->vrq->scsi.length = cpu_to_le32(bufflen);
+	rq->target_id = cmd->device->id;
+	rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
+	rq->sense_buf = cmd->sense_buffer;
+	rq->sense_len = SCSI_SENSE_BUFFERSIZE;
+
+	esas2r_sgc_init(&sgc, a, rq, NULL);
+
+	sgc.length = bufflen;
+	sgc.cur_offset = NULL;
+
+	sgc.cur_sgel = scsi_sglist(cmd);
+	sgc.exp_offset = NULL;
+	sgc.num_sgel = scsi_dma_map(cmd);
+	sgc.sgel_count = 0;
+
+	if (unlikely(sgc.num_sgel < 0)) {
+		esas2r_free_request(a, rq);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_from_sgc;
+
+	if (unlikely(!esas2r_build_sg_list(a, rq, &sgc))) {
+		scsi_dma_unmap(cmd);
+		esas2r_free_request(a, rq);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	esas2r_debug("start request %p to %d:%d\n", rq, (int)cmd->device->id,
+		     (int)cmd->device->lun);
+
+	esas2r_start_request(a, rq);
+
+	return 0;
+}
+
+static void complete_task_management_request(struct esas2r_adapter *a,
+					     struct esas2r_request *rq)
+{
+	(*rq->task_management_status_ptr) = rq->req_stat;
+	esas2r_free_request(a, rq);
+}
+
+/**
+ * Searches the specified queue for the command to abort.
+ *
+ * @param [in] a                  the adapter whose queue is searched
+ * @param [in,out] abort_request  receives an abort request if one must
+ *                                be sent to the firmware
+ * @param [in] cmd                the SCSI command to abort
+ * @param [in] queue              the queue to search
+ *
+ * @return 0 on failure, 1 if command was not found, 2 if command was found
+ */
+static int esas2r_check_active_queue(struct esas2r_adapter *a,
+				     struct esas2r_request **abort_request,
+				     struct scsi_cmnd *cmd,
+				     struct list_head *queue)
+{
+	bool found = false;
+	struct esas2r_request *ar = *abort_request;
+	struct esas2r_request *rq;
+	struct list_head *element, *next;
+
+	list_for_each_safe(element, next, queue) {
+
+		rq = list_entry(element, struct esas2r_request, req_list);
+
+		if (rq->cmd == cmd) {
+
+			/* Found the request.  See what to do with it. */
+			if (queue == &a->active_list) {
+				/*
+				 * We are searching the active queue, which
+				 * means that we need to send an abort request
+				 * to the firmware.
+				 */
+				ar = esas2r_alloc_request(a);
+				if (ar == NULL) {
+					esas2r_log_dev(ESAS2R_LOG_WARN,
+						       &(a->host->shost_gendev),
+						       "unable to allocate an abort request for cmd %p",
+						       cmd);
+					return 0; /* Failure */
+				}
+
+				/*
+				 * Task management request must be formatted
+				 * with a lock held.
+				 */
+				ar->sense_len = 0;
+				ar->vrq->scsi.length = 0;
+				ar->target_id = rq->target_id;
+				ar->vrq->scsi.flags |= cpu_to_le32(
+					(u8)le32_to_cpu(rq->vrq->scsi.flags));
+
+				memset(ar->vrq->scsi.cdb, 0,
+				       sizeof(ar->vrq->scsi.cdb));
+
+				ar->vrq->scsi.flags |= cpu_to_le32(
+					FCP_CMND_TRM);
+				ar->vrq->scsi.u.abort_handle =
+					rq->vrq->scsi.handle;
+			} else {
+				/*
+				 * The request is pending but not active on
+				 * the firmware.  Just free it now and we'll
+				 * report the successful abort below.
+				 */
+				list_del_init(&rq->req_list);
+				esas2r_free_request(a, rq);
+			}
+
+			found = true;
+			break;
+		}
+
+	}
+
+	if (!found)
+		return 1;       /* Not found */
+
+	return 2;               /* found */
+}
+
+int esas2r_eh_abort(struct scsi_cmnd *cmd)
+{
+	struct esas2r_adapter *a =
+		(struct esas2r_adapter *)cmd->device->host->hostdata;
+	struct esas2r_request *abort_request = NULL;
+	unsigned long flags;
+	struct list_head *queue;
+	int result;
+
+	esas2r_log(ESAS2R_LOG_INFO, "eh_abort (%p)", cmd);
+
+	if (a->flags & AF_DEGRADED_MODE) {
+		cmd->result = DID_ABORT << 16;
+
+		scsi_set_resid(cmd, 0);
+
+		cmd->scsi_done(cmd);
+
+		return 0;
+	}
+
+	spin_lock_irqsave(&a->queue_lock, flags);
+
+	/*
+	 * Run through the defer and active queues looking for the request
+	 * to abort.
+	 */
+
+	queue = &a->defer_list;
+
+check_active_queue:
+
+	result = esas2r_check_active_queue(a, &abort_request, cmd, queue);
+
+	if (!result) {
+		spin_unlock_irqrestore(&a->queue_lock, flags);
+		return FAILED;
+	} else if (result == 2 && (queue == &a->defer_list)) {
+		queue = &a->active_list;
+		goto check_active_queue;
+	}
+
+	spin_unlock_irqrestore(&a->queue_lock, flags);
+
+	if (abort_request) {
+		u8 task_management_status = RS_PENDING;
+
+		/*
+		 * the request is already active, so we need to tell
+		 * the firmware to abort it and wait for the response.
+		 */
+
+		abort_request->comp_cb = complete_task_management_request;
+		abort_request->task_management_status_ptr =
+			&task_management_status;
+
+		esas2r_start_request(a, abort_request);
+
+		if (atomic_read(&a->disable_cnt) == 0)
+			esas2r_do_deferred_processes(a);
+
+		while (task_management_status == RS_PENDING)
+			msleep(10);
+
+		/*
+		 * Once we get here, the original request will have been
+		 * completed by the firmware and the abort request will have
+		 * been cleaned up.  We're done!
+		 */
+
+		return SUCCESS;
+	}
+
+	/*
+	 * If we get here, either we found the inactive request and
+	 * freed it, or we didn't find it at all.  Either way, success!
+	 */
+
+	cmd->result = DID_ABORT << 16;
+
+	scsi_set_resid(cmd, 0);
+
+	cmd->scsi_done(cmd);
+
+	return SUCCESS;
+}
+
+static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset)
+{
+	struct esas2r_adapter *a =
+		(struct esas2r_adapter *)cmd->device->host->hostdata;
+
+	if (a->flags & AF_DEGRADED_MODE)
+		return FAILED;
+
+	if (host_reset)
+		esas2r_reset_adapter(a);
+	else
+		esas2r_reset_bus(a);
+
+	/* The above call sets the AF_OS_RESET flag; wait for it to clear. */
+
+	while (a->flags & AF_OS_RESET) {
+		msleep(10);
+
+		if (a->flags & AF_DEGRADED_MODE)
+			return FAILED;
+	}
+
+	if (a->flags & AF_DEGRADED_MODE)
+		return FAILED;
+
+	return SUCCESS;
+}
+
+int esas2r_host_reset(struct scsi_cmnd *cmd)
+{
+	esas2r_log(ESAS2R_LOG_INFO, "host_reset (%p)", cmd);
+
+	return esas2r_host_bus_reset(cmd, true);
+}
+
+int esas2r_bus_reset(struct scsi_cmnd *cmd)
+{
+	esas2r_log(ESAS2R_LOG_INFO, "bus_reset (%p)", cmd);
+
+	return esas2r_host_bus_reset(cmd, false);
+}
+
+static int esas2r_dev_targ_reset(struct scsi_cmnd *cmd, bool target_reset)
+{
+	struct esas2r_adapter *a =
+		(struct esas2r_adapter *)cmd->device->host->hostdata;
+	struct esas2r_request *rq;
+	u8 task_management_status = RS_PENDING;
+	bool completed;
+
+	if (a->flags & AF_DEGRADED_MODE)
+		return FAILED;
+
+retry:
+	rq = esas2r_alloc_request(a);
+	if (rq == NULL) {
+		if (target_reset) {
+			esas2r_log(ESAS2R_LOG_CRIT,
+				   "unable to allocate a request for a "
+				   "target reset (%d)!",
+				   cmd->device->id);
+		} else {
+			esas2r_log(ESAS2R_LOG_CRIT,
+				   "unable to allocate a request for a "
+				   "device reset (%d:%d)!",
+				   cmd->device->id,
+				   cmd->device->lun);
+		}
+
+		return FAILED;
+	}
+
+	rq->target_id = cmd->device->id;
+	rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
+	rq->req_stat = RS_PENDING;
+
+	rq->comp_cb = complete_task_management_request;
+	rq->task_management_status_ptr = &task_management_status;
+
+	if (target_reset) {
+		esas2r_debug("issuing target reset (%p) to id %d", rq,
+			     cmd->device->id);
+		completed = esas2r_send_task_mgmt(a, rq, 0x20);
+	} else {
+		esas2r_debug("issuing device reset (%p) to id %d lun %d", rq,
+			     cmd->device->id, cmd->device->lun);
+		completed = esas2r_send_task_mgmt(a, rq, 0x10);
+	}
+
+	if (completed) {
+		/* Task management cmd completed right away, need to free it. */
+
+		esas2r_free_request(a, rq);
+	} else {
+		/*
+		 * Wait for firmware to complete the request.  Completion
+		 * callback will free it.
+		 */
+		while (task_management_status == RS_PENDING)
+			msleep(10);
+	}
+
+	if (a->flags & AF_DEGRADED_MODE)
+		return FAILED;
+
+	if (task_management_status == RS_BUSY) {
+		/*
+		 * Busy, probably because we are flashing.  Wait a bit and
+		 * try again.
+		 */
+		msleep(100);
+		goto retry;
+	}
+
+	return SUCCESS;
+}
+
+int esas2r_device_reset(struct scsi_cmnd *cmd)
+{
+	esas2r_log(ESAS2R_LOG_INFO, "device_reset (%p)", cmd);
+
+	return esas2r_dev_targ_reset(cmd, false);
+}
+
+int esas2r_target_reset(struct scsi_cmnd *cmd)
+{
+	esas2r_log(ESAS2R_LOG_INFO, "target_reset (%p)", cmd);
+
+	return esas2r_dev_targ_reset(cmd, true);
+}
+
+int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason)
+{
+	esas2r_log(ESAS2R_LOG_INFO, "change_queue_depth %p, %d", dev, depth);
+
+	scsi_adjust_queue_depth(dev, scsi_get_tag_type(dev), depth);
+
+	return dev->queue_depth;
+}
+
+int esas2r_change_queue_type(struct scsi_device *dev, int type)
+{
+	esas2r_log(ESAS2R_LOG_INFO, "change_queue_type %p, %d", dev, type);
+
+	if (dev->tagged_supported) {
+		scsi_set_tag_type(dev, type);
+
+		if (type)
+			scsi_activate_tcq(dev, dev->queue_depth);
+		else
+			scsi_deactivate_tcq(dev, dev->queue_depth);
+	} else {
+		type = 0;
+	}
+
+	return type;
+}
+
+int esas2r_slave_alloc(struct scsi_device *dev)
+{
+	return 0;
+}
+
+int esas2r_slave_configure(struct scsi_device *dev)
+{
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev),
+		       "esas2r_slave_configure()");
+
+	if (dev->tagged_supported) {
+		scsi_set_tag_type(dev, MSG_SIMPLE_TAG);
+		scsi_activate_tcq(dev, cmd_per_lun);
+	} else {
+		scsi_set_tag_type(dev, 0);
+		scsi_deactivate_tcq(dev, cmd_per_lun);
+	}
+
+	return 0;
+}
+
+void esas2r_slave_destroy(struct scsi_device *dev)
+{
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev),
+		       "esas2r_slave_destroy()");
+}
+
+void esas2r_log_request_failure(struct esas2r_adapter *a,
+				struct esas2r_request *rq)
+{
+	u8 reqstatus = rq->req_stat;
+
+	if (reqstatus == RS_SUCCESS)
+		return;
+
+	if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
+		if (reqstatus == RS_SCSI_ERROR) {
+			if (rq->func_rsp.scsi_rsp.sense_len >= 13) {
+				esas2r_log(ESAS2R_LOG_WARN,
+					   "request failure - SCSI error %x ASC:%x ASCQ:%x CDB:%x",
+					   rq->sense_buf[2], rq->sense_buf[12],
+					   rq->sense_buf[13],
+					   rq->vrq->scsi.cdb[0]);
+			} else {
+				esas2r_log(ESAS2R_LOG_WARN,
+					   "request failure - SCSI error CDB:%x\n",
+					   rq->vrq->scsi.cdb[0]);
+			}
+		} else if ((rq->vrq->scsi.cdb[0] != INQUIRY
+			    && rq->vrq->scsi.cdb[0] != REPORT_LUNS)
+			   || (reqstatus != RS_SEL
+			       && reqstatus != RS_SEL2)) {
+			if ((reqstatus == RS_UNDERRUN) &&
+			    (rq->vrq->scsi.cdb[0] == INQUIRY)) {
+				/* Don't log inquiry underruns */
+			} else {
+				esas2r_log(ESAS2R_LOG_WARN,
+					   "request failure - cdb:%x reqstatus:%d target:%d",
+					   rq->vrq->scsi.cdb[0], reqstatus,
+					   rq->target_id);
+			}
+		}
+	}
+}
+
+void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+	u32 starttime;
+	u32 timeout;
+
+	starttime = jiffies_to_msecs(jiffies);
+	timeout = rq->timeout ? rq->timeout : 5000;
+
+	while (true) {
+		esas2r_polled_interrupt(a);
+
+		if (rq->req_stat != RS_STARTED)
+			break;
+
+		schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+		if ((jiffies_to_msecs(jiffies) - starttime) > timeout) {
+			esas2r_hdebug("request TMO");
+			esas2r_bugon();
+
+			rq->req_stat = RS_TIMEOUT;
+
+			esas2r_local_reset_adapter(a);
+			return;
+		}
+	}
+}
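
A usage sketch (a fragment, with `a` and `rq` assumed in scope): issue a request and poll it to completion, e.g. on paths where interrupts are not yet available. On timeout the helper above marks the request RS_TIMEOUT and resets the adapter locally.

/* rq built by one of the esas2r_build_*_req() helpers */
esas2r_start_request(a, rq);
esas2r_wait_request(a, rq);

if (rq->req_stat != RS_SUCCESS)
	esas2r_log(ESAS2R_LOG_WARN, "polled request failed (%d)",
		   rq->req_stat);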
+
+u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo)
+{
+	u32 offset = addr_lo & (MW_DATA_WINDOW_SIZE - 1);
+	u32 base = addr_lo & -(signed int)MW_DATA_WINDOW_SIZE;
+
+	if (a->window_base != base) {
+		esas2r_write_register_dword(a, MVR_PCI_WIN1_REMAP,
+					    base | MVRPW1R_ENABLE);
+		esas2r_flush_register_dword(a, MVR_PCI_WIN1_REMAP);
+		a->window_base = base;
+	}
+
+	return offset;
+}
+
+/* Read a block of data from chip memory */
+bool esas2r_read_mem_block(struct esas2r_adapter *a,
+			   void *to,
+			   u32 from,
+			   u32 size)
+{
+	u8 *end = (u8 *)to;
+
+	while (size) {
+		u32 len;
+		u32 offset;
+		u32 iatvr;
+
+		iatvr = (from & -(signed int)MW_DATA_WINDOW_SIZE);
+
+		esas2r_map_data_window(a, iatvr);
+
+		offset = from & (MW_DATA_WINDOW_SIZE - 1);
+		len = size;
+
+		if (len > MW_DATA_WINDOW_SIZE - offset)
+			len = MW_DATA_WINDOW_SIZE - offset;
+
+		from += len;
+		size -= len;
+
+		while (len--) {
+			*end++ = esas2r_read_data_byte(a, offset);
+			offset++;
+		}
+	}
+
+	return true;
+}
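
Usage sketch: snapshot a small block of chip memory for debugging. The window remap above is handled internally, so the read may safely span a window boundary; the address here is an arbitrary example, not a documented location.

u8 snapshot[64];

if (esas2r_read_mem_block(a, snapshot, 0x10000 /* example addr */,
			  sizeof(snapshot)))
	print_hex_dump_bytes("esas2r: ", DUMP_PREFIX_OFFSET,
			     snapshot, sizeof(snapshot));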
+
+void esas2r_nuxi_mgt_data(u8 function, void *data)
+{
+	struct atto_vda_grp_info *g;
+	struct atto_vda_devinfo *d;
+	struct atto_vdapart_info *p;
+	struct atto_vda_dh_info *h;
+	struct atto_vda_metrics_info *m;
+	struct atto_vda_schedule_info *s;
+	struct atto_vda_buzzer_info *b;
+	u8 i;
+
+	switch (function) {
+	case VDAMGT_BUZZER_INFO:
+	case VDAMGT_BUZZER_SET:
+
+		b = (struct atto_vda_buzzer_info *)data;
+
+		b->duration = le32_to_cpu(b->duration);
+		break;
+
+	case VDAMGT_SCHEDULE_INFO:
+	case VDAMGT_SCHEDULE_EVENT:
+
+		s = (struct atto_vda_schedule_info *)data;
+
+		s->id = le32_to_cpu(s->id);
+
+		break;
+
+	case VDAMGT_DEV_INFO:
+	case VDAMGT_DEV_CLEAN:
+	case VDAMGT_DEV_PT_INFO:
+	case VDAMGT_DEV_FEATURES:
+	case VDAMGT_DEV_PT_FEATURES:
+	case VDAMGT_DEV_OPERATION:
+
+		d = (struct atto_vda_devinfo *)data;
+
+		d->capacity = le64_to_cpu(d->capacity);
+		d->block_size = le32_to_cpu(d->block_size);
+		d->ses_dev_index = le16_to_cpu(d->ses_dev_index);
+		d->target_id = le16_to_cpu(d->target_id);
+		d->lun = le16_to_cpu(d->lun);
+		d->features = le16_to_cpu(d->features);
+		break;
+
+	case VDAMGT_GRP_INFO:
+	case VDAMGT_GRP_CREATE:
+	case VDAMGT_GRP_DELETE:
+	case VDAMGT_ADD_STORAGE:
+	case VDAMGT_MEMBER_ADD:
+	case VDAMGT_GRP_COMMIT:
+	case VDAMGT_GRP_REBUILD:
+	case VDAMGT_GRP_COMMIT_INIT:
+	case VDAMGT_QUICK_RAID:
+	case VDAMGT_GRP_FEATURES:
+	case VDAMGT_GRP_COMMIT_INIT_AUTOMAP:
+	case VDAMGT_QUICK_RAID_INIT_AUTOMAP:
+	case VDAMGT_SPARE_LIST:
+	case VDAMGT_SPARE_ADD:
+	case VDAMGT_SPARE_REMOVE:
+	case VDAMGT_LOCAL_SPARE_ADD:
+	case VDAMGT_GRP_OPERATION:
+
+		g = (struct atto_vda_grp_info *)data;
+
+		g->capacity = le64_to_cpu(g->capacity);
+		g->block_size = le32_to_cpu(g->block_size);
+		g->interleave = le32_to_cpu(g->interleave);
+		g->features = le16_to_cpu(g->features);
+
+		for (i = 0; i < 32; i++)
+			g->members[i] = le16_to_cpu(g->members[i]);
+
+		break;
+
+	case VDAMGT_PART_INFO:
+	case VDAMGT_PART_MAP:
+	case VDAMGT_PART_UNMAP:
+	case VDAMGT_PART_AUTOMAP:
+	case VDAMGT_PART_SPLIT:
+	case VDAMGT_PART_MERGE:
+
+		p = (struct atto_vdapart_info *)data;
+
+		p->part_size = le64_to_cpu(p->part_size);
+		p->start_lba = le32_to_cpu(p->start_lba);
+		p->block_size = le32_to_cpu(p->block_size);
+		p->target_id = le16_to_cpu(p->target_id);
+		break;
+
+	case VDAMGT_DEV_HEALTH_REQ:
+
+		h = (struct atto_vda_dh_info *)data;
+
+		h->med_defect_cnt = le32_to_cpu(h->med_defect_cnt);
+		h->info_exc_cnt = le32_to_cpu(h->info_exc_cnt);
+		break;
+
+	case VDAMGT_DEV_METRICS:
+
+		m = (struct atto_vda_metrics_info *)data;
+
+		for (i = 0; i < 32; i++)
+			m->dev_indexes[i] = le16_to_cpu(m->dev_indexes[i]);
+
+		break;
+
+	default:
+		break;
+	}
+}
+
+void esas2r_nuxi_cfg_data(u8 function, void *data)
+{
+	struct atto_vda_cfg_init *ci;
+
+	switch (function) {
+	case VDA_CFG_INIT:
+	case VDA_CFG_GET_INIT:
+	case VDA_CFG_GET_INIT2:
+
+		ci = (struct atto_vda_cfg_init *)data;
+
+		ci->date_time.year = le16_to_cpu(ci->date_time.year);
+		ci->sgl_page_size = le32_to_cpu(ci->sgl_page_size);
+		ci->vda_version = le32_to_cpu(ci->vda_version);
+		ci->epoch_time = le32_to_cpu(ci->epoch_time);
+		ci->ioctl_tunnel = le32_to_cpu(ci->ioctl_tunnel);
+		ci->num_targets_backend = le32_to_cpu(ci->num_targets_backend);
+		break;
+
+	default:
+		break;
+	}
+}
+
+void esas2r_nuxi_ae_data(union atto_vda_ae *ae)
+{
+	struct atto_vda_ae_raid *r = &ae->raid;
+	struct atto_vda_ae_lu *l = &ae->lu;
+
+	switch (ae->hdr.bytype) {
+	case VDAAE_HDR_TYPE_RAID:
+
+		r->dwflags = le32_to_cpu(r->dwflags);
+		break;
+
+	case VDAAE_HDR_TYPE_LU:
+
+		l->dwevent = le32_to_cpu(l->dwevent);
+		l->wphys_target_id = le16_to_cpu(l->wphys_target_id);
+		l->id.tgtlun.wtarget_id = le16_to_cpu(l->id.tgtlun.wtarget_id);
+
+		if (l->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
+		    + sizeof(struct atto_vda_ae_lu_tgt_lun_raid)) {
+			l->id.tgtlun_raid.dwinterleave
+				= le32_to_cpu(l->id.tgtlun_raid.dwinterleave);
+			l->id.tgtlun_raid.dwblock_size
+				= le32_to_cpu(l->id.tgtlun_raid.dwblock_size);
+		}
+
+		break;
+
+	case VDAAE_HDR_TYPE_DISK:
+	default:
+		break;
+	}
+}
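
These "nuxi" helpers all apply the same in-place byte-order fixup: each little-endian field coming back from firmware is converted to CPU order exactly once, immediately after the response arrives. A minimal sketch with a hypothetical struct:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical firmware reply; the field holds a little-endian
 * value until the one-time fixup below runs. */
struct example_wire_info {
	u32 capacity;
};

static void nuxi_example(struct example_wire_info *e)
{
	/* convert in place; never convert the same field twice */
	e->capacity = le32_to_cpu((__force __le32)e->capacity);
}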
+
+void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+	unsigned long flags;
+
+	esas2r_rq_destroy_request(rq, a);
+	spin_lock_irqsave(&a->request_lock, flags);
+	list_add(&rq->comp_list, &a->avail_request);
+	spin_unlock_irqrestore(&a->request_lock, flags);
+}
+
+struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a)
+{
+	struct esas2r_request *rq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&a->request_lock, flags);
+
+	if (unlikely(list_empty(&a->avail_request))) {
+		spin_unlock_irqrestore(&a->request_lock, flags);
+		return NULL;
+	}
+
+	rq = list_first_entry(&a->avail_request, struct esas2r_request,
+			      comp_list);
+	list_del(&rq->comp_list);
+	spin_unlock_irqrestore(&a->request_lock, flags);
+	esas2r_rq_init_request(rq, a);
+
+	return rq;
+}
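
Usage sketch (a fragment): requests come from a fixed, spinlock-protected free list, so allocation can fail under load and callers must be able to back off, as the SCSI_MLQUEUE_HOST_BUSY returns above do.

struct esas2r_request *rq;

rq = esas2r_alloc_request(a);	/* `a` assumed in scope */
if (rq == NULL)
	return SCSI_MLQUEUE_HOST_BUSY;	/* pool exhausted; retry later */

/* ... fill in rq->vrq ... */
esas2r_start_request(a, rq);
/* the completion callback returns rq via esas2r_free_request() */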
+
+void esas2r_complete_request_cb(struct esas2r_adapter *a,
+				struct esas2r_request *rq)
+{
+	esas2r_debug("completing request %p\n", rq);
+
+	scsi_dma_unmap(rq->cmd);
+
+	if (unlikely(rq->req_stat != RS_SUCCESS)) {
+		esas2r_debug("[%x STATUS %x:%x (%x)]", rq->target_id,
+			     rq->req_stat,
+			     rq->func_rsp.scsi_rsp.scsi_stat,
+			     rq->cmd);
+
+		rq->cmd->result =
+			((esas2r_req_status_to_error(rq->req_stat) << 16)
+			 | (rq->func_rsp.scsi_rsp.scsi_stat & STATUS_MASK));
+
+		if (rq->req_stat == RS_UNDERRUN)
+			scsi_set_resid(rq->cmd,
+				       le32_to_cpu(rq->func_rsp.scsi_rsp.
+						   residual_length));
+		else
+			scsi_set_resid(rq->cmd, 0);
+	}
+
+	rq->cmd->scsi_done(rq->cmd);
+
+	esas2r_free_request(a, rq);
+}
+
+/* Run the tasklet to handle work deferred from interrupt context. */
+void esas2r_adapter_tasklet(unsigned long context)
+{
+	struct esas2r_adapter *a = (struct esas2r_adapter *)context;
+
+	if (unlikely(a->flags2 & AF2_TIMER_TICK)) {
+		esas2r_lock_clear_flags(&a->flags2, AF2_TIMER_TICK);
+		esas2r_timer_tick(a);
+	}
+
+	if (likely(a->flags2 & AF2_INT_PENDING)) {
+		esas2r_lock_clear_flags(&a->flags2, AF2_INT_PENDING);
+		esas2r_adapter_interrupt(a);
+	}
+
+	if (esas2r_is_tasklet_pending(a))
+		esas2r_do_tasklet_tasks(a);
+
+	if (esas2r_is_tasklet_pending(a)
+	    || (a->flags2 & AF2_INT_PENDING)
+	    || (a->flags2 & AF2_TIMER_TICK)) {
+		esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
+		esas2r_schedule_tasklet(a);
+	} else {
+		esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
+	}
+}
+
+static void esas2r_timer_callback(unsigned long context);
+
+void esas2r_kickoff_timer(struct esas2r_adapter *a)
+{
+	init_timer(&a->timer);
+
+	a->timer.function = esas2r_timer_callback;
+	a->timer.data = (unsigned long)a;
+	a->timer.expires = jiffies +
+			   msecs_to_jiffies(100);
+
+	add_timer(&a->timer);
+}
+
+static void esas2r_timer_callback(unsigned long context)
+{
+	struct esas2r_adapter *a = (struct esas2r_adapter *)context;
+
+	esas2r_lock_set_flags(&a->flags2, AF2_TIMER_TICK);
+
+	esas2r_schedule_tasklet(a);
+
+	esas2r_kickoff_timer(a);
+}
+
+/*
+ * Firmware events need to be handled outside of interrupt context
+ * so we schedule a delayed_work to handle them.
+ */
+
+static void
+esas2r_free_fw_event(struct esas2r_fw_event_work *fw_event)
+{
+	unsigned long flags;
+	struct esas2r_adapter *a = fw_event->a;
+
+	spin_lock_irqsave(&a->fw_event_lock, flags);
+	list_del(&fw_event->list);
+	kfree(fw_event);
+	spin_unlock_irqrestore(&a->fw_event_lock, flags);
+}
+
+void
+esas2r_fw_event_off(struct esas2r_adapter *a)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&a->fw_event_lock, flags);
+	a->fw_events_off = 1;
+	spin_unlock_irqrestore(&a->fw_event_lock, flags);
+}
+
+void
+esas2r_fw_event_on(struct esas2r_adapter *a)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&a->fw_event_lock, flags);
+	a->fw_events_off = 0;
+	spin_unlock_irqrestore(&a->fw_event_lock, flags);
+}
+
+static void esas2r_add_device(struct esas2r_adapter *a, u16 target_id)
+{
+	int ret;
+	struct scsi_device *scsi_dev;
+
+	scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0);
+
+	if (scsi_dev) {
+		esas2r_log_dev(ESAS2R_LOG_WARN, &(scsi_dev->sdev_gendev),
+			       "scsi device already exists at id %d",
+			       target_id);
+
+		scsi_device_put(scsi_dev);
+	} else {
+		esas2r_log_dev(ESAS2R_LOG_INFO, &(a->host->shost_gendev),
+			       "scsi_add_device() called for 0:%d:0",
+			       target_id);
+
+		ret = scsi_add_device(a->host, 0, target_id, 0);
+		if (ret) {
+			esas2r_log_dev(ESAS2R_LOG_CRIT,
+				       &(a->host->shost_gendev),
+				       "scsi_add_device failed with %d for id %d",
+				       ret, target_id);
+		}
+	}
+}
+
+static void esas2r_remove_device(struct esas2r_adapter *a, u16 target_id)
+{
+	struct scsi_device *scsi_dev;
+
+	scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0);
+
+	if (scsi_dev) {
+		scsi_device_set_state(scsi_dev, SDEV_OFFLINE);
+
+		esas2r_log_dev(ESAS2R_LOG_INFO, &(scsi_dev->sdev_gendev),
+			       "scsi_remove_device() called for 0:%d:0",
+			       target_id);
+
+		scsi_remove_device(scsi_dev);
+
+		esas2r_log_dev(ESAS2R_LOG_INFO, &(scsi_dev->sdev_gendev),
+			       "scsi_device_put() called");
+
+		scsi_device_put(scsi_dev);
+	} else {
+		esas2r_log_dev(ESAS2R_LOG_WARN, &(a->host->shost_gendev),
+			       "no target found at id %d", target_id);
+	}
+}
+
+/*
+ * Sends a firmware asynchronous event to anyone who happens to be
+ * listening on the defined ATTO VDA event ports.
+ */
+static void esas2r_send_ae_event(struct esas2r_fw_event_work *fw_event)
+{
+	struct esas2r_vda_ae *ae = (struct esas2r_vda_ae *)fw_event->data;
+	char *type;
+
+	switch (ae->vda_ae.hdr.bytype) {
+	case VDAAE_HDR_TYPE_RAID:
+		type = "RAID group state change";
+		break;
+
+	case VDAAE_HDR_TYPE_LU:
+		type = "Mapped destination LU change";
+		break;
+
+	case VDAAE_HDR_TYPE_DISK:
+		type = "Physical disk inventory change";
+		break;
+
+	case VDAAE_HDR_TYPE_RESET:
+		type = "Firmware reset";
+		break;
+
+	case VDAAE_HDR_TYPE_LOG_INFO:
+		type = "Event Log message (INFO level)";
+		break;
+
+	case VDAAE_HDR_TYPE_LOG_WARN:
+		type = "Event Log message (WARN level)";
+		break;
+
+	case VDAAE_HDR_TYPE_LOG_CRIT:
+		type = "Event Log message (CRIT level)";
+		break;
+
+	case VDAAE_HDR_TYPE_LOG_FAIL:
+		type = "Event Log message (FAIL level)";
+		break;
+
+	case VDAAE_HDR_TYPE_NVC:
+		type = "NVCache change";
+		break;
+
+	case VDAAE_HDR_TYPE_TLG_INFO:
+		type = "Time stamped log message (INFO level)";
+		break;
+
+	case VDAAE_HDR_TYPE_TLG_WARN:
+		type = "Time stamped log message (WARN level)";
+		break;
+
+	case VDAAE_HDR_TYPE_TLG_CRIT:
+		type = "Time stamped log message (CRIT level)";
+		break;
+
+	case VDAAE_HDR_TYPE_PWRMGT:
+		type = "Power management";
+		break;
+
+	case VDAAE_HDR_TYPE_MUTE:
+		type = "Mute button pressed";
+		break;
+
+	case VDAAE_HDR_TYPE_DEV:
+		type = "Device attribute change";
+		break;
+
+	default:
+		type = "Unknown";
+		break;
+	}
+
+	esas2r_log(ESAS2R_LOG_WARN,
+		   "An async event of type \"%s\" was received from the firmware.  The event contents are:",
+		   type);
+	esas2r_log_hexdump(ESAS2R_LOG_WARN, &ae->vda_ae,
+			   ae->vda_ae.hdr.bylength);
+}
+
+static void
+esas2r_firmware_event_work(struct work_struct *work)
+{
+	struct esas2r_fw_event_work *fw_event =
+		container_of(work, struct esas2r_fw_event_work, work.work);
+
+	struct esas2r_adapter *a = fw_event->a;
+
+	u16 target_id = *(u16 *)&fw_event->data[0];
+
+	if (a->fw_events_off)
+		goto done;
+
+	switch (fw_event->type) {
+	case fw_event_null:
+		break; /* do nothing */
+
+	case fw_event_lun_change:
+		esas2r_remove_device(a, target_id);
+		esas2r_add_device(a, target_id);
+		break;
+
+	case fw_event_present:
+		esas2r_add_device(a, target_id);
+		break;
+
+	case fw_event_not_present:
+		esas2r_remove_device(a, target_id);
+		break;
+
+	case fw_event_vda_ae:
+		esas2r_send_ae_event(fw_event);
+		break;
+	}
+
+done:
+	esas2r_free_fw_event(fw_event);
+}
+
+void esas2r_queue_fw_event(struct esas2r_adapter *a,
+			   enum fw_event_type type,
+			   void *data,
+			   int data_sz)
+{
+	struct esas2r_fw_event_work *fw_event;
+	unsigned long flags;
+
+	fw_event = kzalloc(sizeof(struct esas2r_fw_event_work), GFP_ATOMIC);
+	if (!fw_event) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "esas2r_queue_fw_event failed to alloc");
+		return;
+	}
+
+	if (type == fw_event_vda_ae) {
+		struct esas2r_vda_ae *ae =
+			(struct esas2r_vda_ae *)fw_event->data;
+
+		ae->signature = ESAS2R_VDA_EVENT_SIG;
+		ae->bus_number = a->pcid->bus->number;
+		ae->devfn = a->pcid->devfn;
+		memcpy(&ae->vda_ae, data, sizeof(ae->vda_ae));
+	} else {
+		memcpy(fw_event->data, data, data_sz);
+	}
+
+	fw_event->type = type;
+	fw_event->a = a;
+
+	spin_lock_irqsave(&a->fw_event_lock, flags);
+	list_add_tail(&fw_event->list, &a->fw_event_list);
+	INIT_DELAYED_WORK(&fw_event->work, esas2r_firmware_event_work);
+	queue_delayed_work_on(
+		smp_processor_id(), a->fw_event_q, &fw_event->work,
+		msecs_to_jiffies(1));
+	spin_unlock_irqrestore(&a->fw_event_lock, flags);
+}
+
+void esas2r_target_state_changed(struct esas2r_adapter *a, u16 targ_id,
+				 u8 state)
+{
+	if (state == TS_LUN_CHANGE)
+		esas2r_queue_fw_event(a, fw_event_lun_change, &targ_id,
+				      sizeof(targ_id));
+	else if (state == TS_PRESENT)
+		esas2r_queue_fw_event(a, fw_event_present, &targ_id,
+				      sizeof(targ_id));
+	else if (state == TS_NOT_PRESENT)
+		esas2r_queue_fw_event(a, fw_event_not_present, &targ_id,
+				      sizeof(targ_id));
+}
+
+/* Translate status to a Linux SCSI mid-layer error code */
+int esas2r_req_status_to_error(u8 req_stat)
+{
+	switch (req_stat) {
+	case RS_OVERRUN:
+	case RS_UNDERRUN:
+	case RS_SUCCESS:
+	/*
+	 * NOTE: SCSI mid-layer wants a good status for a SCSI error, because
+	 *       it will check the scsi_stat value in the completion anyway.
+	 */
+	case RS_SCSI_ERROR:
+		return DID_OK;
+
+	case RS_SEL:
+	case RS_SEL2:
+		return DID_NO_CONNECT;
+
+	case RS_RESET:
+		return DID_RESET;
+
+	case RS_ABORTED:
+		return DID_ABORT;
+
+	case RS_BUSY:
+		return DID_BUS_BUSY;
+	}
+
+	/* everything else is just an error. */
+
+	return DID_ERROR;
+}
+
+module_init(esas2r_init);
+module_exit(esas2r_exit);

+ 306 - 0
drivers/scsi/esas2r/esas2r_targdb.c

@@ -0,0 +1,306 @@
+/*
+ *  linux/drivers/scsi/esas2r/esas2r_targdb.c
+ *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ *  Copyright (c) 2001-2013 ATTO Technology, Inc.
+ *  (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+void esas2r_targ_db_initialize(struct esas2r_adapter *a)
+{
+	struct esas2r_target *t;
+
+	for (t = a->targetdb; t < a->targetdb_end; t++) {
+		memset(t, 0, sizeof(struct esas2r_target));
+
+		t->target_state = TS_NOT_PRESENT;
+		t->buffered_target_state = TS_NOT_PRESENT;
+		t->new_target_state = TS_INVALID;
+	}
+}
+
+void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify)
+{
+	struct esas2r_target *t;
+	unsigned long flags;
+
+	for (t = a->targetdb; t < a->targetdb_end; t++) {
+		if (t->target_state != TS_PRESENT)
+			continue;
+
+		spin_lock_irqsave(&a->mem_lock, flags);
+		esas2r_targ_db_remove(a, t);
+		spin_unlock_irqrestore(&a->mem_lock, flags);
+
+		if (notify) {
+			esas2r_trace("remove id:%d",
+				     esas2r_targ_get_id(t, a));
+			esas2r_target_state_changed(a,
+						    esas2r_targ_get_id(t, a),
+						    TS_NOT_PRESENT);
+		}
+	}
+}
+
+void esas2r_targ_db_report_changes(struct esas2r_adapter *a)
+{
+	struct esas2r_target *t;
+	unsigned long flags;
+
+	esas2r_trace_enter();
+
+	if (a->flags & AF_DISC_PENDING) {
+		esas2r_trace_exit();
+		return;
+	}
+
+	for (t = a->targetdb; t < a->targetdb_end; t++) {
+		u8 state = TS_INVALID;
+
+		spin_lock_irqsave(&a->mem_lock, flags);
+		if (t->buffered_target_state != t->target_state)
+			state = t->buffered_target_state = t->target_state;
+
+		spin_unlock_irqrestore(&a->mem_lock, flags);
+		if (state != TS_INVALID) {
+			esas2r_trace("targ_db_report_changes:%d",
+				     esas2r_targ_get_id(t, a));
+			esas2r_trace("state:%d", state);
+
+			esas2r_target_state_changed(a,
+						    esas2r_targ_get_id(t, a),
+						    state);
+		}
+	}
+
+	esas2r_trace_exit();
+}
+
+struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a,
+					      struct esas2r_disc_context *dc)
+{
+	struct esas2r_target *t;
+
+	esas2r_trace_enter();
+
+	if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) {
+		esas2r_bugon();
+		esas2r_trace_exit();
+		return NULL;
+	}
+
+	t = a->targetdb + dc->curr_virt_id;
+
+	if (t->target_state == TS_PRESENT) {
+		esas2r_trace_exit();
+		return NULL;
+	}
+
+	esas2r_hdebug("add RAID %s, T:%d", dc->raid_grp_name,
+		      esas2r_targ_get_id(t, a));
+
+	if (dc->interleave == 0 || dc->block_size == 0) {
+		/* these are invalid values, don't create the target entry. */
+
+		esas2r_hdebug("invalid RAID group dimensions");
+
+		esas2r_trace_exit();
+
+		return NULL;
+	}
+
+	t->block_size = dc->block_size;
+	t->inter_byte = dc->interleave;
+	t->inter_block = dc->interleave / dc->block_size;
+	t->virt_targ_id = dc->curr_virt_id;
+	t->phys_targ_id = ESAS2R_TARG_ID_INV;
+
+	t->flags &= ~TF_PASS_THRU;
+	t->flags |= TF_USED;
+
+	t->identifier_len = 0;
+
+	t->target_state = TS_PRESENT;
+
+	return t;
+}
+
+struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a,
+					       struct esas2r_disc_context *dc,
+					       u8 *ident,
+					       u8 ident_len)
+{
+	struct esas2r_target *t;
+
+	esas2r_trace_enter();
+
+	if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) {
+		esas2r_bugon();
+		esas2r_trace_exit();
+		return NULL;
+	}
+
+	/* see if we found this device before. */
+
+	t = esas2r_targ_db_find_by_ident(a, ident, ident_len);
+
+	if (t == NULL) {
+		t = a->targetdb + dc->curr_virt_id;
+
+		if (ident_len > sizeof(t->identifier)
+		    || t->target_state == TS_PRESENT) {
+			esas2r_trace_exit();
+			return NULL;
+		}
+	}
+
+	esas2r_hdebug("add PT; T:%d, V:%d, P:%d", esas2r_targ_get_id(t, a),
+		      dc->curr_virt_id,
+		      dc->curr_phys_id);
+
+	t->block_size = 0;
+	t->inter_byte = 0;
+	t->inter_block = 0;
+	t->virt_targ_id = dc->curr_virt_id;
+	t->phys_targ_id = dc->curr_phys_id;
+	t->identifier_len = ident_len;
+
+	memcpy(t->identifier, ident, ident_len);
+
+	t->flags |= TF_PASS_THRU | TF_USED;
+
+	t->target_state = TS_PRESENT;
+
+	return t;
+}
+
+void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t)
+{
+	esas2r_trace_enter();
+
+	t->target_state = TS_NOT_PRESENT;
+
+	esas2r_trace("remove id:%d", esas2r_targ_get_id(t, a));
+
+	esas2r_trace_exit();
+}
+
+struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a,
+						      u64 *sas_addr)
+{
+	struct esas2r_target *t;
+
+	for (t = a->targetdb; t < a->targetdb_end; t++)
+		if (t->sas_addr == *sas_addr)
+			return t;
+
+	return NULL;
+}
+
+struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a,
+						   void *identifier,
+						   u8 ident_len)
+{
+	struct esas2r_target *t;
+
+	for (t = a->targetdb; t < a->targetdb_end; t++) {
+		if (ident_len == t->identifier_len
+		    && memcmp(&t->identifier[0], identifier,
+			      ident_len) == 0)
+			return t;
+	}
+
+	return NULL;
+}
+
+u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id)
+{
+	u16 id = target_id + 1;
+
+	while (id < ESAS2R_MAX_TARGETS) {
+		struct esas2r_target *t = a->targetdb + id;
+
+		if (t->target_state == TS_PRESENT)
+			break;
+
+		id++;
+	}
+
+	return id;
+}
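
Usage sketch: walking every present target with this helper. Passing (u16)-1 starts the scan at id 0, relying on the u16 wrap of target_id + 1; that starting convention is an assumption about intended use, not something documented here.

u16 id;

for (id = esas2r_targ_db_find_next_present(a, (u16)-1);
     id < ESAS2R_MAX_TARGETS;
     id = esas2r_targ_db_find_next_present(a, id))
	esas2r_trace("present target %d", id);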
+
+struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a,
+						     u16 virt_id)
+{
+	struct esas2r_target *t;
+
+	for (t = a->targetdb; t < a->targetdb_end; t++) {
+		if (t->target_state != TS_PRESENT)
+			continue;
+
+		if (t->virt_targ_id == virt_id)
+			return t;
+	}
+
+	return NULL;
+}
+
+u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a)
+{
+	u16 devcnt = 0;
+	struct esas2r_target *t;
+	unsigned long flags;
+
+	spin_lock_irqsave(&a->mem_lock, flags);
+	for (t = a->targetdb; t < a->targetdb_end; t++)
+		if (t->target_state == TS_PRESENT)
+			devcnt++;
+
+	spin_unlock_irqrestore(&a->mem_lock, flags);
+
+	return devcnt;
+}

+ 521 - 0
drivers/scsi/esas2r/esas2r_vda.c

@@ -0,0 +1,521 @@
+/*
+ *  linux/drivers/scsi/esas2r/esas2r_vda.c
+ *      esas2r driver VDA firmware interface functions
+ *
+ *  Copyright (c) 2001-2013 ATTO Technology, Inc.
+ *  (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  NO WARRANTY
+ *  THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ *  CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ *  LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ *  MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ *  solely responsible for determining the appropriateness of using and
+ *  distributing the Program and assumes all risks associated with its
+ *  exercise of rights under this Agreement, including but not limited to
+ *  the risks and costs of program errors, damage to or loss of data,
+ *  programs or equipment, and unavailability or interruption of operations.
+ *
+ *  DISCLAIMER OF LIABILITY
+ *  NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ *  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ *  DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ *  USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ *  HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "esas2r.h"
+
+static u8 esas2r_vdaioctl_versions[] = {
+	ATTO_VDA_VER_UNSUPPORTED,
+	ATTO_VDA_FLASH_VER,
+	ATTO_VDA_VER_UNSUPPORTED,
+	ATTO_VDA_VER_UNSUPPORTED,
+	ATTO_VDA_CLI_VER,
+	ATTO_VDA_VER_UNSUPPORTED,
+	ATTO_VDA_CFG_VER,
+	ATTO_VDA_MGT_VER,
+	ATTO_VDA_GSV_VER
+};
+
+static void clear_vda_request(struct esas2r_request *rq);
+
+static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
+				      struct esas2r_request *rq);
+
+/* Prepare a VDA IOCTL request to be sent to the firmware. */
+bool esas2r_process_vda_ioctl(struct esas2r_adapter *a,
+			      struct atto_ioctl_vda *vi,
+			      struct esas2r_request *rq,
+			      struct esas2r_sg_context *sgc)
+{
+	u32 datalen = 0;
+	struct atto_vda_sge *firstsg = NULL;
+	u8 vercnt = (u8)ARRAY_SIZE(esas2r_vdaioctl_versions);
+
+	vi->status = ATTO_STS_SUCCESS;
+	vi->vda_status = RS_PENDING;
+
+	if (vi->function >= vercnt) {
+		vi->status = ATTO_STS_INV_FUNC;
+		return false;
+	}
+
+	if (vi->version > esas2r_vdaioctl_versions[vi->function]) {
+		vi->status = ATTO_STS_INV_VERSION;
+		return false;
+	}
+
+	if (a->flags & AF_DEGRADED_MODE) {
+		vi->status = ATTO_STS_DEGRADED;
+		return false;
+	}
+
+	if (vi->function != VDA_FUNC_SCSI)
+		clear_vda_request(rq);
+
+	rq->vrq->scsi.function = vi->function;
+	rq->interrupt_cb = esas2r_complete_vda_ioctl;
+	rq->interrupt_cx = vi;
+
+	switch (vi->function) {
+	case VDA_FUNC_FLASH:
+
+		if (vi->cmd.flash.sub_func != VDA_FLASH_FREAD
+		    && vi->cmd.flash.sub_func != VDA_FLASH_FWRITE
+		    && vi->cmd.flash.sub_func != VDA_FLASH_FINFO) {
+			vi->status = ATTO_STS_INV_FUNC;
+			return false;
+		}
+
+		if (vi->cmd.flash.sub_func != VDA_FLASH_FINFO)
+			datalen = vi->data_length;
+
+		rq->vrq->flash.length = cpu_to_le32(datalen);
+		rq->vrq->flash.sub_func = vi->cmd.flash.sub_func;
+
+		memcpy(rq->vrq->flash.data.file.file_name,
+		       vi->cmd.flash.data.file.file_name,
+		       sizeof(vi->cmd.flash.data.file.file_name));
+
+		firstsg = rq->vrq->flash.data.file.sge;
+		break;
+
+	case VDA_FUNC_CLI:
+
+		datalen = vi->data_length;
+
+		rq->vrq->cli.cmd_rsp_len =
+			cpu_to_le32(vi->cmd.cli.cmd_rsp_len);
+		rq->vrq->cli.length = cpu_to_le32(datalen);
+
+		firstsg = rq->vrq->cli.sge;
+		break;
+
+	case VDA_FUNC_MGT:
+	{
+		u8 *cmdcurr_offset = sgc->cur_offset
+				     - offsetof(struct atto_ioctl_vda, data)
+				     + offsetof(struct atto_ioctl_vda, cmd)
+				     + offsetof(struct atto_ioctl_vda_mgt_cmd,
+						data);
+		/*
+		 * build the data payload SGL here first since
+		 * esas2r_sgc_init() will modify the S/G list offset for the
+		 * management SGL (which is built below where the data SGL is
+		 * usually built).
+		 */
+
+		if (vi->data_length) {
+			u32 payldlen = 0;
+
+			if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_HEALTH_REQ
+			    || vi->cmd.mgt.mgt_func == VDAMGT_DEV_METRICS) {
+				rq->vrq->mgt.payld_sglst_offset =
+					(u8)offsetof(struct atto_vda_mgmt_req,
+						     payld_sge);
+
+				payldlen = vi->data_length;
+				datalen = vi->cmd.mgt.data_length;
+			} else if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_INFO2
+				   || vi->cmd.mgt.mgt_func ==
+				   VDAMGT_DEV_INFO2_BYADDR) {
+				datalen = vi->data_length;
+				cmdcurr_offset = sgc->cur_offset;
+			} else {
+				vi->status = ATTO_STS_INV_PARAM;
+				return false;
+			}
+
+			/* Setup the length so building the payload SGL works */
+			rq->vrq->mgt.length = cpu_to_le32(datalen);
+
+			if (payldlen) {
+				rq->vrq->mgt.payld_length =
+					cpu_to_le32(payldlen);
+
+				esas2r_sgc_init(sgc, a, rq,
+						rq->vrq->mgt.payld_sge);
+				sgc->length = payldlen;
+
+				if (!esas2r_build_sg_list(a, rq, sgc)) {
+					vi->status = ATTO_STS_OUT_OF_RSRC;
+					return false;
+				}
+			}
+		} else {
+			datalen = vi->cmd.mgt.data_length;
+
+			rq->vrq->mgt.length = cpu_to_le32(datalen);
+		}
+
+		/*
+		 * Now that the payload SGL is built, if any, setup to build
+		 * the management SGL.
+		 */
+		firstsg = rq->vrq->mgt.sge;
+		sgc->cur_offset = cmdcurr_offset;
+
+		/* Finish initializing the management request. */
+		rq->vrq->mgt.mgt_func = vi->cmd.mgt.mgt_func;
+		rq->vrq->mgt.scan_generation = vi->cmd.mgt.scan_generation;
+		rq->vrq->mgt.dev_index =
+			cpu_to_le32(vi->cmd.mgt.dev_index);
+
+		esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data);
+		break;
+	}
+
+	case VDA_FUNC_CFG:
+
+		if (vi->data_length
+		    || vi->cmd.cfg.data_length == 0) {
+			vi->status = ATTO_STS_INV_PARAM;
+			return false;
+		}
+
+		if (vi->cmd.cfg.cfg_func == VDA_CFG_INIT) {
+			vi->status = ATTO_STS_INV_FUNC;
+			return false;
+		}
+
+		rq->vrq->cfg.sub_func = vi->cmd.cfg.cfg_func;
+		rq->vrq->cfg.length = cpu_to_le32(vi->cmd.cfg.data_length);
+
+		if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
+			memcpy(&rq->vrq->cfg.data,
+			       &vi->cmd.cfg.data,
+			       vi->cmd.cfg.data_length);
+
+			esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func,
+					     &rq->vrq->cfg.data);
+		} else {
+			vi->status = ATTO_STS_INV_FUNC;
+
+			return false;
+		}
+
+		break;
+
+	case VDA_FUNC_GSV:
+
+		vi->cmd.gsv.rsp_len = vercnt;
+
+		memcpy(vi->cmd.gsv.version_info, esas2r_vdaioctl_versions,
+		       vercnt);
+
+		vi->vda_status = RS_SUCCESS;
+		break;
+
+	default:
+
+		vi->status = ATTO_STS_INV_FUNC;
+		return false;
+	}
+
+	if (datalen) {
+		esas2r_sgc_init(sgc, a, rq, firstsg);
+		sgc->length = datalen;
+
+		if (!esas2r_build_sg_list(a, rq, sgc)) {
+			vi->status = ATTO_STS_OUT_OF_RSRC;
+			return false;
+		}
+	}
+
+	esas2r_start_request(a, rq);
+
+	return true;
+}
+
+static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
+				      struct esas2r_request *rq)
+{
+	struct atto_ioctl_vda *vi = (struct atto_ioctl_vda *)rq->interrupt_cx;
+
+	vi->vda_status = rq->req_stat;
+
+	switch (vi->function) {
+	case VDA_FUNC_FLASH:
+
+		if (vi->cmd.flash.sub_func == VDA_FLASH_FINFO
+		    || vi->cmd.flash.sub_func == VDA_FLASH_FREAD)
+			vi->cmd.flash.data.file.file_size =
+				le32_to_cpu(rq->func_rsp.flash_rsp.file_size);
+
+		break;
+
+	case VDA_FUNC_MGT:
+
+		vi->cmd.mgt.scan_generation =
+			rq->func_rsp.mgt_rsp.scan_generation;
+		vi->cmd.mgt.dev_index = le16_to_cpu(
+			rq->func_rsp.mgt_rsp.dev_index);
+
+		if (vi->data_length == 0)
+			vi->cmd.mgt.data_length =
+				le32_to_cpu(rq->func_rsp.mgt_rsp.length);
+
+		esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data);
+		break;
+
+	case VDA_FUNC_CFG:
+
+		if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
+			struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg;
+			struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp;
+
+			cfg->data_length =
+				cpu_to_le32(sizeof(struct atto_vda_cfg_init));
+			cfg->data.init.vda_version =
+				le32_to_cpu(rsp->vda_version);
+			cfg->data.init.fw_build = rsp->fw_build;
+
+			sprintf((char *)&cfg->data.init.fw_release,
+				"%1d.%02d",
+				(int)LOBYTE(le16_to_cpu(rsp->fw_release)),
+				(int)HIBYTE(le16_to_cpu(rsp->fw_release)));
+
+			if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A')
+				cfg->data.init.fw_version =
+					cfg->data.init.fw_build;
+			else
+				cfg->data.init.fw_version =
+					cfg->data.init.fw_release;
+		} else {
+			esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func,
+					     &vi->cmd.cfg.data);
+		}
+
+		break;
+
+	case VDA_FUNC_CLI:
+
+		vi->cmd.cli.cmd_rsp_len =
+			le32_to_cpu(rq->func_rsp.cli_rsp.cmd_rsp_len);
+		break;
+
+	default:
+
+		break;
+	}
+}
+
+/* Build a flash VDA request. */
+void esas2r_build_flash_req(struct esas2r_adapter *a,
+			    struct esas2r_request *rq,
+			    u8 sub_func,
+			    u8 cksum,
+			    u32 addr,
+			    u32 length)
+{
+	struct atto_vda_flash_req *vrq = &rq->vrq->flash;
+
+	clear_vda_request(rq);
+
+	rq->vrq->scsi.function = VDA_FUNC_FLASH;
+
+	if (sub_func == VDA_FLASH_BEGINW
+	    || sub_func == VDA_FLASH_WRITE
+	    || sub_func == VDA_FLASH_READ)
+		vrq->sg_list_offset = (u8)offsetof(struct atto_vda_flash_req,
+						   data.sge);
+
+	vrq->length = cpu_to_le32(length);
+	vrq->flash_addr = cpu_to_le32(addr);
+	vrq->checksum = cksum;
+	vrq->sub_func = sub_func;
+}
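
Usage sketch, assuming a flash-info query carries no checksum, address, or data length (consistent with the zero-length handling of VDA_FLASH_FINFO in esas2r_process_vda_ioctl() above):

esas2r_build_flash_req(a, rq, VDA_FLASH_FINFO,
		       0 /* cksum: assumed unused for FINFO */,
		       0 /* addr */, 0 /* length */);
esas2r_start_request(a, rq);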
+
+/* Build a VDA management request. */
+void esas2r_build_mgt_req(struct esas2r_adapter *a,
+			  struct esas2r_request *rq,
+			  u8 sub_func,
+			  u8 scan_gen,
+			  u16 dev_index,
+			  u32 length,
+			  void *data)
+{
+	struct atto_vda_mgmt_req *vrq = &rq->vrq->mgt;
+
+	clear_vda_request(rq);
+
+	rq->vrq->scsi.function = VDA_FUNC_MGT;
+
+	vrq->mgt_func = sub_func;
+	vrq->scan_generation = scan_gen;
+	vrq->dev_index = cpu_to_le16(dev_index);
+	vrq->length = cpu_to_le32(length);
+
+	if (vrq->length) {
+		if (a->flags & AF_LEGACY_SGE_MODE) {
+			vrq->sg_list_offset = (u8)offsetof(
+				struct atto_vda_mgmt_req, sge);
+
+			vrq->sge[0].length = cpu_to_le32(SGE_LAST | length);
+			vrq->sge[0].address = cpu_to_le64(
+				rq->vrq_md->phys_addr +
+				sizeof(union atto_vda_req));
+		} else {
+			vrq->sg_list_offset = (u8)offsetof(
+				struct atto_vda_mgmt_req, prde);
+
+			vrq->prde[0].ctl_len = cpu_to_le32(length);
+			vrq->prde[0].address = cpu_to_le64(
+				rq->vrq_md->phys_addr +
+				sizeof(union atto_vda_req));
+		}
+	}
+
+	if (data) {
+		esas2r_nuxi_mgt_data(sub_func, data);
+
+		memcpy(&rq->vda_rsp_data->mgt_data.data.bytes[0], data,
+		       length);
+	}
+}
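
Usage sketch: a device-info query for a hypothetical device. The device index of 3 and scan generation of 0 are placeholders, not values taken from this patch.

esas2r_build_mgt_req(a, rq, VDAMGT_DEV_INFO,
		     0 /* scan generation: placeholder */,
		     3 /* device index: example */,
		     sizeof(struct atto_vda_devinfo), NULL);
esas2r_start_request(a, rq);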
+
+/* Build a VDA asynchronous event (AE) request. */
+void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+	struct atto_vda_ae_req *vrq = &rq->vrq->ae;
+
+	clear_vda_request(rq);
+
+	rq->vrq->scsi.function = VDA_FUNC_AE;
+
+	vrq->length = cpu_to_le32(sizeof(struct atto_vda_ae_data));
+
+	if (a->flags & AF_LEGACY_SGE_MODE) {
+		vrq->sg_list_offset =
+			(u8)offsetof(struct atto_vda_ae_req, sge);
+		vrq->sge[0].length = cpu_to_le32(SGE_LAST | vrq->length);
+		vrq->sge[0].address = cpu_to_le64(
+			rq->vrq_md->phys_addr +
+			sizeof(union atto_vda_req));
+	} else {
+		vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ae_req,
+						   prde);
+		vrq->prde[0].ctl_len = cpu_to_le32(vrq->length);
+		vrq->prde[0].address = cpu_to_le64(
+			rq->vrq_md->phys_addr +
+			sizeof(union atto_vda_req));
+	}
+}
+
+/* Build a VDA CLI request. */
+void esas2r_build_cli_req(struct esas2r_adapter *a,
+			  struct esas2r_request *rq,
+			  u32 length,
+			  u32 cmd_rsp_len)
+{
+	struct atto_vda_cli_req *vrq = &rq->vrq->cli;
+
+	clear_vda_request(rq);
+
+	rq->vrq->scsi.function = VDA_FUNC_CLI;
+
+	vrq->length = cpu_to_le32(length);
+	vrq->cmd_rsp_len = cpu_to_le32(cmd_rsp_len);
+	vrq->sg_list_offset = (u8)offsetof(struct atto_vda_cli_req, sge);
+}
+
+/* Build a VDA IOCTL request. */
+void esas2r_build_ioctl_req(struct esas2r_adapter *a,
+			    struct esas2r_request *rq,
+			    u32 length,
+			    u8 sub_func)
+{
+	struct atto_vda_ioctl_req *vrq = &rq->vrq->ioctl;
+
+	clear_vda_request(rq);
+
+	rq->vrq->scsi.function = VDA_FUNC_IOCTL;
+
+	vrq->length = cpu_to_le32(length);
+	vrq->sub_func = sub_func;
+	vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ioctl_req, sge);
+}
+
+/* Build a VDA configuration request. */
+void esas2r_build_cfg_req(struct esas2r_adapter *a,
+			  struct esas2r_request *rq,
+			  u8 sub_func,
+			  u32 length,
+			  void *data)
+{
+	struct atto_vda_cfg_req *vrq = &rq->vrq->cfg;
+
+	clear_vda_request(rq);
+
+	rq->vrq->scsi.function = VDA_FUNC_CFG;
+
+	vrq->sub_func = sub_func;
+	vrq->length = cpu_to_le32(length);
+
+	if (data) {
+		esas2r_nuxi_cfg_data(sub_func, data);
+
+		memcpy(&vrq->data, data, length);
+	}
+}
+
+static void clear_vda_request(struct esas2r_request *rq)
+{
+	u32 handle = rq->vrq->scsi.handle;
+
+	memset(rq->vrq, 0, sizeof(*rq->vrq));
+
+	rq->vrq->scsi.handle = handle;
+
+	rq->req_stat = RS_PENDING;
+
+	/* Since the data buffer is separate, clear it too. */
+
+	memset(rq->data_buf, 0, ESAS2R_DATA_BUF_LEN);
+
+	/*
+	 * Set up the next and prev pointers in case the request does not go
+	 * through esas2r_start_request().
+	 */
+
+	INIT_LIST_HEAD(&rq->req_list);
+}

+ 3 - 5
drivers/scsi/hpsa.c

@@ -583,7 +583,7 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
 		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
 		if (likely(h->msix_vector))
 			c->Header.ReplyQueue =
-				smp_processor_id() % h->nreply_queues;
+				raw_smp_processor_id() % h->nreply_queues;
 	}
 }
 
@@ -1205,8 +1205,8 @@ static void complete_scsi_command(struct CommandList *cp)
 	scsi_set_resid(cmd, ei->ResidualCnt);
 
 	if (ei->CommandStatus == 0) {
-		cmd->scsi_done(cmd);
 		cmd_free(h, cp);
+		cmd->scsi_done(cmd);
 		return;
 	}
 
@@ -1379,8 +1379,8 @@ static void complete_scsi_command(struct CommandList *cp)
 		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
 				cp, ei->CommandStatus);
 	}
-	cmd->scsi_done(cmd);
 	cmd_free(h, cp);
+	cmd->scsi_done(cmd);
 }
 
 static void hpsa_pci_unmap(struct pci_dev *pdev,
@@ -2721,7 +2721,6 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
 	} while (test_and_set_bit
 		 (i & (BITS_PER_LONG - 1),
 		  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
-	h->nr_allocs++;
 	spin_unlock_irqrestore(&h->lock, flags);
 
 	c = h->cmd_pool + i;
@@ -2793,7 +2792,6 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c)
 	spin_lock_irqsave(&h->lock, flags);
 	clear_bit(i & (BITS_PER_LONG - 1),
 		  h->cmd_pool_bits + (i / BITS_PER_LONG));
-	h->nr_frees++;
 	spin_unlock_irqrestore(&h->lock, flags);
 }
 

+ 0 - 2
drivers/scsi/hpsa.h

@@ -98,8 +98,6 @@ struct ctlr_info {
 	struct ErrorInfo 	*errinfo_pool;
 	dma_addr_t		errinfo_pool_dhandle;
 	unsigned long  		*cmd_pool_bits;
-	int			nr_allocs;
-	int			nr_frees;
 	int			scan_finished;
 	spinlock_t		scan_lock;
 	wait_queue_head_t	scan_wait_queue;

+ 14 - 0
drivers/scsi/ipr.c

@@ -9990,6 +9990,20 @@ static struct pci_device_id ipr_pci_table[] = {
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, ipr_pci_table);

+ 7 - 0
drivers/scsi/ipr.h

@@ -100,6 +100,13 @@
 #define IPR_SUBS_DEV_ID_57D6    0x03FC
 #define IPR_SUBS_DEV_ID_57D7    0x03FF
 #define IPR_SUBS_DEV_ID_57D8    0x03FE
+#define IPR_SUBS_DEV_ID_57D9    0x046D
+#define IPR_SUBS_DEV_ID_57EB    0x0474
+#define IPR_SUBS_DEV_ID_57EC    0x0475
+#define IPR_SUBS_DEV_ID_57ED    0x0499
+#define IPR_SUBS_DEV_ID_57EE    0x049A
+#define IPR_SUBS_DEV_ID_57EF    0x049B
+#define IPR_SUBS_DEV_ID_57F0    0x049C
 #define IPR_NAME				"ipr"
 
 /*

+ 1 - 1
drivers/scsi/isci/port_config.c

@@ -311,9 +311,9 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
 					      &ihost->phys[phy_index]);
 
 			assigned_phy_mask |= (1 << phy_index);
+			phy_index++;
 		}
 
-		phy_index++;
 	}
 
 	return sci_port_configuration_agent_validate_ports(ihost, port_agent);

+ 109 - 0
drivers/scsi/libiscsi.c

@@ -2812,6 +2812,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
 	kfree(session->boot_nic);
 	kfree(session->boot_target);
 	kfree(session->ifacename);
+	kfree(session->portal_type);
+	kfree(session->discovery_parent_type);
 
 	iscsi_destroy_session(cls_session);
 	iscsi_host_dec_session_cnt(shost);
@@ -3168,6 +3170,7 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_session *session = conn->session;
+	int val;
 
 	switch(param) {
 	case ISCSI_PARAM_FAST_ABORT:
@@ -3257,6 +3260,15 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
 		return iscsi_switch_str_param(&session->boot_nic, buf);
 	case ISCSI_PARAM_BOOT_TARGET:
 		return iscsi_switch_str_param(&session->boot_target, buf);
+	case ISCSI_PARAM_PORTAL_TYPE:
+		return iscsi_switch_str_param(&session->portal_type, buf);
+	case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
+		return iscsi_switch_str_param(&session->discovery_parent_type,
+					      buf);
+	case ISCSI_PARAM_DISCOVERY_SESS:
+		sscanf(buf, "%d", &val);
+		session->discovery_sess = !!val;
+		break;
 	default:
 		return -ENOSYS;
 	}
@@ -3305,6 +3317,9 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
 	case ISCSI_PARAM_DATASEQ_INORDER_EN:
 		len = sprintf(buf, "%d\n", session->dataseq_inorder_en);
 		break;
+	case ISCSI_PARAM_DEF_TASKMGMT_TMO:
+		len = sprintf(buf, "%d\n", session->def_taskmgmt_tmo);
+		break;
 	case ISCSI_PARAM_ERL:
 		len = sprintf(buf, "%d\n", session->erl);
 		break;
@@ -3344,6 +3359,52 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
 	case ISCSI_PARAM_BOOT_TARGET:
 		len = sprintf(buf, "%s\n", session->boot_target);
 		break;
+	case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
+		len = sprintf(buf, "%u\n", session->auto_snd_tgt_disable);
+		break;
+	case ISCSI_PARAM_DISCOVERY_SESS:
+		len = sprintf(buf, "%u\n", session->discovery_sess);
+		break;
+	case ISCSI_PARAM_PORTAL_TYPE:
+		len = sprintf(buf, "%s\n", session->portal_type);
+		break;
+	case ISCSI_PARAM_CHAP_AUTH_EN:
+		len = sprintf(buf, "%u\n", session->chap_auth_en);
+		break;
+	case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
+		len = sprintf(buf, "%u\n", session->discovery_logout_en);
+		break;
+	case ISCSI_PARAM_BIDI_CHAP_EN:
+		len = sprintf(buf, "%u\n", session->bidi_chap_en);
+		break;
+	case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
+		len = sprintf(buf, "%u\n", session->discovery_auth_optional);
+		break;
+	case ISCSI_PARAM_DEF_TIME2WAIT:
+		len = sprintf(buf, "%d\n", session->time2wait);
+		break;
+	case ISCSI_PARAM_DEF_TIME2RETAIN:
+		len = sprintf(buf, "%d\n", session->time2retain);
+		break;
+	case ISCSI_PARAM_TSID:
+		len = sprintf(buf, "%u\n", session->tsid);
+		break;
+	case ISCSI_PARAM_ISID:
+		len = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
+			      session->isid[0], session->isid[1],
+			      session->isid[2], session->isid[3],
+			      session->isid[4], session->isid[5]);
+		break;
+	case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
+		len = sprintf(buf, "%u\n", session->discovery_parent_idx);
+		break;
+	case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
+		if (session->discovery_parent_type)
+			len = sprintf(buf, "%s\n",
+				      session->discovery_parent_type);
+		else
+			len = sprintf(buf, "\n");
+		break;
 	default:
 		return -ENOSYS;
 	}
@@ -3433,6 +3494,54 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
 	case ISCSI_PARAM_PERSISTENT_ADDRESS:
 		len = sprintf(buf, "%s\n", conn->persistent_address);
 		break;
+	case ISCSI_PARAM_STATSN:
+		len = sprintf(buf, "%u\n", conn->statsn);
+		break;
+	case ISCSI_PARAM_MAX_SEGMENT_SIZE:
+		len = sprintf(buf, "%u\n", conn->max_segment_size);
+		break;
+	case ISCSI_PARAM_KEEPALIVE_TMO:
+		len = sprintf(buf, "%u\n", conn->keepalive_tmo);
+		break;
+	case ISCSI_PARAM_LOCAL_PORT:
+		len = sprintf(buf, "%u\n", conn->local_port);
+		break;
+	case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
+		len = sprintf(buf, "%u\n", conn->tcp_timestamp_stat);
+		break;
+	case ISCSI_PARAM_TCP_NAGLE_DISABLE:
+		len = sprintf(buf, "%u\n", conn->tcp_nagle_disable);
+		break;
+	case ISCSI_PARAM_TCP_WSF_DISABLE:
+		len = sprintf(buf, "%u\n", conn->tcp_wsf_disable);
+		break;
+	case ISCSI_PARAM_TCP_TIMER_SCALE:
+		len = sprintf(buf, "%u\n", conn->tcp_timer_scale);
+		break;
+	case ISCSI_PARAM_TCP_TIMESTAMP_EN:
+		len = sprintf(buf, "%u\n", conn->tcp_timestamp_en);
+		break;
+	case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
+		len = sprintf(buf, "%u\n", conn->fragment_disable);
+		break;
+	case ISCSI_PARAM_IPV4_TOS:
+		len = sprintf(buf, "%u\n", conn->ipv4_tos);
+		break;
+	case ISCSI_PARAM_IPV6_TC:
+		len = sprintf(buf, "%u\n", conn->ipv6_traffic_class);
+		break;
+	case ISCSI_PARAM_IPV6_FLOW_LABEL:
+		len = sprintf(buf, "%u\n", conn->ipv6_flow_label);
+		break;
+	case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
+		len = sprintf(buf, "%u\n", conn->is_fw_assigned_ipv6);
+		break;
+	case ISCSI_PARAM_TCP_XMIT_WSF:
+		len = sprintf(buf, "%u\n", conn->tcp_xmit_wsf);
+		break;
+	case ISCSI_PARAM_TCP_RECV_WSF:
+		len = sprintf(buf, "%u\n", conn->tcp_recv_wsf);
+		break;
 	default:
 		return -ENOSYS;
 	}

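The libiscsi hunks add a batch of session- and connection-level
parameters; two small conversion idioms recur: an integer string is
parsed and collapsed to a 0/1 flag with !!, and the six-byte ISID is
rendered as twelve hex digits. A minimal user-space sketch of both
(standalone illustration with arbitrary example values, not the kernel
code):

	#include <stdio.h>

	int main(void)
	{
		int val = 0;
		/* arbitrary example ISID bytes */
		unsigned char isid[6] = { 0x80, 0x00, 0x98, 0x76, 0x54, 0x32 };
		char buf[16];

		sscanf("42", "%d", &val);
		printf("discovery_sess = %d\n", !!val);	/* prints 1 */

		sprintf(buf, "%02x%02x%02x%02x%02x%02x",
			isid[0], isid[1], isid[2], isid[3],
			isid[4], isid[5]);
		printf("isid = %s\n", buf);		/* 800098765432 */
		return 0;
	}
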
+ 1 - 2
drivers/scsi/lpfc/lpfc.h

@@ -421,6 +421,7 @@ struct lpfc_vport {
 	uint32_t cfg_enable_da_id;
 	uint32_t cfg_max_scsicmpl_time;
 	uint32_t cfg_tgt_queue_depth;
+	uint32_t cfg_first_burst_size;
 
 	uint32_t dev_loss_tmo_changed;
 
@@ -710,8 +711,6 @@ struct lpfc_hba {
 	uint32_t cfg_use_msi;
 	uint32_t cfg_fcp_imax;
 	uint32_t cfg_fcp_cpu_map;
-	uint32_t cfg_fcp_wq_count;
-	uint32_t cfg_fcp_eq_count;
 	uint32_t cfg_fcp_io_channel;
 	uint32_t cfg_total_seg_cnt;
 	uint32_t cfg_sg_seg_cnt;

+ 23 - 34
drivers/scsi/lpfc/lpfc_attr.c

@@ -674,9 +674,6 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
 	int i;
 	int rc;
 
-	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
-		return 0;
-
 	init_completion(&online_compl);
 	rc = lpfc_workq_post_event(phba, &status, &online_compl,
 			      LPFC_EVT_OFFLINE_PREP);
@@ -744,14 +741,15 @@ lpfc_selective_reset(struct lpfc_hba *phba)
 	int status = 0;
 	int rc;
 
-	if ((!phba->cfg_enable_hba_reset) ||
-	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
+	if (!phba->cfg_enable_hba_reset)
 		return -EACCES;
 
-	status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) {
+		status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
 
-	if (status != 0)
-		return status;
+		if (status != 0)
+			return status;
+	}
 
 	init_completion(&online_compl);
 	rc = lpfc_workq_post_event(phba, &status, &online_compl,
@@ -2591,9 +2589,12 @@ LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
 
 /*
 # lun_queue_depth:  This parameter is used to limit the number of outstanding
-# commands per FCP LUN. Value range is [1,128]. Default value is 30.
+# commands per FCP LUN. Value range is [1,512]. Default value is 30.
+# If this parameter value is greater than 1/8th the maximum number of exchanges
+# supported by the HBA port, then the lun queue depth will be reduced to
+# 1/8th the maximum number of exchanges.
 */
-LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128,
+LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512,
 		  "Max number of FCP commands we can queue to a specific LUN");
 
 /*
@@ -2601,7 +2602,7 @@ LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128,
 # commands per target port. Value range is [10,65535]. Default value is 65535.
 */
 LPFC_VPORT_ATTR_R(tgt_queue_depth, 65535, 10, 65535,
-	"Max number of FCP commands we can queue to a specific target port");
+		  "Max number of FCP commands we can queue to a specific target port");
 
 /*
 # hba_queue_depth:  This parameter is used to limit the number of outstanding
@@ -3948,6 +3949,14 @@ LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
 LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
 		   "Use ADISC on rediscovery to authenticate FCP devices");
 
+/*
+# lpfc_first_burst_size: First burst size to use on the NPorts
+# that support first burst.
+# Value range is [0,65536]. Default value is 0.
+*/
+LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
+		   "First burst size for Targets that support first burst");
+
 /*
 # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
 # depth. Default value is 0. When the value of this parameter is zero the
@@ -4111,25 +4120,6 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
 LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
 	    "MSI-X (2), if possible");
 
-/*
-# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
-# This parameter is ignored and will eventually be depricated
-#
-# Value range is [1,7]. Default value is 4.
-*/
-LPFC_ATTR_R(fcp_wq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
-	    LPFC_FCP_IO_CHAN_MAX,
-	    "Set the number of fast-path FCP work queues, if possible");
-
-/*
-# lpfc_fcp_eq_count: Set the number of FCP EQ/CQ/WQ IO channels
-#
-# Value range is [1,7]. Default value is 4.
-*/
-LPFC_ATTR_R(fcp_eq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
-	    LPFC_FCP_IO_CHAN_MAX,
-	    "Set the number of fast-path FCP event queues, if possible");
-
 /*
 # lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels
 #
@@ -4276,6 +4266,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_devloss_tmo,
 	&dev_attr_lpfc_fcp_class,
 	&dev_attr_lpfc_use_adisc,
+	&dev_attr_lpfc_first_burst_size,
 	&dev_attr_lpfc_ack0,
 	&dev_attr_lpfc_topology,
 	&dev_attr_lpfc_scan_down,
@@ -4307,8 +4298,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_use_msi,
 	&dev_attr_lpfc_fcp_imax,
 	&dev_attr_lpfc_fcp_cpu_map,
-	&dev_attr_lpfc_fcp_wq_count,
-	&dev_attr_lpfc_fcp_eq_count,
 	&dev_attr_lpfc_fcp_io_channel,
 	&dev_attr_lpfc_enable_bg,
 	&dev_attr_lpfc_soft_wwnn,
@@ -4352,6 +4341,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
 	&dev_attr_lpfc_restrict_login,
 	&dev_attr_lpfc_fcp_class,
 	&dev_attr_lpfc_use_adisc,
+	&dev_attr_lpfc_first_burst_size,
 	&dev_attr_lpfc_fdmi_on,
 	&dev_attr_lpfc_max_luns,
 	&dev_attr_nport_evt_cnt,
@@ -5290,8 +5280,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_use_msi_init(phba, lpfc_use_msi);
 	lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
 	lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
-	lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
-	lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
 	lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
 	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
 	lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
@@ -5331,6 +5319,7 @@ lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
 	lpfc_restrict_login_init(vport, lpfc_restrict_login);
 	lpfc_fcp_class_init(vport, lpfc_fcp_class);
 	lpfc_use_adisc_init(vport, lpfc_use_adisc);
+	lpfc_first_burst_size_init(vport, lpfc_first_burst_size);
 	lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
 	lpfc_fdmi_on_init(vport, lpfc_fdmi_on);
 	lpfc_discovery_threads_init(vport, lpfc_discovery_threads);

+ 5 - 3
drivers/scsi/lpfc/lpfc_bsg.c

@@ -2498,7 +2498,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
 	struct lpfc_sli_ct_request *ctreq = NULL;
 	int ret_val = 0;
 	int time_left;
-	int iocb_stat = 0;
+	int iocb_stat = IOCB_SUCCESS;
 	unsigned long flags;
 
 	*txxri = 0;
@@ -2574,6 +2574,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
 
 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
 	cmdiocbq->vport = phba->pport;
+	cmdiocbq->iocb_cmpl = NULL;
 
 	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 				rspiocbq,
@@ -2963,7 +2964,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 	uint8_t *ptr = NULL, *rx_databuf = NULL;
 	int rc = 0;
 	int time_left;
-	int iocb_stat;
+	int iocb_stat = IOCB_SUCCESS;
 	unsigned long flags;
 	void *dataout = NULL;
 	uint32_t total_mem;
@@ -3149,6 +3150,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 	}
 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
 	cmdiocbq->vport = phba->pport;
+	cmdiocbq->iocb_cmpl = NULL;
 	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 					     rspiocbq, (phba->fc_ratov * 2) +
 					     LPFC_DRVR_TIMEOUT);
@@ -3209,7 +3211,7 @@ err_loopback_test_exit:
 	lpfc_bsg_event_unref(evt); /* delete */
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
-	if (cmdiocbq != NULL)
+	if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
 		lpfc_sli_release_iocbq(phba, cmdiocbq);
 
 	if (rspiocbq != NULL)

+ 1 - 1
drivers/scsi/lpfc/lpfc_ct.c

@@ -895,7 +895,7 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
 	if (irsp->ulpStatus) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
-				 "0268 NS cmd %x Error (%d %d)\n",
+				 "0268 NS cmd x%x Error (x%x x%x)\n",
 				 cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]);
 
 		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&

+ 2 - 1
drivers/scsi/lpfc/lpfc_disc.h

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -154,6 +154,7 @@ struct lpfc_node_rrq {
 #define NLP_NODEV_REMOVE   0x08000000	/* Defer removal till discovery ends */
 #define NLP_TARGET_REMOVE  0x10000000   /* Target remove in process */
 #define NLP_SC_REQ         0x20000000	/* Target requires authentication */
+#define NLP_FIRSTBURST     0x40000000	/* Target supports FirstBurst */
 #define NLP_RPI_REGISTERED 0x80000000	/* nlp_rpi is valid */
 
 /* ndlp usage management macros */

+ 2 - 0
drivers/scsi/lpfc/lpfc_els.c

@@ -2122,6 +2122,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	}
 	npr->estabImagePair = 1;
 	npr->readXferRdyDis = 1;
+	if (vport->cfg_first_burst_size)
+		npr->writeXferRdyDis = 1;
 
 	/* For FCP support */
 	npr->prliType = PRLI_FCP_TYPE;

+ 13 - 0
drivers/scsi/lpfc/lpfc_hw4.h

@@ -234,6 +234,9 @@ struct ulp_bde64 {
 	uint32_t addrHigh;
 };
 
+/* Maximum size of immediate data that can fit into a 128-byte WQE */
+#define LPFC_MAX_BDE_IMM_SIZE	64
+
 struct lpfc_sli4_flags {
 	uint32_t word0;
 #define lpfc_idx_rsrc_rdy_SHIFT		0
@@ -2585,6 +2588,9 @@ struct lpfc_sli4_parameters {
 #define cfg_mqv_WORD				word6
 	uint32_t word7;
 	uint32_t word8;
+#define cfg_wqsize_SHIFT			8
+#define cfg_wqsize_MASK				0x0000000f
+#define cfg_wqsize_WORD				word8
 #define cfg_wqv_SHIFT				14
 #define cfg_wqv_MASK				0x00000003
 #define cfg_wqv_WORD				word8
@@ -3622,6 +3628,13 @@ union lpfc_wqe {
 	struct gen_req64_wqe gen_req;
 };
 
+union lpfc_wqe128 {
+	uint32_t words[32];
+	struct lpfc_wqe_generic generic;
+	struct xmit_seq64_wqe xmit_sequence;
+	struct gen_req64_wqe gen_req;
+};
+
 #define LPFC_GROUP_OJECT_MAGIC_NUM		0xfeaa0001
 #define LPFC_FILE_TYPE_GROUP			0xf7
 #define LPFC_FILE_ID_GROUP			0xa2

+ 25 - 17
drivers/scsi/lpfc/lpfc_init.c

@@ -472,10 +472,22 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 	lpfc_sli_read_link_ste(phba);
 
 	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
-	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
-		phba->cfg_hba_queue_depth =
-			(mb->un.varRdConfig.max_xri + 1) -
-					lpfc_sli4_get_els_iocb_cnt(phba);
+	i = (mb->un.varRdConfig.max_xri + 1);
+	if (phba->cfg_hba_queue_depth > i) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"3359 HBA queue depth changed from %d to %d\n",
+				phba->cfg_hba_queue_depth, i);
+		phba->cfg_hba_queue_depth = i;
+	}
+
+	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3)  */
+	i = (mb->un.varRdConfig.max_xri >> 3);
+	if (phba->pport->cfg_lun_queue_depth > i) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"3360 LUN queue depth changed from %d to %d\n",
+				phba->pport->cfg_lun_queue_depth, i);
+		phba->pport->cfg_lun_queue_depth = i;
+	}
 
 	phba->lmt = mb->un.varRdConfig.lmt;
 
@@ -4901,9 +4913,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	lpfc_get_cfgparam(phba);
 	phba->max_vpi = LPFC_MAX_VPI;
 
-	/* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */
-	phba->cfg_fcp_io_channel = phba->cfg_fcp_eq_count;
-
 	/* This will be set to correct value after the read_config mbox */
 	phba->max_vports = 0;
 
@@ -6664,12 +6673,14 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 		goto read_cfg_out;
 
 	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
-	if (phba->cfg_hba_queue_depth >
-		(phba->sli4_hba.max_cfg_param.max_xri -
-			lpfc_sli4_get_els_iocb_cnt(phba)))
-		phba->cfg_hba_queue_depth =
-			phba->sli4_hba.max_cfg_param.max_xri -
-				lpfc_sli4_get_els_iocb_cnt(phba);
+	length = phba->sli4_hba.max_cfg_param.max_xri -
+			lpfc_sli4_get_els_iocb_cnt(phba);
+	if (phba->cfg_hba_queue_depth > length) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"3361 HBA queue depth changed from %d to %d\n",
+				phba->cfg_hba_queue_depth, length);
+		phba->cfg_hba_queue_depth = length;
+	}
 
 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
 	    LPFC_SLI_INTF_IF_TYPE_2)
@@ -6859,11 +6870,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
 		cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
 	}
 
-	/* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */
-
 	/* The actual number of FCP event queues adopted */
-	phba->cfg_fcp_eq_count = cfg_fcp_io_channel;
-	phba->cfg_fcp_wq_count = cfg_fcp_io_channel;
 	phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
 
 	/* Get EQ depth from module parameter, fake the default for now */
@@ -9154,6 +9161,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
 	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
 	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
+	sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
 	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
 					    mbx_sli4_parameters);
 	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,

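Both clamp hunks implement the rule documented in the lpfc_attr.c
comment above: the HBA queue depth may not exceed the exchange (XRI)
count the firmware reports, and the LUN queue depth may not exceed one
eighth of it, with a warning logged whenever a configured value is
reduced. Condensed, with assumed example numbers:

	/* max_xri comes from READ_CONFIG; 2048 is only an example */
	cap = max_xri >> 3;			/* 1/8th of the exchanges */
	if (cfg_lun_queue_depth > cap)
		cfg_lun_queue_depth = cap;	/* e.g. 512 -> 256 when
						 * max_xri == 2048 */
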
+ 4 - 3
drivers/scsi/lpfc/lpfc_mbox.c

@@ -178,7 +178,8 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	mb->mbxOwner = OWN_HOST;
 	mb->un.varDmp.cv = 1;
 	mb->un.varDmp.type = DMP_NV_PARAMS;
-	mb->un.varDmp.entry_index = 0;
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		mb->un.varDmp.entry_index = 0;
 	mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
 	mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
 	mb->un.varDmp.co = 0;
@@ -361,7 +362,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 	/* NEW_FEATURE
 	 * SLI-2, Coalescing Response Feature.
 	 */
-	if (phba->cfg_cr_delay) {
+	if (phba->cfg_cr_delay && (phba->sli_rev < LPFC_SLI_REV4)) {
 		mb->un.varCfgLnk.cr = 1;
 		mb->un.varCfgLnk.ci = 1;
 		mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
@@ -377,7 +378,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 	mb->un.varCfgLnk.crtov = phba->fc_crtov;
 	mb->un.varCfgLnk.citov = phba->fc_citov;
 
-	if (phba->cfg_ack0)
+	if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4))
 		mb->un.varCfgLnk.ack0_enable = 1;
 
 	mb->mbxCommand = MBX_CONFIG_LINK;

+ 10 - 2
drivers/scsi/lpfc/lpfc_nportdisc.c

@@ -690,11 +690,15 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+	ndlp->nlp_flag &= ~NLP_FIRSTBURST;
 	if (npr->prliType == PRLI_FCP_TYPE) {
 		if (npr->initiatorFunc)
 			ndlp->nlp_type |= NLP_FCP_INITIATOR;
-		if (npr->targetFunc)
+		if (npr->targetFunc) {
 			ndlp->nlp_type |= NLP_FCP_TARGET;
+			if (npr->writeXferRdyDis)
+				ndlp->nlp_flag |= NLP_FIRSTBURST;
+		}
 		if (npr->Retry)
 			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
 	}
@@ -1676,12 +1680,16 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	/* Check out PRLI rsp */
 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+	ndlp->nlp_flag &= ~NLP_FIRSTBURST;
 	if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
 	    (npr->prliType == PRLI_FCP_TYPE)) {
 		if (npr->initiatorFunc)
 			ndlp->nlp_type |= NLP_FCP_INITIATOR;
-		if (npr->targetFunc)
+		if (npr->targetFunc) {
 			ndlp->nlp_type |= NLP_FCP_TARGET;
+			if (npr->writeXferRdyDis)
+				ndlp->nlp_flag |= NLP_FIRSTBURST;
+		}
 		if (npr->Retry)
 			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
 	}

+ 6 - 6
drivers/scsi/lpfc/lpfc_scsi.c

@@ -4386,11 +4386,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	if (scsi_sg_count(scsi_cmnd)) {
 		if (datadir == DMA_TO_DEVICE) {
 			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
-			if (sli4)
-				iocb_cmd->ulpPU = PARM_READ_CHECK;
-			else {
-				iocb_cmd->un.fcpi.fcpi_parm = 0;
-				iocb_cmd->ulpPU = 0;
+			iocb_cmd->ulpPU = PARM_READ_CHECK;
+			if (vport->cfg_first_burst_size &&
+			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
+				piocbq->iocb.un.fcpi.fcpi_XRdy =
+					vport->cfg_first_burst_size;
 			}
 			fcp_cmnd->fcpCntl3 = WRITE_DATA;
 			phba->fc4OutputRequests++;
@@ -5022,6 +5022,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
 		lpfc_release_scsi_buf(phba, lpfc_cmd);
 		return FAILED;
 	}
+	iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
 			 "0702 Issue %s to TGT %d LUN %d "
@@ -5034,7 +5035,6 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
 					  iocbq, iocbqrsp, lpfc_cmd->timeout);
 	if (status != IOCB_SUCCESS) {
 		if (status == IOCB_TIMEDOUT) {
-			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
 			ret = TIMEOUT_ERROR;
 		} else
 			ret = FAILED;

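Taken together, the first-burst hunks form one feature: lpfc_attr.c adds
the lpfc_first_burst_size tunable, lpfc_els.c advertises it by setting
writeXferRdyDis in outgoing PRLIs, lpfc_nportdisc.c latches
NLP_FIRSTBURST when the target agrees, and the lpfc_scsi.c hunk above
applies it per write command. The gating condition, condensed from the
hunk (not standalone code):

	if (vport->cfg_first_burst_size &&	/* admin enabled it */
	    (pnode->nlp_flag & NLP_FIRSTBURST))	/* target agreed in PRLI */
		piocbq->iocb.un.fcpi.fcpi_XRdy =
			vport->cfg_first_burst_size;	/* bytes the target
							 * accepts before
							 * sending XFER_RDY */
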
+ 125 - 22
drivers/scsi/lpfc/lpfc_sli.c

@@ -6163,6 +6163,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 		kfree(vpd);
 		goto out_free_mbox;
 	}
+
 	mqe = &mboxq->u.mqe;
 	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
 	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
@@ -6249,6 +6250,16 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
 			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
 
+	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3)  */
+	rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
+	if (phba->pport->cfg_lun_queue_depth > rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"3362 LUN queue depth changed from %d to %d\n",
+				phba->pport->cfg_lun_queue_depth, rc);
+		phba->pport->cfg_lun_queue_depth = rc;
+	}
+
+
 	/*
 	 * Discover the port's supported feature set and match it against the
 	 * hosts requests.
@@ -9889,6 +9900,24 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
 	struct lpfc_scsi_buf *lpfc_cmd;
 
 	spin_lock_irqsave(&phba->hbalock, iflags);
+	if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
+
+		/*
+		 * A timeout has occurred for the iocb.  If a timeout
+		 * completion handler has been supplied, call it.  Otherwise,
+		 * just free the iocbq.
+		 */
+
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
+		cmdiocbq->wait_iocb_cmpl = NULL;
+		if (cmdiocbq->iocb_cmpl)
+			(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
+		else
+			lpfc_sli_release_iocbq(phba, cmdiocbq);
+		return;
+	}
+
 	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
 	if (cmdiocbq->context2 && rspiocbq)
 		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
@@ -9944,10 +9973,16 @@ lpfc_chk_iocb_flg(struct lpfc_hba *phba,
  * @timeout: Timeout in number of seconds.
  *
  * This function issues the iocb to firmware and waits for the
- * iocb to complete. If the iocb command is not
- * completed within timeout seconds, it returns IOCB_TIMEDOUT.
- * Caller should not free the iocb resources if this function
- * returns IOCB_TIMEDOUT.
+ * iocb to complete. The iocb_cmpl field of the iocb shall be used
+ * to handle iocbs which time out. If the field is NULL, the
+ * function shall free the iocbq structure.  If more cleanup is
+ * needed, the caller is expected to provide a completion function
+ * that will provide the needed clean up.  If the iocb command is
+ * not completed within timeout seconds, the function will either
+ * free the iocbq structure (if iocb_cmpl == NULL) or execute the
+ * completion function set in the iocb_cmpl field and then return
+ * a status of IOCB_TIMEDOUT.  The caller should not free the iocb
+ * resources if this function returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
  * non-interruptible wait.
  * This function will sleep while waiting for iocb completion.
@@ -9980,6 +10015,9 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
 	int txq_cnt = 0;
 	int txcmplq_cnt = 0;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+	unsigned long iflags;
+	bool iocb_completed = true;
+
 	/*
 	 * If the caller has provided a response iocbq buffer, then context2
 	 * is NULL or its an error.
@@ -9990,9 +10028,10 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
 		piocb->context2 = prspiocbq;
 	}
 
+	piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
 	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
 	piocb->context_un.wait_queue = &done_q;
-	piocb->iocb_flag &= ~LPFC_IO_WAKE;
+	piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
 
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
 		if (lpfc_readl(phba->HCregaddr, &creg_val))
@@ -10009,8 +10048,19 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
 		timeleft = wait_event_timeout(done_q,
 				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
 				timeout_req);
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
 
-		if (piocb->iocb_flag & LPFC_IO_WAKE) {
+			/*
+			 * IOCB timed out.  Inform the wake iocb wait
+			 * completion function and set local status
+			 */
+
+			iocb_completed = false;
+			piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		if (iocb_completed) {
 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 					"0331 IOCB wake signaled\n");
 		} else if (timeleft == 0) {
@@ -10122,7 +10172,6 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
 		 */
 		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
 			retval = MBX_SUCCESS;
-			lpfc_sli4_swap_str(phba, pmboxq);
 		} else {
 			retval = MBX_TIMEOUT;
 			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -12820,10 +12869,44 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
 		    wq->page_count);
 	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
 		    cq->queue_id);
+
+	/* wqv is the earliest version supported, NOT the latest */
 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
 	       phba->sli4_hba.pc_sli4_params.wqv);
 
-	if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
+	switch (phba->sli4_hba.pc_sli4_params.wqv) {
+	case LPFC_Q_CREATE_VERSION_0:
+		switch (wq->entry_size) {
+		default:
+		case 64:
+			/* Nothing to do, version 0 ONLY supports 64 byte */
+			page = wq_create->u.request.page;
+			break;
+		case 128:
+			if (!(phba->sli4_hba.pc_sli4_params.wqsize &
+			    LPFC_WQ_SZ128_SUPPORT)) {
+				status = -ERANGE;
+				goto out;
+			}
+			/* If we get here the HBA MUST also support V1 and
+			 * we MUST use it
+			 */
+			bf_set(lpfc_mbox_hdr_version, &shdr->request,
+			       LPFC_Q_CREATE_VERSION_1);
+
+			bf_set(lpfc_mbx_wq_create_wqe_count,
+			       &wq_create->u.request_1, wq->entry_count);
+			bf_set(lpfc_mbx_wq_create_wqe_size,
+			       &wq_create->u.request_1,
+			       LPFC_WQ_WQE_SIZE_128);
+			bf_set(lpfc_mbx_wq_create_page_size,
+			       &wq_create->u.request_1,
+			       (PAGE_SIZE/SLI4_PAGE_SIZE));
+			page = wq_create->u.request_1.page;
+			break;
+		}
+		break;
+	case LPFC_Q_CREATE_VERSION_1:
 		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
 		       wq->entry_count);
 		switch (wq->entry_size) {
@@ -12834,6 +12917,11 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
 			       LPFC_WQ_WQE_SIZE_64);
 			break;
 		case 128:
+			if (!(phba->sli4_hba.pc_sli4_params.wqsize &
+				LPFC_WQ_SZ128_SUPPORT)) {
+				status = -ERANGE;
+				goto out;
+			}
 			bf_set(lpfc_mbx_wq_create_wqe_size,
 			       &wq_create->u.request_1,
 			       LPFC_WQ_WQE_SIZE_128);
@@ -12842,9 +12930,12 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
 		bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
 		       (PAGE_SIZE/SLI4_PAGE_SIZE));
 		page = wq_create->u.request_1.page;
-	} else {
-		page = wq_create->u.request.page;
+		break;
+	default:
+		status = -ERANGE;
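Two independent fixes sit in the pm8001 hunk: the double cast silences a
"cast from pointer to integer of different size" warning on 32-bit
builds, and pdev->msix_cap reuses the MSI-X capability offset the PCI
core already cached instead of re-walking the capability list. The cast
idiom in isolation (a generic fragment, not driver code):

	char obj;
	void *vaddr = &obj;	/* any pointer value */
	/* pointer -> pointer-sized integer -> 64-bit: clean on both
	 * 32-bit and 64-bit targets */
	unsigned long long wide = (unsigned long long)(unsigned long)vaddr;
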
+		goto out;
 	}
+
 	list_for_each_entry(dmabuf, &wq->page_list, list) {
 		memset(dmabuf->virt, 0, hw_page_size);
 		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
@@ -14665,14 +14756,20 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 		first_iocbq->iocb.unsli3.rcvsli3.vpi =
 			vport->phba->vpi_ids[vport->vpi];
 		/* put the first buffer into the first IOCBq */
+		tot_len = bf_get(lpfc_rcqe_length,
+				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
 		first_iocbq->context2 = &seq_dmabuf->dbuf;
 		first_iocbq->context3 = NULL;
 		first_iocbq->iocb.ulpBdeCount = 1;
-		first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+		if (tot_len > LPFC_DATA_BUF_SIZE)
+			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
 							LPFC_DATA_BUF_SIZE;
+		else
+			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
+
 		first_iocbq->iocb.un.rcvels.remoteID = sid;
-		tot_len = bf_get(lpfc_rcqe_length,
-				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
 		first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
 	}
 	iocbq = first_iocbq;
@@ -14688,14 +14785,17 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 		if (!iocbq->context3) {
 			iocbq->context3 = d_buf;
 			iocbq->iocb.ulpBdeCount++;
-			pbde = (struct ulp_bde64 *)
-					&iocbq->iocb.unsli3.sli3Words[4];
-			pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
-
 			/* We need to get the size out of the right CQE */
 			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
 			len = bf_get(lpfc_rcqe_length,
 				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
+			pbde = (struct ulp_bde64 *)
+					&iocbq->iocb.unsli3.sli3Words[4];
+			if (len > LPFC_DATA_BUF_SIZE)
+				pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
+			else
+				pbde->tus.f.bdeSize = len;
+
 			iocbq->iocb.unsli3.rcvsli3.acc_len += len;
 			tot_len += len;
 		} else {
@@ -14710,16 +14810,19 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 				lpfc_in_buf_free(vport->phba, d_buf);
 				continue;
 			}
+			/* We need to get the size out of the right CQE */
+			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+			len = bf_get(lpfc_rcqe_length,
+				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
 			iocbq->context2 = d_buf;
 			iocbq->context3 = NULL;
 			iocbq->iocb.ulpBdeCount = 1;
-			iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+			if (len > LPFC_DATA_BUF_SIZE)
+				iocbq->iocb.un.cont64[0].tus.f.bdeSize =
 							LPFC_DATA_BUF_SIZE;
+			else
+				iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
 
-			/* We need to get the size out of the right CQE */
-			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
-			len = bf_get(lpfc_rcqe_length,
-				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
 			tot_len += len;
 			iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
 

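The wait-path hunks above change iocb ownership on timeout: the caller's
completion handler is stashed in the new wait_iocb_cmpl field, and if
the wait times out the wake handler either runs that handler or, when it
is NULL, frees the iocbq itself. The lpfc_bsg.c hunks earlier show the
matching caller pattern, condensed here (names from those hunks, error
paths trimmed):

	cmdiocbq->iocb_cmpl = NULL;	/* no extra cleanup needed; the
					 * wake handler frees the iocbq
					 * if the wait times out */
	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING,
					     cmdiocbq, rspiocbq, timeout);
	if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
		lpfc_sli_release_iocbq(phba, cmdiocbq);	/* still ours */
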
+ 5 - 2
drivers/scsi/lpfc/lpfc_sli.h

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -60,7 +60,8 @@ struct lpfc_iocbq {
 	uint8_t retry;		/* retry counter for IOCB cmd - if needed */
 	uint16_t iocb_flag;
 #define LPFC_IO_LIBDFC		1	/* libdfc iocb */
-#define LPFC_IO_WAKE		2	/* High Priority Queue signal flag */
+#define LPFC_IO_WAKE		2	/* Synchronous I/O completed */
+#define LPFC_IO_WAKE_TMO	LPFC_IO_WAKE /* Synchronous I/O timed out */
 #define LPFC_IO_FCP		4	/* FCP command -- iocbq in scsi_buf */
 #define LPFC_DRIVER_ABORTED	8	/* driver aborted this request */
 #define LPFC_IO_FABRIC		0x10	/* Iocb send using fabric scheduler */
@@ -93,6 +94,8 @@ struct lpfc_iocbq {
 
 	void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
 			   struct lpfc_iocbq *);
+	void (*wait_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+			   struct lpfc_iocbq *);
 	void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
 			   struct lpfc_iocbq *);
 };

+ 6 - 0
drivers/scsi/lpfc/lpfc_sli4.h

@@ -117,6 +117,7 @@ union sli4_qe {
 	struct lpfc_rcqe_complete *rcqe_complete;
 	struct lpfc_mqe *mqe;
 	union  lpfc_wqe *wqe;
+	union  lpfc_wqe128 *wqe128;
 	struct lpfc_rqe *rqe;
 };
 
@@ -325,12 +326,14 @@ struct lpfc_bmbx {
 #define LPFC_EQE_SIZE_16B	16
 #define LPFC_CQE_SIZE		16
 #define LPFC_WQE_SIZE		64
+#define LPFC_WQE128_SIZE	128
 #define LPFC_MQE_SIZE		256
 #define LPFC_RQE_SIZE		8
 
 #define LPFC_EQE_DEF_COUNT	1024
 #define LPFC_CQE_DEF_COUNT      1024
 #define LPFC_WQE_DEF_COUNT      256
+#define LPFC_WQE128_DEF_COUNT   128
 #define LPFC_MQE_DEF_COUNT      16
 #define LPFC_RQE_DEF_COUNT	512
 
@@ -416,6 +419,9 @@ struct lpfc_pc_sli4_params {
 	uint8_t mqv;
 	uint8_t wqv;
 	uint8_t rqv;
+	uint8_t wqsize;
+#define LPFC_WQ_SZ64_SUPPORT	1
+#define LPFC_WQ_SZ128_SUPPORT	2
 };
 
 struct lpfc_iov {

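The new wqsize capability byte feeds the lpfc_wq_create() switch in the
lpfc_sli.c hunk above: 128-byte WQEs are requested only when the HBA
reports LPFC_WQ_SZ128_SUPPORT, and a version-0 port that supports them
must be driven with the version-1 mailbox format, since the V0 request
has no size field. The decision logic reduced to its two checks (a
sketch, not the full function):

	if (entry_size == 128 &&
	    !(params->wqsize & LPFC_WQ_SZ128_SUPPORT))
		return -ERANGE;		/* HBA cannot do 128-byte WQEs */
	if (wqv == LPFC_Q_CREATE_VERSION_0 && entry_size == 128)
		version = LPFC_Q_CREATE_VERSION_1;	/* upgrade mailbox */
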
+ 1 - 1
drivers/scsi/lpfc/lpfc_version.h

@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.40"
+#define LPFC_DRIVER_VERSION "8.3.41"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */

+ 4 - 1
drivers/scsi/lpfc/lpfc_vport.c

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -387,6 +387,9 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
 	/* Create binary sysfs attribute for vport */
 	lpfc_alloc_sysfs_attr(vport);
 
+	/* Set the DFT_LUN_Q_DEPTH accordingly */
+	vport->cfg_lun_queue_depth = phba->pport->cfg_lun_queue_depth;
+
 	*(struct lpfc_vport **)fc_vport->dd_data = vport;
 	vport->fc_vport = fc_vport;
 

+ 4 - 3
drivers/scsi/mpt2sas/mpi/mpi2.h

@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2012 LSI Corporation.
+ *  Copyright (c) 2000-2013 LSI Corporation.
  *
  *
  *           Name:  mpi2.h
@@ -8,7 +8,7 @@
  *                  scatter/gather formats.
  *  Creation Date:  June 21, 2006
  *
- *  mpi2.h Version:  02.00.27
+ *  mpi2.h Version:  02.00.28
  *
  *  Version History
  *  ---------------
@@ -77,6 +77,7 @@
  *                      Added Hard Reset delay timings.
  *  07-10-12  02.00.26  Bumped MPI2_HEADER_VERSION_UNIT.
  *  07-26-12  02.00.27  Bumped MPI2_HEADER_VERSION_UNIT.
+ *  11-27-12  02.00.28  Bumped MPI2_HEADER_VERSION_UNIT.
  *  --------------------------------------------------------------------------
  */
 
@@ -102,7 +103,7 @@
 #define MPI2_VERSION_02_00                  (0x0200)
 
 /* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x1B)
+#define MPI2_HEADER_VERSION_UNIT            (0x1C)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)

+ 8 - 2
drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h

@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2011 LSI Corporation.
+ *  Copyright (c) 2000-2013 LSI Corporation.
  *
  *
  *           Name:  mpi2_cnfg.h
  *          Title:  MPI Configuration messages and pages
  *  Creation Date:  November 10, 2006
  *
- *    mpi2_cnfg.h Version:  02.00.22
+ *    mpi2_cnfg.h Version:  02.00.23
  *
  *  Version History
  *  ---------------
@@ -149,6 +149,8 @@
  *  11-18-11  02.00.22  Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT.
  *                      Added UEFIVersion field to BIOS Page 1 and defined new
  *                      BiosOptions bits.
+ *  11-27-12  02.00.23  Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER.
+ *			 Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID.
  *  --------------------------------------------------------------------------
  */
 
@@ -698,6 +700,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7
 #define MPI2_MANUFACTURING7_PAGEVERSION                 (0x01)
 
 /* defines for the Flags field */
+#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER       (0x00000002)
 #define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO                (0x00000001)
 
 
@@ -1224,6 +1227,9 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1
 #define MPI2_BIOSPAGE1_PAGEVERSION                      (0x05)
 
 /* values for BIOS Page 1 BiosOptions field */
+#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID                  (0x000000F0)
+#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID                   (0x00000000)
+
 #define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION   (0x00000006)
 #define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII              (0x00000000)
 #define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII             (0x00000002)

+ 1 - 1
drivers/scsi/mpt2sas/mpi/mpi2_init.h

@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2012 LSI Corporation.
+ *  Copyright (c) 2000-2013 LSI Corporation.
  *
  *
  *           Name:  mpi2_init.h

+ 1 - 1
drivers/scsi/mpt2sas/mpi/mpi2_ioc.h

@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2012 LSI Corporation.
+ *  Copyright (c) 2000-2013 LSI Corporation.
  *
  *
  *           Name:  mpi2_ioc.h

+ 1 - 1
drivers/scsi/mpt2sas/mpi/mpi2_raid.h

@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2012 LSI Corporation.
+ *  Copyright (c) 2000-2013 LSI Corporation.
  *
  *
  *           Name:  mpi2_raid.h

+ 1 - 1
drivers/scsi/mpt2sas/mpi/mpi2_sas.h

@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2010 LSI Corporation.
+ *  Copyright (c) 2000-2013 LSI Corporation.
  *
  *
  *           Name:  mpi2_sas.h

+ 1 - 1
drivers/scsi/mpt2sas/mpi/mpi2_tool.h

@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2012 LSI Corporation.
+ *  Copyright (c) 2000-2013 LSI Corporation.
  *
  *
  *           Name:  mpi2_tool.h

+ 1 - 1
drivers/scsi/mpt2sas/mpi/mpi2_type.h

@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2007 LSI Corporation.
+ *  Copyright (c) 2000-2013 LSI Corporation.
  *
  *
  *           Name:  mpi2_type.h

+ 23 - 17
drivers/scsi/mpt2sas/mpt2sas_base.c

@@ -3,7 +3,7 @@
  * for access to MPT (Message Passing Technology) firmware.
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2012  LSI Corporation
+ * Copyright (C) 2007-2013  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -768,10 +768,9 @@ mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
  * @msix_index: MSIX table index supplied by the OS
  * @reply: reply message frame(lower 32bit addr)
  *
- * Return 1 meaning mf should be freed from _base_interrupt
- *        0 means the mf is freed from this function.
+ * Returns void.
  */
-static u8
+static void
 _base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
 {
 	Mpi2EventNotificationReply_t *mpi_reply;
@@ -780,9 +779,9 @@ _base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
 
 	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
 	if (!mpi_reply)
-		return 1;
+		return;
 	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
-		return 1;
+		return;
 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
 	_base_display_event_data(ioc, mpi_reply);
 #endif
@@ -812,7 +811,7 @@ _base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
 	/* ctl callback handler */
 	mpt2sas_ctl_event_callback(ioc, msix_index, reply);
 
-	return 1;
+	return;
 }
 
 /**
@@ -1409,8 +1408,6 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
 	int i;
 	u8 try_msix = 0;
 
-	INIT_LIST_HEAD(&ioc->reply_queue_list);
-
 	if (msix_disable == -1 || msix_disable == 0)
 		try_msix = 1;
 
@@ -1489,6 +1486,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
 	if (pci_enable_device_mem(pdev)) {
 		printk(MPT2SAS_WARN_FMT "pci_enable_device_mem: "
 		    "failed\n", ioc->name);
+		ioc->bars = 0;
 		return -ENODEV;
 	}
 
@@ -1497,6 +1495,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
 	    MPT2SAS_DRIVER_NAME)) {
 		printk(MPT2SAS_WARN_FMT "pci_request_selected_regions: "
 		    "failed\n", ioc->name);
+		ioc->bars = 0;
 		r = -ENODEV;
 		goto out_fail;
 	}
@@ -4229,18 +4228,25 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
 	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
 	    __func__));
 
-	_base_mask_interrupts(ioc);
-	ioc->shost_recovery = 1;
-	_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
-	ioc->shost_recovery = 0;
+	if (ioc->chip_phys && ioc->chip) {
+		_base_mask_interrupts(ioc);
+		ioc->shost_recovery = 1;
+		_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+		ioc->shost_recovery = 0;
+	}
+
 	_base_free_irq(ioc);
 	_base_disable_msix(ioc);
-	if (ioc->chip_phys)
+
+	if (ioc->chip_phys && ioc->chip)
 		iounmap(ioc->chip);
 	ioc->chip_phys = 0;
-	pci_release_selected_regions(ioc->pdev, ioc->bars);
-	pci_disable_pcie_error_reporting(pdev);
-	pci_disable_device(pdev);
+
+	if (pci_is_enabled(pdev)) {
+		pci_release_selected_regions(ioc->pdev, ioc->bars);
+		pci_disable_pcie_error_reporting(pdev);
+		pci_disable_device(pdev);
+	}
 	return;
 }
 

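The resource hunks make mpt2sas teardown safe after a partial probe:
each undo step is now guarded by whether the matching setup step
actually succeeded (chip mapped, PCI device still enabled), and bars is
zeroed on the failure paths so regions are never released twice. The
resulting shape of an idempotent free routine (condensed from the hunk):

	if (ioc->chip_phys && ioc->chip)	/* mapped? reset and unmap */
		iounmap(ioc->chip);
	ioc->chip_phys = 0;
	if (pci_is_enabled(pdev)) {		/* enabled? then release */
		pci_release_selected_regions(pdev, ioc->bars);
		pci_disable_device(pdev);
	}
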
+ 5 - 5
drivers/scsi/mpt2sas/mpt2sas_base.h

@@ -3,7 +3,7 @@
  * for access to MPT (Message Passing Technology) firmware.
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_base.h
- * Copyright (C) 2007-2012  LSI Corporation
+ * Copyright (C) 2007-2013  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -69,8 +69,8 @@
 #define MPT2SAS_DRIVER_NAME		"mpt2sas"
 #define MPT2SAS_AUTHOR	"LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION	"LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION		"15.100.00.00"
-#define MPT2SAS_MAJOR_VERSION		15
+#define MPT2SAS_DRIVER_VERSION		"16.100.00.00"
+#define MPT2SAS_MAJOR_VERSION		16
 #define MPT2SAS_MINOR_VERSION		100
 #define MPT2SAS_BUILD_VERSION		00
 #define MPT2SAS_RELEASE_VERSION		00
@@ -1061,7 +1061,7 @@ void mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
 int mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc);
 
 /* scsih shared API */
-u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
+void mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
     u32 reply);
 int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle,
 	uint channel, uint id, uint lun, u8 type, u16 smid_task,
@@ -1144,7 +1144,7 @@ void mpt2sas_ctl_exit(void);
 u8 mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
     u32 reply);
 void mpt2sas_ctl_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase);
-u8 mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
+void mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
     u32 reply);
 void mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc,
     Mpi2EventNotificationReply_t *mpi_reply);

+ 1 - 1
drivers/scsi/mpt2sas/mpt2sas_config.c

@@ -2,7 +2,7 @@
  * This module provides common API for accessing firmware configuration pages
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2012  LSI Corporation
+ * Copyright (C) 2007-2013  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or

+ 9 - 5
drivers/scsi/mpt2sas/mpt2sas_ctl.c

@@ -3,7 +3,7 @@
  * controllers
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c
- * Copyright (C) 2007-2012  LSI Corporation
+ * Copyright (C) 2007-2013  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -397,18 +397,22 @@ mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc,
  * This function merely adds a new work task into ioc->firmware_event_thread.
  * The tasks are worked from _firmware_event_work in user context.
  *
- * Return 1 meaning mf should be freed from _base_interrupt
- *        0 means the mf is freed from this function.
+ * Returns void.
  */
-u8
+void
 mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
 	u32 reply)
 {
 	Mpi2EventNotificationReply_t *mpi_reply;
 
 	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+	if (unlikely(!mpi_reply)) {
+		printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		return;
+	}
 	mpt2sas_ctl_add_to_event_log(ioc, mpi_reply);
-	return 1;
+	return;
 }
 
 /**

+ 1 - 1
drivers/scsi/mpt2sas/mpt2sas_ctl.h

@@ -3,7 +3,7 @@
  * controllers
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h
- * Copyright (C) 2007-2012  LSI Corporation
+ * Copyright (C) 2007-2013  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or

+ 1 - 1
drivers/scsi/mpt2sas/mpt2sas_debug.h

@@ -2,7 +2,7 @@
  * Logging Support for MPT (Message Passing Technology) based controllers
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c
- * Copyright (C) 2007-2012  LSI Corporation
+ * Copyright (C) 2007-2013  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or

+ 44 - 38
drivers/scsi/mpt2sas/mpt2sas_scsih.c

@@ -2,7 +2,7 @@
  * Scsi Host Layer for MPT (Message Passing Technology) based controllers
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c
- * Copyright (C) 2007-2012  LSI Corporation
+ * Copyright (C) 2007-2013  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -628,11 +628,12 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
 		 * devices while scanning is turned on due to an oops in
 		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
 		 */
-		if (!ioc->is_driver_loading)
+		if (!ioc->is_driver_loading) {
 			mpt2sas_transport_port_remove(ioc,
 			sas_device->sas_address,
 			sas_device->sas_address_parent);
-		_scsih_sas_device_remove(ioc, sas_device);
+			_scsih_sas_device_remove(ioc, sas_device);
+		}
 	}
 }
 
@@ -1402,6 +1403,7 @@ _scsih_slave_alloc(struct scsi_device *sdev)
 	struct MPT2SAS_DEVICE *sas_device_priv_data;
 	struct scsi_target *starget;
 	struct _raid_device *raid_device;
+	struct _sas_device *sas_device;
 	unsigned long flags;
 
 	sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
@@ -1430,6 +1432,19 @@ _scsih_slave_alloc(struct scsi_device *sdev)
 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 	}
 
+	if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+		spin_lock_irqsave(&ioc->sas_device_lock, flags);
+		sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+				sas_target_priv_data->sas_address);
+		if (sas_device && (sas_device->starget == NULL)) {
+			sdev_printk(KERN_INFO, sdev,
+			     "%s : sas_device->starget set to starget @ %d\n",
+			     __func__, __LINE__);
+			sas_device->starget = starget;
+		}
+		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+	}
+
 	return 0;
 }
 
@@ -6753,7 +6768,7 @@ _scsih_search_responding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
 	    handle))) {
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
-		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
 			break;
 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
 		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
@@ -6862,7 +6877,7 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc)
 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
-		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
 			break;
 		handle = le16_to_cpu(volume_pg1.DevHandle);
 
@@ -6887,7 +6902,7 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc)
 		    phys_disk_num))) {
 			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 			    MPI2_IOCSTATUS_MASK;
-			if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+			if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
 				break;
 			phys_disk_num = pd_pg0.PhysDiskNum;
 			handle = le16_to_cpu(pd_pg0.DevHandle);
@@ -6967,7 +6982,7 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc)
 
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
-		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
 			break;
 
 		handle = le16_to_cpu(expander_pg0.DevHandle);
@@ -7109,8 +7124,6 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
-		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
-			break;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
 			printk(MPT2SAS_INFO_FMT "\tbreak from expander scan: "
 				"ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -7153,8 +7166,6 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
 	    phys_disk_num))) {
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
-		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
-			break;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
 			printk(MPT2SAS_INFO_FMT "\tbreak from phys disk scan:"
 				"ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -7219,8 +7230,6 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
-		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
-			break;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
 			printk(MPT2SAS_INFO_FMT "\tbreak from volume scan: "
 				"ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -7278,8 +7287,6 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
 	    handle))) {
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
-		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
-			break;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
 			printk(MPT2SAS_INFO_FMT "\tbreak from end device scan:"
 				" ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -7471,10 +7478,9 @@ _firmware_event_work(struct work_struct *work)
  * This function merely adds a new work task into ioc->firmware_event_thread.
  * The tasks are worked from _firmware_event_work in user context.
  *
- * Return 1 meaning mf should be freed from _base_interrupt
- *        0 means the mf is freed from this function.
+ * Returns void.
  */
-u8
+void
 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
 	u32 reply)
 {
@@ -7485,14 +7491,14 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
 
 	/* events turned off due to host reset or driver unloading */
 	if (ioc->remove_host || ioc->pci_error_recovery)
-		return 1;
+		return;
 
 	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
 
 	if (unlikely(!mpi_reply)) {
 		printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
-		return 1;
+		return;
 	}
 
 	event = le16_to_cpu(mpi_reply->Event);
@@ -7507,11 +7513,11 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
 
 		if (baen_data->Primitive !=
 		    MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
-			return 1;
+			return;
 
 		if (ioc->broadcast_aen_busy) {
 			ioc->broadcast_aen_pending++;
-			return 1;
+			return;
 		} else
 			ioc->broadcast_aen_busy = 1;
 		break;
@@ -7587,14 +7593,14 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
 		break;
 
 	default: /* ignore the rest */
-		return 1;
+		return;
 	}
 
 	fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
 	if (!fw_event) {
 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
-		return 1;
+		return;
 	}
 	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
 	fw_event->event_data = kzalloc(sz, GFP_ATOMIC);
@@ -7602,7 +7608,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
 		kfree(fw_event);
-		return 1;
+		return;
 	}
 
 	memcpy(fw_event->event_data, mpi_reply->EventData,
@@ -7612,7 +7618,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
 	fw_event->VP_ID = mpi_reply->VP_ID;
 	fw_event->event = event;
 	_scsih_fw_event_add(ioc, fw_event);
-	return 1;
+	return;
 }
 
 /* shost template */
@@ -7711,10 +7717,6 @@ _scsih_ir_shutdown(struct MPT2SAS_ADAPTER *ioc)
 	if (!ioc->ir_firmware)
 		return;
 
-	/* are there any volumes ? */
-	if (list_empty(&ioc->raid_device_list))
-		return;
-
 	mutex_lock(&ioc->scsih_cmds.mutex);
 
 	if (ioc->scsih_cmds.status != MPT2_CMD_NOT_USED) {
@@ -7929,10 +7931,12 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc)
 		    sas_device->sas_address_parent)) {
 			_scsih_sas_device_remove(ioc, sas_device);
 		} else if (!sas_device->starget) {
-			if (!ioc->is_driver_loading)
-				mpt2sas_transport_port_remove(ioc, sas_address,
+			if (!ioc->is_driver_loading) {
+				mpt2sas_transport_port_remove(ioc,
+					sas_address,
 					sas_address_parent);
-			_scsih_sas_device_remove(ioc, sas_device);
+				_scsih_sas_device_remove(ioc, sas_device);
+			}
 		}
 	}
 }
@@ -7985,14 +7989,14 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
 			kfree(sas_device);
 			continue;
 		} else if (!sas_device->starget) {
-			if (!ioc->is_driver_loading)
+			if (!ioc->is_driver_loading) {
 				mpt2sas_transport_port_remove(ioc,
 					sas_device->sas_address,
 					sas_device->sas_address_parent);
-			list_del(&sas_device->list);
-			kfree(sas_device);
-			continue;
-
+				list_del(&sas_device->list);
+				kfree(sas_device);
+				continue;
+			}
 		}
 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
 		list_move_tail(&sas_device->list, &ioc->sas_device_list);
@@ -8175,6 +8179,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
 	INIT_LIST_HEAD(&ioc->delayed_tr_list);
 	INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
+	INIT_LIST_HEAD(&ioc->reply_queue_list);
 
 	/* init shost parameters */
 	shost->max_cmd_len = 32;
@@ -8280,6 +8285,7 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	mpt2sas_base_stop_watchdog(ioc);
 	scsi_block_requests(shost);
+	_scsih_ir_shutdown(ioc);
 	device_state = pci_choose_state(pdev, state);
 	printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, entering "
 	    "operating state [D%d]\n", ioc->name, pdev,

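The repeated one-line change in the scan loops above tightens the exit
condition: the old code kept iterating on any status other than
CONFIG_INVALID_PAGE, so an unexpected error could leave the loop
spinning on the same handle; now any non-success status ends the scan.
The pattern (read_next_page() is a hypothetical stand-in for the
mpt2sas_config_get_* calls):

	while (!read_next_page(ioc, &mpi_reply, &handle)) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;		/* any error ends the scan */
		/* process the page and advance handle */
	}
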
+ 5 - 2
drivers/scsi/mpt2sas/mpt2sas_transport.c

@@ -2,7 +2,7 @@
  * SAS Transport Layer for MPT (Message Passing Technology) based controllers
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c
- * Copyright (C) 2007-2012  LSI Corporation
+ * Copyright (C) 2007-2013  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -1006,9 +1006,12 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
 		    &mpt2sas_phy->remote_identify);
 		_transport_add_phy_to_an_existing_port(ioc, sas_node,
 		    mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address);
-	} else
+	} else {
 		memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
 		    sas_identify));
+		_transport_del_phy_from_an_existing_port(ioc, sas_node,
+		    mpt2sas_phy);
+	}
 
 	if (mpt2sas_phy->phy)
 		mpt2sas_phy->phy->negotiated_linkrate =

+ 31 - 10
drivers/scsi/mpt3sas/mpt3sas_base.c

@@ -82,6 +82,10 @@ static int msix_disable = -1;
 module_param(msix_disable, int, 0);
 MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
 
+static int max_msix_vectors = 8;
+module_param(max_msix_vectors, int, 0);
+MODULE_PARM_DESC(max_msix_vectors,
+	" max msix vectors - (default=8)");
 
 static int mpt3sas_fwfault_debug;
 MODULE_PARM_DESC(mpt3sas_fwfault_debug,
@@ -1709,8 +1713,6 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
 	int i;
 	u8 try_msix = 0;
 
-	INIT_LIST_HEAD(&ioc->reply_queue_list);
-
 	if (msix_disable == -1 || msix_disable == 0)
 		try_msix = 1;
 
@@ -1723,6 +1725,16 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
 	ioc->reply_queue_count = min_t(int, ioc->cpu_count,
 	    ioc->msix_vector_count);
 
+	printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
+	  ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
+	  ioc->cpu_count, max_msix_vectors);
+
+	if (max_msix_vectors > 0) {
+		ioc->reply_queue_count = min_t(int, max_msix_vectors,
+			ioc->reply_queue_count);
+		ioc->msix_vector_count = ioc->reply_queue_count;
+	}
+
 	entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
 	    GFP_KERNEL);
 	if (!entries) {
@@ -1790,6 +1802,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 	if (pci_enable_device_mem(pdev)) {
 		pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
 			ioc->name);
+		ioc->bars = 0;
 		return -ENODEV;
 	}
 
@@ -1798,6 +1811,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 	    MPT3SAS_DRIVER_NAME)) {
 		pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
 			ioc->name);
+		ioc->bars = 0;
 		r = -ENODEV;
 		goto out_fail;
 	}
@@ -4393,18 +4407,25 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
 	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
 	    __func__));
 
-	_base_mask_interrupts(ioc);
-	ioc->shost_recovery = 1;
-	_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
-	ioc->shost_recovery = 0;
+	if (ioc->chip_phys && ioc->chip) {
+		_base_mask_interrupts(ioc);
+		ioc->shost_recovery = 1;
+		_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+		ioc->shost_recovery = 0;
+	}
+
 	_base_free_irq(ioc);
 	_base_disable_msix(ioc);
-	if (ioc->chip_phys)
+
+	if (ioc->chip_phys && ioc->chip)
 		iounmap(ioc->chip);
 	ioc->chip_phys = 0;
-	pci_release_selected_regions(ioc->pdev, ioc->bars);
-	pci_disable_pcie_error_reporting(pdev);
-	pci_disable_device(pdev);
+
+	if (pci_is_enabled(pdev)) {
+		pci_release_selected_regions(ioc->pdev, ioc->bars);
+		pci_disable_pcie_error_reporting(pdev);
+		pci_disable_device(pdev);
+	}
 	return;
 }
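
mpt3sas_base_free_resources() can now be entered from half-initialized states
(note the new ioc->bars = 0 assignments in the map_resources failure paths
above), so each teardown step checks its own precondition: chip registers are
only touched when the mapping exists, and the PCI device is only
released/disabled when pci_is_enabled() confirms it was enabled. Condensed
sketch with assumed fields:

	#include <linux/pci.h>
	#include <linux/io.h>

	struct my_ioc {
		struct pci_dev *pdev;
		void __iomem *chip;
		int bars;
	};

	static void my_free_resources(struct my_ioc *ioc)
	{
		struct pci_dev *pdev = ioc->pdev;

		if (ioc->chip)			/* mapping exists? */
			iounmap(ioc->chip);

		if (pci_is_enabled(pdev)) {	/* enable succeeded? */
			pci_release_selected_regions(pdev, ioc->bars);
			pci_disable_device(pdev);
		}
	}
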
 

+ 1 - 0
drivers/scsi/mpt3sas/mpt3sas_scsih.c

@@ -7779,6 +7779,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
 	INIT_LIST_HEAD(&ioc->delayed_tr_list);
 	INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
+	INIT_LIST_HEAD(&ioc->reply_queue_list);
 
 	/* init shost parameters */
 	shost->max_cmd_len = 32;

+ 4 - 1
drivers/scsi/mpt3sas/mpt3sas_transport.c

@@ -1003,9 +1003,12 @@ mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
 		    &mpt3sas_phy->remote_identify);
 		_transport_add_phy_to_an_existing_port(ioc, sas_node,
 		    mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address);
-	} else
+	} else {
 		memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct
 		    sas_identify));
+		_transport_del_phy_from_an_existing_port(ioc, sas_node,
+		    mpt3sas_phy);
+	}
 
 	if (mpt3sas_phy->phy)
 		mpt3sas_phy->phy->negotiated_linkrate =

+ 3 - 2
drivers/scsi/pm8001/pm8001_init.c

@@ -424,7 +424,8 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
 			PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
 				"base addr %llx virt_addr=%llx len=%d\n",
 				(u64)pm8001_ha->io_mem[logicalBar].membase,
-				(u64)pm8001_ha->io_mem[logicalBar].memvirtaddr,
+				(u64)(unsigned long)
+				pm8001_ha->io_mem[logicalBar].memvirtaddr,
 				pm8001_ha->io_mem[logicalBar].memsize));
 		} else {
 			pm8001_ha->io_mem[logicalBar].membase	= 0;
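
Casting a pointer directly to u64 warns on 32-bit builds, where pointers are
32 bits wide; widening through unsigned long (always pointer-sized in the
kernel) first keeps the debug print portable, as the hunk above does. Reduced:

	void *p = regs;				/* assumed mapping */

	u64 bad  = (u64)p;			/* warns on 32-bit targets */
	u64 good = (u64)(unsigned long)p;	/* clean on 32- and 64-bit */
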
@@ -734,7 +735,7 @@ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
 	pdev = pm8001_ha->pdev;
 
 #ifdef PM8001_USE_MSIX
-	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
+	if (pdev->msix_cap)
 		return pm8001_setup_msix(pm8001_ha);
 	else {
 		PM8001_INIT_DBG(pm8001_ha,

+ 1 - 1
drivers/scsi/qla2xxx/Makefile

@@ -1,6 +1,6 @@
 qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
 		qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
-        qla_nx.o qla_mr.o qla_target.o
+        qla_nx.o qla_mr.o qla_nx2.o qla_target.o
 
 obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
 obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o

+ 60 - 21
drivers/scsi/qla2xxx/qla_attr.c

@@ -29,7 +29,7 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
 	if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
 		return 0;
 
-	if (IS_QLA82XX(ha)) {
+	if (IS_P3P_TYPE(ha)) {
 		if (off < ha->md_template_size) {
 			rval = memory_read_from_buffer(buf, count,
 			    &off, ha->md_tmplt_hdr, ha->md_template_size);
@@ -71,7 +71,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
 		ql_log(ql_log_info, vha, 0x705d,
 		    "Firmware dump cleared on (%ld).\n", vha->host_no);
 
-		if (IS_QLA82XX(vha->hw)) {
+		if (IS_P3P_TYPE(ha)) {
 			qla82xx_md_free(vha);
 			qla82xx_md_prep(vha);
 		}
@@ -95,11 +95,15 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
 			qla82xx_idc_lock(ha);
 			qla82xx_set_reset_owner(vha);
 			qla82xx_idc_unlock(ha);
+		} else if (IS_QLA8044(ha)) {
+			qla8044_idc_lock(ha);
+			qla82xx_set_reset_owner(vha);
+			qla8044_idc_unlock(ha);
 		} else
 			qla2x00_system_error(vha);
 		break;
 	case 4:
-		if (IS_QLA82XX(ha)) {
+		if (IS_P3P_TYPE(ha)) {
 			if (ha->md_tmplt_hdr)
 				ql_dbg(ql_dbg_user, vha, 0x705b,
 				    "MiniDump supported with this firmware.\n");
@@ -109,7 +113,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
 		}
 		break;
 	case 5:
-		if (IS_QLA82XX(ha))
+		if (IS_P3P_TYPE(ha))
 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 		break;
 	case 6:
@@ -586,7 +590,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 	int type;
 	uint32_t idc_control;
-
+	uint8_t *tmp_data = NULL;
 	if (off != 0)
 		return -EINVAL;
 
@@ -597,14 +601,23 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
 		    "Issuing ISP reset.\n");
 
 		scsi_block_requests(vha->host);
-		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 		if (IS_QLA82XX(ha)) {
 			ha->flags.isp82xx_no_md_cap = 1;
 			qla82xx_idc_lock(ha);
 			qla82xx_set_reset_owner(vha);
 			qla82xx_idc_unlock(ha);
+		} else if (IS_QLA8044(ha)) {
+			qla8044_idc_lock(ha);
+			idc_control = qla8044_rd_reg(ha,
+			    QLA8044_IDC_DRV_CTRL);
+			qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
+			    (idc_control | GRACEFUL_RESET_BIT1));
+			qla82xx_set_reset_owner(vha);
+			qla8044_idc_unlock(ha);
+		} else {
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
 		}
-		qla2xxx_wake_dpc(vha);
 		qla2x00_wait_for_chip_reset(vha);
 		scsi_unblock_requests(vha->host);
 		break;
@@ -640,7 +653,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
 			break;
 		}
 	case 0x2025e:
-		if (!IS_QLA82XX(ha) || vha != base_vha) {
+		if (!IS_P3P_TYPE(ha) || vha != base_vha) {
 			ql_log(ql_log_info, vha, 0x7071,
 		    "FCoE ctx reset not supported.\n");
 			return -EPERM;
@@ -674,7 +687,19 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
 		__qla83xx_set_idc_control(vha, idc_control);
 		qla83xx_idc_unlock(vha, 0);
 		break;
-
+	case 0x20261:
+		ql_dbg(ql_dbg_user, vha, 0x70e0,
+		    "Updating cache versions without reset ");
+
+		tmp_data = vmalloc(256);
+		if (!tmp_data) {
+			ql_log(ql_log_warn, vha, 0x70e1,
+			    "Unable to allocate memory for VPD information update.\n");
+			return -ENOMEM;
+		}
+		ha->isp_ops->get_flash_version(vha, tmp_data);
+		vfree(tmp_data);
+		break;
 	}
 	return count;
 }
@@ -1212,7 +1237,7 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
 		return snprintf(buf, PAGE_SIZE, "\n");
 
 	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
@@ -1265,10 +1290,7 @@ qla2x00_vn_port_mac_address_show(struct device *dev,
 	if (!IS_CNA_CAPABLE(vha->hw))
 		return snprintf(buf, PAGE_SIZE, "\n");
 
-	return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
-	    vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
-	    vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
-	    vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
+	return snprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
 }
 
 static ssize_t
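
%pMR is the printk extension for a MAC address printed colon-separated with
the bytes in reverse order, which is exactly what the removed format string
did by indexing 5..0; plain %pM prints the forward order. Illustrative
kernel-side sketch:

	u8 mac[6] = { 0x00, 0x0e, 0x1e, 0xaa, 0xbb, 0xcc };

	pr_info("forward: %pM\n", mac);		/* 00:0e:1e:aa:bb:cc */
	pr_info("reverse: %pMR\n", mac);	/* cc:bb:aa:1e:0e:00 */
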
@@ -1287,12 +1309,6 @@ qla2x00_thermal_temp_show(struct device *dev,
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	uint16_t temp = 0;
 
-	if (!vha->hw->thermal_support) {
-		ql_log(ql_log_warn, vha, 0x70db,
-		    "Thermal not supported by this card.\n");
-		goto done;
-	}
-
 	if (qla2x00_reset_active(vha)) {
 		ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
 		goto done;
@@ -1725,11 +1741,21 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
 		pfc_host_stat->lip_count = stats->lip_cnt;
 		pfc_host_stat->tx_frames = stats->tx_frames;
 		pfc_host_stat->rx_frames = stats->rx_frames;
-		pfc_host_stat->dumped_frames = stats->dumped_frames;
+		pfc_host_stat->dumped_frames = stats->discarded_frames;
 		pfc_host_stat->nos_count = stats->nos_rcvd;
+		pfc_host_stat->error_frames =
+			stats->dropped_frames + stats->discarded_frames;
+		pfc_host_stat->rx_words = vha->qla_stats.input_bytes;
+		pfc_host_stat->tx_words = vha->qla_stats.output_bytes;
 	}
+	pfc_host_stat->fcp_control_requests = vha->qla_stats.control_requests;
+	pfc_host_stat->fcp_input_requests = vha->qla_stats.input_requests;
+	pfc_host_stat->fcp_output_requests = vha->qla_stats.output_requests;
 	pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
 	pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
+	pfc_host_stat->seconds_since_last_reset =
+		get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
+	do_div(pfc_host_stat->seconds_since_last_reset, HZ);
 
 done_free:
         dma_pool_free(ha->s_dma_pool, stats, stats_dma);
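
seconds_since_last_reset is a 64-bit jiffies delta divided by HZ; do_div() is
the portable in-kernel way to divide a u64 by a 32-bit value (a bare '/' would
need __udivdi3 on 32-bit builds). Worked sketch, where the variable stands in
for vha->qla_stats.jiffies_at_last_reset:

	#include <linux/jiffies.h>
	#include <asm/div64.h>

	u64 secs = get_jiffies_64() - jiffies_at_last_reset;

	do_div(secs, HZ);	/* divides in place, returns the remainder */
	/* e.g. with HZ == 250, a delta of 2500 jiffies gives secs == 10 */
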
@@ -1737,6 +1763,16 @@ done:
 	return pfc_host_stat;
 }
 
+static void
+qla2x00_reset_host_stats(struct Scsi_Host *shost)
+{
+	scsi_qla_host_t *vha = shost_priv(shost);
+
+	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
+
+	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
+}
+
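
Hooking the helper into the transport template (the .reset_fc_host_stats
entries below) is what exposes it; with scsi_transport_fc this should surface
as a writable reset attribute under the fc_host statistics sysfs directory.
Minimal template sketch, handlers hypothetical:

	#include <scsi/scsi_transport_fc.h>

	static struct fc_function_template my_fc_tmpl = {
		.get_fc_host_stats	= my_get_fc_host_stats,
		.reset_fc_host_stats	= my_reset_fc_host_stats,
	};
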
 static void
 qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
 {
@@ -2043,6 +2079,7 @@ struct fc_function_template qla2xxx_transport_functions = {
 	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
 	.terminate_rport_io = qla2x00_terminate_rport_io,
 	.get_fc_host_stats = qla2x00_get_fc_host_stats,
+	.reset_fc_host_stats = qla2x00_reset_host_stats,
 
 	.vport_create = qla24xx_vport_create,
 	.vport_disable = qla24xx_vport_disable,
@@ -2089,6 +2126,8 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
 	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
 	.terminate_rport_io = qla2x00_terminate_rport_io,
 	.get_fc_host_stats = qla2x00_get_fc_host_stats,
+	.reset_fc_host_stats = qla2x00_reset_host_stats,
+
 	.bsg_request = qla24xx_bsg_request,
 	.bsg_timeout = qla24xx_bsg_timeout,
 };

+ 28 - 15
drivers/scsi/qla2xxx/qla_bsg.c

@@ -125,7 +125,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
 	uint32_t len;
 	uint32_t oper;
 
-	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
+	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
 		ret = -EINVAL;
 		goto exit_fcp_prio_cfg;
 	}
@@ -559,7 +559,7 @@ qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
 	uint16_t new_config[4];
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
 		goto done_reset_internal;
 
 	memset(new_config, 0 , sizeof(new_config));
@@ -627,9 +627,10 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
 {
 	int ret = 0;
 	int rval = 0;
+	unsigned long rem_tmo = 0, current_tmo = 0;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
 		goto done_set_internal;
 
 	if (mode == INTERNAL_LOOPBACK)
@@ -652,8 +653,19 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
 	}
 
 	/* Wait for DCBX complete event */
-	if (!wait_for_completion_timeout(&ha->dcbx_comp,
-	    (DCBX_COMP_TIMEOUT * HZ))) {
+	current_tmo = DCBX_COMP_TIMEOUT * HZ;
+	while (1) {
+		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
+		    current_tmo);
+		if (!ha->idc_extend_tmo || rem_tmo) {
+			ha->idc_extend_tmo = 0;
+			break;
+		}
+		current_tmo = ha->idc_extend_tmo * HZ;
+		ha->idc_extend_tmo = 0;
+	}
+
+	if (!rem_tmo) {
 		ql_dbg(ql_dbg_user, vha, 0x7022,
 		    "DCBX completion not received.\n");
 		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
@@ -678,6 +690,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
 	}
 
 	ha->notify_dcbx_comp = 0;
+	ha->idc_extend_tmo = 0;
 
 done_set_internal:
 	return rval;
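
The fixed DCBX wait becomes extendable: when the firmware raises an
IDC-TIME-EXTEND AEN, the ISR stores the granted extension in
ha->idc_extend_tmo (see the qla_isr.c hunk further down), and a wait that
timed out restarts with that budget instead of failing. Generic sketch of the
pattern, names assumed:

	#include <linux/completion.h>
	#include <linux/jiffies.h>

	/* Returns remaining jiffies if `comp` completed, 0 on final timeout.
	 * Another context may grant an extension (in seconds) via
	 * *extend_tmo while we sleep. */
	static unsigned long wait_extendable(struct completion *comp,
					     unsigned long tmo,
					     unsigned int *extend_tmo)
	{
		unsigned long rem;

		for (;;) {
			rem = wait_for_completion_timeout(comp, tmo);
			if (rem || !*extend_tmo)
				break;	/* completed, or no extension */
			tmo = *extend_tmo * HZ;	/* retry with new budget */
			*extend_tmo = 0;
		}
		return rem;
	}
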
@@ -773,7 +786,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
 
 	if (atomic_read(&vha->loop_state) == LOOP_READY &&
 	    (ha->current_topology == ISP_CFG_F ||
-	    ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
+	    ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
 	    le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
 	    && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
 		elreq.options == EXTERNAL_LOOPBACK) {
@@ -783,7 +796,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
 		command_sent = INT_DEF_LB_ECHO_CMD;
 		rval = qla2x00_echo_test(vha, &elreq, response);
 	} else {
-		if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
+		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
 			memset(config, 0, sizeof(config));
 			memset(new_config, 0, sizeof(new_config));
 
@@ -806,7 +819,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
 			    "elreq.options=%04x\n", elreq.options);
 
 			if (elreq.options == EXTERNAL_LOOPBACK)
-				if (IS_QLA8031(ha))
+				if (IS_QLA8031(ha) || IS_QLA8044(ha))
 					rval = qla81xx_set_loopback_mode(vha,
 					    config, new_config, elreq.options);
 				else
@@ -1266,6 +1279,7 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
 	int rval = 0;
 	struct qla_port_param *port_param = NULL;
 	fc_port_t *fcport = NULL;
+	int found = 0;
 	uint16_t mb[MAILBOX_REGISTER_COUNT];
 	uint8_t *rsp_ptr = NULL;
 
@@ -1288,10 +1302,12 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
 		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
 			fcport->port_name, sizeof(fcport->port_name)))
 			continue;
+
+		found = 1;
 		break;
 	}
 
-	if (!fcport) {
+	if (!found) {
 		ql_log(ql_log_warn, vha, 0x7049,
 		    "Failed to find port.\n");
 		return -EINVAL;
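
The removed test could never fire: after a full traversal,
list_for_each_entry() leaves its cursor pointing at a bogus entry computed
from the list head, never NULL. The usual idioms are a found flag, as added
above, or returning from inside the loop:

	#include <linux/list.h>
	#include <linux/types.h>

	struct port {
		struct list_head list;
		u64 wwpn;
	};

	static struct port *find_port(struct list_head *ports, u64 wwpn)
	{
		struct port *p;

		list_for_each_entry(p, ports, list)
			if (p->wwpn == wwpn)
				return p;	/* cursor valid only here */

		return NULL;	/* never test the cursor after the loop */
	}
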
@@ -1318,12 +1334,9 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
 
 	if (rval) {
 		ql_log(ql_log_warn, vha, 0x704c,
-		    "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
-		    "%04x %x %04x %04x.\n", fcport->port_name[0],
-		    fcport->port_name[1], fcport->port_name[2],
-		    fcport->port_name[3], fcport->port_name[4],
-		    fcport->port_name[5], fcport->port_name[6],
-		    fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
+		    "iIDMA cmd failed for %8phN -- "
+		    "%04x %x %04x %04x.\n", fcport->port_name,
+		    rval, fcport->fp_speed, mb[0], mb[1]);
 		rval = (DID_ERROR << 16);
 	} else {
 		if (!port_param->mode) {

+ 38 - 20
drivers/scsi/qla2xxx/qla_dbg.c

@@ -11,9 +11,12 @@
  * ----------------------------------------------------------------------
  * |             Level            |   Last Value Used  |     Holes	|
  * ----------------------------------------------------------------------
- * | Module Init and Probe        |       0x014f       | 0x4b,0xba,0xfa |
- * | Mailbox commands             |       0x117a       | 0x111a-0x111b  |
+ * | Module Init and Probe        |       0x0159       | 0x4b,0xba,0xfa |
+ * | Mailbox commands             |       0x1181       | 0x111a-0x111b  |
  * |                              |                    | 0x1155-0x1158  |
+ * |                              |                    | 0x1018-0x1019  |
+ * |                              |                    | 0x1115-0x1116  |
+ * |                              |                    | 0x10ca		|
  * | Device Discovery             |       0x2095       | 0x2020-0x2022, |
  * |                              |                    | 0x2011-0x2012, |
  * |                              |                    | 0x2016         |
@@ -24,11 +27,12 @@
  * |                              |                    | 0x3036,0x3038  |
  * |                              |                    | 0x303a		|
  * | DPC Thread                   |       0x4022       | 0x4002,0x4013  |
- * | Async Events                 |       0x5081       | 0x502b-0x502f  |
+ * | Async Events                 |       0x5087       | 0x502b-0x502f  |
  * |                              |                    | 0x5047,0x5052  |
- * |                              |                    | 0x5040,0x5075  |
- * | Timer Routines               |       0x6011       |                |
- * | User Space Interactions      |       0x70dd       | 0x7018,0x702e, |
+ * |                              |                    | 0x5084,0x5075	|
+ * |                              |                    | 0x503d,0x5044  |
+ * | Timer Routines               |       0x6012       |                |
+ * | User Space Interactions      |       0x70e1       | 0x7018,0x702e, |
  * |                              |                    | 0x7020,0x7024, |
  * |                              |                    | 0x7039,0x7045, |
  * |                              |                    | 0x7073-0x7075, |
@@ -36,17 +40,28 @@
  * |                              |                    | 0x70a5,0x70a6, |
  * |                              |                    | 0x70a8,0x70ab, |
  * |                              |                    | 0x70ad-0x70ae, |
- * |                              |                    | 0x70d1-0x70da, |
+ * |                              |                    | 0x70d1-0x70db, |
  * |                              |                    | 0x7047,0x703b	|
- * | Task Management              |       0x803c       | 0x8025-0x8026  |
+ * |                              |                    | 0x70de-0x70df, |
+ * | Task Management              |       0x803d       | 0x8025-0x8026  |
  * |                              |                    | 0x800b,0x8039  |
  * | AER/EEH                      |       0x9011       |		|
  * | Virtual Port                 |       0xa007       |		|
- * | ISP82XX Specific             |       0xb086       | 0xb002,0xb024  |
+ * | ISP82XX Specific             |       0xb14c       | 0xb002,0xb024  |
+ * |                              |                    | 0xb09e,0xb0ae  |
+ * |                              |                    | 0xb0e0-0xb0ef  |
+ * |                              |                    | 0xb085,0xb0dc  |
+ * |                              |                    | 0xb107,0xb108  |
+ * |                              |                    | 0xb111,0xb11e  |
+ * |                              |                    | 0xb12c,0xb12d  |
+ * |                              |                    | 0xb13a,0xb142  |
+ * |                              |                    | 0xb13c-0xb140  |
+ * |                              |                    | 0xb149		|
  * | MultiQ                       |       0xc00c       |		|
  * | Misc                         |       0xd010       |		|
- * | Target Mode		  |	  0xe070       |		|
- * | Target Mode Management	  |	  0xf072       |		|
+ * | Target Mode		  |	  0xe070       | 0xe021		|
+ * | Target Mode Management	  |	  0xf072       | 0xf002-0xf003	|
+ * |                              |                    | 0xf046-0xf049  |
  * | Target Mode Task Management  |	  0x1000b      |		|
  * ----------------------------------------------------------------------
  */
@@ -519,7 +534,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
 	uint32_t cnt, que_idx;
 	uint8_t que_cnt;
 	struct qla2xxx_mq_chain *mq = ptr;
-	struct device_reg_25xxmq __iomem *reg;
+	device_reg_t __iomem *reg;
 
 	if (!ha->mqenable || IS_QLA83XX(ha))
 		return ptr;
@@ -533,13 +548,16 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
 		ha->max_req_queues : ha->max_rsp_queues;
 	mq->count = htonl(que_cnt);
 	for (cnt = 0; cnt < que_cnt; cnt++) {
-		reg = (struct device_reg_25xxmq __iomem *)
-			(ha->mqiobase + cnt * QLA_QUE_PAGE);
+		reg = ISP_QUE_REG(ha, cnt);
 		que_idx = cnt * 4;
-		mq->qregs[que_idx] = htonl(RD_REG_DWORD(&reg->req_q_in));
-		mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(&reg->req_q_out));
-		mq->qregs[que_idx+2] = htonl(RD_REG_DWORD(&reg->rsp_q_in));
-		mq->qregs[que_idx+3] = htonl(RD_REG_DWORD(&reg->rsp_q_out));
+		mq->qregs[que_idx] =
+		    htonl(RD_REG_DWORD(&reg->isp25mq.req_q_in));
+		mq->qregs[que_idx+1] =
+		    htonl(RD_REG_DWORD(&reg->isp25mq.req_q_out));
+		mq->qregs[que_idx+2] =
+		    htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_in));
+		mq->qregs[que_idx+3] =
+		    htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_out));
 	}
 
 	return ptr + sizeof(struct qla2xxx_mq_chain);
@@ -941,7 +959,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	uint32_t	*last_chain = NULL;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
-	if (IS_QLA82XX(ha))
+	if (IS_P3P_TYPE(ha))
 		return;
 
 	risc_address = ext_mem_cnt = 0;
@@ -2530,7 +2548,7 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
 	if (!ql_mask_match(level))
 		return;
 
-	if (IS_QLA82XX(ha))
+	if (IS_P3P_TYPE(ha))
 		mbx_reg = &reg82->mailbox_in[0];
 	else if (IS_FWI2_CAPABLE(ha))
 		mbx_reg = &reg24->mailbox0;

+ 27 - 12
drivers/scsi/qla2xxx/qla_def.h

@@ -35,6 +35,7 @@
 
 #include "qla_bsg.h"
 #include "qla_nx.h"
+#include "qla_nx2.h"
 #define QLA2XXX_DRIVER_NAME	"qla2xxx"
 #define QLA2XXX_APIDEV		"ql2xapidev"
 #define QLA2XXX_MANUFACTURER	"QLogic Corporation"
@@ -642,6 +643,7 @@ struct device_reg_fx00 {
 	uint32_t initval6;		/* C8 */
 	uint32_t initval7;		/* CC */
 	uint32_t fwheartbeat;		/* D0 */
+	uint32_t pseudoaen;		/* D4 */
 };
 
 
@@ -805,6 +807,7 @@ struct mbx_cmd_32 {
 #define MBA_MIRROR_LUN_CHANGE	0x8402	/* Mirror LUN State Change
 					   Notification */
 #define MBA_FW_POLL_STATE	0x8600  /* Firmware in poll diagnostic state */
+#define MBA_FW_RESET_FCT	0x8502	/* Firmware reset factory defaults */
 
 /* 83XX FCoE specific */
 #define MBA_IDC_AEN		0x8200  /* FCoE: NIC Core state change AEN */
@@ -997,6 +1000,7 @@ struct mbx_cmd_32 {
 #define	MBX_1		BIT_1
 #define	MBX_0		BIT_0
 
+#define RNID_TYPE_SET_VERSION	0x9
 #define RNID_TYPE_ASIC_TEMP	0xC
 
 /*
@@ -1233,8 +1237,9 @@ struct link_statistics {
 	uint32_t unused1[0x1a];
 	uint32_t tx_frames;
 	uint32_t rx_frames;
-	uint32_t dumped_frames;
-	uint32_t unused2[2];
+	uint32_t discarded_frames;
+	uint32_t dropped_frames;
+	uint32_t unused2[1];
 	uint32_t nos_rcvd;
 };
 
@@ -2656,6 +2661,11 @@ struct qla_statistics {
 	uint32_t total_isp_aborts;
 	uint64_t input_bytes;
 	uint64_t output_bytes;
+	uint64_t input_requests;
+	uint64_t output_requests;
+	uint32_t control_requests;
+
+	uint64_t jiffies_at_last_reset;
 };
 
 struct bidi_statistics {
@@ -2670,9 +2680,8 @@ struct bidi_statistics {
 #define QLA_MAX_QUEUES 256
 #define ISP_QUE_REG(ha, id) \
 	((ha->mqenable || IS_QLA83XX(ha)) ? \
-	((device_reg_t __iomem *)(ha->mqiobase) +\
-	(QLA_QUE_PAGE * id)) :\
-	((device_reg_t __iomem *)(ha->iobase)))
+	 ((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\
+	 ((void __iomem *)ha->iobase))
 #define QLA_REQ_QUE_ID(tag) \
 	((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0)
 #define QLA_DEFAULT_QUE_QOS 5
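
Casting to void __iomem * makes the queue offset byte-granular (void-pointer
arithmetic is the GCC extension the kernel builds on); with the old
device_reg_t cast the compiler scaled QLA_QUE_PAGE * id by
sizeof(device_reg_t). Illustrative fragment:

	void __iomem *ok = (void __iomem *)mqiobase
			 + QLA_QUE_PAGE * id;	/* advances bytes */

	device_reg_t __iomem *bad = (device_reg_t __iomem *)mqiobase
			 + QLA_QUE_PAGE * id;
	/* advances QLA_QUE_PAGE * id * sizeof(device_reg_t) bytes */
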
@@ -2935,7 +2944,8 @@ struct qla_hw_data {
 #define DT_ISP2031			BIT_15
 #define DT_ISP8031			BIT_16
 #define DT_ISPFX00			BIT_17
-#define DT_ISP_LAST			(DT_ISPFX00 << 1)
+#define DT_ISP8044			BIT_18
+#define DT_ISP_LAST			(DT_ISP8044 << 1)
 
 #define DT_T10_PI                       BIT_25
 #define DT_IIDMA                        BIT_26
@@ -2961,6 +2971,7 @@ struct qla_hw_data {
 #define IS_QLA8001(ha)	(DT_MASK(ha) & DT_ISP8001)
 #define IS_QLA81XX(ha)	(IS_QLA8001(ha))
 #define IS_QLA82XX(ha)	(DT_MASK(ha) & DT_ISP8021)
+#define IS_QLA8044(ha)  (DT_MASK(ha) & DT_ISP8044)
 #define IS_QLA2031(ha)	(DT_MASK(ha) & DT_ISP2031)
 #define IS_QLA8031(ha)	(DT_MASK(ha) & DT_ISP8031)
 #define IS_QLAFX00(ha)	(DT_MASK(ha) & DT_ISPFX00)
@@ -2975,10 +2986,12 @@ struct qla_hw_data {
 #define IS_QLA24XX_TYPE(ha)     (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
 				IS_QLA84XX(ha))
 #define IS_CNA_CAPABLE(ha)	(IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
-				IS_QLA8031(ha))
+				IS_QLA8031(ha) || IS_QLA8044(ha))
+#define IS_P3P_TYPE(ha)		(IS_QLA82XX(ha) || IS_QLA8044(ha))
 #define IS_QLA2XXX_MIDTYPE(ha)	(IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
 				IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
-				IS_QLA82XX(ha) || IS_QLA83XX(ha))
+				IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
+				IS_QLA8044(ha))
 #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
 #define IS_NOPOLLING_TYPE(ha)	((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
 			IS_QLA83XX(ha)) && (ha)->flags.msix_enabled)
@@ -3187,10 +3200,12 @@ struct qla_hw_data {
 	uint32_t	nvram_data_off;
 
 	uint32_t	fdt_wrt_disable;
+	uint32_t	fdt_wrt_enable;
 	uint32_t	fdt_erase_cmd;
 	uint32_t	fdt_block_size;
 	uint32_t	fdt_unprotect_sec_cmd;
 	uint32_t	fdt_protect_sec_cmd;
+	uint32_t	fdt_wrt_sts_reg_cmd;
 
 	uint32_t        flt_region_flt;
 	uint32_t        flt_region_fdt;
@@ -3277,6 +3292,7 @@ struct qla_hw_data {
 
 	/* QLA83XX IDC specific fields */
 	uint32_t	idc_audit_ts;
+	uint32_t	idc_extend_tmo;
 
 	/* DPC low-priority workqueue */
 	struct workqueue_struct *dpc_lp_wq;
@@ -3296,9 +3312,6 @@ struct qla_hw_data {
 	struct mr_data_fx00 mr;
 
 	struct qlt_hw_data tgt;
-	uint16_t	thermal_support;
-#define THERMAL_SUPPORT_I2C BIT_0
-#define THERMAL_SUPPORT_ISP BIT_1
 };
 
 /*
@@ -3364,6 +3377,7 @@ typedef struct scsi_qla_host {
 #define PORT_UPDATE_NEEDED	24
 #define FX00_RESET_RECOVERY	25
 #define FX00_TARGET_SCAN	26
+#define FX00_CRITEMP_RECOVERY	27
 
 	uint32_t	device_flags;
 #define SWITCH_FOUND		BIT_0
@@ -3402,7 +3416,7 @@ typedef struct scsi_qla_host {
 	uint16_t	fcoe_fcf_idx;
 	uint8_t		fcoe_vn_port_mac[6];
 
-	uint32_t   	vp_abort_cnt;
+	uint32_t	vp_abort_cnt;
 
 	struct fc_vport	*fc_vport;	/* holds fc_vport * for each vport */
 	uint16_t        vp_idx;		/* vport ID */
@@ -3435,6 +3449,7 @@ typedef struct scsi_qla_host {
 	struct bidi_statistics bidi_stats;
 
 	atomic_t	vref_count;
+	struct qla8044_reset_template reset_tmplt;
 } scsi_qla_host_t;
 
 #define SET_VP_IDX	1

+ 2 - 0
drivers/scsi/qla2xxx/qla_fw.h

@@ -1387,6 +1387,8 @@ struct qla_flt_header {
 #define FLT_REG_GOLD_FW		0x2f
 #define FLT_REG_FCP_PRIO_0	0x87
 #define FLT_REG_FCP_PRIO_1	0x88
+#define FLT_REG_CNA_FW		0x97
+#define FLT_REG_BOOT_CODE_8044	0xA2
 #define FLT_REG_FCOE_FW		0xA4
 #define FLT_REG_FCOE_NVRAM_0	0xAA
 #define FLT_REG_FCOE_NVRAM_1	0xAC

+ 57 - 17
drivers/scsi/qla2xxx/qla_gbl.h

@@ -356,6 +356,12 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *, dma_addr_t, uint16_t , uint16_t *,
 extern int
 qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
 
+extern int
+qla82xx_set_driver_version(scsi_qla_host_t *, char *);
+
+extern int
+qla25xx_set_driver_version(scsi_qla_host_t *, char *);
+
 extern int
 qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
 	uint16_t, uint16_t, uint16_t, uint16_t);
@@ -435,19 +441,19 @@ qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
  */
 extern void qla2x00_release_nvram_protection(scsi_qla_host_t *);
 extern uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *,
-    uint32_t, uint32_t);
+					 uint32_t, uint32_t);
 extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
-    uint32_t);
+					uint32_t);
 extern uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
-    uint32_t);
+					uint32_t);
 extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
-    uint32_t);
+				    uint32_t);
 extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
-    uint32_t);
+				    uint32_t);
 extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
-    uint32_t);
+					uint32_t);
 extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
-    uint32_t);
+				    uint32_t);
 extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
 
 extern int qla2x00_beacon_on(struct scsi_qla_host *);
@@ -463,21 +469,25 @@ extern int qla83xx_wr_reg(scsi_qla_host_t *, uint32_t, uint32_t);
 extern int qla83xx_rd_reg(scsi_qla_host_t *, uint32_t, uint32_t *);
 extern int qla83xx_restart_nic_firmware(scsi_qla_host_t *);
 extern int qla83xx_access_control(scsi_qla_host_t *, uint16_t, uint32_t,
-    uint32_t, uint16_t *);
+				  uint32_t, uint16_t *);
 
 extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
-    uint32_t, uint32_t);
+					 uint32_t, uint32_t);
 extern int qla2x00_write_optrom_data(struct scsi_qla_host *, uint8_t *,
-    uint32_t, uint32_t);
+				     uint32_t, uint32_t);
 extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
-    uint32_t, uint32_t);
+					 uint32_t, uint32_t);
 extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
-    uint32_t, uint32_t);
+				     uint32_t, uint32_t);
 extern uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
-    uint32_t, uint32_t);
+					 uint32_t, uint32_t);
+extern uint8_t *qla8044_read_optrom_data(struct scsi_qla_host *,
+					 uint8_t *, uint32_t, uint32_t);
+extern void qla8044_watchdog(struct scsi_qla_host *vha);
 
 extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *);
 extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
+extern int qla82xx_get_flash_version(scsi_qla_host_t *, void *);
 
 extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
 extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
@@ -498,7 +508,7 @@ extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
 extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
 extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t);
 extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t,
-	uint8_t *, uint32_t);
+			   uint8_t *, uint32_t);
 extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int);
 
 /*
@@ -584,6 +594,7 @@ extern int qlafx00_start_scsi(srb_t *);
 extern int qlafx00_abort_isp(scsi_qla_host_t *);
 extern int qlafx00_iospace_config(struct qla_hw_data *);
 extern int qlafx00_init_firmware(scsi_qla_host_t *, uint16_t);
+extern int qlafx00_driver_shutdown(scsi_qla_host_t *, int);
 extern int qlafx00_fw_ready(scsi_qla_host_t *);
 extern int qlafx00_configure_devices(scsi_qla_host_t *);
 extern int qlafx00_reset_initialize(scsi_qla_host_t *);
@@ -601,6 +612,7 @@ extern void qlafx00_abort_iocb(srb_t *, struct abort_iocb_entry_fx00 *);
 extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *);
 extern void qlafx00_timer_routine(scsi_qla_host_t *);
 extern int qlafx00_rescan_isp(scsi_qla_host_t *);
+extern int qlafx00_loop_reset(scsi_qla_host_t *vha);
 
 /* qla82xx related functions */
 
@@ -619,9 +631,9 @@ extern int qla82xx_start_firmware(scsi_qla_host_t *);
 /* Firmware and flash related functions */
 extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *);
 extern uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
-    uint32_t, uint32_t);
+					 uint32_t, uint32_t);
 extern int qla82xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
-    uint32_t, uint32_t);
+				     uint32_t, uint32_t);
 
 /* Mailbox related functions */
 extern int qla82xx_abort_isp(scsi_qla_host_t *);
@@ -662,7 +674,7 @@ extern void qla8xxx_dev_failed_handler(scsi_qla_host_t *);
 extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *);
 
 extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
-    size_t, char *);
+				   size_t, char *);
 extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
 extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
 extern void qla82xx_start_iocbs(scsi_qla_host_t *);
@@ -674,6 +686,8 @@ extern int qla81xx_get_led_config(scsi_qla_host_t *, uint16_t *);
 extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
 extern char *qdev_state(uint32_t);
 extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
+extern int qla82xx_read_temperature(scsi_qla_host_t *);
+extern int qla8044_read_temperature(scsi_qla_host_t *);
 
 /* BSG related functions */
 extern int qla24xx_bsg_request(struct fc_bsg_job *);
@@ -695,5 +709,31 @@ extern void qla82xx_md_free(scsi_qla_host_t *);
 extern int qla82xx_md_collect(scsi_qla_host_t *);
 extern void qla82xx_md_prep(scsi_qla_host_t *);
 extern void qla82xx_set_reset_owner(scsi_qla_host_t *);
+extern int qla82xx_validate_template_chksum(scsi_qla_host_t *vha);
+
+/* Function declarations for ISP8044 */
+extern int qla8044_idc_lock(struct qla_hw_data *ha);
+extern void qla8044_idc_unlock(struct qla_hw_data *ha);
+extern uint32_t qla8044_rd_reg(struct qla_hw_data *ha, ulong addr);
+extern void qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val);
+extern void qla8044_read_reset_template(struct scsi_qla_host *ha);
+extern void qla8044_set_idc_dontreset(struct scsi_qla_host *ha);
+extern int qla8044_rd_direct(struct scsi_qla_host *vha, const uint32_t crb_reg);
+extern void qla8044_wr_direct(struct scsi_qla_host *vha,
+			      const uint32_t crb_reg, const uint32_t value);
+extern inline void qla8044_set_qsnt_ready(struct scsi_qla_host *vha);
+extern inline void qla8044_need_reset_handler(struct scsi_qla_host *vha);
+extern int qla8044_device_state_handler(struct scsi_qla_host *vha);
+extern void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha);
+extern void qla8044_clear_drv_active(struct scsi_qla_host *vha);
+void qla8044_get_minidump(struct scsi_qla_host *vha);
+int qla8044_collect_md_data(struct scsi_qla_host *vha);
+extern int qla8044_md_get_template(scsi_qla_host_t *);
+extern int qla8044_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+				     uint32_t, uint32_t);
+extern irqreturn_t qla8044_intr_handler(int, void *);
+extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t);
+extern int qla8044_abort_isp(scsi_qla_host_t *);
+extern int qla8044_check_fw_alive(struct scsi_qla_host *);
 
 #endif /* _QLA_GBL_H */

+ 21 - 76
drivers/scsi/qla2xxx/qla_gs.c

@@ -49,6 +49,8 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
 	ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
 	ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
 
+	vha->qla_stats.control_requests++;
+
 	return (ms_pkt);
 }
 
@@ -87,6 +89,8 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
 	ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
 	ct_pkt->vp_index = vha->vp_idx;
 
+	vha->qla_stats.control_requests++;
+
 	return (ct_pkt);
 }
 
@@ -226,17 +230,9 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
 			fcport->d_id.b.domain = 0xf0;
 
 		ql_dbg(ql_dbg_disc, vha, 0x2063,
-		    "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
-		    "pn %02x%02x%02x%02x%02x%02x%02x%02x "
+		    "GA_NXT entry - nn %8phN pn %8phN "
 		    "port_id=%02x%02x%02x.\n",
-		    fcport->node_name[0], fcport->node_name[1],
-		    fcport->node_name[2], fcport->node_name[3],
-		    fcport->node_name[4], fcport->node_name[5],
-		    fcport->node_name[6], fcport->node_name[7],
-		    fcport->port_name[0], fcport->port_name[1],
-		    fcport->port_name[2], fcport->port_name[3],
-		    fcport->port_name[4], fcport->port_name[5],
-		    fcport->port_name[6], fcport->port_name[7],
+		    fcport->node_name, fcport->port_name,
 		    fcport->d_id.b.domain, fcport->d_id.b.area,
 		    fcport->d_id.b.al_pa);
 	}
@@ -447,17 +443,9 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
 			    ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
 
 			ql_dbg(ql_dbg_disc, vha, 0x2058,
-			    "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02X%02x "
-			    "pn %02x%02x%02x%02x%02x%02x%02X%02x "
+			    "GID_PT entry - nn %8phN pn %8phN "
 			    "portid=%02x%02x%02x.\n",
-			    list[i].node_name[0], list[i].node_name[1],
-			    list[i].node_name[2], list[i].node_name[3],
-			    list[i].node_name[4], list[i].node_name[5],
-			    list[i].node_name[6], list[i].node_name[7],
-			    list[i].port_name[0], list[i].port_name[1],
-			    list[i].port_name[2], list[i].port_name[3],
-			    list[i].port_name[4], list[i].port_name[5],
-			    list[i].port_name[6], list[i].port_name[7],
+			    list[i].node_name, list[i].port_name,
 			    list[i].d_id.b.domain, list[i].d_id.b.area,
 			    list[i].d_id.b.al_pa);
 		}
@@ -739,6 +727,8 @@ qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
 	wc = (data_size - 16) / 4;		/* Size in 32bit words. */
 	sns_cmd->p.cmd.size = cpu_to_le16(wc);
 
+	vha->qla_stats.control_requests++;
+
 	return (sns_cmd);
 }
 
@@ -796,17 +786,9 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
 			fcport->d_id.b.domain = 0xf0;
 
 		ql_dbg(ql_dbg_disc, vha, 0x2061,
-		    "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
-		    "pn %02x%02x%02x%02x%02x%02x%02x%02x "
+		    "GA_NXT entry - nn %8phN pn %8phN "
 		    "port_id=%02x%02x%02x.\n",
-		    fcport->node_name[0], fcport->node_name[1],
-		    fcport->node_name[2], fcport->node_name[3],
-		    fcport->node_name[4], fcport->node_name[5],
-		    fcport->node_name[6], fcport->node_name[7],
-		    fcport->port_name[0], fcport->port_name[1],
-		    fcport->port_name[2], fcport->port_name[3],
-		    fcport->port_name[4], fcport->port_name[5],
-		    fcport->port_name[6], fcport->port_name[7],
+		    fcport->node_name, fcport->port_name,
 		    fcport->d_id.b.domain, fcport->d_id.b.area,
 		    fcport->d_id.b.al_pa);
 	}
@@ -991,17 +973,9 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
 			    WWN_SIZE);
 
 			ql_dbg(ql_dbg_disc, vha, 0x206e,
-			    "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
-			    "pn %02x%02x%02x%02x%02x%02x%02x%02x "
+			    "GID_PT entry - nn %8phN pn %8phN "
 			    "port_id=%02x%02x%02x.\n",
-			    list[i].node_name[0], list[i].node_name[1],
-			    list[i].node_name[2], list[i].node_name[3],
-			    list[i].node_name[4], list[i].node_name[5],
-			    list[i].node_name[6], list[i].node_name[7],
-			    list[i].port_name[0], list[i].port_name[1],
-			    list[i].port_name[2], list[i].port_name[3],
-			    list[i].port_name[4], list[i].port_name[5],
-			    list[i].port_name[6], list[i].port_name[7],
+			    list[i].node_name, list[i].port_name,
 			    list[i].d_id.b.domain, list[i].d_id.b.area,
 			    list[i].d_id.b.al_pa);
 		}
@@ -1321,11 +1295,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
 	size += 4 + WWN_SIZE;
 
 	ql_dbg(ql_dbg_disc, vha, 0x2025,
-	    "NodeName = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
-	    eiter->a.node_name[0], eiter->a.node_name[1],
-	    eiter->a.node_name[2], eiter->a.node_name[3],
-	    eiter->a.node_name[4], eiter->a.node_name[5],
-	    eiter->a.node_name[6], eiter->a.node_name[7]);
+	    "NodeName = %8phN.\n", eiter->a.node_name);
 
 	/* Manufacturer. */
 	eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1428,16 +1398,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
 	qla2x00_update_ms_fdmi_iocb(vha, size + 16);
 
 	ql_dbg(ql_dbg_disc, vha, 0x202e,
-	    "RHBA identifier = "
-	    "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n",
-	    ct_req->req.rhba.hba_identifier[0],
-	    ct_req->req.rhba.hba_identifier[1],
-	    ct_req->req.rhba.hba_identifier[2],
-	    ct_req->req.rhba.hba_identifier[3],
-	    ct_req->req.rhba.hba_identifier[4],
-	    ct_req->req.rhba.hba_identifier[5],
-	    ct_req->req.rhba.hba_identifier[6],
-	    ct_req->req.rhba.hba_identifier[7], size);
+	    "RHBA identifier = %8phN size=%d.\n",
+	    ct_req->req.rhba.hba_identifier, size);
 	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
 	    entries, size);
 
@@ -1494,11 +1456,7 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
 	memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
 
 	ql_dbg(ql_dbg_disc, vha, 0x2036,
-	    "DHBA portname = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
-	    ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
-	    ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
-	    ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
-	    ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]);
+	    "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
 
 	/* Execute MS IOCB */
 	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -1678,12 +1636,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
 	qla2x00_update_ms_fdmi_iocb(vha, size + 16);
 
 	ql_dbg(ql_dbg_disc, vha, 0x203e,
-	    "RPA portname= %02x%02x%02x%02x%02X%02x%02x%02x size=%d.\n",
-	    ct_req->req.rpa.port_name[0], ct_req->req.rpa.port_name[1],
-	    ct_req->req.rpa.port_name[2], ct_req->req.rpa.port_name[3],
-	    ct_req->req.rpa.port_name[4], ct_req->req.rpa.port_name[5],
-	    ct_req->req.rpa.port_name[6], ct_req->req.rpa.port_name[7],
-	    size);
+	    "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
 	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
 	    entries, size);
 
@@ -1940,16 +1893,8 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
 
 			ql_dbg(ql_dbg_disc, vha, 0x205b,
 			    "GPSC ext entry - fpn "
-			    "%02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
-			    "speed=%04x.\n",
-			    list[i].fabric_port_name[0],
-			    list[i].fabric_port_name[1],
-			    list[i].fabric_port_name[2],
-			    list[i].fabric_port_name[3],
-			    list[i].fabric_port_name[4],
-			    list[i].fabric_port_name[5],
-			    list[i].fabric_port_name[6],
-			    list[i].fabric_port_name[7],
+			    "%8phN speeds=%04x speed=%04x.\n",
+			    list[i].fabric_port_name,
 			    be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
 			    be16_to_cpu(ct_rsp->rsp.gpsc.speed));
 		}
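
Each of these hand-rolled %02x chains collapses into the %*ph printk
extension: a byte count as field width, then "ph" plus an optional separator
suffix (C for colons, D for dashes, N for none). The same family shows up
later as %10phN for CDB bytes in qla_isr.c. Sketch:

	u8 wwpn[8] = { 0x21, 0x00, 0x00, 0x24, 0xff, 0x01, 0x02, 0x03 };

	pr_info("pn %8phN\n", wwpn);	/* "pn 21000024ff010203" */
	pr_info("pn %8ph\n", wwpn);	/* space-separated variant */
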

+ 48 - 38
drivers/scsi/qla2xxx/qla_init.c

@@ -524,7 +524,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
 	vha->flags.reset_active = 0;
 	ha->flags.pci_channel_io_perm_failure = 0;
 	ha->flags.eeh_busy = 0;
-	ha->thermal_support = THERMAL_SUPPORT_I2C|THERMAL_SUPPORT_ISP;
+	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
 	atomic_set(&vha->loop_state, LOOP_DOWN);
 	vha->device_flags = DFLG_NO_CABLE;
@@ -552,7 +552,18 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
 	if (rval) {
 		ql_log(ql_log_fatal, vha, 0x004f,
 		    "Unable to validate FLASH data.\n");
-		return (rval);
+		return rval;
+	}
+
+	if (IS_QLA8044(ha)) {
+		qla8044_read_reset_template(vha);
+
+		/* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
+	 * If DONTRESET_BIT0 is set, drivers should not set dev_state
+	 * to NEED_RESET. But if NEED_RESET is set, drivers should
+	 * honor the reset. */
+		if (ql2xdontresethba == 1)
+			qla8044_set_idc_dontreset(vha);
 	}
 
 	ha->isp_ops->get_flash_version(vha, req->ring);
@@ -564,12 +575,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
 	if (ha->flags.disable_serdes) {
 		/* Mask HBA via NVRAM settings? */
 		ql_log(ql_log_info, vha, 0x0077,
-		    "Masking HBA WWPN "
-		    "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
-		    vha->port_name[0], vha->port_name[1],
-		    vha->port_name[2], vha->port_name[3],
-		    vha->port_name[4], vha->port_name[5],
-		    vha->port_name[6], vha->port_name[7]);
+		    "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
 		return QLA_FUNCTION_FAILED;
 	}
 
@@ -620,6 +626,11 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
 	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
 		qla24xx_read_fcp_prio_cfg(vha);
 
+	if (IS_P3P_TYPE(ha))
+		qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
+	else
+		qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
+
 	return (rval);
 }
 
@@ -1332,7 +1343,7 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = ha->req_q_map[0];
 
-	if (IS_QLA82XX(ha))
+	if (IS_P3P_TYPE(ha))
 		return QLA_SUCCESS;
 
 	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
@@ -1615,7 +1626,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
 	unsigned long flags;
 	uint16_t fw_major_version;
 
-	if (IS_QLA82XX(ha)) {
+	if (IS_P3P_TYPE(ha)) {
 		rval = ha->isp_ops->load_risc(vha, &srisc_address);
 		if (rval == QLA_SUCCESS) {
 			qla2x00_stop_firmware(vha);
@@ -1651,7 +1662,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
 			if (rval == QLA_SUCCESS) {
 enable_82xx_npiv:
 				fw_major_version = ha->fw_major_version;
-				if (IS_QLA82XX(ha))
+				if (IS_P3P_TYPE(ha))
 					qla82xx_check_md_needed(vha);
 				else
 					rval = qla2x00_get_fw_version(vha);
@@ -1681,7 +1692,7 @@ enable_82xx_npiv:
 					goto failed;
 
 				if (!fw_major_version && ql2xallocfwdump
-				    && !IS_QLA82XX(ha))
+				    && !(IS_P3P_TYPE(ha)))
 					qla2x00_alloc_fw_dump(vha);
 			}
 		} else {
@@ -1849,7 +1860,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
 	int rval;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (IS_QLA82XX(ha))
+	if (IS_P3P_TYPE(ha))
 		return;
 
 	/* Update Serial Link options. */
@@ -3061,22 +3072,13 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 	    mb);
 	if (rval != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_disc, vha, 0x2004,
-		    "Unable to adjust iIDMA "
-		    "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x "
-		    "%04x.\n", fcport->port_name[0], fcport->port_name[1],
-		    fcport->port_name[2], fcport->port_name[3],
-		    fcport->port_name[4], fcport->port_name[5],
-		    fcport->port_name[6], fcport->port_name[7], rval,
-		    fcport->fp_speed, mb[0], mb[1]);
+		    "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
+		    fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
 	} else {
 		ql_dbg(ql_dbg_disc, vha, 0x2005,
-		    "iIDMA adjusted to %s GB/s "
-		    "on %02x%02x%02x%02x%02x%02x%02x%02x.\n",
+		    "iIDMA adjusted to %s GB/s on %8phN.\n",
 		    qla2x00_get_link_speed_str(ha, fcport->fp_speed),
-		    fcport->port_name[0], fcport->port_name[1],
-		    fcport->port_name[2], fcport->port_name[3],
-		    fcport->port_name[4], fcport->port_name[5],
-		    fcport->port_name[6], fcport->port_name[7]);
+		    fcport->port_name);
 	}
 }
 
@@ -4007,10 +4009,18 @@ qla83xx_reset_ownership(scsi_qla_host_t *vha)
 	uint32_t class_type_mask = 0x3;
 	uint16_t fcoe_other_function = 0xffff, i;
 
-	qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
-
-	qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
-	qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
+	if (IS_QLA8044(ha)) {
+		drv_presence = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DRV_ACTIVE_INDEX);
+		dev_part_info1 = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DEV_PART_INFO_INDEX);
+		dev_part_info2 = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DEV_PART_INFO2);
+	} else {
+		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
+		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
+	}
 	for (i = 0; i < 8; i++) {
 		class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
 		if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
@@ -4347,7 +4357,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
 	/* For ISP82XX, driver waits for completion of the commands.
 	 * online flag should be set.
 	 */
-	if (!IS_QLA82XX(ha))
+	if (!(IS_P3P_TYPE(ha)))
 		vha->flags.online = 0;
 	ha->flags.chip_reset_done = 0;
 	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -4360,7 +4370,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
 	 * Driver waits for the completion of the commands.
 	 * the interrupts need to be enabled.
 	 */
-	if (!IS_QLA82XX(ha))
+	if (!(IS_P3P_TYPE(ha)))
 		ha->isp_ops->reset_chip(vha);
 
 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -4403,7 +4413,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
 
 	if (!ha->flags.eeh_busy) {
 		/* Make sure for ISP 82XX IO DMA is complete */
-		if (IS_QLA82XX(ha)) {
+		if (IS_P3P_TYPE(ha)) {
 			qla82xx_chip_reset_cleanup(vha);
 			ql_log(ql_log_info, vha, 0x00b4,
 			    "Done chip reset cleanup.\n");
@@ -4723,7 +4733,7 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
-	if (IS_QLA82XX(ha))
+	if (IS_P3P_TYPE(ha))
 		return;
 
 	vha->flags.online = 0;
@@ -4789,8 +4799,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
 	}
 	ha->nvram_size = sizeof(struct nvram_24xx);
 	ha->vpd_size = FA_NVRAM_VPD_SIZE;
-	if (IS_QLA82XX(ha))
-		ha->vpd_size = FA_VPD_SIZE_82XX;
 
 	/* Get VPD data into cache */
 	ha->vpd = ha->nvram + VPD_OFFSET;
@@ -5552,6 +5560,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
 	/* Determine NVRAM starting address. */
 	ha->nvram_size = sizeof(struct nvram_81xx);
 	ha->vpd_size = FA_NVRAM_VPD_SIZE;
+	if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
+		ha->vpd_size = FA_VPD_SIZE_82XX;
 
 	/* Get VPD data into cache */
 	ha->vpd = ha->nvram + VPD_OFFSET;
@@ -5734,7 +5744,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
 
 	/* Link Down Timeout = 0:
 	 *
-	 * 	When Port Down timer expires we will start returning
+	 *	When Port Down timer expires we will start returning
 	 *	I/O's to OS with "DID_NO_CONNECT".
 	 *
 	 * Link Down Timeout != 0:
@@ -6061,7 +6071,7 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
 	if (priority < 0)
 		return QLA_FUNCTION_FAILED;
 
-	if (IS_QLA82XX(vha->hw)) {
+	if (IS_P3P_TYPE(vha->hw)) {
 		fcport->fcp_prio = priority & 0xf;
 		return QLA_SUCCESS;
 	}

+ 1 - 1
drivers/scsi/qla2xxx/qla_inline.h

@@ -59,7 +59,7 @@ qla2x00_poll(struct rsp_que *rsp)
 	unsigned long flags;
 	struct qla_hw_data *ha = rsp->hw;
 	local_irq_save(flags);
-	if (IS_QLA82XX(ha))
+	if (IS_P3P_TYPE(ha))
 		qla82xx_poll(0, rsp);
 	else
 		ha->isp_ops->intr_handler(0, rsp);

+ 15 - 2
drivers/scsi/qla2xxx/qla_iocb.c

@@ -32,9 +32,11 @@ qla2x00_get_cmd_direction(srb_t *sp)
 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
 		cflags = CF_WRITE;
 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.output_requests++;
 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		cflags = CF_READ;
 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.input_requests++;
 	}
 	return (cflags);
 }
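
Alongside the existing byte counters, the driver now counts requests per data
direction; qla2x00_get_fc_host_stats() (earlier hunk) reports them as
fcp_input_requests/fcp_output_requests. The bookkeeping, reduced to a sketch
with assumed names:

	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		stats->output_bytes += scsi_bufflen(cmd);
		stats->output_requests++;
		break;
	case DMA_FROM_DEVICE:
		stats->input_bytes += scsi_bufflen(cmd);
		stats->input_requests++;
		break;
	default:
		break;
	}
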
@@ -474,7 +476,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
 	struct qla_hw_data *ha = vha->hw;
 	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
 
-	if (IS_QLA82XX(ha)) {
+	if (IS_P3P_TYPE(ha)) {
 		qla82xx_start_iocbs(vha);
 	} else {
 		/* Adjust ring index. */
@@ -642,10 +644,12 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
 		cmd_pkt->control_flags =
 		    __constant_cpu_to_le16(CF_WRITE_DATA);
 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.output_requests++;
 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		cmd_pkt->control_flags =
 		    __constant_cpu_to_le16(CF_READ_DATA);
 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.input_requests++;
 	}
 
 	cur_seg = scsi_sglist(cmd);
@@ -758,10 +762,12 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 		cmd_pkt->task_mgmt_flags =
 		    __constant_cpu_to_le16(TMF_WRITE_DATA);
 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.output_requests++;
 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		cmd_pkt->task_mgmt_flags =
 		    __constant_cpu_to_le16(TMF_READ_DATA);
 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.input_requests++;
 	}
 
 	/* One DSD is available in the Command Type 3 IOCB */
@@ -1844,7 +1850,7 @@ skip_cmd_array:
 	if (req->cnt < req_cnt) {
 		if (ha->mqenable || IS_QLA83XX(ha))
 			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
-		else if (IS_QLA82XX(ha))
+		else if (IS_P3P_TYPE(ha))
 			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
 		else if (IS_FWI2_CAPABLE(ha))
 			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
@@ -2056,6 +2062,8 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
             (bsg_job->reply_payload.sg_list)));
         els_iocb->rx_len = cpu_to_le32(sg_dma_len
             (bsg_job->reply_payload.sg_list));
+
+	sp->fcport->vha->qla_stats.control_requests++;
 }
 
 static void
@@ -2133,6 +2141,8 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
 		avail_dsds--;
 	}
 	ct_iocb->entry_count = entry_count;
+
+	sp->fcport->vha->qla_stats.control_requests++;
 }
 
 static void
@@ -2685,6 +2695,9 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
 	vha->bidi_stats.transfer_bytes += req_data_len;
 	vha->bidi_stats.io_count++;
 
+	vha->qla_stats.output_bytes += req_data_len;
+	vha->qla_stats.output_requests++;
+
 	/* Only one dsd is available for bidirectional IOCB, remaining dsds
 	 * are bundled in continuation iocb
 	 */

+ 54 - 37
drivers/scsi/qla2xxx/qla_isr.c

@@ -282,25 +282,38 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
 	    "%04x %04x %04x %04x %04x %04x %04x.\n",
 	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
 	    mb[4], mb[5], mb[6]);
-	if ((aen == MBA_IDC_COMPLETE && mb[1] >> 15)) {
-		vha->hw->flags.idc_compl_status = 1;
-		if (vha->hw->notify_dcbx_comp)
-			complete(&vha->hw->dcbx_comp);
-	}
-
-	/* Acknowledgement needed? [Notify && non-zero timeout]. */
-	timeout = (descr >> 8) & 0xf;
-	if (aen != MBA_IDC_NOTIFY || !timeout)
-		return;
+	switch (aen) {
+	/* Handle IDC Error completion case. */
+	case MBA_IDC_COMPLETE:
+		if (mb[1] >> 15) {
+			vha->hw->flags.idc_compl_status = 1;
+			if (vha->hw->notify_dcbx_comp)
+				complete(&vha->hw->dcbx_comp);
+		}
+		break;
 
-	ql_dbg(ql_dbg_async, vha, 0x5022,
-	    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
-	    vha->host_no, event[aen & 0xff], timeout);
+	case MBA_IDC_NOTIFY:
+		/* Acknowledgement needed? [Notify && non-zero timeout]. */
+		timeout = (descr >> 8) & 0xf;
+		ql_dbg(ql_dbg_async, vha, 0x5022,
+		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
+		    vha->host_no, event[aen & 0xff], timeout);
 
-	rval = qla2x00_post_idc_ack_work(vha, mb);
-	if (rval != QLA_SUCCESS)
-		ql_log(ql_log_warn, vha, 0x5023,
-		    "IDC failed to post ACK.\n");
+		if (!timeout)
+			return;
+		rval = qla2x00_post_idc_ack_work(vha, mb);
+		if (rval != QLA_SUCCESS)
+			ql_log(ql_log_warn, vha, 0x5023,
+			    "IDC failed to post ACK.\n");
+		break;
+	case MBA_IDC_TIME_EXT:
+		vha->hw->idc_extend_tmo = descr;
+		ql_dbg(ql_dbg_async, vha, 0x5087,
+		    "%lu Inter-Driver Communication %s -- "
+		    "Extend timeout by=%d.\n",
+		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
+		break;
+	}
 }
 
 #define LS_UNKNOWN	2
@@ -691,7 +704,8 @@ skip_rio:
 	case MBA_LOOP_DOWN:		/* Loop Down Event */
 		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
 			? RD_REG_WORD(&reg24->mailbox4) : 0;
-		mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
+		mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
+			: mbx;
 		ql_dbg(ql_dbg_async, vha, 0x500b,
 		    "LOOP DOWN detected (%x %x %x %x).\n",
 		    mb[1], mb[2], mb[3], mbx);
@@ -740,7 +754,7 @@ skip_rio:
 		if (IS_QLA2100(ha))
 			break;
 
-		if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
+		if (IS_CNA_CAPABLE(ha)) {
 			ql_dbg(ql_dbg_async, vha, 0x500d,
 			    "DCBX Completed -- %04x %04x %04x.\n",
 			    mb[1], mb[2], mb[3]);
@@ -1002,7 +1016,7 @@ skip_rio:
 		    mb[1], mb[2], mb[3]);
 		break;
 	case MBA_IDC_NOTIFY:
-		if (IS_QLA8031(vha->hw)) {
+		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
 			mb[4] = RD_REG_WORD(&reg24->mailbox4);
 			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
 			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
@@ -1022,7 +1036,8 @@ skip_rio:
 			complete(&ha->lb_portup_comp);
 		/* Fallthru */
 	case MBA_IDC_TIME_EXT:
-		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
+		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
+		    IS_QLA8044(ha))
 			qla81xx_idc_event(vha, mb[0], mb[1]);
 		break;
 
@@ -1063,7 +1078,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
 		ql_log(ql_log_warn, vha, 0x3014,
 		    "Invalid SCSI command index (%x).\n", index);
 
-		if (IS_QLA82XX(ha))
+		if (IS_P3P_TYPE(ha))
 			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
 		else
 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1080,7 +1095,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
 	} else {
 		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
 
-		if (IS_QLA82XX(ha))
+		if (IS_P3P_TYPE(ha))
 			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
 		else
 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1100,7 +1115,7 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
 	if (index >= req->num_outstanding_cmds) {
 		ql_log(ql_log_warn, vha, 0x5031,
 		    "Invalid command index (%x).\n", index);
-		if (IS_QLA82XX(ha))
+		if (IS_P3P_TYPE(ha))
 			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
 		else
 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1805,6 +1820,9 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
 		if (scsi_status == 0) {
 			bsg_job->reply->reply_payload_rcv_len =
 					bsg_job->reply_payload.payload_len;
+			vha->qla_stats.input_bytes +=
+				bsg_job->reply->reply_payload_rcv_len;
+			vha->qla_stats.input_requests++;
 			rval = EXT_STATUS_OK;
 		}
 		goto done;
@@ -1949,7 +1967,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 		ql_dbg(ql_dbg_io, vha, 0x3017,
 		    "Invalid status handle (0x%x).\n", sts->handle);
 
-		if (IS_QLA82XX(ha))
+		if (IS_P3P_TYPE(ha))
 			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
 		else
 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -2176,8 +2194,10 @@ check_scsi_status:
 		}
 
 		ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
-		    "Port down status: port-state=0x%x.\n",
-		    atomic_read(&fcport->state));
+		    "Port to be marked lost on fcport=%02x%02x%02x, current "
+		    "port state= %s.\n", fcport->d_id.b.domain,
+		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
+		    port_state_str[atomic_read(&fcport->state)]);
 
 		if (atomic_read(&fcport->state) == FCS_ONLINE)
 			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
@@ -2212,16 +2232,13 @@ check_scsi_status:
 out:
 	if (logit)
 		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
-		    "FCP command status: 0x%x-0x%x (0x%x) "
-		    "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
-		    "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
+		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d "
+		    "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
 		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
 		    comp_status, scsi_status, res, vha->host_no,
 		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
 		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
-		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
-		    cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
-		    cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
+		    cp->cmnd, scsi_bufflen(cp), rsp_info_len,
 		    resid_len, fw_resid_len);
 
 	if (!res)
@@ -2324,7 +2341,7 @@ fatal:
 	ql_log(ql_log_warn, vha, 0x5030,
 	    "Error entry - invalid handle/queue.\n");
 
-	if (IS_QLA82XX(ha))
+	if (IS_P3P_TYPE(ha))
 		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
 	else
 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -2452,7 +2469,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 	}
 
 	/* Adjust ring index */
-	if (IS_QLA82XX(ha)) {
+	if (IS_P3P_TYPE(ha)) {
 		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
 		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
 	} else
@@ -2865,7 +2882,7 @@ msix_failed:
 			ret = request_irq(qentry->vector,
 				qla83xx_msix_entries[i].handler,
 				0, qla83xx_msix_entries[i].name, rsp);
-		} else if (IS_QLA82XX(ha)) {
+		} else if (IS_P3P_TYPE(ha)) {
 			ret = request_irq(qentry->vector,
 				qla82xx_msix_entries[i].handler,
 				0, qla82xx_msix_entries[i].name, rsp);
@@ -2950,7 +2967,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
 skip_msix:
 
 	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
-	    !IS_QLA8001(ha) && !IS_QLA82XX(ha) && !IS_QLAFX00(ha))
+	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha))
 		goto skip_msi;
 
 	ret = pci_enable_msi(ha->pdev);

+ 226 - 71
drivers/scsi/qla2xxx/qla_mbx.c

@@ -75,7 +75,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 		return QLA_FUNCTION_TIMEOUT;
 	}
 
-	if (IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung) {
+	if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
 		/* Setting Link-Down error */
 		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
 		ql_log(ql_log_warn, vha, 0x1004,
@@ -106,9 +106,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
 	/* Load mailbox registers. */
-	if (IS_QLA82XX(ha))
+	if (IS_P3P_TYPE(ha))
 		optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
-	else if (IS_FWI2_CAPABLE(ha) && !IS_QLA82XX(ha))
+	else if (IS_FWI2_CAPABLE(ha) && !IS_P3P_TYPE(ha))
 		optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
 	else
 		optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
@@ -117,33 +117,25 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 	command = mcp->mb[0];
 	mboxes = mcp->out_mb;
 
+	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111,
+	    "Mailbox registers (OUT):\n");
 	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
 		if (IS_QLA2200(ha) && cnt == 8)
 			optr =
 			    (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
-		if (mboxes & BIT_0)
+		if (mboxes & BIT_0) {
+			ql_dbg(ql_dbg_mbx, vha, 0x1112,
+			    "mbox[%d]<-0x%04x\n", cnt, *iptr);
 			WRT_REG_WORD(optr, *iptr);
+		}
 
 		mboxes >>= 1;
 		optr++;
 		iptr++;
 	}
 
-	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111,
-	    "Loaded MBX registers (displayed in bytes) =.\n");
-	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1112,
-	    (uint8_t *)mcp->mb, 16);
-	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1113,
-	    ".\n");
-	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1114,
-	    ((uint8_t *)mcp->mb + 0x10), 16);
-	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1115,
-	    ".\n");
-	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1116,
-	    ((uint8_t *)mcp->mb + 0x20), 8);
 	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
 	    "I/O Address = %p.\n", optr);
-	ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x100e);
 
 	/* Issue set host interrupt command to send cmd out. */
 	ha->flags.mbox_int = 0;
@@ -159,7 +151,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
 		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
 
-		if (IS_QLA82XX(ha)) {
+		if (IS_P3P_TYPE(ha)) {
 			if (RD_REG_DWORD(&reg->isp82.hint) &
 				HINT_MBX_INT_PENDING) {
 				spin_unlock_irqrestore(&ha->hardware_lock,
@@ -189,7 +181,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 		ql_dbg(ql_dbg_mbx, vha, 0x1011,
 		    "Cmd=%x Polling Mode.\n", command);
 
-		if (IS_QLA82XX(ha)) {
+		if (IS_P3P_TYPE(ha)) {
 			if (RD_REG_DWORD(&reg->isp82.hint) &
 				HINT_MBX_INT_PENDING) {
 				spin_unlock_irqrestore(&ha->hardware_lock,
@@ -236,7 +228,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 		ha->flags.mbox_int = 0;
 		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
 
-		if ((IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung)) {
+		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
 			ha->flags.mbox_busy = 0;
 			/* Setting Link-Down error */
 			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
@@ -254,9 +246,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 		iptr2 = mcp->mb;
 		iptr = (uint16_t *)&ha->mailbox_out[0];
 		mboxes = mcp->in_mb;
+
+		ql_dbg(ql_dbg_mbx, vha, 0x1113,
+		    "Mailbox registers (IN):\n");
 		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
-			if (mboxes & BIT_0)
+			if (mboxes & BIT_0) {
 				*iptr2 = *iptr;
+				ql_dbg(ql_dbg_mbx, vha, 0x1114,
+				    "mbox[%d]->0x%04x\n", cnt, *iptr2);
+			}
 
 			mboxes >>= 1;
 			iptr2++;
@@ -537,7 +535,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
 	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
-	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha))
+	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
 		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
 	if (IS_FWI2_CAPABLE(ha))
 		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
@@ -556,7 +554,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
 		ha->fw_memory_size = 0x1FFFF;		/* Defaults to 128KB. */
 	else
 		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
-	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) {
+	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
 		ha->mpi_version[0] = mcp->mb[10] & 0xff;
 		ha->mpi_version[1] = mcp->mb[11] >> 8;
 		ha->mpi_version[2] = mcp->mb[11] & 0xff;
@@ -1201,7 +1199,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
 	    "Entered %s.\n", __func__);
 
-	if (IS_QLA82XX(ha) && ql2xdbwr)
+	if (IS_P3P_TYPE(ha) && ql2xdbwr)
 		qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
 			(0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
 
@@ -1667,7 +1665,11 @@ qla24xx_link_initialize(scsi_qla_host_t *vha)
 		return QLA_FUNCTION_FAILED;
 
 	mcp->mb[0] = MBC_LINK_INITIALIZATION;
-	mcp->mb[1] = BIT_6|BIT_4;
+	mcp->mb[1] = BIT_4;
+	if (vha->hw->operating_mode == LOOP)
+		mcp->mb[1] |= BIT_6;
+	else
+		mcp->mb[1] |= BIT_5;
 	mcp->mb[2] = 0;
 	mcp->mb[3] = 0;
 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -3574,7 +3576,6 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
 	unsigned long flags;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
-	struct device_reg_25xxmq __iomem *reg;
 	struct qla_hw_data *ha = vha->hw;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
@@ -3595,9 +3596,6 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
 	if (IS_QLA83XX(ha))
 		mcp->mb[15] = 0;
 
-	reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) +
-		QLA_QUE_PAGE * req->id);
-
 	mcp->mb[4] = req->id;
 	/* que in ptr index */
 	mcp->mb[8] = 0;
@@ -3619,12 +3617,10 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	if (!(req->options & BIT_0)) {
-		WRT_REG_DWORD(&reg->req_q_in, 0);
+		WRT_REG_DWORD(req->req_q_in, 0);
 		if (!IS_QLA83XX(ha))
-			WRT_REG_DWORD(&reg->req_q_out, 0);
+			WRT_REG_DWORD(req->req_q_out, 0);
 	}
-	req->req_q_in = &reg->req_q_in;
-	req->req_q_out = &reg->req_q_out;
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	rval = qla2x00_mailbox_command(vha, mcp);
@@ -3646,7 +3642,6 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
 	unsigned long flags;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
-	struct device_reg_25xxmq __iomem *reg;
 	struct qla_hw_data *ha = vha->hw;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
@@ -3664,9 +3659,6 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
 	if (IS_QLA83XX(ha))
 		mcp->mb[15] = 0;
 
-	reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) +
-		QLA_QUE_PAGE * rsp->id);
-
 	mcp->mb[4] = rsp->id;
 	/* que in ptr index */
 	mcp->mb[8] = 0;
@@ -3690,9 +3682,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	if (!(rsp->options & BIT_0)) {
-		WRT_REG_DWORD(&reg->rsp_q_out, 0);
+		WRT_REG_DWORD(rsp->rsp_q_out, 0);
 		if (!IS_QLA83XX(ha))
-			WRT_REG_DWORD(&reg->rsp_q_in, 0);
+			WRT_REG_DWORD(rsp->rsp_q_in, 0);
 	}
 
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3872,6 +3864,112 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
 	return rval;
 }
 
+int
+qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	int i;
+	int len;
+	uint16_t *str;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_P3P_TYPE(ha))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
+	    "Entered %s.\n", __func__);
+
+	str = (void *)version;
+	len = strlen(version);
+
+	mcp->mb[0] = MBC_SET_RNID_PARAMS;
+	mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
+	mcp->out_mb = MBX_1|MBX_0;
+	for (i = 4; i < 16 && len; i++, str++, len -= 2) {
+		mcp->mb[i] = cpu_to_le16p(str);
+		mcp->out_mb |= 1<<i;
+	}
+	for (; i < 16; i++) {
+		mcp->mb[i] = 0;
+		mcp->out_mb |= 1<<i;
+	}
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x117c,
+		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
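
The loop above streams the ASCII version string into mailboxes 4..15, two
characters per 16-bit word. A standalone sketch of the same packing; the
zero-fill of unused words follows the function above, while the helper name
and example string are illustrative:

	/* Sketch: pack "ver" little-endian into mailbox words 4..15,
	 * two characters per word, zeroing whatever is left over. */
	static void pack_version_mb(uint16_t mb[16], const char *ver)
	{
		const uint16_t *str = (const void *)ver;
		int i, len = strlen(ver);

		for (i = 4; i < 16 && len > 0; i++, str++, len -= 2)
			mb[i] = cpu_to_le16p(str);
		for (; i < 16; i++)
			mb[i] = 0;
	}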
+
+int
+qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	int len;
+	uint16_t dwlen;
+	uint8_t *str;
+	dma_addr_t str_dma;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
+	    IS_P3P_TYPE(ha))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
+	    "Entered %s.\n", __func__);
+
+	str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
+	if (!str) {
+		ql_log(ql_log_warn, vha, 0x117f,
+		    "Failed to allocate driver version param.\n");
+		return QLA_MEMORY_ALLOC_FAILED;
+	}
+
+	memcpy(str, "\x7\x3\x11\x0", 4);
+	dwlen = str[0];
+	len = dwlen * 4 - 4;
+	memset(str + 4, 0, len);
+	if (len > strlen(version))
+		len = strlen(version);
+	memcpy(str + 4, version, len);
+
+	mcp->mb[0] = MBC_SET_RNID_PARAMS;
+	mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
+	mcp->mb[2] = MSW(LSD(str_dma));
+	mcp->mb[3] = LSW(LSD(str_dma));
+	mcp->mb[6] = MSW(MSD(str_dma));
+	mcp->mb[7] = LSW(MSD(str_dma));
+	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1180,
+		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
+		    "Done %s.\n", __func__);
+	}
+
+	dma_pool_free(ha->s_dma_pool, str, str_dma);
+
+	return rval;
+}
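
On the FWI2 path the version travels by DMA instead of in mailboxes. The
buffer built above is a 4-byte header whose first byte is the payload length
in 32-bit words (7, hence a 28-byte buffer with 24 bytes of string space),
followed by the zero-padded version string. A sketch of the layout; the
version string here is an arbitrary example:

	uint8_t buf[28];

	memcpy(buf, "\x7\x3\x11\x0", 4);	/* dwlen = 7 words; the other
						 * header bytes are taken
						 * verbatim from the driver */
	memset(buf + 4, 0, sizeof(buf) - 4);
	memcpy(buf + 4, "1.0.0-k", 7);		/* example version; longer
						 * strings get truncated to
						 * the 24-byte string area */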
+
 static int
 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
 {
@@ -4407,7 +4505,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
 	    "Entered %s.\n", __func__);
 
-	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha))
 		return QLA_FUNCTION_FAILED;
 	mcp->mb[0] = MBC_GET_PORT_CONFIG;
 	mcp->out_mb = MBX_0;
@@ -4512,40 +4610,43 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
 	struct qla_hw_data *ha = vha->hw;
 	uint8_t byte;
 
-	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca,
-	    "Entered %s.\n", __func__);
-
-	if (ha->thermal_support & THERMAL_SUPPORT_I2C) {
-		rval = qla2x00_read_sfp(vha, 0, &byte,
-		    0x98, 0x1, 1, BIT_13|BIT_12|BIT_0);
-		*temp = byte;
-		if (rval == QLA_SUCCESS)
-			goto done;
-
-		ql_log(ql_log_warn, vha, 0x10c9,
-		    "Thermal not supported through I2C bus, trying alternate "
-		    "method (ISP access).\n");
-		ha->thermal_support &= ~THERMAL_SUPPORT_I2C;
+	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1150,
+		    "Thermal not supported by this card.\n");
+		return rval;
 	}
 
-	if (ha->thermal_support & THERMAL_SUPPORT_ISP) {
-		rval = qla2x00_read_asic_temperature(vha, temp);
-		if (rval == QLA_SUCCESS)
-			goto done;
-
-		ql_log(ql_log_warn, vha, 0x1019,
-		    "Thermal not supported through ISP.\n");
-		ha->thermal_support &= ~THERMAL_SUPPORT_ISP;
+	if (IS_QLA25XX(ha)) {
+		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
+		    ha->pdev->subsystem_device == 0x0175) {
+			rval = qla2x00_read_sfp(vha, 0, &byte,
+			    0x98, 0x1, 1, BIT_13|BIT_0);
+			*temp = byte;
+			return rval;
+		}
+		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
+		    ha->pdev->subsystem_device == 0x338e) {
+			rval = qla2x00_read_sfp(vha, 0, &byte,
+			    0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
+			*temp = byte;
+			return rval;
+		}
+		ql_dbg(ql_dbg_mbx, vha, 0x10c9,
+		    "Thermal not supported by this card.\n");
+		return rval;
 	}
 
-	ql_log(ql_log_warn, vha, 0x1150,
-	    "Thermal not supported by this card "
-	    "(ignoring further requests).\n");
-	return  rval;
+	if (IS_QLA82XX(ha)) {
+		*temp = qla82xx_read_temperature(vha);
+		rval = QLA_SUCCESS;
+		return rval;
+	} else if (IS_QLA8044(ha)) {
+		*temp = qla8044_read_temperature(vha);
+		rval = QLA_SUCCESS;
+		return rval;
+	}
 
-done:
-	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
-	    "Done %s.\n", __func__);
+	rval = qla2x00_read_asic_temperature(vha, temp);
 	return rval;
 }
 
@@ -4595,7 +4696,7 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
 	    "Entered %s.\n", __func__);
 
-	if (!IS_QLA82XX(ha))
+	if (!IS_P3P_TYPE(ha))
 		return QLA_FUNCTION_FAILED;
 
 	memset(mcp, 0, sizeof(mbx_cmd_t));
@@ -4712,6 +4813,60 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
 	return rval;
 }
 
+int
+qla8044_md_get_template(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	int rval = QLA_FUNCTION_FAILED;
+	int offset = 0, size = MINIDUMP_SIZE_36K;
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
+	    "Entered %s.\n", __func__);
+
+	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
+	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
+	if (!ha->md_tmplt_hdr) {
+		ql_log(ql_log_warn, vha, 0xb11b,
+		    "Unable to allocate memory for Minidump template.\n");
+		return rval;
+	}
+
+	memset(mcp->mb, 0, sizeof(mcp->mb));
+	while (offset < ha->md_template_size) {
+		mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+		mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+		mcp->mb[2] = LSW(RQST_TMPLT);
+		mcp->mb[3] = MSW(RQST_TMPLT);
+		mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
+		mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
+		mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
+		mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
+		mcp->mb[8] = LSW(size);
+		mcp->mb[9] = MSW(size);
+		mcp->mb[10] = LSW(offset);
+		mcp->mb[11] = MSW(offset);
+		mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+		mcp->tov = MBX_TOV_SECONDS;
+		mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
+			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+		mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+		rval = qla2x00_mailbox_command(vha, mcp);
+
+		if (rval != QLA_SUCCESS) {
+			ql_dbg(ql_dbg_mbx, vha, 0xb11c,
+				"mailbox command FAILED=0x%x, subcode=%x.\n",
+				((mcp->mb[1] << 16) | mcp->mb[0]),
+				((mcp->mb[3] << 16) | mcp->mb[2]));
+			return rval;
+		} else
+			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
+				"Done %s.\n", __func__);
+		offset = offset + size;
+	}
+	return rval;
+}
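
The template is fetched in MINIDUMP_SIZE_36K chunks, with the 32-bit buffer
offset split across two 16-bit mailboxes. A worked example of that split
using the driver's LSW/MSW 16-bit extraction macros:

	uint32_t offset = 0x0002A000;	/* 168 KB into the template buffer */
	uint16_t mb10 = LSW(offset);	/* 0xA000 */
	uint16_t mb11 = MSW(offset);	/* 0x0002 */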
+
 int
 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
 {
@@ -4808,7 +4963,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_QLA82XX(ha))
+	if (!IS_P3P_TYPE(ha))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,

+ 2 - 0
drivers/scsi/qla2xxx/qla_mid.c

@@ -699,6 +699,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
 	req->cnt = req->length;
 	req->id = que_id;
 	reg = ISP_QUE_REG(ha, que_id);
+	req->req_q_in = &reg->isp25mq.req_q_in;
+	req->req_q_out = &reg->isp25mq.req_q_out;
 	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
 	mutex_unlock(&ha->vport_lock);
 	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,

+ 138 - 26
drivers/scsi/qla2xxx/qla_mr.c

@@ -294,7 +294,7 @@ premature_exit:
  * Context:
  *	Kernel context.
  */
-static int
+int
 qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
 {
 	int rval;
@@ -775,6 +775,29 @@ qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
 	return qlafx00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
 }
 
+int
+qlafx00_loop_reset(scsi_qla_host_t *vha)
+{
+	int ret;
+	struct fc_port *fcport;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (ql2xtargetreset) {
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			if (fcport->port_type != FCT_TARGET)
+				continue;
+
+			ret = ha->isp_ops->target_reset(fcport, 0, 0);
+			if (ret != QLA_SUCCESS) {
+				ql_dbg(ql_dbg_taskm, vha, 0x803d,
+				    "Bus Reset failed: Reset=%d "
+				    "d_id=%x.\n", ret, fcport->d_id.b24);
+			}
+		}
+	}
+	return QLA_SUCCESS;
+}
+
 int
 qlafx00_iospace_config(struct qla_hw_data *ha)
 {
@@ -918,12 +941,23 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
 	uint32_t aenmbx, aenmbx7 = 0;
+	uint32_t pseudo_aen;
 	uint32_t state[5];
 	bool done = false;
 
 	/* 30 seconds wait - Adjust if required */
 	wait_time = 30;
 
+	pseudo_aen = RD_REG_DWORD(&reg->pseudoaen);
+	if (pseudo_aen == 1) {
+		aenmbx7 = RD_REG_DWORD(&reg->initval7);
+		ha->mbx_intr_code = MSW(aenmbx7);
+		ha->rqstq_intr_code = LSW(aenmbx7);
+		rval = qlafx00_driver_shutdown(vha, 10);
+		if (rval != QLA_SUCCESS)
+			qlafx00_soft_reset(vha);
+	}
+
 	/* wait time before firmware ready */
 	wtime = jiffies + (wait_time * HZ);
 	do {
@@ -1349,21 +1383,22 @@ qlafx00_configure_devices(scsi_qla_host_t *vha)
 }
 
 static void
-qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha)
+qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp)
 {
 	struct qla_hw_data *ha = vha->hw;
 	fc_port_t *fcport;
 
 	vha->flags.online = 0;
-	ha->flags.chip_reset_done = 0;
 	ha->mr.fw_hbt_en = 0;
-	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
-	vha->qla_stats.total_isp_aborts++;
-
-	ql_log(ql_log_info, vha, 0x013f,
-	    "Performing ISP error recovery - ha = %p.\n", ha);
 
-	ha->isp_ops->reset_chip(vha);
+	if (!critemp) {
+		ha->flags.chip_reset_done = 0;
+		clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		vha->qla_stats.total_isp_aborts++;
+		ql_log(ql_log_info, vha, 0x013f,
+		    "Performing ISP error recovery - ha = %p.\n", ha);
+		ha->isp_ops->reset_chip(vha);
+	}
 
 	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
 		atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -1383,12 +1418,19 @@ qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha)
 	}
 
 	if (!ha->flags.eeh_busy) {
-		/* Requeue all commands in outstanding command list. */
-		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+		if (critemp) {
+			qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+		} else {
+			/* Requeue all commands in outstanding command list. */
+			qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+		}
 	}
 
 	qla2x00_free_irqs(vha);
-	set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+	if (critemp)
+		set_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags);
+	else
+		set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
 
 	/* Clear the Interrupts */
 	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
@@ -1475,6 +1517,7 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
 	uint32_t fw_heart_beat;
 	uint32_t aenmbx0;
 	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+	uint32_t tempc;
 
 	/* Check firmware health */
 	if (ha->mr.fw_hbt_cnt)
@@ -1539,10 +1582,36 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
 		} else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
 			ha->mr.fw_reset_timer_tick =
 			    QLAFX00_MAX_RESET_INTERVAL;
+		} else if (aenmbx0 == MBA_FW_RESET_FCT) {
+			ha->mr.fw_reset_timer_tick =
+			    QLAFX00_MAX_RESET_INTERVAL;
 		}
 		ha->mr.old_aenmbx0_state = aenmbx0;
 		ha->mr.fw_reset_timer_tick--;
 	}
+	if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
+		/*
+		 * Critical temperature recovery to be
+		 * performed in timer routine
+		 */
+		if (ha->mr.fw_critemp_timer_tick == 0) {
+			tempc = QLAFX00_GET_TEMPERATURE(ha);
+			ql_dbg(ql_dbg_timer, vha, 0x6012,
+			    "ISPFx00(%s): Critical temp timer, "
+			    "current SOC temperature: %d\n",
+			    __func__, tempc);
+			if (tempc < ha->mr.critical_temperature) {
+				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+				clear_bit(FX00_CRITEMP_RECOVERY,
+				    &vha->dpc_flags);
+				qla2xxx_wake_dpc(vha);
+			}
+			ha->mr.fw_critemp_timer_tick =
+			    QLAFX00_CRITEMP_INTERVAL;
+		} else {
+			ha->mr.fw_critemp_timer_tick--;
+		}
+	}
 }
 
 /*
@@ -1570,7 +1639,7 @@ qlafx00_reset_initialize(scsi_qla_host_t *vha)
 
 	if (vha->flags.online) {
 		scsi_block_requests(vha->host);
-		qlafx00_abort_isp_cleanup(vha);
+		qlafx00_abort_isp_cleanup(vha, false);
 	}
 
 	ql_log(ql_log_info, vha, 0x0143,
@@ -1602,7 +1671,15 @@ qlafx00_abort_isp(scsi_qla_host_t *vha)
 		}
 
 		scsi_block_requests(vha->host);
-		qlafx00_abort_isp_cleanup(vha);
+		qlafx00_abort_isp_cleanup(vha, false);
+	} else {
+		scsi_block_requests(vha->host);
+		clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		vha->qla_stats.total_isp_aborts++;
+		ha->isp_ops->reset_chip(vha);
+		set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+		/* Clear the Interrupts */
+		QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
 	}
 
 	ql_log(ql_log_info, vha, 0x0145,
@@ -1688,6 +1765,15 @@ qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
 		aen_code = FCH_EVT_LINKDOWN;
 		aen_data = 0;
 		break;
+	case QLAFX00_MBA_TEMP_CRIT:	/* Critical temperature event */
+		ql_log(ql_log_info, vha, 0x5082,
+		    "Process critical temperature event "
+		    "aenmb[0]: %x\n",
+		    evt->u.aenfx.evtcode);
+		scsi_block_requests(vha->host);
+		qlafx00_abort_isp_cleanup(vha, true);
+		scsi_unblock_requests(vha->host);
+		break;
 	}
 
 	fc_host_post_event(vha->host, fc_get_event_number(),
@@ -1879,6 +1965,11 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
 		    sizeof(vha->hw->mr.uboot_version));
 		memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
 		    sizeof(vha->hw->mr.fru_serial_num));
+		vha->hw->mr.critical_temperature =
+		    (pinfo->nominal_temp_value) ?
+		    pinfo->nominal_temp_value : QLAFX00_CRITEMP_THRSHLD;
+		ha->mr.extended_io_enabled = (pinfo->enabled_capabilities &
+		    QLAFX00_EXTENDED_IO_EN_MASK) != 0;
 	} else if (fx_type == FXDISC_GET_PORT_INFO) {
 		struct port_info_data *pinfo =
 		    (struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
@@ -2021,6 +2112,7 @@ qlafx00_initialize_adapter(scsi_qla_host_t *vha)
 {
 	int	rval;
 	struct qla_hw_data *ha = vha->hw;
+	uint32_t tempc;
 
 	/* Clear adapter flags. */
 	vha->flags.online = 0;
@@ -2028,7 +2120,6 @@ qlafx00_initialize_adapter(scsi_qla_host_t *vha)
 	vha->flags.reset_active = 0;
 	ha->flags.pci_channel_io_perm_failure = 0;
 	ha->flags.eeh_busy = 0;
-	ha->thermal_support = 0;
 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
 	atomic_set(&vha->loop_state, LOOP_DOWN);
 	vha->device_flags = DFLG_NO_CABLE;
@@ -2072,6 +2163,11 @@ qlafx00_initialize_adapter(scsi_qla_host_t *vha)
 	rval = qla2x00_init_rings(vha);
 	ha->flags.chip_reset_done = 1;
 
+	tempc = QLAFX00_GET_TEMPERATURE(ha);
+	ql_dbg(ql_dbg_init, vha, 0x0152,
+	    "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n",
+	    __func__, tempc);
+
 	return rval;
 }
 
@@ -2526,16 +2622,13 @@ check_scsi_status:
 
 	if (logit)
 		ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
-		    "FCP command status: 0x%x-0x%x (0x%x) "
-		    "nexus=%ld:%d:%d tgt_id: 0x%x lscsi_status: 0x%x"
-		    "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
-		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x "
-		    "sense_len=0x%x, par_sense_len=0x%x, rsp_info_len=0x%x\n",
+		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d "
+		    "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x "
+		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, "
+		    "par_sense_len=0x%x, rsp_info_len=0x%x\n",
 		    comp_status, scsi_status, res, vha->host_no,
 		    cp->device->id, cp->device->lun, fcport->tgt_id,
-		    lscsi_status, cp->cmnd[0], cp->cmnd[1], cp->cmnd[2],
-		    cp->cmnd[3], cp->cmnd[4], cp->cmnd[5], cp->cmnd[6],
-		    cp->cmnd[7], cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp),
+		    lscsi_status, cp->cmnd, scsi_bufflen(cp),
 		    rsp_info_len, resid_len, fw_resid_len, sense_len,
 		    par_sense_len, rsp_info_len);
 
@@ -2720,9 +2813,6 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
 	struct sts_entry_fx00 *pkt;
 	response_t *lptr;
 
-	if (!vha->flags.online)
-		return;
-
 	while (RD_REG_DWORD((void __iomem *)&(rsp->ring_ptr->signature)) !=
 	    RESPONSE_PROCESSED) {
 		lptr = rsp->ring_ptr;
@@ -2824,6 +2914,28 @@ qlafx00_async_event(scsi_qla_host_t *vha)
 		    ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
 		data_size = 4;
 		break;
+
+	case QLAFX00_MBA_TEMP_OVER:	/* Over temperature event */
+		ql_log(ql_log_info, vha, 0x5085,
+		    "Asynchronous over temperature event received "
+		    "aenmb[0]: %x\n",
+		    ha->aenmb[0]);
+		break;
+
+	case QLAFX00_MBA_TEMP_NORM:	/* Normal temperature event */
+		ql_log(ql_log_info, vha, 0x5086,
+		    "Asynchronous normal temperature event received "
+		    "aenmb[0]: %x\n",
+		    ha->aenmb[0]);
+		break;
+
+	case QLAFX00_MBA_TEMP_CRIT:	/* Critical temperature event */
+		ql_log(ql_log_info, vha, 0x5083,
+		    "Asynchronous critical temperature event received "
+		    "aenmb[0]: %x\n",
+		    ha->aenmb[0]);
+		break;
+
 	default:
 		ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
 		ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);

+ 37 - 4
drivers/scsi/qla2xxx/qla_mr.h

@@ -329,11 +329,13 @@ struct config_info_data {
 	uint64_t	adapter_id;
 
 	uint32_t	cluster_key_len;
-	uint8_t		cluster_key[10];
+	uint8_t		cluster_key[16];
 
 	uint64_t	cluster_master_id;
 	uint64_t	cluster_slave_id;
 	uint8_t		cluster_flags;
+	uint32_t	enabled_capabilities;
+	uint32_t	nominal_temp_value;
 } __packed;
 
 #define FXDISC_GET_CONFIG_INFO		0x01
@@ -342,10 +344,11 @@ struct config_info_data {
 #define FXDISC_GET_TGT_NODE_LIST	0x81
 #define FXDISC_REG_HOST_INFO		0x99
 
-#define QLAFX00_HBA_ICNTRL_REG		0x21B08
+#define QLAFX00_HBA_ICNTRL_REG		0x20B08
 #define QLAFX00_ICR_ENB_MASK            0x80000000
 #define QLAFX00_ICR_DIS_MASK            0x7fffffff
 #define QLAFX00_HST_RST_REG		0x18264
+#define QLAFX00_SOC_TEMP_REG		0x184C4
 #define QLAFX00_HST_TO_HBA_REG		0x20A04
 #define QLAFX00_HBA_TO_HOST_REG		0x21B70
 #define QLAFX00_HST_INT_STS_BITS	0x7
@@ -361,6 +364,9 @@ struct config_info_data {
 #define QLAFX00_INTR_ALL_CMPLT		0x7
 
 #define QLAFX00_MBA_SYSTEM_ERR		0x8002
+#define QLAFX00_MBA_TEMP_OVER		0x8005
+#define QLAFX00_MBA_TEMP_NORM		0x8006
+#define QLAFX00_MBA_TEMP_CRIT		0x8007
 #define QLAFX00_MBA_LINK_UP		0x8011
 #define QLAFX00_MBA_LINK_DOWN		0x8012
 #define QLAFX00_MBA_PORT_UPDATE		0x8014
@@ -434,9 +440,11 @@ struct qla_mt_iocb_rqst_fx00 {
 
 	__le32 dataword_extra;
 
-	__le32 req_len;
+	__le16 req_len;
+	__le16 reserved_2;
 
-	__le32 rsp_len;
+	__le16 rsp_len;
+	__le16 reserved_3;
 };
 
 struct qla_mt_iocb_rsp_fx00 {
@@ -499,12 +507,37 @@ struct mr_data_fx00 {
 	uint32_t old_fw_hbt_cnt;
 	uint16_t fw_reset_timer_tick;
 	uint8_t fw_reset_timer_exp;
+	uint16_t fw_critemp_timer_tick;
 	uint32_t old_aenmbx0_state;
+	uint32_t critical_temperature;
+	bool extended_io_enabled;
 };
 
+#define QLAFX00_EXTENDED_IO_EN_MASK    0x20
+
+/*
+ * The SoC junction temperature is stored in bits 9:1 of the SoC
+ * Junction Temperature Register, in a firmware-specific format.
+ * To get the temperature in degrees Celsius, the bit-field value
+ * should be converted using this formula:
+ * Temperature (degrees C) = ((3,153,000 - (10,000 * X)) / 13,825)
+ * where X is the bit-field value.
+ * This macro reads the register, extracts the bit field, performs
+ * the calculation and returns the temperature in Celsius.
+ */
+#define QLAFX00_GET_TEMPERATURE(ha) ((3153000 - (10000 * \
+	((QLAFX00_RD_REG(ha, QLAFX00_SOC_TEMP_REG) & 0x3FE) >> 1))) / 13825)
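
A worked example of the conversion, assuming the register reads back with
200 in bits 9:1:

	uint32_t raw = 200 << 1;		/* sample register value */
	uint32_t x = (raw & 0x3FE) >> 1;	/* x = 200 */
	int temp = (3153000 - 10000 * x) / 13825;
	/* temp == 83 degrees Celsius (integer division) */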
+
+
 #define QLAFX00_LOOP_DOWN_TIME		615     /* 600 */
 #define QLAFX00_HEARTBEAT_INTERVAL	6	/* number of seconds */
 #define QLAFX00_HEARTBEAT_MISS_CNT	3	/* number of miss */
 #define QLAFX00_RESET_INTERVAL		120	/* number of seconds */
 #define QLAFX00_MAX_RESET_INTERVAL	600	/* number of seconds */
+#define QLAFX00_CRITEMP_INTERVAL	60	/* number of seconds */
+
+#define QLAFX00_CRITEMP_THRSHLD		80	/* Celsius degrees */
+
 #endif

+ 78 - 39
drivers/scsi/qla2xxx/qla_nx.c

@@ -848,7 +848,6 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
 {
 	int done = 0, timeout = 0;
 	uint32_t lock_owner = 0;
-	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
 	while (!done) {
 		/* acquire semaphore2 from PCI HW block */
@@ -857,9 +856,6 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
 			break;
 		if (timeout >= qla82xx_rom_lock_timeout) {
 			lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
-			ql_dbg(ql_dbg_p3p, vha, 0xb085,
-			    "Failed to acquire rom lock, acquired by %d.\n",
-			    lock_owner);
 			return -1;
 		}
 		timeout++;
@@ -1666,8 +1662,14 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
 	}
 
 	/* Mapping of IO base pointer */
-	ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
-	    0xbc000 + (ha->pdev->devfn << 11));
+	if (IS_QLA8044(ha)) {
+		ha->iobase =
+		    (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase);
+	} else if (IS_QLA82XX(ha)) {
+		ha->iobase =
+		    (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
+			0xbc000 + (ha->pdev->devfn << 11));
+	}
 
 	if (!ql2xdbwr) {
 		ha->nxdb_wr_ptr =
@@ -1967,7 +1969,7 @@ static struct qla82xx_legacy_intr_set legacy_intr[] = \
  * @ha: SCSI driver HA context
  * @mb0: Mailbox0 register
  */
-static void
+void
 qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
 {
 	uint16_t	cnt;
@@ -2075,13 +2077,6 @@ qla82xx_intr_handler(int irq, void *dev_id)
 		WRT_REG_DWORD(&reg->host_int, 0);
 	}
 
-#ifdef QL_DEBUG_LEVEL_17
-	if (!irq && ha->flags.eeh_busy)
-		ql_log(ql_log_warn, vha, 0x503d,
-		    "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
-		    status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
-#endif
-
 	qla2x00_handle_mbx_completion(ha, status);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
@@ -2147,13 +2142,6 @@ qla82xx_msix_default(int irq, void *dev_id)
 		WRT_REG_DWORD(&reg->host_int, 0);
 	} while (0);
 
-#ifdef QL_DEBUG_LEVEL_17
-	if (!irq && ha->flags.eeh_busy)
-		ql_log(ql_log_warn, vha, 0x5044,
-		    "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
-		    status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
-#endif
-
 	qla2x00_handle_mbx_completion(ha, status);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
@@ -2247,7 +2235,10 @@ qla82xx_enable_intrs(struct qla_hw_data *ha)
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 	qla82xx_mbx_intr_enable(vha);
 	spin_lock_irq(&ha->hardware_lock);
-	qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+	if (IS_QLA8044(ha))
+		qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 0);
+	else
+		qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
 	spin_unlock_irq(&ha->hardware_lock);
 	ha->interrupts_on = 1;
 }
@@ -2258,7 +2249,10 @@ qla82xx_disable_intrs(struct qla_hw_data *ha)
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 	qla82xx_mbx_intr_disable(vha);
 	spin_lock_irq(&ha->hardware_lock);
-	qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
+	if (IS_QLA8044(ha))
+		qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 1);
+	else
+		qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
 	spin_unlock_irq(&ha->hardware_lock);
 	ha->interrupts_on = 0;
 }
@@ -3008,6 +3002,9 @@ qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)
 	if (IS_QLA82XX(ha)) {
 		qla82xx_clear_drv_active(ha);
 		qla82xx_idc_unlock(ha);
+	} else if (IS_QLA8044(ha)) {
+		qla8044_clear_drv_active(vha);
+		qla8044_idc_unlock(ha);
 	}
 
 	/* Set DEV_FAILED flag to disable timer */
@@ -3134,7 +3131,7 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
 			if (fw_major_version != ha->fw_major_version ||
 			    fw_minor_version != ha->fw_minor_version ||
 			    fw_subminor_version != ha->fw_subminor_version) {
-				ql_log(ql_log_info, vha, 0xb02d,
+				ql_dbg(ql_dbg_p3p, vha, 0xb02d,
 				    "Firmware version differs "
 				    "Previous version: %d:%d:%d - "
 				    "New version: %d:%d:%d\n",
@@ -3330,6 +3327,14 @@ static int qla82xx_check_temp(scsi_qla_host_t *vha)
 	return 0;
 }
 
+int qla82xx_read_temperature(scsi_qla_host_t *vha)
+{
+	uint32_t temp;
+
+	temp = qla82xx_rd_32(vha->hw, CRB_TEMP_STATE);
+	return qla82xx_get_temp_val(temp);
+}
+
 void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
@@ -3423,8 +3428,18 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
 
 int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 {
-	int rval;
-	rval = qla82xx_device_state_handler(vha);
+	int rval = -1;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (IS_QLA82XX(ha))
+		rval = qla82xx_device_state_handler(vha);
+	else if (IS_QLA8044(ha)) {
+		qla8044_idc_lock(ha);
+		/* Decide the reset ownership */
+		qla83xx_reset_ownership(vha);
+		qla8044_idc_unlock(ha);
+		rval = qla8044_device_state_handler(vha);
+	}
 	return rval;
 }
 
@@ -3432,17 +3447,25 @@ void
 qla82xx_set_reset_owner(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
-	uint32_t dev_state;
+	uint32_t dev_state = 0;
+
+	if (IS_QLA82XX(ha))
+		dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+	else if (IS_QLA8044(ha))
+		dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
 
-	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
 	if (dev_state == QLA8XXX_DEV_READY) {
 		ql_log(ql_log_info, vha, 0xb02f,
 		    "HW State: NEED RESET\n");
-		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-			QLA8XXX_DEV_NEED_RESET);
-		ha->flags.nic_core_reset_owner = 1;
-		ql_dbg(ql_dbg_p3p, vha, 0xb030,
-		    "reset_owner is 0x%x\n", ha->portnum);
+		if (IS_QLA82XX(ha)) {
+			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+			    QLA8XXX_DEV_NEED_RESET);
+			ha->flags.nic_core_reset_owner = 1;
+			ql_dbg(ql_dbg_p3p, vha, 0xb030,
+			    "reset_owner is 0x%x\n", ha->portnum);
+		} else if (IS_QLA8044(ha))
+			qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+			    QLA8XXX_DEV_NEED_RESET);
 	} else
 		ql_log(ql_log_info, vha, 0xb031,
 		    "Device state is 0x%x = %s.\n",
@@ -3463,7 +3486,7 @@ qla82xx_set_reset_owner(scsi_qla_host_t *vha)
 int
 qla82xx_abort_isp(scsi_qla_host_t *vha)
 {
-	int rval;
+	int rval = -1;
 	struct qla_hw_data *ha = vha->hw;
 
 	if (vha->device_flags & DFLG_DEV_FAILED) {
@@ -3477,7 +3500,15 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
 	qla82xx_set_reset_owner(vha);
 	qla82xx_idc_unlock(ha);
 
-	rval = qla82xx_device_state_handler(vha);
+	if (IS_QLA82XX(ha))
+		rval = qla82xx_device_state_handler(vha);
+	else if (IS_QLA8044(ha)) {
+		qla8044_idc_lock(ha);
+		/* Decide the reset ownership */
+		qla83xx_reset_ownership(vha);
+		qla8044_idc_unlock(ha);
+		rval = qla8044_device_state_handler(vha);
+	}
 
 	qla82xx_idc_lock(ha);
 	qla82xx_clear_rst_ready(ha);
@@ -3597,7 +3628,7 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
 void
 qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
 {
-	int i;
+	int i, fw_state = 0;
 	unsigned long flags;
 	struct qla_hw_data *ha = vha->hw;
 
@@ -3608,7 +3639,11 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
 	if (!ha->flags.isp82xx_fw_hung) {
 		for (i = 0; i < 2; i++) {
 			msleep(1000);
-			if (qla82xx_check_fw_alive(vha)) {
+			if (IS_QLA82XX(ha))
+				fw_state = qla82xx_check_fw_alive(vha);
+			else if (IS_QLA8044(ha))
+				fw_state = qla8044_check_fw_alive(vha);
+			if (fw_state) {
 				ha->flags.isp82xx_fw_hung = 1;
 				qla82xx_clear_pending_mbx(vha);
 				break;
@@ -4072,7 +4107,7 @@ qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
 	return QLA_SUCCESS;
 }
 
-static int
+int
 qla82xx_validate_template_chksum(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
@@ -4384,7 +4419,11 @@ qla82xx_md_prep(scsi_qla_host_t *vha)
 		    ha->md_template_size / 1024);
 
 		/* Get Minidump template */
-		rval = qla82xx_md_get_template(vha);
+		if (IS_QLA8044(ha))
+			rval = qla8044_md_get_template(vha);
+		else
+			rval = qla82xx_md_get_template(vha);
+
 		if (rval == QLA_SUCCESS) {
 			ql_dbg(ql_dbg_p3p, vha, 0xb04b,
 			    "MiniDump Template obtained\n");

+ 10 - 0
drivers/scsi/qla2xxx/qla_nx.h

@@ -589,6 +589,7 @@
  * The PCI VendorID and DeviceID for our board.
  */
 #define PCI_DEVICE_ID_QLOGIC_ISP8021		0x8021
+#define PCI_DEVICE_ID_QLOGIC_ISP8044		0x8044
 
 #define QLA82XX_MSIX_TBL_SPACE			8192
 #define QLA82XX_PCI_REG_MSIX_TBL		0x44
@@ -954,6 +955,11 @@ struct ct6_dsd {
 #define QLA82XX_CNTRL                  98
 #define QLA82XX_TLHDR                  99
 #define QLA82XX_RDEND                  255
+#define QLA8044_POLLRD			35
+#define QLA8044_RDMUX2			36
+#define QLA8044_L1DTG			8
+#define QLA8044_L1ITG			9
+#define QLA8044_POLLRDMWR		37
 
 /*
  * Opcodes for Control Entries.
@@ -1191,4 +1197,8 @@ enum {
 	QLA82XX_TEMP_WARN,	   /* Sound alert, temperature getting high */
 	QLA82XX_TEMP_PANIC	   /* Fatal error, hardware has shut down. */
 };
+
+#define LEG_INTR_PTR_OFFSET	0x38C0
+#define LEG_INTR_TRIG_OFFSET	0x38C4
+#define LEG_INTR_MASK_OFFSET	0x38C8
 #endif

+ 3716 - 0
drivers/scsi/qla2xxx/qla_nx2.c

@@ -0,0 +1,3716 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#include <linux/vmalloc.h>
+
+#include "qla_def.h"
+#include "qla_gbl.h"
+
+#include <linux/delay.h>
+
+/* 8044 Flash Read/Write functions */
+uint32_t
+qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
+{
+	return readl((void __iomem *) (ha->nx_pcibase + addr));
+}
+
+void
+qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val)
+{
+	writel(val, (void __iomem *)((ha)->nx_pcibase + addr));
+}
+
+int
+qla8044_rd_direct(struct scsi_qla_host *vha,
+	const uint32_t crb_reg)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (crb_reg < CRB_REG_INDEX_MAX)
+		return qla8044_rd_reg(ha, qla8044_reg_tbl[crb_reg]);
+	else
+		return QLA_FUNCTION_FAILED;
+}
+
+void
+qla8044_wr_direct(struct scsi_qla_host *vha,
+	const uint32_t crb_reg,
+	const uint32_t value)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (crb_reg < CRB_REG_INDEX_MAX)
+		qla8044_wr_reg(ha, qla8044_reg_tbl[crb_reg], value);
+}
+
+static int
+qla8044_set_win_base(scsi_qla_host_t *vha, uint32_t addr)
+{
+	uint32_t val;
+	int ret_val = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	qla8044_wr_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum), addr);
+	val = qla8044_rd_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum));
+
+	if (val != addr) {
+		ql_log(ql_log_warn, vha, 0xb087,
+		    "%s: Failed to set register window : "
+		    "addr written 0x%x, read 0x%x!\n",
+		    __func__, addr, val);
+		ret_val = QLA_FUNCTION_FAILED;
+	}
+	return ret_val;
+}
+
+static int
+qla8044_rd_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
+{
+	int ret_val = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	ret_val = qla8044_set_win_base(vha, addr);
+	if (!ret_val)
+		*data = qla8044_rd_reg(ha, QLA8044_WILDCARD);
+	else
+		ql_log(ql_log_warn, vha, 0xb088,
+		    "%s: failed read of addr 0x%x!\n", __func__, addr);
+	return ret_val;
+}
+
+static int
+qla8044_wr_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
+{
+	int ret_val = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	ret_val = qla8044_set_win_base(vha, addr);
+	if (!ret_val)
+		qla8044_wr_reg(ha, QLA8044_WILDCARD, data);
+	else
+		ql_log(ql_log_warn, vha, 0xb089,
+		    "%s: failed wrt to addr 0x%x, data 0x%x\n",
+		    __func__, addr, data);
+	return ret_val;
+}
+
+/*
+ * qla8044_read_write_crb_reg - Read from raddr and write value to waddr.
+ *
+ * @vha : Pointer to adapter structure
+ * @raddr : CRB address to read from
+ * @waddr : CRB address to write to
+ *
+ */
+static void
+qla8044_read_write_crb_reg(struct scsi_qla_host *vha,
+	uint32_t raddr, uint32_t waddr)
+{
+	uint32_t value;
+
+	qla8044_rd_reg_indirect(vha, raddr, &value);
+	qla8044_wr_reg_indirect(vha, waddr, value);
+}
+
+/*
+ * qla8044_rmw_crb_reg - Read value from raddr, AND it with test_mask,
+ * shift left/right and OR/XOR it with the values in the RMW header,
+ * then write the result to waddr.
+ *
+ * @vha : Pointer to adapter structure
+ * @raddr : CRB address to read from
+ * @waddr : CRB address to write to
+ * @p_rmw_hdr : header with shift/or/xor values.
+ *
+ */
+static void
+qla8044_rmw_crb_reg(struct scsi_qla_host *vha,
+	uint32_t raddr, uint32_t waddr,	struct qla8044_rmw *p_rmw_hdr)
+{
+	uint32_t value;
+
+	if (p_rmw_hdr->index_a)
+		value = vha->reset_tmplt.array[p_rmw_hdr->index_a];
+	else
+		qla8044_rd_reg_indirect(vha, raddr, &value);
+	value &= p_rmw_hdr->test_mask;
+	value <<= p_rmw_hdr->shl;
+	value >>= p_rmw_hdr->shr;
+	value |= p_rmw_hdr->or_value;
+	value ^= p_rmw_hdr->xor_value;
+	qla8044_wr_reg_indirect(vha, waddr, value);
+	return;
+}
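
A worked pass through the pipeline above, with hypothetical header values
test_mask = 0xFF, shl = 8, shr = 0, or_value = 0x1, xor_value = 0:

	uint32_t value = 0x12345678;

	value &= 0xFF;		/* 0x00000078 */
	value <<= 8;		/* 0x00007800 */
	value >>= 0;		/* unchanged   */
	value |= 0x1;		/* 0x00007801 */
	value ^= 0;		/* 0x00007801, written to waddr */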
+
+inline void
+qla8044_set_qsnt_ready(struct scsi_qla_host *vha)
+{
+	uint32_t qsnt_state;
+	struct qla_hw_data *ha = vha->hw;
+
+	qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+	qsnt_state |= (1 << ha->portnum);
+	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
+	ql_log(ql_log_info, vha, 0xb08e, "%s(%ld): qsnt_state: 0x%08x\n",
+	     __func__, vha->host_no, qsnt_state);
+}
+
+void
+qla8044_clear_qsnt_ready(struct scsi_qla_host *vha)
+{
+	uint32_t qsnt_state;
+	struct qla_hw_data *ha = vha->hw;
+
+	qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+	qsnt_state &= ~(1 << ha->portnum);
+	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
+	ql_log(ql_log_info, vha, 0xb08f, "%s(%ld): qsnt_state: 0x%08x\n",
+	    __func__, vha->host_no, qsnt_state);
+}
+
+/**
+ * qla8044_lock_recovery - Recover the IDC lock.
+ * @vha : Pointer to adapter structure
+ *
+ * Lock Recovery Register
+ * 5-2	Lock recovery owner: Function ID of driver doing lock recovery,
+ *	valid if bits 1..0 are set by driver doing lock recovery.
+ * 1-0  1 - Driver intends to force unlock the IDC lock.
+ *	2 - Driver is moving forward to unlock the IDC lock. Driver clears
+ *	    this field after force unlocking the IDC lock.
+ *
+ * Lock Recovery process
+ * a. Read the IDC_LOCK_RECOVERY register. If the value in bits 1..0 is
+ *    greater than 0, then wait for the other driver to unlock otherwise
+ *    move to the next step.
+ * b. Indicate intent to force-unlock by writing 1h to the IDC_LOCK_RECOVERY
+ *    register bits 1..0 and also set the function# in bits 5..2.
+ * c. Read the IDC_LOCK_RECOVERY register again after a delay of 200ms.
+ *    Wait for the other driver to perform lock recovery if the function
+ *    number in bits 5..2 has changed, otherwise move to the next step.
+ * d. Write a value of 2h to the IDC_LOCK_RECOVERY register bits 1..0
+ *    leaving your function# in bits 5..2.
+ * e. Force unlock using the DRIVER_UNLOCK register and immediately clear
+ *    the IDC_LOCK_RECOVERY bits 5..0 by writing 0.
+ */
+static int
+qla8044_lock_recovery(struct scsi_qla_host *vha)
+{
+	uint32_t lock = 0, lockid;
+	struct qla_hw_data *ha = vha->hw;
+
+	lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);
+
+	/* Check for other Recovery in progress, go wait */
+	if ((lockid & IDC_LOCK_RECOVERY_STATE_MASK) != 0)
+		return QLA_FUNCTION_FAILED;
+
+	/* Intent to Recover */
+	qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
+	    (ha->portnum <<
+	     IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) | INTENT_TO_RECOVER);
+	msleep(200);
+
+	/* Check Intent to Recover is advertised */
+	lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);
+	if ((lockid & IDC_LOCK_RECOVERY_OWNER_MASK) != (ha->portnum <<
+	    IDC_LOCK_RECOVERY_STATE_SHIFT_BITS))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb08b,
+	    "%s:%d: IDC lock recovery initiated\n", __func__, ha->portnum);
+
+	/* Proceed to Recover */
+	qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
+	    (ha->portnum << IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) |
+	    PROCEED_TO_RECOVER);
+
+	/* Force Unlock() */
+	qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, 0xFF);
+	qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
+
+	/* Clear bits 0-5 in IDC_RECOVERY register*/
+	qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, 0);
+
+	/* Get lock() */
+	lock = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);
+	if (lock) {
+		lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+		lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->portnum;
+		qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lockid);
+		return QLA_SUCCESS;
+	} else
+		return QLA_FUNCTION_FAILED;
+}
+
+int
+qla8044_idc_lock(struct qla_hw_data *ha)
+{
+	uint32_t ret_val = QLA_SUCCESS, timeout = 0, status = 0;
+	uint32_t lock_id, lock_cnt, func_num, tmo_owner = 0, first_owner = 0;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	while (status == 0) {
+		/* acquire semaphore5 from PCI HW block */
+		status = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);
+
+		if (status) {
+			/* Increment counter (bits 8-31) and update
+			 * func_num (bits 0-7) on getting a successful lock */
+			lock_id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+			lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->portnum;
+			qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lock_id);
+			break;
+		}
+
+		if (timeout == 0)
+			first_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+
+		if (++timeout >=
+		    (QLA8044_DRV_LOCK_TIMEOUT / QLA8044_DRV_LOCK_MSLEEP)) {
+			tmo_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+			func_num = tmo_owner & 0xFF;
+			lock_cnt = tmo_owner >> 8;
+			ql_log(ql_log_warn, vha, 0xb114,
+			    "%s: Lock by func %d failed after 2s, lock held "
+			    "by func %d, lock count %d, first_owner %d\n",
+			    __func__, ha->portnum, func_num, lock_cnt,
+			    (first_owner & 0xFF));
+			if (first_owner != tmo_owner) {
+				/* Some other driver got lock,
+				 * OR same driver got lock again (counter
+				 * value changed), when we were waiting for
+				 * lock. Retry for another 2 sec */
+				ql_dbg(ql_dbg_p3p, vha, 0xb115,
+				    "%s: %d: IDC lock failed\n",
+				    __func__, ha->portnum);
+				timeout = 0;
+			} else {
+				/* Same driver holding lock > 2sec.
+				 * Force Recovery */
+				if (qla8044_lock_recovery(vha) == QLA_SUCCESS) {
+					/* Recovered and got lock */
+					ret_val = QLA_SUCCESS;
+					ql_dbg(ql_dbg_p3p, vha, 0xb116,
+					    "%s:IDC lock Recovery by %d"
+					    "successful...\n", __func__,
+					     ha->portnum);
+				}
+				/* Recovery Failed, some other function
+				 * has the lock, wait for 2secs
+				 * and retry
+				 */
+				 ql_dbg(ql_dbg_p3p, vha, 0xb08a,
+				     "%s: IDC lock Recovery by %d "
+				     "failed, Retrying timout\n", __func__,
+				     ha->portnum);
+				 timeout = 0;
+			}
+		}
+		msleep(QLA8044_DRV_LOCK_MSLEEP);
+	}
+	return ret_val;
+}
+
+void
+qla8044_idc_unlock(struct qla_hw_data *ha)
+{
+	int id;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+
+	if ((id & 0xFF) != ha->portnum) {
+		ql_log(ql_log_warn, vha, 0xb118,
+		    "%s: IDC Unlock by %d failed, lock owner is %d!\n",
+		    __func__, ha->portnum, (id & 0xFF));
+		return;
+	}
+
+	/* Keep lock counter value, update the ha->func_num to 0xFF */
+	qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, (id | 0xFF));
+	qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
+}
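
Both lock paths rely on the same DRV_LOCK_ID encoding: the low byte names
the owning function, the upper 24 bits count acquisitions. A worked example
of the update done on a successful lock, assuming portnum 2:

	uint32_t lock_id = 0x00000503;	/* count 5, last owner func 3 */

	lock_id = ((lock_id + (1 << 8)) & ~0xFF) | 2;
	/* lock_id == 0x00000602: count bumped to 6, owner now func 2 */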
+
+/* 8044 Flash Lock/Unlock functions */
+static int
+qla8044_flash_lock(scsi_qla_host_t *vha)
+{
+	int lock_owner;
+	int timeout = 0;
+	uint32_t lock_status = 0;
+	int ret_val = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	while (lock_status == 0) {
+		lock_status = qla8044_rd_reg(ha, QLA8044_FLASH_LOCK);
+		if (lock_status)
+			break;
+
+		if (++timeout >= QLA8044_FLASH_LOCK_TIMEOUT / 20) {
+			lock_owner = qla8044_rd_reg(ha,
+			    QLA8044_FLASH_LOCK_ID);
+			ql_log(ql_log_warn, vha, 0xb113,
+			    "%s: flash lock by %d failed, held by %d\n",
+			    __func__, ha->portnum, lock_owner);
+			ret_val = QLA_FUNCTION_FAILED;
+			break;
+		}
+		msleep(20);
+	}
+	qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, ha->portnum);
+	return ret_val;
+}
+
+static void
+qla8044_flash_unlock(scsi_qla_host_t *vha)
+{
+	int ret_val;
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Reading FLASH_UNLOCK register unlocks the Flash */
+	qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, 0xFF);
+	ret_val = qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
+}
+
+
+static void
+qla8044_flash_lock_recovery(struct scsi_qla_host *vha)
+{
+	if (qla8044_flash_lock(vha)) {
+		/* Someone else is holding the lock. */
+		ql_log(ql_log_warn, vha, 0xb120, "Resetting flash_lock\n");
+	}
+
+	/*
+	 * Either we got the lock, or someone
+	 * else died while holding it.
+	 * In either case, unlock.
+	 */
+	qla8044_flash_unlock(vha);
+}
+
+/*
+ * Address and length are byte address
+ */
+static int
+qla8044_read_flash_data(scsi_qla_host_t *vha,  uint8_t *p_data,
+	uint32_t flash_addr, int u32_word_count)
+{
+	int i, ret_val = QLA_SUCCESS;
+	uint32_t u32_word;
+
+	if (qla8044_flash_lock(vha) != QLA_SUCCESS) {
+		ret_val = QLA_FUNCTION_FAILED;
+		goto exit_lock_error;
+	}
+
+	if (flash_addr & 0x03) {
+		ql_log(ql_log_warn, vha, 0xb117,
+		    "%s: Illegal addr = 0x%x\n", __func__, flash_addr);
+		ret_val = QLA_FUNCTION_FAILED;
+		goto exit_flash_read;
+	}
+
+	for (i = 0; i < u32_word_count; i++) {
+		if (qla8044_wr_reg_indirect(vha, QLA8044_FLASH_DIRECT_WINDOW,
+		    (flash_addr & 0xFFFF0000))) {
+			ql_log(ql_log_warn, vha, 0xb119,
+			    "%s: failed to write addr 0x%x to "
+			    "FLASH_DIRECT_WINDOW\n! ",
+			    __func__, flash_addr);
+			ret_val = QLA_FUNCTION_FAILED;
+			goto exit_flash_read;
+		}
+
+		ret_val = qla8044_rd_reg_indirect(vha,
+		    QLA8044_FLASH_DIRECT_DATA(flash_addr),
+		    &u32_word);
+		if (ret_val != QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0xb08c,
+			    "%s: failed to read addr 0x%x!\n",
+			    __func__, flash_addr);
+			goto exit_flash_read;
+		}
+
+		*(uint32_t *)p_data = u32_word;
+		p_data = p_data + 4;
+		flash_addr = flash_addr + 4;
+	}
+
+exit_flash_read:
+	qla8044_flash_unlock(vha);
+
+exit_lock_error:
+	return ret_val;
+}
+
+/*
+ * Address and length are byte address
+ */
+uint8_t *
+qla8044_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+	uint32_t offset, uint32_t length)
+{
+	scsi_block_requests(vha->host);
+	if (qla8044_read_flash_data(vha, (uint8_t *)buf, offset, length / 4)
+	    != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha,  0xb08d,
+		    "%s: Failed to read from flash\n",
+		    __func__);
+	}
+	scsi_unblock_requests(vha->host);
+	return buf;
+}
+
+inline int
+qla8044_need_reset(struct scsi_qla_host *vha)
+{
+	uint32_t drv_state, drv_active;
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+
+	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+
+	rval = drv_state & (1 << ha->portnum);
+
+	if (ha->flags.eeh_busy && drv_active)
+		rval = 1;
+	return rval;
+}
+
+/*
+ * qla8044_write_list - Write the value (p_entry->arg2) to address specified
+ * by p_entry->arg1 for all entries in header with delay of p_hdr->delay between
+ * entries.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : reset_entry header for WRITE_LIST opcode.
+ *
+ */
+static void
+qla8044_write_list(struct scsi_qla_host *vha,
+	struct qla8044_reset_entry_hdr *p_hdr)
+{
+	struct qla8044_entry *p_entry;
+	uint32_t i;
+
+	p_entry = (struct qla8044_entry *)((char *)p_hdr +
+	    sizeof(struct qla8044_reset_entry_hdr));
+
+	for (i = 0; i < p_hdr->count; i++, p_entry++) {
+		qla8044_wr_reg_indirect(vha, p_entry->arg1, p_entry->arg2);
+		if (p_hdr->delay)
+			udelay((uint32_t)(p_hdr->delay));
+	}
+}
+
+/*
+ * qla8044_read_write_list - Read from address specified by p_entry->arg1,
+ * write value read to address specified by p_entry->arg2, for all entries in
+ * header with delay of p_hdr->delay between entries.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : reset_entry header for READ_WRITE_LIST opcode.
+ *
+ */
+static void
+qla8044_read_write_list(struct scsi_qla_host *vha,
+	struct qla8044_reset_entry_hdr *p_hdr)
+{
+	struct qla8044_entry *p_entry;
+	uint32_t i;
+
+	p_entry = (struct qla8044_entry *)((char *)p_hdr +
+	    sizeof(struct qla8044_reset_entry_hdr));
+
+	for (i = 0; i < p_hdr->count; i++, p_entry++) {
+		qla8044_read_write_crb_reg(vha, p_entry->arg1,
+		    p_entry->arg2);
+		if (p_hdr->delay)
+			udelay((uint32_t)(p_hdr->delay));
+	}
+}
+
+/*
+ * qla8044_poll_reg - Poll the given CRB addr for up to duration msecs,
+ * until the value read, ANDed with test_mask, equals test_result.
+ *
+ * @vha : Pointer to adapter structure
+ * @addr : CRB register address
+ * @duration : Poll for a total of "duration" msecs
+ * @test_mask : Mask to AND with the value read
+ * @test_result : Compare (value & test_mask) with test_result.
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_poll_reg(struct scsi_qla_host *vha, uint32_t addr,
+	int duration, uint32_t test_mask, uint32_t test_result)
+{
+	uint32_t value;
+	int timeout_error;
+	uint8_t retries;
+	int ret_val = QLA_SUCCESS;
+
+	ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
+	if (ret_val == QLA_FUNCTION_FAILED) {
+		timeout_error = 1;
+		goto exit_poll_reg;
+	}
+
+	/* poll every 1/10 of the total duration */
+	retries = duration/10;
+
+	do {
+		if ((value & test_mask) != test_result) {
+			timeout_error = 1;
+			msleep(duration/10);
+			ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
+			if (ret_val == QLA_FUNCTION_FAILED) {
+				timeout_error = 1;
+				goto exit_poll_reg;
+			}
+		} else {
+			timeout_error = 0;
+			break;
+		}
+	} while (retries--);
+
+exit_poll_reg:
+	if (timeout_error) {
+		vha->reset_tmplt.seq_error++;
+		ql_log(ql_log_fatal, vha, 0xb090,
+		    "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n",
+		    __func__, value, test_mask, test_result);
+	}
+
+	return timeout_error;
+}
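
A usage sketch for the poll helper above; the CRB address and the debug
message id are hypothetical:

	uint32_t crb_addr = 0x3810;	/* hypothetical CRB register */

	/* Poll for up to 100 ms until bit 0 reads back set. */
	if (qla8044_poll_reg(vha, crb_addr, 100, BIT_0, BIT_0))
		ql_log(ql_log_warn, vha, 0xb0ff, "poll timed out\n");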
+
+/*
+ * qla8044_poll_list - For all entries in the POLL_LIST header, poll read CRB
+ * register specified by p_entry->arg1 and compare (value AND test_mask) with
+ * test_result to validate it. Wait for p_hdr->delay between processing entries.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : reset_entry header for POLL_LIST opcode.
+ *
+ */
+static void
+qla8044_poll_list(struct scsi_qla_host *vha,
+	struct qla8044_reset_entry_hdr *p_hdr)
+{
+	long delay;
+	struct qla8044_entry *p_entry;
+	struct qla8044_poll *p_poll;
+	uint32_t i;
+	uint32_t value;
+
+	p_poll = (struct qla8044_poll *)
+		((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));
+
+	/* Entries start after the 8-byte qla8044_poll header, which
+	 * contains the test_mask and test_value.
+	 */
+	p_entry = (struct qla8044_entry *)((char *)p_poll +
+	    sizeof(struct qla8044_poll));
+
+	delay = (long)p_hdr->delay;
+
+	if (!delay) {
+		for (i = 0; i < p_hdr->count; i++, p_entry++)
+			qla8044_poll_reg(vha, p_entry->arg1,
+			    delay, p_poll->test_mask, p_poll->test_value);
+	} else {
+		for (i = 0; i < p_hdr->count; i++, p_entry++) {
+			if (delay) {
+				if (qla8044_poll_reg(vha,
+				    p_entry->arg1, delay,
+				    p_poll->test_mask,
+				    p_poll->test_value)) {
+					/* If (data_read & test_mask) !=
+					 * test_value, read the TIMEOUT_ADDR
+					 * (arg1) and ADDR (arg2) registers.
+					 */
+					qla8044_rd_reg_indirect(vha,
+					    p_entry->arg1, &value);
+					qla8044_rd_reg_indirect(vha,
+					    p_entry->arg2, &value);
+				}
+			}
+		}
+	}
+}
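+/*
+ * POLL_LIST layout in the reset template, as parsed above:
+ *
+ *	struct qla8044_reset_entry_hdr	(cmd, size, count, delay)
+ *	struct qla8044_poll		(test_mask, test_value)
+ *	struct qla8044_entry[count]	(arg1 = poll addr, arg2 = aux addr)
+ */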
+
+/*
+ * qla8044_poll_write_list - Write dr_value/ar_value to dr_addr/ar_addr,
+ * then read ar_addr; if (value & test_mask) != test_value, re-read until
+ * the timeout expires.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : reset entry header for POLL_WRITE_LIST opcode.
+ */
+static void
+qla8044_poll_write_list(struct scsi_qla_host *vha,
+	struct qla8044_reset_entry_hdr *p_hdr)
+{
+	long delay;
+	struct qla8044_quad_entry *p_entry;
+	struct qla8044_poll *p_poll;
+	uint32_t i;
+
+	p_poll = (struct qla8044_poll *)((char *)p_hdr +
+	    sizeof(struct qla8044_reset_entry_hdr));
+
+	p_entry = (struct qla8044_quad_entry *)((char *)p_poll +
+	    sizeof(struct qla8044_poll));
+
+	delay = (long)p_hdr->delay;
+
+	for (i = 0; i < p_hdr->count; i++, p_entry++) {
+		qla8044_wr_reg_indirect(vha,
+		    p_entry->dr_addr, p_entry->dr_value);
+		qla8044_wr_reg_indirect(vha,
+		    p_entry->ar_addr, p_entry->ar_value);
+		if (delay) {
+			if (qla8044_poll_reg(vha,
+			    p_entry->ar_addr, delay,
+			    p_poll->test_mask,
+			    p_poll->test_value)) {
+				ql_dbg(ql_dbg_p3p, vha, 0xb091,
+				    "%s: Timeout Error: poll list, ",
+				    __func__);
+				ql_dbg(ql_dbg_p3p, vha, 0xb092,
+				    "item_num %d, entry_num %d\n", i,
+				    vha->reset_tmplt.seq_index);
+			}
+		}
+	}
+}
+
+/*
+ * qla8044_read_modify_write - Read the value from p_entry->arg1, modify
+ * it using the shift/or/xor values in the RMW header, and write the
+ * result to p_entry->arg2, with p_hdr->delay usecs between entries.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : header with shift/or/xor values.
+ */
+static void
+qla8044_read_modify_write(struct scsi_qla_host *vha,
+	struct qla8044_reset_entry_hdr *p_hdr)
+{
+	struct qla8044_entry *p_entry;
+	struct qla8044_rmw *p_rmw_hdr;
+	uint32_t i;
+
+	p_rmw_hdr = (struct qla8044_rmw *)((char *)p_hdr +
+	    sizeof(struct qla8044_reset_entry_hdr));
+
+	p_entry = (struct qla8044_entry *)((char *)p_rmw_hdr +
+	    sizeof(struct qla8044_rmw));
+
+	for (i = 0; i < p_hdr->count; i++, p_entry++) {
+		qla8044_rmw_crb_reg(vha, p_entry->arg1,
+		    p_entry->arg2, p_rmw_hdr);
+		if (p_hdr->delay)
+			udelay((uint32_t)(p_hdr->delay));
+	}
+}
+
+/*
+ * qla8044_pause - Wait for p_hdr->delay msecs, called between processing
+ * two entries of a sequence.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : Common reset entry header.
+ *
+ */
+static
+void qla8044_pause(struct scsi_qla_host *vha,
+	struct qla8044_reset_entry_hdr *p_hdr)
+{
+	if (p_hdr->delay)
+		mdelay((uint32_t)((long)p_hdr->delay));
+}
+
+/*
+ * qla8044_template_end - Indicates end of reset sequence processing.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : Common reset entry header.
+ *
+ */
+static void
+qla8044_template_end(struct scsi_qla_host *vha,
+	struct qla8044_reset_entry_hdr *p_hdr)
+{
+	vha->reset_tmplt.template_end = 1;
+
+	if (vha->reset_tmplt.seq_error == 0) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb093,
+		    "%s: Reset sequence completed SUCCESSFULLY.\n", __func__);
+	} else {
+		ql_log(ql_log_fatal, vha, 0xb094,
+		    "%s: Reset sequence completed with some timeout "
+		    "errors.\n", __func__);
+	}
+}
+
+/*
+ * qla8044_poll_read_list - Write ar_value to the ar_addr register, then
+ * read ar_addr; if (value & test_mask) != test_value, re-read until the
+ * timeout expires, then read the dr_addr register and store the value in
+ * reset_tmplt.array.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : reset entry header for POLL_READ_LIST opcode.
+ */
+static void
+qla8044_poll_read_list(struct scsi_qla_host *vha,
+	struct qla8044_reset_entry_hdr *p_hdr)
+{
+	long delay;
+	int index;
+	struct qla8044_quad_entry *p_entry;
+	struct qla8044_poll *p_poll;
+	uint32_t i;
+	uint32_t value;
+
+	p_poll = (struct qla8044_poll *)
+		((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));
+
+	p_entry = (struct qla8044_quad_entry *)
+		((char *)p_poll + sizeof(struct qla8044_poll));
+
+	delay = (long)p_hdr->delay;
+
+	for (i = 0; i < p_hdr->count; i++, p_entry++) {
+		qla8044_wr_reg_indirect(vha, p_entry->ar_addr,
+		    p_entry->ar_value);
+		if (delay) {
+			if (qla8044_poll_reg(vha, p_entry->ar_addr, delay,
+			    p_poll->test_mask, p_poll->test_value)) {
+				ql_dbg(ql_dbg_p3p, vha, 0xb095,
+				    "%s: Timeout Error: poll "
+				    "list, ", __func__);
+				ql_dbg(ql_dbg_p3p, vha, 0xb096,
+				    "Item_num %d, "
+				    "entry_num %d\n", i,
+				    vha->reset_tmplt.seq_index);
+			} else {
+				index = vha->reset_tmplt.array_index;
+				qla8044_rd_reg_indirect(vha,
+				    p_entry->dr_addr, &value);
+				vha->reset_tmplt.array[index++] = value;
+				if (index == QLA8044_MAX_RESET_SEQ_ENTRIES)
+					vha->reset_tmplt.array_index = 1;
+			}
+		}
+	}
+}
+
+/*
+ * qla8044_process_reset_template - Process entries in the reset template
+ * until an entry with the SEQ_END opcode, which marks the end of the
+ * sub-sequence. Each entry carries a reset entry header holding the
+ * opcode/command, the size of the entry, the number of entries in the
+ * sub-sequence, and a delay in microsecs or a timeout in millisecs.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_buff : Pointer to the first reset entry of the sub-sequence.
+ */
+static void
+qla8044_process_reset_template(struct scsi_qla_host *vha,
+	char *p_buff)
+{
+	int index, entries;
+	struct qla8044_reset_entry_hdr *p_hdr;
+	char *p_entry = p_buff;
+
+	vha->reset_tmplt.seq_end = 0;
+	vha->reset_tmplt.template_end = 0;
+	entries = vha->reset_tmplt.hdr->entries;
+	index = vha->reset_tmplt.seq_index;
+
+	for (; (!vha->reset_tmplt.seq_end) && (index  < entries); index++) {
+		p_hdr = (struct qla8044_reset_entry_hdr *)p_entry;
+		switch (p_hdr->cmd) {
+		case OPCODE_NOP:
+			break;
+		case OPCODE_WRITE_LIST:
+			qla8044_write_list(vha, p_hdr);
+			break;
+		case OPCODE_READ_WRITE_LIST:
+			qla8044_read_write_list(vha, p_hdr);
+			break;
+		case OPCODE_POLL_LIST:
+			qla8044_poll_list(vha, p_hdr);
+			break;
+		case OPCODE_POLL_WRITE_LIST:
+			qla8044_poll_write_list(vha, p_hdr);
+			break;
+		case OPCODE_READ_MODIFY_WRITE:
+			qla8044_read_modify_write(vha, p_hdr);
+			break;
+		case OPCODE_SEQ_PAUSE:
+			qla8044_pause(vha, p_hdr);
+			break;
+		case OPCODE_SEQ_END:
+			vha->reset_tmplt.seq_end = 1;
+			break;
+		case OPCODE_TMPL_END:
+			qla8044_template_end(vha, p_hdr);
+			break;
+		case OPCODE_POLL_READ_LIST:
+			qla8044_poll_read_list(vha, p_hdr);
+			break;
+		default:
+			ql_log(ql_log_fatal, vha, 0xb097,
+			    "%s: Unknown command ==> 0x%04x on "
+			    "entry = %d\n", __func__, p_hdr->cmd, index);
+			break;
+		}
+		/* Set pointer to the next entry in the sequence. */
+		p_entry += p_hdr->size;
+	}
+	vha->reset_tmplt.seq_index = index;
+}
+
+static void
+qla8044_process_init_seq(struct scsi_qla_host *vha)
+{
+	qla8044_process_reset_template(vha,
+	    vha->reset_tmplt.init_offset);
+	if (vha->reset_tmplt.seq_end != 1)
+		ql_log(ql_log_fatal, vha, 0xb098,
+		    "%s: Abrupt INIT Sub-Sequence end.\n",
+		    __func__);
+}
+
+static void
+qla8044_process_stop_seq(struct scsi_qla_host *vha)
+{
+	vha->reset_tmplt.seq_index = 0;
+	qla8044_process_reset_template(vha, vha->reset_tmplt.stop_offset);
+	if (vha->reset_tmplt.seq_end != 1)
+		ql_log(ql_log_fatal, vha, 0xb099,
+		    "%s: Abrupt STOP Sub-Sequence end.\n", __func__);
+}
+
+static void
+qla8044_process_start_seq(struct scsi_qla_host *vha)
+{
+	qla8044_process_reset_template(vha, vha->reset_tmplt.start_offset);
+	if (vha->reset_tmplt.template_end != 1)
+		ql_log(ql_log_fatal, vha, 0xb09a,
+		    "%s: Abrupt START Sub-Sequence end.\n",
+		    __func__);
+}
+
+static int
+qla8044_lockless_flash_read_u32(struct scsi_qla_host *vha,
+	uint32_t flash_addr, uint8_t *p_data, int u32_word_count)
+{
+	uint32_t i;
+	uint32_t u32_word;
+	uint32_t flash_offset;
+	uint32_t addr = flash_addr;
+	int ret_val = QLA_SUCCESS;
+
+	flash_offset = addr & (QLA8044_FLASH_SECTOR_SIZE - 1);
+
+	if (addr & 0x3) {
+		ql_log(ql_log_fatal, vha, 0xb09b, "%s: Illegal addr = 0x%x\n",
+		    __func__, addr);
+		ret_val = QLA_FUNCTION_FAILED;
+		goto exit_lockless_read;
+	}
+
+	ret_val = qla8044_wr_reg_indirect(vha,
+	    QLA8044_FLASH_DIRECT_WINDOW, (addr));
+
+	if (ret_val != QLA_SUCCESS) {
+		ql_log(ql_log_fatal, vha, 0xb09c,
+		    "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
+		    __func__, addr);
+		goto exit_lockless_read;
+	}
+
+	/* Check if data is spread across multiple sectors  */
+	if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
+	    (QLA8044_FLASH_SECTOR_SIZE - 1)) {
+		/* Multi sector read */
+		for (i = 0; i < u32_word_count; i++) {
+			ret_val = qla8044_rd_reg_indirect(vha,
+			    QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
+			if (ret_val != QLA_SUCCESS) {
+				ql_log(ql_log_fatal, vha, 0xb09d,
+				    "%s: failed to read addr 0x%x!\n",
+				    __func__, addr);
+				goto exit_lockless_read;
+			}
+			*(uint32_t *)p_data  = u32_word;
+			p_data = p_data + 4;
+			addr = addr + 4;
+			flash_offset = flash_offset + 4;
+			if (flash_offset > (QLA8044_FLASH_SECTOR_SIZE - 1)) {
+				/* This write is needed once for each sector */
+				ret_val = qla8044_wr_reg_indirect(vha,
+				    QLA8044_FLASH_DIRECT_WINDOW, (addr));
+				if (ret_val != QLA_SUCCESS) {
+					ql_log(ql_log_fatal, vha, 0xb09f,
+					    "%s: failed to write addr "
+					    "0x%x to FLASH_DIRECT_WINDOW!\n",
+					    __func__, addr);
+					goto exit_lockless_read;
+				}
+				flash_offset = 0;
+			}
+		}
+	} else {
+		/* Single sector read */
+		for (i = 0; i < u32_word_count; i++) {
+			ret_val = qla8044_rd_reg_indirect(vha,
+			    QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
+			if (ret_val != QLA_SUCCESS) {
+				ql_log(ql_log_fatal, vha, 0xb0a0,
+				    "%s: failed to read addr 0x%x!\n",
+				    __func__, addr);
+				goto exit_lockless_read;
+			}
+			*(uint32_t *)p_data = u32_word;
+			p_data = p_data + 4;
+			addr = addr + 4;
+		}
+	}
+
+exit_lockless_read:
+	return ret_val;
+}
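+/*
+ * The flash is read through a direct window spanning one sector, so
+ * FLASH_DIRECT_WINDOW must be reprogrammed whenever a read crosses a
+ * sector boundary. For illustration, assuming a 64 KB
+ * QLA8044_FLASH_SECTOR_SIZE: a read starting at 0x1fff8 re-arms the
+ * window at 0x20000 after two dwords, then continues in the new sector.
+ */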
+
+/*
+ * qla8044_ms_mem_write_128b - Write data to MS/off-chip memory
+ *
+ * @vha : Pointer to adapter structure
+ * @addr : MS memory address to write to (must be 128-bit aligned)
+ * @data : Data to be written
+ * @count : Number of 128-bit (16-byte) words to write
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_ms_mem_write_128b(struct scsi_qla_host *vha,
+	uint64_t addr, uint32_t *data, uint32_t count)
+{
+	int i, j, ret_val = QLA_SUCCESS;
+	uint32_t agt_ctrl;
+	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Only 128-bit aligned access */
+	if (addr & 0xF) {
+		ret_val = QLA_FUNCTION_FAILED;
+		goto exit_ms_mem_write;
+	}
+	write_lock_irqsave(&ha->hw_lock, flags);
+
+	/* Write address */
+	ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, 0);
+	if (ret_val == QLA_FUNCTION_FAILED) {
+		ql_log(ql_log_fatal, vha, 0xb0a1,
+		    "%s: write to AGT_ADDR_HI failed!\n", __func__);
+		goto exit_ms_mem_write_unlock;
+	}
+
+	for (i = 0; i < count; i++, addr += 16) {
+		if (!((QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_QDR_NET,
+		    QLA8044_ADDR_QDR_NET_MAX)) ||
+		    (QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_DDR_NET,
+			QLA8044_ADDR_DDR_NET_MAX)))) {
+			ret_val = QLA_FUNCTION_FAILED;
+			goto exit_ms_mem_write_unlock;
+		}
+
+		ret_val = qla8044_wr_reg_indirect(vha,
+		    MD_MIU_TEST_AGT_ADDR_LO, addr);
+
+		/* Write data */
+		ret_val += qla8044_wr_reg_indirect(vha,
+		    MD_MIU_TEST_AGT_WRDATA_LO, *data++);
+		ret_val += qla8044_wr_reg_indirect(vha,
+		    MD_MIU_TEST_AGT_WRDATA_HI, *data++);
+		ret_val += qla8044_wr_reg_indirect(vha,
+		    MD_MIU_TEST_AGT_WRDATA_ULO, *data++);
+		ret_val += qla8044_wr_reg_indirect(vha,
+		    MD_MIU_TEST_AGT_WRDATA_UHI, *data++);
+		if (ret_val == QLA_FUNCTION_FAILED) {
+			ql_log(ql_log_fatal, vha, 0xb0a2,
+			    "%s: write to AGT_WRDATA failed!\n",
+			    __func__);
+			goto exit_ms_mem_write_unlock;
+		}
+
+		/* Check write status */
+		ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
+		    MIU_TA_CTL_WRITE_ENABLE);
+		ret_val += qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
+		    MIU_TA_CTL_WRITE_START);
+		if (ret_val == QLA_FUNCTION_FAILED) {
+			ql_log(ql_log_fatal, vha, 0xb0a3,
+			    "%s: write to AGT_CTRL failed!\n", __func__);
+			goto exit_ms_mem_write_unlock;
+		}
+
+		for (j = 0; j < MAX_CTL_CHECK; j++) {
+			ret_val = qla8044_rd_reg_indirect(vha,
+			    MD_MIU_TEST_AGT_CTRL, &agt_ctrl);
+			if (ret_val == QLA_FUNCTION_FAILED) {
+				ql_log(ql_log_fatal, vha, 0xb0a4,
+				    "%s: failed to read "
+				    "MD_MIU_TEST_AGT_CTRL!\n", __func__);
+				goto exit_ms_mem_write_unlock;
+			}
+			if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
+				break;
+		}
+
+		/* Status check failed */
+		if (j >= MAX_CTL_CHECK) {
+			ql_log(ql_log_fatal, vha, 0xb0a5,
+			    "%s: MS memory write failed!\n",
+			   __func__);
+			ret_val = QLA_FUNCTION_FAILED;
+			goto exit_ms_mem_write_unlock;
+		}
+	}
+
+exit_ms_mem_write_unlock:
+	write_unlock_irqrestore(&ha->hw_lock, flags);
+
+exit_ms_mem_write:
+	return ret_val;
+}
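+/*
+ * Each 16-byte chunk above is written through the MIU test agent:
+ * ADDR_LO is programmed per chunk (ADDR_HI once, as 0), the four WRDATA
+ * registers carry the 128-bit payload, and CTRL is kicked with
+ * WRITE_ENABLE then WRITE_START before BUSY is polled. The "+="
+ * accumulation of ret_val assumes QLA_SUCCESS is 0; the subsequent
+ * "== QLA_FUNCTION_FAILED" test catches a single failed write.
+ */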
+
+static int
+qla8044_copy_bootloader(struct scsi_qla_host *vha)
+{
+	uint8_t *p_cache;
+	uint32_t src, count, size;
+	uint64_t dest;
+	int ret_val = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	src = QLA8044_BOOTLOADER_FLASH_ADDR;
+	dest = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_ADDR);
+	size = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_SIZE);
+
+	/* Round size up to a 16-byte (128-bit) multiple */
+	if (size & 0xF)
+		size = (size + 16) & ~0xF;
+
+	/* Number of 16-byte chunks */
+	count = size/16;
+
+	p_cache = vmalloc(size);
+	if (p_cache == NULL) {
+		ql_log(ql_log_fatal, vha, 0xb0a6,
+		    "%s: Failed to allocate memory for "
+		    "boot loader cache\n", __func__);
+		ret_val = QLA_FUNCTION_FAILED;
+		goto exit_copy_bootloader;
+	}
+
+	ret_val = qla8044_lockless_flash_read_u32(vha, src,
+	    p_cache, size/sizeof(uint32_t));
+	if (ret_val == QLA_FUNCTION_FAILED) {
+		ql_log(ql_log_fatal, vha, 0xb0a7,
+		    "%s: Error reading F/W from flash!!!\n", __func__);
+		goto exit_copy_error;
+	}
+	ql_dbg(ql_dbg_p3p, vha, 0xb0a8, "%s: Read F/W from flash!\n",
+	    __func__);
+
+	/* 128 bit/16 byte write to MS memory */
+	ret_val = qla8044_ms_mem_write_128b(vha, dest,
+	    (uint32_t *)p_cache, count);
+	if (ret_val == QLA_FUNCTION_FAILED) {
+		ql_log(ql_log_fatal, vha, 0xb0a9,
+		    "%s: Error writing F/W to MS !!!\n", __func__);
+		goto exit_copy_error;
+	}
+	ql_dbg(ql_dbg_p3p, vha, 0xb0aa,
+	    "%s: Wrote F/W (size %d) to MS !!!\n",
+	    __func__, size);
+
+exit_copy_error:
+	vfree(p_cache);
+
+exit_copy_bootloader:
+	return ret_val;
+}
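+/*
+ * Example of the rounding above: a bootloader size of 0x1234 bytes is
+ * not 16-byte aligned, so it is rounded up to 0x1240 and copied as
+ * count = 0x124 chunks of 16 bytes each.
+ */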
+
+static int
+qla8044_restart(struct scsi_qla_host *vha)
+{
+	int ret_val = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	qla8044_process_stop_seq(vha);
+
+	/* Collect minidump */
+	if (ql2xmdenable)
+		qla8044_get_minidump(vha);
+	else
+		ql_log(ql_log_fatal, vha, 0xb14c,
+		    "Minidump disabled.\n");
+
+	qla8044_process_init_seq(vha);
+
+	if (qla8044_copy_bootloader(vha)) {
+		ql_log(ql_log_fatal, vha, 0xb0ab,
+		    "%s: Copy bootloader, firmware restart failed!\n",
+		    __func__);
+		ret_val = QLA_FUNCTION_FAILED;
+		goto exit_restart;
+	}
+
+	/* Load F/W from flash */
+	qla8044_wr_reg(ha, QLA8044_FW_IMAGE_VALID, QLA8044_BOOT_FROM_FLASH);
+
+	qla8044_process_start_seq(vha);
+
+exit_restart:
+	return ret_val;
+}
+
+/*
+ * qla8044_check_cmd_peg_status - Check the Command Peg state to see
+ * whether it has completed initialization.
+ *
+ * @vha : Pointer to adapter structure
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_check_cmd_peg_status(struct scsi_qla_host *vha)
+{
+	uint32_t val, ret_val = QLA_FUNCTION_FAILED;
+	int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
+	struct qla_hw_data *ha = vha->hw;
+
+	do {
+		val = qla8044_rd_reg(ha, QLA8044_CMDPEG_STATE);
+		if (val == PHAN_INITIALIZE_COMPLETE) {
+			ql_dbg(ql_dbg_p3p, vha, 0xb0ac,
+			    "%s: Command Peg initialization "
+			    "complete! state=0x%x\n", __func__, val);
+			ret_val = QLA_SUCCESS;
+			break;
+		}
+		msleep(CRB_CMDPEG_CHECK_DELAY);
+	} while (--retries);
+
+	return ret_val;
+}
+
+static int
+qla8044_start_firmware(struct scsi_qla_host *vha)
+{
+	int ret_val = QLA_SUCCESS;
+
+	if (qla8044_restart(vha)) {
+		ql_log(ql_log_fatal, vha, 0xb0ad,
+		    "%s: Restart error, need reset!\n",
+		    __func__);
+		ret_val = QLA_FUNCTION_FAILED;
+		goto exit_start_fw;
+	} else
+		ql_dbg(ql_dbg_p3p, vha, 0xb0af,
+		    "%s: Restart done!\n", __func__);
+
+	ret_val = qla8044_check_cmd_peg_status(vha);
+	if (ret_val) {
+		ql_log(ql_log_fatal, vha, 0xb0b0,
+		    "%s: Peg not initialized!\n", __func__);
+		ret_val = QLA_FUNCTION_FAILED;
+	}
+
+exit_start_fw:
+	return ret_val;
+}
+
+void
+qla8044_clear_drv_active(struct scsi_qla_host *vha)
+{
+	uint32_t drv_active;
+	struct qla_hw_data *ha = vha->hw;
+
+	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+	drv_active &= ~(1 << (ha->portnum));
+
+	ql_log(ql_log_info, vha, 0xb0b1,
+	    "%s(%ld): drv_active: 0x%08x\n",
+	    __func__, vha->host_no, drv_active);
+
+	qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
+}
+
+/*
+ * qla8044_device_bootstrap - Initialize device, set DEV_READY, start fw
+ * @vha: pointer to adapter structure
+ *
+ * Note: IDC lock must be held upon entry
+ **/
+static int
+qla8044_device_bootstrap(struct scsi_qla_host *vha)
+{
+	int rval = QLA_FUNCTION_FAILED;
+	int i;
+	uint32_t old_count = 0, count = 0;
+	int need_reset = 0;
+	uint32_t idc_ctrl;
+	struct qla_hw_data *ha = vha->hw;
+
+	need_reset = qla8044_need_reset(vha);
+
+	if (!need_reset) {
+		old_count = qla8044_rd_direct(vha,
+		    QLA8044_PEG_ALIVE_COUNTER_INDEX);
+
+		for (i = 0; i < 10; i++) {
+			msleep(200);
+
+			count = qla8044_rd_direct(vha,
+			    QLA8044_PEG_ALIVE_COUNTER_INDEX);
+			if (count != old_count) {
+				rval = QLA_SUCCESS;
+				goto dev_ready;
+			}
+		}
+		qla8044_flash_lock_recovery(vha);
+	} else {
+		/* We are trying to perform a recovery here. */
+		if (ha->flags.isp82xx_fw_hung)
+			qla8044_flash_lock_recovery(vha);
+	}
+
+	/* set to DEV_INITIALIZING */
+	ql_log(ql_log_info, vha, 0xb0b2,
+	    "%s: HW State: INITIALIZING\n", __func__);
+	qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+	    QLA8XXX_DEV_INITIALIZING);
+
+	qla8044_idc_unlock(ha);
+	rval = qla8044_start_firmware(vha);
+	qla8044_idc_lock(ha);
+
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_info, vha, 0xb0b3,
+		     "%s: HW State: FAILED\n", __func__);
+		qla8044_clear_drv_active(vha);
+		qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+		    QLA8XXX_DEV_FAILED);
+		return rval;
+	}
+
+	/* For ISP8044, if IDC_CTRL GRACEFUL_RESET_BIT1 is set, clear it once
+	 * the device has gone through the INIT state. */
+	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+	if (idc_ctrl & GRACEFUL_RESET_BIT1) {
+		qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
+		    (idc_ctrl & ~GRACEFUL_RESET_BIT1));
+		ha->fw_dumped = 0;
+	}
+
+dev_ready:
+	ql_log(ql_log_info, vha, 0xb0b4,
+	    "%s: HW State: READY\n", __func__);
+	qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_READY);
+
+	return rval;
+}
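+/*
+ * Bootstrap flow, in brief: if no reset is needed, sample the firmware
+ * heartbeat (PEG_ALIVE_COUNTER) for up to 10 x 200 msecs; a change means
+ * the firmware is already running and the device can go straight to
+ * READY. Otherwise move to INITIALIZING, restart the firmware with the
+ * IDC lock dropped, and mark the device either READY or FAILED.
+ */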
+
+/*-------------------------Reset Sequence Functions-----------------------*/
+static void
+qla8044_dump_reset_seq_hdr(struct scsi_qla_host *vha)
+{
+	u8 *phdr;
+
+	if (!vha->reset_tmplt.buff) {
+		ql_log(ql_log_fatal, vha, 0xb0b5,
+		    "%s: Error Invalid reset_seq_template\n", __func__);
+		return;
+	}
+
+	phdr = vha->reset_tmplt.buff;
+	ql_dbg(ql_dbg_p3p, vha, 0xb0b6,
+	    "Reset Template:\n\t0x%X 0x%X 0x%X 0x%X "
+	    "0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n"
+	    "\t0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n\n",
+	    *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
+	    *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
+	    *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
+	    *(phdr+13), *(phdr+14), *(phdr+15));
+}
+
+/*
+ * qla8044_reset_seq_checksum_test - Validate Reset Sequence template.
+ *
+ * @vha : Pointer to adapter structure
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_reset_seq_checksum_test(struct scsi_qla_host *vha)
+{
+	uint32_t sum =  0;
+	uint16_t *buff = (uint16_t *)vha->reset_tmplt.buff;
+	int u16_count =  vha->reset_tmplt.hdr->size / sizeof(uint16_t);
+
+	while (u16_count-- > 0)
+		sum += *buff++;
+
+	while (sum >> 16)
+		sum = (sum & 0xFFFF) +  (sum >> 16);
+
+	/* checksum of 0 indicates a valid template */
+	if (~sum) {
+		return QLA_SUCCESS;
+	} else {
+		ql_log(ql_log_fatal, vha, 0xb0b7,
+		    "%s: Reset seq checksum failed\n", __func__);
+		return QLA_FUNCTION_FAILED;
+	}
+}
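+/*
+ * The fold above is standard end-around-carry arithmetic: e.g. a raw
+ * 32-bit sum of 0x2fffe folds to 0xfffe + 0x2 = 0x10000, and a second
+ * pass folds that to 0x0000 + 0x1 = 0x1, which is why the fold is a
+ * while loop rather than a single step.
+ */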
+
+/*
+ * qla8044_read_reset_template - Read the reset template from flash,
+ * validate it, and store the stop/start/init sequence offsets in
+ * vha->reset_tmplt.
+ *
+ * @vha : Pointer to adapter structure
+ */
+void
+qla8044_read_reset_template(struct scsi_qla_host *vha)
+{
+	uint8_t *p_buff;
+	uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
+
+	vha->reset_tmplt.seq_error = 0;
+	vha->reset_tmplt.buff = vmalloc(QLA8044_RESTART_TEMPLATE_SIZE);
+	if (vha->reset_tmplt.buff == NULL) {
+		ql_log(ql_log_fatal, vha, 0xb0b8,
+		    "%s: Failed to allocate reset template resources\n",
+		    __func__);
+		goto exit_read_reset_template;
+	}
+
+	p_buff = vha->reset_tmplt.buff;
+	addr = QLA8044_RESET_TEMPLATE_ADDR;
+
+	tmplt_hdr_def_size =
+	    sizeof(struct qla8044_reset_template_hdr) / sizeof(uint32_t);
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0b9,
+	    "%s: Read template hdr size %d from Flash\n",
+	    __func__, tmplt_hdr_def_size);
+
+	/* Copy template header from flash */
+	if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
+		ql_log(ql_log_fatal, vha, 0xb0ba,
+		    "%s: Failed to read reset template\n", __func__);
+		goto exit_read_template_error;
+	}
+
+	vha->reset_tmplt.hdr =
+	 (struct qla8044_reset_template_hdr *) vha->reset_tmplt.buff;
+
+	/* Validate the template header size and signature */
+	tmplt_hdr_size = vha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
+	if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
+	    (vha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
+		ql_log(ql_log_fatal, vha, 0xb0bb,
+		    "%s: Template Header size invalid %d "
+		    "tmplt_hdr_def_size %d!!!\n", __func__,
+		    tmplt_hdr_size, tmplt_hdr_def_size);
+		goto exit_read_template_error;
+	}
+
+	addr = QLA8044_RESET_TEMPLATE_ADDR + vha->reset_tmplt.hdr->hdr_size;
+	p_buff = vha->reset_tmplt.buff + vha->reset_tmplt.hdr->hdr_size;
+	tmplt_hdr_def_size = (vha->reset_tmplt.hdr->size -
+	    vha->reset_tmplt.hdr->hdr_size)/sizeof(uint32_t);
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0bc,
+	    "%s: Read rest of the template size %d\n",
+	    __func__, vha->reset_tmplt.hdr->size);
+
+	/* Copy rest of the template */
+	if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
+		ql_log(ql_log_fatal, vha, 0xb0bd,
+		    "%s: Failed to read reset template\n", __func__);
+		goto exit_read_template_error;
+	}
+
+	/* Integrity check */
+	if (qla8044_reset_seq_checksum_test(vha)) {
+		ql_log(ql_log_fatal, vha, 0xb0be,
+		    "%s: Reset Seq checksum failed!\n", __func__);
+		goto exit_read_template_error;
+	}
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0bf,
+	    "%s: Reset Seq checksum passed! Get stop, "
+	    "start and init seq offsets\n", __func__);
+
+	/* Get STOP, START, INIT sequence offsets */
+	vha->reset_tmplt.init_offset = vha->reset_tmplt.buff +
+	    vha->reset_tmplt.hdr->init_seq_offset;
+
+	vha->reset_tmplt.start_offset = vha->reset_tmplt.buff +
+	    vha->reset_tmplt.hdr->start_seq_offset;
+
+	vha->reset_tmplt.stop_offset = vha->reset_tmplt.buff +
+	    vha->reset_tmplt.hdr->hdr_size;
+
+	qla8044_dump_reset_seq_hdr(vha);
+
+	goto exit_read_reset_template;
+
+exit_read_template_error:
+	vfree(vha->reset_tmplt.buff);
+
+exit_read_reset_template:
+	return;
+}
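+/*
+ * Reset template layout in flash at QLA8044_RESET_TEMPLATE_ADDR, as
+ * parsed above:
+ *
+ *	header (hdr_size bytes)	- signature, sizes, entry count,
+ *				  init/start sequence offsets
+ *	STOP sequence		- immediately after the header
+ *	INIT sequence		- at hdr->init_seq_offset
+ *	START sequence		- at hdr->start_seq_offset
+ *
+ * Each sequence is a chain of reset entry headers terminated by
+ * OPCODE_SEQ_END; OPCODE_TMPL_END ends the whole template.
+ */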
+
+void
+qla8044_set_idc_dontreset(struct scsi_qla_host *vha)
+{
+	uint32_t idc_ctrl;
+	struct qla_hw_data *ha = vha->hw;
+
+	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+	idc_ctrl |= DONTRESET_BIT0;
+	ql_dbg(ql_dbg_p3p, vha, 0xb0c0,
+	    "%s: idc_ctrl = %d\n", __func__, idc_ctrl);
+	qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
+}
+
+inline void
+qla8044_set_rst_ready(struct scsi_qla_host *vha)
+{
+	uint32_t drv_state;
+	struct qla_hw_data *ha = vha->hw;
+
+	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+
+	/* For ISP8044, the drv_state register has one bit per function;
+	 * shift 1 by the port number to set this function's bit. */
+	drv_state |= (1 << ha->portnum);
+
+	ql_log(ql_log_info, vha, 0xb0c1,
+	    "%s(%ld): drv_state: 0x%08x\n",
+	    __func__, vha->host_no, drv_state);
+	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
+}
+
+/**
+ * qla8044_need_reset_handler - Code to start reset sequence
+ * @vha: pointer to adapter structure
+ *
+ * Note: IDC lock must be held upon entry
+ **/
+static void
+qla8044_need_reset_handler(struct scsi_qla_host *vha)
+{
+	uint32_t dev_state = 0, drv_state, drv_active;
+	unsigned long reset_timeout, dev_init_timeout;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_log(ql_log_fatal, vha, 0xb0c2,
+	    "%s: Performing ISP error recovery\n", __func__);
+
+	if (vha->flags.online) {
+		qla8044_idc_unlock(ha);
+		qla2x00_abort_isp_cleanup(vha);
+		ha->isp_ops->get_flash_version(vha, vha->req->ring);
+		ha->isp_ops->nvram_config(vha);
+		qla8044_idc_lock(ha);
+	}
+
+	if (!ha->flags.nic_core_reset_owner) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb0c3,
+		    "%s(%ld): reset acknowledged\n",
+		    __func__, vha->host_no);
+		qla8044_set_rst_ready(vha);
+
+		/* Non-reset owners ACK Reset and wait for device INIT state
+		 * as part of Reset Recovery by Reset Owner
+		 */
+		dev_init_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
+
+		do {
+			if (time_after_eq(jiffies, dev_init_timeout)) {
+				ql_log(ql_log_info, vha, 0xb0c4,
+				    "%s: Non Reset owner DEV INIT "
+				    "TIMEOUT!\n", __func__);
+				break;
+			}
+
+			qla8044_idc_unlock(ha);
+			msleep(1000);
+			qla8044_idc_lock(ha);
+
+			dev_state = qla8044_rd_direct(vha,
+					QLA8044_CRB_DEV_STATE_INDEX);
+		} while (dev_state == QLA8XXX_DEV_NEED_RESET);
+	} else {
+		qla8044_set_rst_ready(vha);
+
+		/* Wait ha->fcoe_reset_timeout seconds for reset ack from all functions */
+		reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
+
+		drv_state = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DRV_STATE_INDEX);
+		drv_active = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+		ql_log(ql_log_info, vha, 0xb0c5,
+		    "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
+		    __func__, vha->host_no, drv_state, drv_active);
+
+		while (drv_state != drv_active) {
+			if (time_after_eq(jiffies, reset_timeout)) {
+				ql_log(ql_log_info, vha, 0xb0c6,
+				    "%s: RESET TIMEOUT! "
+				    "drv_state: 0x%08x, drv_active: 0x%08x\n",
+				    QLA2XXX_DRIVER_NAME, drv_state, drv_active);
+				break;
+			}
+
+			qla8044_idc_unlock(ha);
+			msleep(1000);
+			qla8044_idc_lock(ha);
+
+			drv_state = qla8044_rd_direct(vha,
+			    QLA8044_CRB_DRV_STATE_INDEX);
+			drv_active = qla8044_rd_direct(vha,
+			    QLA8044_CRB_DRV_ACTIVE_INDEX);
+		}
+
+		if (drv_state != drv_active) {
+			ql_log(ql_log_info, vha, 0xb0c7,
+			    "%s(%ld): Reset_owner turning off drv_active "
+			    "of non-acking function 0x%x\n", __func__,
+			    vha->host_no, (drv_active ^ drv_state));
+			drv_active = drv_active & drv_state;
+			qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX,
+			    drv_active);
+		}
+
+		/*
+		 * Clear RESET OWNER; it will be set again at the
+		 * next reset by the next reset owner.
+		 */
+		ha->flags.nic_core_reset_owner = 0;
+
+		/* Start Reset Recovery */
+		qla8044_device_bootstrap(vha);
+	}
+}
+
+static void
+qla8044_set_drv_active(struct scsi_qla_host *vha)
+{
+	uint32_t drv_active;
+	struct qla_hw_data *ha = vha->hw;
+
+	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+	/* For ISP8044, the drv_active register has one bit per function;
+	 * shift 1 by the port number to set this function's bit. */
+	drv_active |= (1 << ha->portnum);
+
+	ql_log(ql_log_info, vha, 0xb0c8,
+	    "%s(%ld): drv_active: 0x%08x\n",
+	    __func__, vha->host_no, drv_active);
+	qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
+}
+
+static void
+qla8044_clear_idc_dontreset(struct scsi_qla_host *vha)
+{
+	uint32_t idc_ctrl;
+	struct qla_hw_data *ha = vha->hw;
+
+	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+	idc_ctrl &= ~DONTRESET_BIT0;
+	ql_log(ql_log_info, vha, 0xb0c9,
+	    "%s: idc_ctrl = %d\n", __func__,
+	    idc_ctrl);
+	qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
+}
+
+static int
+qla8044_set_idc_ver(struct scsi_qla_host *vha)
+{
+	int idc_ver;
+	uint32_t drv_active;
+	int rval = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+	if (drv_active == (1 << ha->portnum)) {
+		idc_ver = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DRV_IDC_VERSION_INDEX);
+		idc_ver &= (~0xFF);
+		idc_ver |= QLA8044_IDC_VER_MAJ_VALUE;
+		qla8044_wr_direct(vha, QLA8044_CRB_DRV_IDC_VERSION_INDEX,
+		    idc_ver);
+		ql_log(ql_log_info, vha, 0xb0ca,
+		    "%s: IDC version updated to %d\n",
+		    __func__, idc_ver);
+	} else {
+		idc_ver = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DRV_IDC_VERSION_INDEX);
+		idc_ver &= 0xFF;
+		if (QLA8044_IDC_VER_MAJ_VALUE != idc_ver) {
+			ql_log(ql_log_info, vha, 0xb0cb,
+			    "%s: qla2xxx driver IDC version %d "
+			    "is not compatible with IDC version %d "
+			    "of other drivers!\n",
+			    __func__, QLA8044_IDC_VER_MAJ_VALUE,
+			    idc_ver);
+			rval = QLA_FUNCTION_FAILED;
+			goto exit_set_idc_ver;
+		}
+	}
+
+	/* Update IDC_MINOR_VERSION */
+	idc_ver = qla8044_rd_reg(ha, QLA8044_CRB_IDC_VER_MINOR);
+	idc_ver &= ~(0x03 << (ha->portnum * 2));
+	idc_ver |= (QLA8044_IDC_VER_MIN_VALUE << (ha->portnum * 2));
+	qla8044_wr_reg(ha, QLA8044_CRB_IDC_VER_MINOR, idc_ver);
+
+exit_set_idc_ver:
+	return rval;
+}
+
+static int
+qla8044_update_idc_reg(struct scsi_qla_host *vha)
+{
+	uint32_t drv_active;
+	int rval = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (vha->flags.init_done)
+		goto exit_update_idc_reg;
+
+	qla8044_idc_lock(ha);
+	qla8044_set_drv_active(vha);
+
+	drv_active = qla8044_rd_direct(vha,
+	    QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+	/* If we are the first driver to load and
+	 * ql2xdontresethba is not set, clear IDC_CTRL BIT0. */
+	if ((drv_active == (1 << ha->portnum)) && !ql2xdontresethba)
+		qla8044_clear_idc_dontreset(vha);
+
+	rval = qla8044_set_idc_ver(vha);
+	if (rval == QLA_FUNCTION_FAILED)
+		qla8044_clear_drv_active(vha);
+	qla8044_idc_unlock(ha);
+
+exit_update_idc_reg:
+	return rval;
+}
+
+/**
+ * qla8044_need_qsnt_handler - Handle the NEED_QUIESCENT device state
+ * @vha: pointer to adapter structure
+ **/
+static void
+qla8044_need_qsnt_handler(struct scsi_qla_host *vha)
+{
+	unsigned long qsnt_timeout;
+	uint32_t drv_state, drv_active, dev_state;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (vha->flags.online)
+		qla2x00_quiesce_io(vha);
+	else
+		return;
+
+	qla8044_set_qsnt_ready(vha);
+
+	/* Wait for 30 secs for all functions to ack qsnt mode */
+	qsnt_timeout = jiffies + (QSNT_ACK_TOV * HZ);
+	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+	/* Shift drv_active left by 1 to line up with drv_state: the
+	 * quiescent-ready bit sits at bit 1 while the drv_active bit
+	 * sits at bit 0. */
+	drv_active = drv_active << 1;
+
+	while (drv_state != drv_active) {
+		if (time_after_eq(jiffies, qsnt_timeout)) {
+			/* Other functions did not ack, changing state to
+			 * DEV_READY
+			 */
+			clear_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+			qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+					    QLA8XXX_DEV_READY);
+			qla8044_clear_qsnt_ready(vha);
+			ql_log(ql_log_info, vha, 0xb0cc,
+			    "Timeout waiting for quiescent ack!!!\n");
+			return;
+		}
+		qla8044_idc_unlock(ha);
+		msleep(1000);
+		qla8044_idc_lock(ha);
+
+		drv_state = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DRV_STATE_INDEX);
+		drv_active = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DRV_ACTIVE_INDEX);
+		drv_active = drv_active << 1;
+	}
+
+	/* All functions have Acked. Set quiescent state */
+	dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+
+	if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
+		qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+		    QLA8XXX_DEV_QUIESCENT);
+		ql_log(ql_log_info, vha, 0xb0cd,
+		    "%s: HW State: QUIESCENT\n", __func__);
+	}
+}
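+/*
+ * Example of the bit alignment above: with functions 0 and 1 active,
+ * drv_active reads 0x3 and is shifted to 0x6; the wait loop then exits
+ * once both functions have set their quiescent-ready bits so that
+ * drv_state also reads 0x6.
+ */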
+
+/*
+ * qla8044_device_state_handler - Adapter state machine
+ * @vha: pointer to host adapter structure.
+ *
+ * Note: IDC lock must be UNLOCKED upon entry
+ **/
+int
+qla8044_device_state_handler(struct scsi_qla_host *vha)
+{
+	uint32_t dev_state;
+	int rval = QLA_SUCCESS;
+	unsigned long dev_init_timeout;
+	struct qla_hw_data *ha = vha->hw;
+
+	rval = qla8044_update_idc_reg(vha);
+	if (rval == QLA_FUNCTION_FAILED)
+		goto exit_error;
+
+	dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+	ql_dbg(ql_dbg_p3p, vha, 0xb0ce,
+	    "Device state is 0x%x = %s\n",
+	    dev_state, dev_state < MAX_STATES ?
+	    qdev_state(dev_state) : "Unknown");
+
+	/* Wait ha->fcoe_dev_init_timeout seconds for the device to go ready */
+	dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
+
+	qla8044_idc_lock(ha);
+
+	while (1) {
+		if (time_after_eq(jiffies, dev_init_timeout)) {
+			ql_log(ql_log_warn, vha, 0xb0cf,
+			    "%s: Device Init Failed 0x%x = %s\n",
+			    QLA2XXX_DRIVER_NAME, dev_state,
+			    dev_state < MAX_STATES ?
+			    qdev_state(dev_state) : "Unknown");
+
+			qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+			    QLA8XXX_DEV_FAILED);
+		}
+
+		dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+		ql_log(ql_log_info, vha, 0xb0d0,
+		    "Device state is 0x%x = %s\n",
+		    dev_state, dev_state < MAX_STATES ?
+		    qdev_state(dev_state) : "Unknown");
+
+		/* NOTE: Make sure idc unlocked upon exit of switch statement */
+		switch (dev_state) {
+		case QLA8XXX_DEV_READY:
+			ha->flags.nic_core_reset_owner = 0;
+			goto exit;
+		case QLA8XXX_DEV_COLD:
+			rval = qla8044_device_bootstrap(vha);
+			goto exit;
+		case QLA8XXX_DEV_INITIALIZING:
+			qla8044_idc_unlock(ha);
+			msleep(1000);
+			qla8044_idc_lock(ha);
+			break;
+		case QLA8XXX_DEV_NEED_RESET:
+			/* For ISP8044, if NEED_RESET is set by any driver,
+			 * it should be honored, irrespective of IDC_CTRL
+			 * DONTRESET_BIT0 */
+			qla8044_need_reset_handler(vha);
+			break;
+		case QLA8XXX_DEV_NEED_QUIESCENT:
+			/* idc locked/unlocked in handler */
+			qla8044_need_qsnt_handler(vha);
+
+			/* Reset the init timeout after qsnt handler */
+			dev_init_timeout = jiffies +
+			    (ha->fcoe_reset_timeout * HZ);
+			break;
+		case QLA8XXX_DEV_QUIESCENT:
+			ql_log(ql_log_info, vha, 0xb0d1,
+			    "HW State: QUIESCENT\n");
+
+			qla8044_idc_unlock(ha);
+			msleep(1000);
+			qla8044_idc_lock(ha);
+
+			/* Reset the init timeout after qsnt handler */
+			dev_init_timeout = jiffies +
+			    (ha->fcoe_reset_timeout * HZ);
+			break;
+		case QLA8XXX_DEV_FAILED:
+			ha->flags.nic_core_reset_owner = 0;
+			qla8044_idc_unlock(ha);
+			qla8xxx_dev_failed_handler(vha);
+			rval = QLA_FUNCTION_FAILED;
+			qla8044_idc_lock(ha);
+			goto exit;
+		default:
+			qla8044_idc_unlock(ha);
+			qla8xxx_dev_failed_handler(vha);
+			rval = QLA_FUNCTION_FAILED;
+			qla8044_idc_lock(ha);
+			goto exit;
+		}
+	}
+exit:
+	qla8044_idc_unlock(ha);
+
+exit_error:
+	return rval;
+}
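+/*
+ * Summary of the IDC states handled above:
+ *	READY		- done; clear reset ownership
+ *	COLD		- bootstrap (load and start the firmware)
+ *	INITIALIZING	- wait one second and re-read the state
+ *	NEED_RESET	- run the reset handler (honored on ISP8044
+ *			  regardless of IDC_CTRL DONTRESET_BIT0)
+ *	NEED_QUIESCENT / QUIESCENT - quiesce handling; the init timeout
+ *			  is re-armed afterwards
+ *	FAILED / other	- fail the device
+ */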
+
+/**
+ * qla8044_check_temp - Check the ISP8044 temperature.
+ * @vha: adapter block pointer.
+ *
+ * Note: The caller should not hold the idc lock.
+ **/
+static int
+qla8044_check_temp(struct scsi_qla_host *vha)
+{
+	uint32_t temp, temp_state, temp_val;
+	int status = QLA_SUCCESS;
+
+	temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
+	temp_state = qla82xx_get_temp_state(temp);
+	temp_val = qla82xx_get_temp_val(temp);
+
+	if (temp_state == QLA82XX_TEMP_PANIC) {
+		ql_log(ql_log_warn, vha, 0xb0d2,
+		    "Device temperature %d degrees C"
+		    " exceeds maximum allowed. Hardware has been shut"
+		    " down\n", temp_val);
+		status = QLA_FUNCTION_FAILED;
+		return status;
+	} else if (temp_state == QLA82XX_TEMP_WARN) {
+		ql_log(ql_log_warn, vha, 0xb0d3,
+		    "Device temperature %d"
+		    " degrees C exceeds operating range."
+		    " Immediate action needed.\n", temp_val);
+	}
+	return 0;
+}
+
+int qla8044_read_temperature(scsi_qla_host_t *vha)
+{
+	uint32_t temp;
+
+	temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
+	return qla82xx_get_temp_val(temp);
+}
+
+/**
+ * qla8044_check_fw_alive - Check firmware health
+ * @vha: Pointer to host adapter structure.
+ *
+ * Context: Interrupt
+ **/
+int
+qla8044_check_fw_alive(struct scsi_qla_host *vha)
+{
+	uint32_t fw_heartbeat_counter;
+	uint32_t halt_status1, halt_status2;
+	int status = QLA_SUCCESS;
+
+	fw_heartbeat_counter = qla8044_rd_direct(vha,
+	    QLA8044_PEG_ALIVE_COUNTER_INDEX);
+
+	/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
+	if (fw_heartbeat_counter == 0xffffffff) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb0d4,
+		    "scsi%ld: %s: Device in frozen "
+		    "state, QLA8044_PEG_ALIVE_COUNTER is 0xffffffff\n",
+		    vha->host_no, __func__);
+		return status;
+	}
+
+	if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
+		vha->seconds_since_last_heartbeat++;
+		/* FW not alive after 2 seconds */
+		if (vha->seconds_since_last_heartbeat == 2) {
+			vha->seconds_since_last_heartbeat = 0;
+			halt_status1 = qla8044_rd_direct(vha,
+			    QLA8044_PEG_HALT_STATUS1_INDEX);
+			halt_status2 = qla8044_rd_direct(vha,
+			    QLA8044_PEG_HALT_STATUS2_INDEX);
+
+			ql_log(ql_log_info, vha, 0xb0d5,
+			    "scsi(%ld): %s, ISP8044 "
+			    "Dumping hw/fw registers:\n"
+			    " PEG_HALT_STATUS1: 0x%x, "
+			    "PEG_HALT_STATUS2: 0x%x,\n",
+			    vha->host_no, __func__, halt_status1,
+			    halt_status2);
+			status = QLA_FUNCTION_FAILED;
+		}
+	} else
+		vha->seconds_since_last_heartbeat = 0;
+
+	vha->fw_heartbeat_counter = fw_heartbeat_counter;
+	return status;
+}
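+/*
+ * The heartbeat check above samples PEG_ALIVE_COUNTER once per call;
+ * two consecutive samples without a change (about two seconds, assuming
+ * the one-second watchdog interval) are treated as a firmware hang, at
+ * which point the PEG halt status registers are dumped and failure is
+ * returned.
+ */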
+
+void
+qla8044_watchdog(struct scsi_qla_host *vha)
+{
+	uint32_t dev_state, halt_status;
+	int halt_status_unrecoverable = 0;
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Don't poll if a reset is in progress or FW is hung in quiescent state */
+	if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags) ||
+	    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {
+		dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+
+		if (qla8044_check_temp(vha)) {
+			set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
+			ha->flags.isp82xx_fw_hung = 1;
+			qla2xxx_wake_dpc(vha);
+		} else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
+			   !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
+			ql_log(ql_log_info, vha, 0xb0d6,
+			    "%s: HW State: NEED RESET!\n",
+			    __func__);
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+		} else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
+		    !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
+			ql_log(ql_log_info, vha, 0xb0d7,
+			    "%s: HW State: NEED QUIES detected!\n",
+			    __func__);
+			set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+		} else  {
+			/* Check firmware health */
+			if (qla8044_check_fw_alive(vha)) {
+				halt_status = qla8044_rd_direct(vha,
+					QLA8044_PEG_HALT_STATUS1_INDEX);
+				if (halt_status &
+				    QLA8044_HALT_STATUS_FW_RESET) {
+					ql_log(ql_log_fatal, vha,
+					    0xb0d8, "%s: Firmware "
+					    "error detected device "
+					    "is being reset\n",
+					    __func__);
+				} else if (halt_status &
+					    QLA8044_HALT_STATUS_UNRECOVERABLE) {
+						halt_status_unrecoverable = 1;
+				}
+
+				/* Since we cannot change dev_state in
+				 * interrupt context, set the appropriate
+				 * DPC flag and wake up the DPC. */
+				if (halt_status_unrecoverable) {
+					set_bit(ISP_UNRECOVERABLE,
+					    &vha->dpc_flags);
+				} else {
+					if (dev_state ==
+					    QLA8XXX_DEV_QUIESCENT) {
+						set_bit(FCOE_CTX_RESET_NEEDED,
+						    &vha->dpc_flags);
+						ql_log(ql_log_info, vha, 0xb0d9,
+						    "%s: FW CONTEXT Reset "
+						    "needed!\n", __func__);
+					} else {
+						ql_log(ql_log_info, vha,
+						    0xb0da, "%s: "
+						    "detect abort needed\n",
+						    __func__);
+						set_bit(ISP_ABORT_NEEDED,
+						    &vha->dpc_flags);
+						qla82xx_clear_pending_mbx(vha);
+					}
+				}
+				ha->flags.isp82xx_fw_hung = 1;
+				ql_log(ql_log_warn, vha, 0xb10a,
+				    "Firmware hung.\n");
+				qla2xxx_wake_dpc(vha);
+			}
+		}
+
+	}
+}
+
+static int
+qla8044_minidump_process_control(struct scsi_qla_host *vha,
+				 struct qla8044_minidump_entry_hdr *entry_hdr)
+{
+	struct qla8044_minidump_entry_crb *crb_entry;
+	uint32_t read_value, opcode, poll_time, addr, index;
+	uint32_t crb_addr, rval = QLA_SUCCESS;
+	unsigned long wtime;
+	struct qla8044_minidump_template_hdr *tmplt_hdr;
+	int i;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0dd, "Entering fn: %s\n", __func__);
+	tmplt_hdr = (struct qla8044_minidump_template_hdr *)
+		ha->md_tmplt_hdr;
+	crb_entry = (struct qla8044_minidump_entry_crb *)entry_hdr;
+
+	crb_addr = crb_entry->addr;
+	for (i = 0; i < crb_entry->op_count; i++) {
+		opcode = crb_entry->crb_ctrl.opcode;
+
+		if (opcode & QLA82XX_DBG_OPCODE_WR) {
+			qla8044_wr_reg_indirect(vha, crb_addr,
+			    crb_entry->value_1);
+			opcode &= ~QLA82XX_DBG_OPCODE_WR;
+		}
+
+		if (opcode & QLA82XX_DBG_OPCODE_RW) {
+			qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
+			qla8044_wr_reg_indirect(vha, crb_addr, read_value);
+			opcode &= ~QLA82XX_DBG_OPCODE_RW;
+		}
+
+		if (opcode & QLA82XX_DBG_OPCODE_AND) {
+			qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
+			read_value &= crb_entry->value_2;
+			opcode &= ~QLA82XX_DBG_OPCODE_AND;
+			if (opcode & QLA82XX_DBG_OPCODE_OR) {
+				read_value |= crb_entry->value_3;
+				opcode &= ~QLA82XX_DBG_OPCODE_OR;
+			}
+			qla8044_wr_reg_indirect(vha, crb_addr, read_value);
+		}
+		if (opcode & QLA82XX_DBG_OPCODE_OR) {
+			qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
+			read_value |= crb_entry->value_3;
+			qla8044_wr_reg_indirect(vha, crb_addr, read_value);
+			opcode &= ~QLA82XX_DBG_OPCODE_OR;
+		}
+		if (opcode & QLA82XX_DBG_OPCODE_POLL) {
+			poll_time = crb_entry->crb_strd.poll_timeout;
+			wtime = jiffies + poll_time;
+			qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
+
+			do {
+				if ((read_value & crb_entry->value_2) ==
+				    crb_entry->value_1) {
+					break;
+				} else if (time_after_eq(jiffies, wtime)) {
+					/* capturing dump failed */
+					rval = QLA_FUNCTION_FAILED;
+					break;
+				} else {
+					qla8044_rd_reg_indirect(vha,
+					    crb_addr, &read_value);
+				}
+			} while (1);
+			opcode &= ~QLA82XX_DBG_OPCODE_POLL;
+		}
+
+		if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
+			if (crb_entry->crb_strd.state_index_a) {
+				index = crb_entry->crb_strd.state_index_a;
+				addr = tmplt_hdr->saved_state_array[index];
+			} else {
+				addr = crb_addr;
+			}
+
+			qla8044_rd_reg_indirect(vha, addr, &read_value);
+			index = crb_entry->crb_ctrl.state_index_v;
+			tmplt_hdr->saved_state_array[index] = read_value;
+			opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
+		}
+
+		if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
+			if (crb_entry->crb_strd.state_index_a) {
+				index = crb_entry->crb_strd.state_index_a;
+				addr = tmplt_hdr->saved_state_array[index];
+			} else {
+				addr = crb_addr;
+			}
+
+			if (crb_entry->crb_ctrl.state_index_v) {
+				index = crb_entry->crb_ctrl.state_index_v;
+				read_value =
+				    tmplt_hdr->saved_state_array[index];
+			} else {
+				read_value = crb_entry->value_1;
+			}
+
+			qla8044_wr_reg_indirect(vha, addr, read_value);
+			opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
+		}
+
+		if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
+			index = crb_entry->crb_ctrl.state_index_v;
+			read_value = tmplt_hdr->saved_state_array[index];
+			read_value <<= crb_entry->crb_ctrl.shl;
+			read_value >>= crb_entry->crb_ctrl.shr;
+			if (crb_entry->value_2)
+				read_value &= crb_entry->value_2;
+			read_value |= crb_entry->value_3;
+			read_value += crb_entry->value_1;
+			tmplt_hdr->saved_state_array[index] = read_value;
+			opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
+		}
+		crb_addr += crb_entry->crb_strd.addr_stride;
+	}
+	return rval;
+}
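+/*
+ * The CRB control opcode above is a bitmask, so one template entry can
+ * combine several operations; they are applied in the fixed order WR,
+ * RW, AND (optionally fused with OR), OR, POLL, RDSTATE, WRSTATE,
+ * MDSTATE, and each handled bit is cleared from the local opcode copy.
+ */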
+
+static void
+qla8044_minidump_process_rdcrb(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+	uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+	struct qla8044_minidump_entry_crb *crb_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0de, "Entering fn: %s\n", __func__);
+	crb_hdr = (struct qla8044_minidump_entry_crb *)entry_hdr;
+	r_addr = crb_hdr->addr;
+	r_stride = crb_hdr->crb_strd.addr_stride;
+	loop_cnt = crb_hdr->op_count;
+
+	for (i = 0; i < loop_cnt; i++) {
+		qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+		*data_ptr++ = r_addr;
+		*data_ptr++ = r_value;
+		r_addr += r_stride;
+	}
+	*d_ptr = data_ptr;
+}
+
+static int
+qla8044_minidump_process_rdmem(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+	uint32_t r_addr, r_value, r_data;
+	uint32_t i, j, loop_cnt;
+	struct qla8044_minidump_entry_rdmem *m_hdr;
+	unsigned long flags;
+	uint32_t *data_ptr = *d_ptr;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0df, "Entering fn: %s\n", __func__);
+	m_hdr = (struct qla8044_minidump_entry_rdmem *)entry_hdr;
+	r_addr = m_hdr->read_addr;
+	loop_cnt = m_hdr->read_data_size/16;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0f0,
+	    "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
+	    __func__, r_addr, m_hdr->read_data_size);
+
+	if (r_addr & 0xf) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb0f1,
+		    "[%s]: Read addr 0x%x not 16-byte aligned\n",
+		    __func__, r_addr);
+		return QLA_FUNCTION_FAILED;
+	}
+
+	if (m_hdr->read_data_size % 16) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb0f2,
+		    "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
+		    __func__, m_hdr->read_data_size);
+		return QLA_FUNCTION_FAILED;
+	}
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0f3,
+	    "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
+	    __func__, r_addr, m_hdr->read_data_size, loop_cnt);
+
+	write_lock_irqsave(&ha->hw_lock, flags);
+	for (i = 0; i < loop_cnt; i++) {
+		qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_LO, r_addr);
+		r_value = 0;
+		qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, r_value);
+		r_value = MIU_TA_CTL_ENABLE;
+		qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
+		r_value = MIU_TA_CTL_START_ENABLE;
+		qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
+
+		for (j = 0; j < MAX_CTL_CHECK; j++) {
+			qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
+			    &r_value);
+			if ((r_value & MIU_TA_CTL_BUSY) == 0)
+				break;
+		}
+
+		if (j >= MAX_CTL_CHECK) {
+			printk_ratelimited(KERN_ERR
+			    "%s: failed to read through agent\n", __func__);
+			write_unlock_irqrestore(&ha->hw_lock, flags);
+			return QLA_SUCCESS;
+		}
+
+		for (j = 0; j < 4; j++) {
+			qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_RDDATA[j],
+			    &r_data);
+			*data_ptr++ = r_data;
+		}
+
+		r_addr += 16;
+	}
+	write_unlock_irqrestore(&ha->hw_lock, flags);
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0f4,
+	    "Leaving fn: %s datacount: 0x%x\n",
+	     __func__, (loop_cnt * 16));
+
+	*d_ptr = data_ptr;
+	return QLA_SUCCESS;
+}
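+/*
+ * Read sequence per 16-byte chunk above: program TEST_AGT_ADDR_LO/HI,
+ * kick TEST_AGT_CTRL with ENABLE and then START_ENABLE, poll the BUSY
+ * bit up to MAX_CTL_CHECK times, then pull the chunk out of the four
+ * TEST_AGT_RDDATA registers.
+ */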
+
+/* ISP83xx-style flash read for the _RDROM and _BOARD minidump entries */
+static uint32_t
+qla8044_minidump_process_rdrom(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+	uint32_t fl_addr, u32_count, rval;
+	struct qla8044_minidump_entry_rdrom *rom_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	rom_hdr = (struct qla8044_minidump_entry_rdrom *)entry_hdr;
+	fl_addr = rom_hdr->read_addr;
+	u32_count = (rom_hdr->read_data_size)/sizeof(uint32_t);
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0f5, "[%s]: fl_addr: 0x%x, count: 0x%x\n",
+	    __func__, fl_addr, u32_count);
+
+	rval = qla8044_lockless_flash_read_u32(vha, fl_addr,
+	    (u8 *)(data_ptr), u32_count);
+
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_fatal, vha, 0xb0f6,
+		    "%s: Flash Read Error, Count=%d\n", __func__, u32_count);
+		return QLA_FUNCTION_FAILED;
+	} else {
+		data_ptr += u32_count;
+		*d_ptr = data_ptr;
+		return QLA_SUCCESS;
+	}
+}
+
+static void
+qla8044_mark_entry_skipped(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr, int index)
+{
+	entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
+
+	ql_log(ql_log_info, vha, 0xb0f7,
+	    "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
+	    vha->host_no, index, entry_hdr->entry_type,
+	    entry_hdr->d_ctrl.entry_capture_mask);
+}
+
+static int
+qla8044_minidump_process_l2tag(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr,
+				 uint32_t **d_ptr)
+{
+	uint32_t addr, r_addr, c_addr, t_r_addr;
+	uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+	unsigned long p_wait, w_time, p_mask;
+	uint32_t c_value_w, c_value_r;
+	struct qla8044_minidump_entry_cache *cache_hdr;
+	int rval = QLA_FUNCTION_FAILED;
+	uint32_t *data_ptr = *d_ptr;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0f8, "Entering fn: %s\n", __func__);
+	cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr;
+
+	loop_count = cache_hdr->op_count;
+	r_addr = cache_hdr->read_addr;
+	c_addr = cache_hdr->control_addr;
+	c_value_w = cache_hdr->cache_ctrl.write_value;
+
+	t_r_addr = cache_hdr->tag_reg_addr;
+	t_value = cache_hdr->addr_ctrl.init_tag_value;
+	r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+	p_wait = cache_hdr->cache_ctrl.poll_wait;
+	p_mask = cache_hdr->cache_ctrl.poll_mask;
+
+	for (i = 0; i < loop_count; i++) {
+		qla8044_wr_reg_indirect(vha, t_r_addr, t_value);
+		if (c_value_w)
+			qla8044_wr_reg_indirect(vha, c_addr, c_value_w);
+
+		if (p_mask) {
+			w_time = jiffies + p_wait;
+			do {
+				qla8044_rd_reg_indirect(vha, c_addr,
+				    &c_value_r);
+				if ((c_value_r & p_mask) == 0) {
+					break;
+				} else if (time_after_eq(jiffies, w_time)) {
+					/* capturing dump failed */
+					return rval;
+				}
+			} while (1);
+		}
+
+		addr = r_addr;
+		for (k = 0; k < r_cnt; k++) {
+			qla8044_rd_reg_indirect(vha, addr, &r_value);
+			*data_ptr++ = r_value;
+			addr += cache_hdr->read_ctrl.read_addr_stride;
+		}
+		t_value += cache_hdr->addr_ctrl.tag_value_stride;
+	}
+	*d_ptr = data_ptr;
+	return QLA_SUCCESS;
+}
+
+static void
+qla8044_minidump_process_l1cache(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+	uint32_t addr, r_addr, c_addr, t_r_addr;
+	uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+	uint32_t c_value_w;
+	struct qla8044_minidump_entry_cache *cache_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr;
+	loop_count = cache_hdr->op_count;
+	r_addr = cache_hdr->read_addr;
+	c_addr = cache_hdr->control_addr;
+	c_value_w = cache_hdr->cache_ctrl.write_value;
+
+	t_r_addr = cache_hdr->tag_reg_addr;
+	t_value = cache_hdr->addr_ctrl.init_tag_value;
+	r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+
+	for (i = 0; i < loop_count; i++) {
+		qla8044_wr_reg_indirect(vha, t_r_addr, t_value);
+		qla8044_wr_reg_indirect(vha, c_addr, c_value_w);
+		addr = r_addr;
+		for (k = 0; k < r_cnt; k++) {
+			qla8044_rd_reg_indirect(vha, addr, &r_value);
+			*data_ptr++ = r_value;
+			addr += cache_hdr->read_ctrl.read_addr_stride;
+		}
+		t_value += cache_hdr->addr_ctrl.tag_value_stride;
+	}
+	*d_ptr = data_ptr;
+}
+
+static void
+qla8044_minidump_process_rdocm(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+	uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+	struct qla8044_minidump_entry_rdocm *ocm_hdr;
+	uint32_t *data_ptr = *d_ptr;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0f9, "Entering fn: %s\n", __func__);
+
+	ocm_hdr = (struct qla8044_minidump_entry_rdocm *)entry_hdr;
+	r_addr = ocm_hdr->read_addr;
+	r_stride = ocm_hdr->read_addr_stride;
+	loop_cnt = ocm_hdr->op_count;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0fa,
+	    "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
+	    __func__, r_addr, r_stride, loop_cnt);
+
+	for (i = 0; i < loop_cnt; i++) {
+		r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
+		*data_ptr++ = r_value;
+		r_addr += r_stride;
+	}
+	ql_dbg(ql_dbg_p3p, vha, 0xb0fb, "Leaving fn: %s datacount: 0x%lx\n",
+	    __func__, (unsigned long)(loop_cnt * sizeof(uint32_t)));
+
+	*d_ptr = data_ptr;
+}
+
+static void
+qla8044_minidump_process_rdmux(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr,
+	uint32_t **d_ptr)
+{
+	uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
+	struct qla8044_minidump_entry_mux *mux_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0fc, "Entering fn: %s\n", __func__);
+
+	mux_hdr = (struct qla8044_minidump_entry_mux *)entry_hdr;
+	r_addr = mux_hdr->read_addr;
+	s_addr = mux_hdr->select_addr;
+	s_stride = mux_hdr->select_value_stride;
+	s_value = mux_hdr->select_value;
+	loop_cnt = mux_hdr->op_count;
+
+	for (i = 0; i < loop_cnt; i++) {
+		qla8044_wr_reg_indirect(vha, s_addr, s_value);
+		qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+		*data_ptr++ = s_value;
+		*data_ptr++ = r_value;
+		s_value += s_stride;
+	}
+	*d_ptr = data_ptr;
+}
+
+static void
+qla8044_minidump_process_queue(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr,
+	uint32_t **d_ptr)
+{
+	uint32_t s_addr, r_addr;
+	uint32_t r_stride, r_value, r_cnt, qid = 0;
+	uint32_t i, k, loop_cnt;
+	struct qla8044_minidump_entry_queue *q_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0fd, "Entering fn: %s\n", __func__);
+	q_hdr = (struct qla8044_minidump_entry_queue *)entry_hdr;
+	s_addr = q_hdr->select_addr;
+	r_cnt = q_hdr->rd_strd.read_addr_cnt;
+	r_stride = q_hdr->rd_strd.read_addr_stride;
+	loop_cnt = q_hdr->op_count;
+
+	for (i = 0; i < loop_cnt; i++) {
+		qla8044_wr_reg_indirect(vha, s_addr, qid);
+		r_addr = q_hdr->read_addr;
+		for (k = 0; k < r_cnt; k++) {
+			qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+			*data_ptr++ = r_value;
+			r_addr += r_stride;
+		}
+		qid += q_hdr->q_strd.queue_id_stride;
+	}
+	*d_ptr = data_ptr;
+}
+
+/* ISP83xx functions to process new minidump entries... */
+static uint32_t
+qla8044_minidump_process_pollrd(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr,
+	uint32_t **d_ptr)
+{
+	uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask;
+	uint16_t s_stride, i;
+	struct qla8044_minidump_entry_pollrd *pollrd_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	pollrd_hdr = (struct qla8044_minidump_entry_pollrd *) entry_hdr;
+	s_addr = pollrd_hdr->select_addr;
+	r_addr = pollrd_hdr->read_addr;
+	s_value = pollrd_hdr->select_value;
+	s_stride = pollrd_hdr->select_value_stride;
+
+	poll_wait = pollrd_hdr->poll_wait;
+	poll_mask = pollrd_hdr->poll_mask;
+
+	for (i = 0; i < pollrd_hdr->op_count; i++) {
+		qla8044_wr_reg_indirect(vha, s_addr, s_value);
+		poll_wait = pollrd_hdr->poll_wait;
+		while (1) {
+			qla8044_rd_reg_indirect(vha, s_addr, &r_value);
+			if ((r_value & poll_mask) != 0) {
+				break;
+			} else {
+				usleep_range(1000, 1100);
+				if (--poll_wait == 0) {
+					ql_log(ql_log_fatal, vha, 0xb0fe,
+					    "%s: TIMEOUT\n", __func__);
+					goto error;
+				}
+			}
+		}
+		qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+		*data_ptr++ = s_value;
+		*data_ptr++ = r_value;
+
+		s_value += s_stride;
+	}
+	*d_ptr = data_ptr;
+	return QLA_SUCCESS;
+
+error:
+	return QLA_FUNCTION_FAILED;
+}
+
+static void
+qla8044_minidump_process_rdmux2(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+	uint32_t sel_val1, sel_val2, t_sel_val, data, i;
+	uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr;
+	struct qla8044_minidump_entry_rdmux2 *rdmux2_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	rdmux2_hdr = (struct qla8044_minidump_entry_rdmux2 *) entry_hdr;
+	sel_val1 = rdmux2_hdr->select_value_1;
+	sel_val2 = rdmux2_hdr->select_value_2;
+	sel_addr1 = rdmux2_hdr->select_addr_1;
+	sel_addr2 = rdmux2_hdr->select_addr_2;
+	sel_val_mask = rdmux2_hdr->select_value_mask;
+	read_addr = rdmux2_hdr->read_addr;
+
+	for (i = 0; i < rdmux2_hdr->op_count; i++) {
+		qla8044_wr_reg_indirect(vha, sel_addr1, sel_val1);
+		t_sel_val = sel_val1 & sel_val_mask;
+		*data_ptr++ = t_sel_val;
+
+		qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val);
+		qla8044_rd_reg_indirect(vha, read_addr, &data);
+
+		*data_ptr++ = data;
+
+		qla8044_wr_reg_indirect(vha, sel_addr1, sel_val2);
+		t_sel_val = sel_val2 & sel_val_mask;
+		*data_ptr++ = t_sel_val;
+
+		qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val);
+		qla8044_rd_reg_indirect(vha, read_addr, &data);
+
+		*data_ptr++ = data;
+
+		sel_val1 += rdmux2_hdr->select_value_stride;
+		sel_val2 += rdmux2_hdr->select_value_stride;
+	}
+
+	*d_ptr = data_ptr;
+}
+
+static uint32_t
+qla8044_minidump_process_pollrdmwr(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr,
+	uint32_t **d_ptr)
+{
+	uint32_t poll_wait, poll_mask, r_value, data;
+	uint32_t addr_1, addr_2, value_1, value_2;
+	struct qla8044_minidump_entry_pollrdmwr *poll_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	poll_hdr = (struct qla8044_minidump_entry_pollrdmwr *) entry_hdr;
+	addr_1 = poll_hdr->addr_1;
+	addr_2 = poll_hdr->addr_2;
+	value_1 = poll_hdr->value_1;
+	value_2 = poll_hdr->value_2;
+	poll_mask = poll_hdr->poll_mask;
+
+	qla8044_wr_reg_indirect(vha, addr_1, value_1);
+
+	poll_wait = poll_hdr->poll_wait;
+	while (1) {
+		qla8044_rd_reg_indirect(vha, addr_1, &r_value);
+
+		if ((r_value & poll_mask) != 0) {
+			break;
+		} else {
+			usleep_range(1000, 1100);
+			if (--poll_wait == 0) {
+				ql_log(ql_log_fatal, vha, 0xb0ff,
+				    "%s: TIMEOUT\n", __func__);
+				goto error;
+			}
+		}
+	}
+
+	qla8044_rd_reg_indirect(vha, addr_2, &data);
+	data &= poll_hdr->modify_mask;
+	qla8044_wr_reg_indirect(vha, addr_2, data);
+	qla8044_wr_reg_indirect(vha, addr_1, value_2);
+
+	poll_wait = poll_hdr->poll_wait;
+	while (1) {
+		qla8044_rd_reg_indirect(vha, addr_1, &r_value);
+
+		if ((r_value & poll_mask) != 0) {
+			break;
+		} else {
+			usleep_range(1000, 1100);
+			if (--poll_wait == 0) {
+				ql_log(ql_log_fatal, vha, 0xb100,
+				    "%s: TIMEOUT2\n", __func__);
+				goto error;
+			}
+		}
+	}
+
+	*data_ptr++ = addr_2;
+	*data_ptr++ = data;
+
+	*d_ptr = data_ptr;
+
+	return QLA_SUCCESS;
+
+error:
+	return QLA_FUNCTION_FAILED;
+}
+
+#define ISP8044_PEX_DMA_ENGINE_INDEX		8
+#define ISP8044_PEX_DMA_BASE_ADDRESS		0x77320000
+#define ISP8044_PEX_DMA_NUM_OFFSET		0x10000
+#define ISP8044_PEX_DMA_CMD_ADDR_LOW		0x0
+#define ISP8044_PEX_DMA_CMD_ADDR_HIGH		0x04
+#define ISP8044_PEX_DMA_CMD_STS_AND_CNTRL	0x08
+
+#define ISP8044_PEX_DMA_READ_SIZE	(16 * 1024)
+#define ISP8044_PEX_DMA_MAX_WAIT	(100 * 100) /* Max wait of 100 msecs */
+
+static int
+qla8044_check_dma_engine_state(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	int rval = QLA_SUCCESS;
+	uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
+	uint64_t dma_base_addr = 0;
+	struct qla8044_minidump_template_hdr *tmplt_hdr = NULL;
+
+	tmplt_hdr = ha->md_tmplt_hdr;
+	dma_eng_num =
+	    tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX];
+	dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS +
+		(dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET);
+
+	/* Read the pex-dma's command-status-and-control register. */
+	rval = qla8044_rd_reg_indirect(vha,
+	    (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL),
+	    &cmd_sts_and_cntrl);
+	if (rval)
+		return QLA_FUNCTION_FAILED;
+
+	/* Check if requested pex-dma engine is available. */
+	if (cmd_sts_and_cntrl & BIT_31)
+		return QLA_SUCCESS;
+
+	return QLA_FUNCTION_FAILED;
+}
+
+static int
+qla8044_start_pex_dma(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr)
+{
+	struct qla_hw_data *ha = vha->hw;
+	int rval = QLA_SUCCESS, wait = 0;
+	uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
+	uint64_t dma_base_addr = 0;
+	struct qla8044_minidump_template_hdr *tmplt_hdr = NULL;
+
+	tmplt_hdr = ha->md_tmplt_hdr;
+	dma_eng_num =
+	    tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX];
+	dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS +
+		(dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET);
+
+	rval = qla8044_wr_reg_indirect(vha,
+	    dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_LOW,
+	    m_hdr->desc_card_addr);
+	if (rval)
+		goto error_exit;
+
+	rval = qla8044_wr_reg_indirect(vha,
+	    dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_HIGH, 0);
+	if (rval)
+		goto error_exit;
+
+	rval = qla8044_wr_reg_indirect(vha,
+	    dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL,
+	    m_hdr->start_dma_cmd);
+	if (rval)
+		goto error_exit;
+
+	/* Wait for dma operation to complete. */
+	for (wait = 0; wait < ISP8044_PEX_DMA_MAX_WAIT; wait++) {
+		rval = qla8044_rd_reg_indirect(vha,
+		    (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL),
+		    &cmd_sts_and_cntrl);
+		if (rval)
+			goto error_exit;
+
+		if ((cmd_sts_and_cntrl & BIT_1) == 0)
+			break;
+
+		udelay(10);
+	}
+
+	/* Wait a max of 100 ms, otherwise fall back to rdmem entry read */
+	if (wait >= ISP8044_PEX_DMA_MAX_WAIT) {
+		rval = QLA_FUNCTION_FAILED;
+		goto error_exit;
+	}
+
+error_exit:
+	return rval;
+}
+
+static int
+qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+	struct qla_hw_data *ha = vha->hw;
+	int rval = QLA_SUCCESS;
+	struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr = NULL;
+	uint32_t chunk_size, read_size;
+	uint8_t *data_ptr = (uint8_t *)*d_ptr;
+	void *rdmem_buffer = NULL;
+	dma_addr_t rdmem_dma;
+	struct qla8044_pex_dma_descriptor dma_desc;
+
+	rval = qla8044_check_dma_engine_state(vha);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb147,
+		    "DMA engine not available. Fallback to rdmem-read.\n");
+		return QLA_FUNCTION_FAILED;
+	}
+
+	m_hdr = (void *)entry_hdr;
+
+	rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev,
+	    ISP8044_PEX_DMA_READ_SIZE, &rdmem_dma, GFP_KERNEL);
+	if (!rdmem_buffer) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb148,
+		    "Unable to allocate rdmem dma buffer\n");
+		return QLA_FUNCTION_FAILED;
+	}
+
+	/* Prepare pex-dma descriptor to be written to MS memory. */
+	/* dma-desc-cmd layout:
+	 *		0-3: dma-desc-cmd 0-3
+	 *		4-7: pcid function number
+	 *		8-15: dma-desc-cmd 8-15
+	 * dma_bus_addr: dma buffer address
+	 * cmd.read_data_size: amount of data-chunk to be read.
+	 */
+	dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f);
+	dma_desc.cmd.dma_desc_cmd |=
+	    ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4);
+
+	dma_desc.dma_bus_addr = rdmem_dma;
+	dma_desc.cmd.read_data_size = chunk_size = ISP8044_PEX_DMA_READ_SIZE;
+	read_size = 0;
+
+	/*
+	 * Perform rdmem operation using pex-dma.
+	 * Prepare dma in chunks of ISP8044_PEX_DMA_READ_SIZE.
+	 */
+	while (read_size < m_hdr->read_data_size) {
+		if (m_hdr->read_data_size - read_size <
+		    ISP8044_PEX_DMA_READ_SIZE) {
+			chunk_size = (m_hdr->read_data_size - read_size);
+			dma_desc.cmd.read_data_size = chunk_size;
+		}
+
+		dma_desc.src_addr = m_hdr->read_addr + read_size;
+
+		/* Prepare: Write pex-dma descriptor to MS memory. */
+		rval = qla8044_ms_mem_write_128b(vha,
+		    m_hdr->desc_card_addr, (void *)&dma_desc,
+		    (sizeof(struct qla8044_pex_dma_descriptor)/16));
+		if (rval) {
+			ql_log(ql_log_warn, vha, 0xb14a,
+			    "%s: Error writing rdmem-dma-init to MS !!!\n",
+			    __func__);
+			goto error_exit;
+		}
+		ql_dbg(ql_dbg_p3p, vha, 0xb14b,
+		    "%s: Dma-descriptor: Instruct for rdmem dma "
+		    "(chunk_size 0x%x).\n", __func__, chunk_size);
+
+		/* Execute: Start pex-dma operation. */
+		rval = qla8044_start_pex_dma(vha, m_hdr);
+		if (rval)
+			goto error_exit;
+
+		memcpy(data_ptr, rdmem_buffer, chunk_size);
+		data_ptr += chunk_size;
+		read_size += chunk_size;
+	}
+
+	*d_ptr = (void *)data_ptr;
+
+error_exit:
+	if (rdmem_buffer)
+		dma_free_coherent(&ha->pdev->dev, ISP8044_PEX_DMA_READ_SIZE,
+		    rdmem_buffer, rdmem_dma);
+
+	return rval;
+}
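
Everything driver-specific aside, the read loop above is plain fixed-chunk slicing: full 16 KiB descriptors until the remainder runs short, then one final short descriptor. The slicing on its own (names are mine):

#include <stdint.h>
#include <string.h>

#define CHUNK_SIZE	(16 * 1024)	/* ISP8044_PEX_DMA_READ_SIZE */

/* Copy 'total' bytes in CHUNK_SIZE slices with a short final slice;
 * each slice corresponds to one descriptor write + DMA round trip. */
static void chunked_copy(uint8_t *dst, const uint8_t *src, size_t total)
{
	size_t done = 0;

	while (done < total) {
		size_t chunk = total - done;

		if (chunk > CHUNK_SIZE)
			chunk = CHUNK_SIZE;
		memcpy(dst + done, src + done, chunk);
		done += chunk;
	}
}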
+
+/*
+ * qla8044_collect_md_data - Retrieve firmware minidump data.
+ * @vha: pointer to scsi_qla_host structure
+ */
+int
+qla8044_collect_md_data(struct scsi_qla_host *vha)
+{
+	int num_entry_hdr = 0;
+	struct qla8044_minidump_entry_hdr *entry_hdr;
+	struct qla8044_minidump_template_hdr *tmplt_hdr;
+	uint32_t *data_ptr;
+	uint32_t data_collected = 0, f_capture_mask;
+	int i, rval = QLA_FUNCTION_FAILED;
+	uint64_t now;
+	uint32_t timestamp, idc_control;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!ha->md_dump) {
+		ql_log(ql_log_info, vha, 0xb101,
+		    "%s(%ld) No buffer to dump\n",
+		    __func__, vha->host_no);
+		return rval;
+	}
+
+	if (ha->fw_dumped) {
+		ql_log(ql_log_warn, vha, 0xb10d,
+		    "Firmware has been previously dumped (%p) "
+		    "-- ignoring request.\n", ha->fw_dump);
+		goto md_failed;
+	}
+
+	ha->fw_dumped = 0;
+
+	if (!ha->md_tmplt_hdr || !ha->md_dump) {
+		ql_log(ql_log_warn, vha, 0xb10e,
+		    "Memory not allocated for minidump capture\n");
+		goto md_failed;
+	}
+
+	qla8044_idc_lock(ha);
+	idc_control = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+	if (idc_control & GRACEFUL_RESET_BIT1) {
+		ql_log(ql_log_warn, vha, 0xb112,
+		    "Forced reset from application, "
+		    "ignore minidump capture\n");
+		qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
+		    (idc_control & ~GRACEFUL_RESET_BIT1));
+		qla8044_idc_unlock(ha);
+
+		goto md_failed;
+	}
+	qla8044_idc_unlock(ha);
+
+	if (qla82xx_validate_template_chksum(vha)) {
+		ql_log(ql_log_info, vha, 0xb109,
+		    "Template checksum validation error\n");
+		goto md_failed;
+	}
+
+	tmplt_hdr = (struct qla8044_minidump_template_hdr *)
+		ha->md_tmplt_hdr;
+	data_ptr = (uint32_t *)ha->md_dump;
+	num_entry_hdr = tmplt_hdr->num_of_entries;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb11a,
+	    "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level);
+
+	f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF;
+
+	/* Validate whether required debug level is set */
+	if ((f_capture_mask & 0x3) != 0x3) {
+		ql_log(ql_log_warn, vha, 0xb10f,
+		    "Minimum required capture mask[0x%x] level not set\n",
+		    f_capture_mask);
+	}
+	tmplt_hdr->driver_capture_mask = ql2xmdcapmask;
+	ql_log(ql_log_info, vha, 0xb102,
+	    "[%s]: starting data ptr: %p\n",
+	    __func__, data_ptr);
+	ql_log(ql_log_info, vha, 0xb10b,
+	    "[%s]: no of entry headers in Template: 0x%x\n",
+	    __func__, num_entry_hdr);
+	ql_log(ql_log_info, vha, 0xb10c,
+	    "[%s]: Total_data_size 0x%x, %d obtained\n",
+	    __func__, ha->md_dump_size, ha->md_dump_size);
+
+	/* Update current timestamp before taking dump */
+	now = get_jiffies_64();
+	timestamp = (u32)(jiffies_to_msecs(now) / 1000);
+	tmplt_hdr->driver_timestamp = timestamp;
+
+	entry_hdr = (struct qla8044_minidump_entry_hdr *)
+		(((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
+	tmplt_hdr->saved_state_array[QLA8044_SS_OCM_WNDREG_INDEX] =
+	    tmplt_hdr->ocm_window_reg[ha->portnum];
+
+	/* Walk through the entry headers - validate/perform required action */
+	for (i = 0; i < num_entry_hdr; i++) {
+		if (data_collected > ha->md_dump_size) {
+			ql_log(ql_log_info, vha, 0xb103,
+			    "Data collected: [0x%x], "
+			    "Total Dump size: [0x%x]\n",
+			    data_collected, ha->md_dump_size);
+			return rval;
+		}
+
+		if (!(entry_hdr->d_ctrl.entry_capture_mask &
+		      ql2xmdcapmask)) {
+			entry_hdr->d_ctrl.driver_flags |=
+			    QLA82XX_DBG_SKIPPED_FLAG;
+			goto skip_nxt_entry;
+		}
+
+		ql_dbg(ql_dbg_p3p, vha, 0xb104,
+		    "Data collected: [0x%x], Dump size left:[0x%x]\n",
+		    data_collected,
+		    (ha->md_dump_size - data_collected));
+
+		/* Decode the entry type and take required action to capture
+		 * debug data
+		 */
+		switch (entry_hdr->entry_type) {
+		case QLA82XX_RDEND:
+			qla8044_mark_entry_skipped(vha, entry_hdr, i);
+			break;
+		case QLA82XX_CNTRL:
+			rval = qla8044_minidump_process_control(vha,
+			    entry_hdr);
+			if (rval != QLA_SUCCESS) {
+				qla8044_mark_entry_skipped(vha, entry_hdr, i);
+				goto md_failed;
+			}
+			break;
+		case QLA82XX_RDCRB:
+			qla8044_minidump_process_rdcrb(vha,
+			    entry_hdr, &data_ptr);
+			break;
+		case QLA82XX_RDMEM:
+			rval = qla8044_minidump_pex_dma_read(vha,
+			    entry_hdr, &data_ptr);
+			if (rval != QLA_SUCCESS) {
+				rval = qla8044_minidump_process_rdmem(vha,
+				    entry_hdr, &data_ptr);
+				if (rval != QLA_SUCCESS) {
+					qla8044_mark_entry_skipped(vha,
+					    entry_hdr, i);
+					goto md_failed;
+				}
+			}
+			break;
+		case QLA82XX_BOARD:
+		case QLA82XX_RDROM:
+			rval = qla8044_minidump_process_rdrom(vha,
+			    entry_hdr, &data_ptr);
+			if (rval != QLA_SUCCESS) {
+				qla8044_mark_entry_skipped(vha,
+				    entry_hdr, i);
+			}
+			break;
+		case QLA82XX_L2DTG:
+		case QLA82XX_L2ITG:
+		case QLA82XX_L2DAT:
+		case QLA82XX_L2INS:
+			rval = qla8044_minidump_process_l2tag(vha,
+			    entry_hdr, &data_ptr);
+			if (rval != QLA_SUCCESS) {
+				qla8044_mark_entry_skipped(vha, entry_hdr, i);
+				goto md_failed;
+			}
+			break;
+		case QLA8044_L1DTG:
+		case QLA8044_L1ITG:
+		case QLA82XX_L1DAT:
+		case QLA82XX_L1INS:
+			qla8044_minidump_process_l1cache(vha,
+			    entry_hdr, &data_ptr);
+			break;
+		case QLA82XX_RDOCM:
+			qla8044_minidump_process_rdocm(vha,
+			    entry_hdr, &data_ptr);
+			break;
+		case QLA82XX_RDMUX:
+			qla8044_minidump_process_rdmux(vha,
+			    entry_hdr, &data_ptr);
+			break;
+		case QLA82XX_QUEUE:
+			qla8044_minidump_process_queue(vha,
+			    entry_hdr, &data_ptr);
+			break;
+		case QLA8044_POLLRD:
+			rval = qla8044_minidump_process_pollrd(vha,
+			    entry_hdr, &data_ptr);
+			if (rval != QLA_SUCCESS)
+				qla8044_mark_entry_skipped(vha, entry_hdr, i);
+			break;
+		case QLA8044_RDMUX2:
+			qla8044_minidump_process_rdmux2(vha,
+			    entry_hdr, &data_ptr);
+			break;
+		case QLA8044_POLLRDMWR:
+			rval = qla8044_minidump_process_pollrdmwr(vha,
+			    entry_hdr, &data_ptr);
+			if (rval != QLA_SUCCESS)
+				qla8044_mark_entry_skipped(vha, entry_hdr, i);
+			break;
+		case QLA82XX_RDNOP:
+		default:
+			qla8044_mark_entry_skipped(vha, entry_hdr, i);
+			break;
+		}
+
+		data_collected = (uint8_t *)data_ptr -
+		    (uint8_t *)ha->md_dump;
+skip_nxt_entry:
+		/*
+		 * next entry in the template
+		 */
+		entry_hdr = (struct qla8044_minidump_entry_hdr *)
+		    (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
+	}
+
+	if (data_collected != ha->md_dump_size) {
+		ql_log(ql_log_info, vha, 0xb105,
+		    "Dump data mismatch: Data collected: "
+		    "[0x%x], total_data_size:[0x%x]\n",
+		    data_collected, ha->md_dump_size);
+		goto md_failed;
+	}
+
+	ql_log(ql_log_info, vha, 0xb110,
+	    "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
+	    vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
+	ha->fw_dumped = 1;
+	qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
+
+	ql_log(ql_log_info, vha, 0xb106,
+	    "Leaving fn: %s Last entry: 0x%x\n",
+	    __func__, i);
+md_failed:
+	return rval;
+}
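
Stripped of the per-entry handlers, the collector above is a length-prefixed record walk: start at first_entry_offset, dispatch on entry_type, and advance by each entry's own entry_size. The walk in isolation (the struct mirrors qla8044_minidump_entry_hdr defined later in this patch, trimmed to the two fields the walk needs):

#include <stdint.h>

struct entry_hdr {
	uint32_t entry_type;
	uint32_t entry_size;
	/* capture size and d_ctrl flags omitted */
};

/* Visit 'count' variable-size entries laid out back to back at 'base'. */
static void walk_entries(uint8_t *base, uint32_t count,
			 void (*handle)(struct entry_hdr *))
{
	struct entry_hdr *e = (struct entry_hdr *)base;

	while (count--) {
		handle(e);
		e = (struct entry_hdr *)((uint8_t *)e + e->entry_size);
	}
}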
+
+void
+qla8044_get_minidump(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!qla8044_collect_md_data(vha)) {
+		ha->fw_dumped = 1;
+	} else {
+		ql_log(ql_log_fatal, vha, 0xb0db,
+		    "%s: Unable to collect minidump\n",
+		    __func__);
+	}
+}
+
+static int
+qla8044_poll_flash_status_reg(struct scsi_qla_host *vha)
+{
+	uint32_t flash_status;
+	int retries = QLA8044_FLASH_READ_RETRY_COUNT;
+	int ret_val = QLA_SUCCESS;
+
+	while (retries--) {
+		ret_val = qla8044_rd_reg_indirect(vha, QLA8044_FLASH_STATUS,
+		    &flash_status);
+		if (ret_val) {
+			ql_log(ql_log_warn, vha, 0xb13c,
+			    "%s: Failed to read FLASH_STATUS reg.\n",
+			    __func__);
+			break;
+		}
+		if ((flash_status & QLA8044_FLASH_STATUS_READY) ==
+		    QLA8044_FLASH_STATUS_READY)
+			break;
+		msleep(QLA8044_FLASH_STATUS_REG_POLL_DELAY);
+	}
+
+	if (retries < 0)
+		ret_val = QLA_FUNCTION_FAILED;
+
+	return ret_val;
+}
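
One subtlety in post-decrement countdown loops: after while (retries--) runs dry, retries is -1, not 0, so a !retries timeout test never fires, and a success on the very last pass leaves retries == 0, which !retries would mis-flag; hence the retries < 0 check above. A variant that sidesteps post-loop counter inspection by returning its verdict directly (read_status() is a stub standing in for the FLASH_STATUS read):

#include <stdbool.h>
#include <stdint.h>

#define READY_BITS	0x6	/* QLA8044_FLASH_STATUS_READY */

static int read_status(uint32_t *status)
{
	*status = READY_BITS;	/* stub: pretend the part is ready */
	return 0;
}

/* True once the ready bits are set; false on read error or timeout. */
static bool wait_flash_ready(int retries)
{
	uint32_t status;

	while (retries-- > 0) {
		if (read_status(&status))
			return false;
		if ((status & READY_BITS) == READY_BITS)
			return true;
		/* msleep(QLA8044_FLASH_STATUS_REG_POLL_DELAY) in the driver */
	}
	return false;
}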
+
+static int
+qla8044_write_flash_status_reg(struct scsi_qla_host *vha,
+			       uint32_t data)
+{
+	int ret_val = QLA_SUCCESS;
+	uint32_t cmd;
+
+	cmd = vha->hw->fdt_wrt_sts_reg_cmd;
+
+	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+	    QLA8044_FLASH_STATUS_WRITE_DEF_SIG | cmd);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb125,
+		    "%s: Failed to write to FLASH_ADDR.\n", __func__);
+		goto exit_func;
+	}
+
+	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, data);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb126,
+		    "%s: Failed to write to FLASH_WRDATA.\n", __func__);
+		goto exit_func;
+	}
+
+	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+	    QLA8044_FLASH_SECOND_ERASE_MS_VAL);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb127,
+		    "%s: Failed to write to FLASH_CONTROL.\n", __func__);
+		goto exit_func;
+	}
+
+	ret_val = qla8044_poll_flash_status_reg(vha);
+	if (ret_val)
+		ql_log(ql_log_warn, vha, 0xb128,
+		    "%s: Error polling flash status reg.\n", __func__);
+
+exit_func:
+	return ret_val;
+}
+
+/*
+ * This function assumes that the flash lock is held.
+ */
+static int
+qla8044_unprotect_flash(scsi_qla_host_t *vha)
+{
+	int ret_val;
+	struct qla_hw_data *ha = vha->hw;
+
+	ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_enable);
+	if (ret_val)
+		ql_log(ql_log_warn, vha, 0xb139,
+		    "%s: Write flash status failed.\n", __func__);
+
+	return ret_val;
+}
+
+/*
+ * This function assumes that the flash lock is held.
+ */
+static int
+qla8044_protect_flash(scsi_qla_host_t *vha)
+{
+	int ret_val;
+	struct qla_hw_data *ha = vha->hw;
+
+	ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_disable);
+	if (ret_val)
+		ql_log(ql_log_warn, vha, 0xb13b,
+		    "%s: Write flash status failed.\n", __func__);
+
+	return ret_val;
+}
+
+static int
+qla8044_erase_flash_sector(struct scsi_qla_host *vha,
+			   uint32_t sector_start_addr)
+{
+	uint32_t reversed_addr;
+	int ret_val = QLA_SUCCESS;
+
+	ret_val = qla8044_poll_flash_status_reg(vha);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb12e,
+		    "%s: Poll flash status after erase failed..\n", __func__);
+	}
+
+	reversed_addr = (((sector_start_addr & 0xFF) << 16) |
+	    (sector_start_addr & 0xFF00) |
+	    ((sector_start_addr & 0xFF0000) >> 16));
+
+	ret_val = qla8044_wr_reg_indirect(vha,
+	    QLA8044_FLASH_WRDATA, reversed_addr);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb12f,
+		    "%s: Failed to write to FLASH_WRDATA.\n", __func__);
+	}
+	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+	   QLA8044_FLASH_ERASE_SIG | vha->hw->fdt_erase_cmd);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb130,
+		    "%s: Failed to write to FLASH_ADDR.\n", __func__);
+	}
+	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+	    QLA8044_FLASH_LAST_ERASE_MS_VAL);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb131,
+		    "%s: Failed write to FLASH_CONTROL.\n", __func__);
+	}
+	ret_val = qla8044_poll_flash_status_reg(vha);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb132,
+		    "%s: Poll flash status failed.\n", __func__);
+	}
+
+	return ret_val;
+}
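
The reversed_addr computation byte-swaps the low 24 bits of the sector address, presumably because the serial flash part expects the address most-significant byte first. Worked example: 0x00123456 becomes 0x00563412. As a checked one-liner:

#include <assert.h>
#include <stdint.h>

/* Swap bytes 0 and 2 of a 24-bit flash address. */
static uint32_t swap24(uint32_t a)
{
	return ((a & 0xFF) << 16) | (a & 0xFF00) | ((a & 0xFF0000) >> 16);
}

int main(void)
{
	assert(swap24(0x123456) == 0x563412);
	return 0;
}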
+
+/*
+ * qla8044_flash_write_u32 - Write data to flash
+ *
+ * @vha: Pointer to adapter structure
+ * @addr: Flash address to write to
+ * @p_data: Data to be written
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ *
+ * NOTE: Lock should be held on entry
+ */
+static int
+qla8044_flash_write_u32(struct scsi_qla_host *vha, uint32_t addr,
+			uint32_t *p_data)
+{
+	int ret_val = QLA_SUCCESS;
+
+	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+	    0x00800000 | (addr >> 2));
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb134,
+		    "%s: Failed write to FLASH_ADDR.\n", __func__);
+		goto exit_func;
+	}
+	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *p_data);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb135,
+		    "%s: Failed write to FLASH_WRDATA.\n", __func__);
+		goto exit_func;
+	}
+	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, 0x3D);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb136,
+		    "%s: Failed write to FLASH_CONTROL.\n", __func__);
+		goto exit_func;
+	}
+	ret_val = qla8044_poll_flash_status_reg(vha);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb137,
+		    "%s: Poll flash status failed.\n", __func__);
+	}
+
+exit_func:
+	return ret_val;
+}
+
+static int
+qla8044_write_flash_buffer_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
+				uint32_t faddr, uint32_t dwords)
+{
+	int ret = QLA_FUNCTION_FAILED;
+	uint32_t spi_val;
+
+	if (dwords < QLA8044_MIN_OPTROM_BURST_DWORDS ||
+	    dwords > QLA8044_MAX_OPTROM_BURST_DWORDS) {
+		ql_dbg(ql_dbg_user, vha, 0xb123,
+		    "Got unsupported dwords = 0x%x.\n",
+		    dwords);
+		return QLA_FUNCTION_FAILED;
+	}
+
+	qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, &spi_val);
+	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
+	    spi_val | QLA8044_FLASH_SPI_CTL);
+	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+	    QLA8044_FLASH_FIRST_TEMP_VAL);
+
+	/* First DWORD write to FLASH_WRDATA */
+	ret = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA,
+	    *dwptr++);
+	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+	    QLA8044_FLASH_FIRST_MS_PATTERN);
+
+	ret = qla8044_poll_flash_status_reg(vha);
+	if (ret) {
+		ql_log(ql_log_warn, vha, 0xb124,
+		    "%s: Failed.\n", __func__);
+		goto exit_func;
+	}
+
+	dwords--;
+
+	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+	    QLA8044_FLASH_SECOND_TEMP_VAL);
+
+	/* Second to (N-1)th DWORD writes */
+	while (dwords != 1) {
+		qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
+		qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+		    QLA8044_FLASH_SECOND_MS_PATTERN);
+		ret = qla8044_poll_flash_status_reg(vha);
+		if (ret) {
+			ql_log(ql_log_warn, vha, 0xb129,
+			    "%s: Failed.\n", __func__);
+			goto exit_func;
+		}
+		dwords--;
+	}
+
+	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+	    QLA8044_FLASH_FIRST_TEMP_VAL | (faddr >> 2));
+
+	/* Last DWORD write */
+	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
+	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+	    QLA8044_FLASH_LAST_MS_PATTERN);
+	ret = qla8044_poll_flash_status_reg(vha);
+	if (ret) {
+		ql_log(ql_log_warn, vha, 0xb12a,
+		    "%s: Failed.\n", __func__);
+		goto exit_func;
+	}
+	qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_STATUS, &spi_val);
+
+	if ((spi_val & QLA8044_FLASH_SPI_CTL) == QLA8044_FLASH_SPI_CTL) {
+		ql_log(ql_log_warn, vha, 0xb12b,
+		    "%s: Failed.\n", __func__);
+		spi_val = 0;
+		/* Operation failed, clear error bit. */
+		qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
+		    &spi_val);
+		qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
+		    spi_val | QLA8044_FLASH_SPI_CTL);
+	}
+exit_func:
+	return ret;
+}
+
+static int
+qla8044_write_flash_dword_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
+			       uint32_t faddr, uint32_t dwords)
+{
+	int ret = QLA_FUNCTION_FAILED;
+	uint32_t liter;
+
+	for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
+		ret = qla8044_flash_write_u32(vha, faddr, dwptr);
+		if (ret) {
+			ql_dbg(ql_dbg_p3p, vha, 0xb141,
+			    "%s: flash address=%x data=%x.\n", __func__,
+			     faddr, *dwptr);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+int
+qla8044_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+			  uint32_t offset, uint32_t length)
+{
+	int rval = QLA_FUNCTION_FAILED, i, burst_iter_count;
+	int dword_count, erase_sec_count;
+	uint32_t erase_offset;
+	uint8_t *p_cache, *p_src;
+
+	erase_offset = offset;
+
+	p_cache = kcalloc(length, sizeof(uint8_t), GFP_KERNEL);
+	if (!p_cache)
+		return QLA_FUNCTION_FAILED;
+
+	memcpy(p_cache, buf, length);
+	p_src = p_cache;
+	dword_count = length / sizeof(uint32_t);
+	/* Since the offset and length are sector aligned, the dword count
+	 * is always a multiple of the burst size (64 dwords).
+	 */
+	burst_iter_count = dword_count / QLA8044_MAX_OPTROM_BURST_DWORDS;
+	erase_sec_count = length / QLA8044_SECTOR_SIZE;
+
+	/* Suspend HBA. */
+	scsi_block_requests(vha->host);
+	/* Lock and enable write for whole operation. */
+	qla8044_flash_lock(vha);
+	qla8044_unprotect_flash(vha);
+
+	/* Erasing the sectors */
+	for (i = 0; i < erase_sec_count; i++) {
+		rval = qla8044_erase_flash_sector(vha, erase_offset);
+		if (rval) {
+			ql_log(ql_log_warn, vha, 0xb121,
+			    "Failed to erase the sector having address: "
+			    "0x%x.\n", erase_offset);
+			goto out;
+		}
+		ql_dbg(ql_dbg_user, vha, 0xb138,
+		    "Done erase of sector=0x%x.\n",
+		    erase_offset);
+		erase_offset += QLA8044_SECTOR_SIZE;
+	}
+	ql_dbg(ql_dbg_user, vha, 0xb13f,
+	    "Got write for addr = 0x%x length=0x%x.\n",
+	    offset, length);
+
+	for (i = 0; i < burst_iter_count; i++) {
+
+		/* Go with write. */
+		rval = qla8044_write_flash_buffer_mode(vha, (uint32_t *)p_src,
+		    offset, QLA8044_MAX_OPTROM_BURST_DWORDS);
+		if (rval) {
+			/* Buffer Mode failed skip to dword mode */
+			ql_log(ql_log_warn, vha, 0xb122,
+			    "Failed to write flash in buffer mode, "
+			    "Reverting to slow-write.\n");
+			rval = qla8044_write_flash_dword_mode(vha,
+			    (uint32_t *)p_src, offset,
+			    QLA8044_MAX_OPTROM_BURST_DWORDS);
+		}
+		p_src += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
+		offset += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
+	}
+	ql_dbg(ql_dbg_user, vha, 0xb133,
+	    "Done writing.\n");
+
+out:
+	qla8044_protect_flash(vha);
+	qla8044_flash_unlock(vha);
+	scsi_unblock_requests(vha->host);
+	kfree(p_cache);
+
+	return rval;
+}
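
Because offset and length are sector aligned, the three iteration counts fall out of integer division; e.g. for a 128 KiB write: 131072 / 4 = 32768 dwords, 32768 / 64 = 512 bursts, 131072 / 65536 = 2 sectors to erase. Checked:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE	(64 * 1024)	/* QLA8044_SECTOR_SIZE */
#define BURST_DWORDS	64		/* QLA8044_MAX_OPTROM_BURST_DWORDS */

int main(void)
{
	uint32_t length = 128 * 1024;	/* sector aligned, as assumed above */
	uint32_t dwords = length / sizeof(uint32_t);
	uint32_t bursts = dwords / BURST_DWORDS;
	uint32_t sectors = length / SECTOR_SIZE;

	printf("dwords=%u bursts=%u sectors=%u\n", dwords, bursts, sectors);
	/* prints: dwords=32768 bursts=512 sectors=2 */
	return 0;
}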
+
+#define LEG_INT_PTR_B31		(1 << 31)
+#define LEG_INT_PTR_B30		(1 << 30)
+#define PF_BITS_MASK		(0xF << 16)
+/**
+ * qla8044_intr_handler() - Process interrupts for the ISP8044
+ * @irq: interrupt number
+ * @dev_id: SCSI driver HA context
+ *
+ * Called by the system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qla8044_intr_handler(int irq, void *dev_id)
+{
+	scsi_qla_host_t	*vha;
+	struct qla_hw_data *ha;
+	struct rsp_que *rsp;
+	struct device_reg_82xx __iomem *reg;
+	int		status = 0;
+	unsigned long	flags;
+	unsigned long	iter;
+	uint32_t	stat;
+	uint16_t	mb[4];
+	uint32_t leg_int_ptr = 0, pf_bit;
+
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
+		ql_log(ql_log_info, NULL, 0xb143,
+		    "%s(): NULL response queue pointer\n", __func__);
+		return IRQ_NONE;
+	}
+	ha = rsp->hw;
+	vha = pci_get_drvdata(ha->pdev);
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		return IRQ_HANDLED;
+
+	leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);
+
+	/* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
+	if (!(leg_int_ptr & (LEG_INT_PTR_B31))) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb144,
+		    "%s: Legacy Interrupt Bit 31 not set, "
+		    "spurious interrupt!\n", __func__);
+		return IRQ_NONE;
+	}
+
+	pf_bit = ha->portnum << 16;
+	/* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
+	if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb145,
+		    "%s: Incorrect function ID 0x%x in "
+		    "legacy interrupt register, "
+		    "ha->pf_bit = 0x%x\n", __func__,
+		    (leg_int_ptr & (PF_BITS_MASK)), pf_bit);
+		return IRQ_NONE;
+	}
+
+	/* To de-assert legacy interrupt, write 0 to Legacy Interrupt Trigger
+	 * Control register and poll till Legacy Interrupt Pointer register
+	 * bit 30 is 0.
+	 */
+	qla8044_wr_reg(ha, LEG_INTR_TRIG_OFFSET, 0);
+	do {
+		leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);
+		if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit)
+			break;
+	} while (leg_int_ptr & (LEG_INT_PTR_B30));
+
+	reg = &ha->iobase->isp82;
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (iter = 1; iter--; ) {
+
+		if (RD_REG_DWORD(&reg->host_int)) {
+			stat = RD_REG_DWORD(&reg->host_status);
+			if ((stat & HSRX_RISC_INT) == 0)
+				break;
+
+			switch (stat & 0xff) {
+			case 0x1:
+			case 0x2:
+			case 0x10:
+			case 0x11:
+				qla82xx_mbx_completion(vha, MSW(stat));
+				status |= MBX_INTERRUPT;
+				break;
+			case 0x12:
+				mb[0] = MSW(stat);
+				mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+				mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+				mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+				qla2x00_async_event(vha, rsp, mb);
+				break;
+			case 0x13:
+				qla24xx_process_response_queue(vha, rsp);
+				break;
+			default:
+				ql_dbg(ql_dbg_p3p, vha, 0xb146,
+				    "Unrecognized interrupt type "
+				    "(%d).\n", stat & 0xff);
+				break;
+			}
+		}
+		WRT_REG_DWORD(&reg->host_int, 0);
+	}
+
+	qla2x00_handle_mbx_completion(ha, status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return IRQ_HANDLED;
+}
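
The handler accepts an interrupt only after two checks on the latched pointer: bit 31 says a legacy interrupt is actually pending, and bits 19:16 must carry this function's PCI function number. The decode on its own:

#include <stdbool.h>
#include <stdint.h>

#define LEG_INT_VALID	(1u << 31)	/* LEG_INT_PTR_B31 */
#define LEG_INT_PF_MASK	(0xFu << 16)	/* PF_BITS_MASK */

/* True when 'ptr' names a pending legacy interrupt for 'portnum'. */
static bool leg_int_is_mine(uint32_t ptr, uint32_t portnum)
{
	if (!(ptr & LEG_INT_VALID))
		return false;	/* spurious */
	return (ptr & LEG_INT_PF_MASK) == (portnum << 16);
}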
+
+static int
+qla8044_idc_dontreset(struct qla_hw_data *ha)
+{
+	uint32_t idc_ctrl;
+
+	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+	return idc_ctrl & DONTRESET_BIT0;
+}
+
+static void
+qla8044_clear_rst_ready(scsi_qla_host_t *vha)
+{
+	uint32_t drv_state;
+
+	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+
+	/*
+	 * For ISP8044, the drv_state register has 1 bit per function;
+	 * clear this function's bit by shifting 1 by the port number.
+	 * (For ISP82xx, the register has 4 bits per function.)
+	 */
+	drv_state &= ~(1 << vha->hw->portnum);
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb13d,
+	    "drv_state: 0x%08x\n", drv_state);
+	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
+}
+
+int
+qla8044_abort_isp(scsi_qla_host_t *vha)
+{
+	int rval;
+	uint32_t dev_state;
+	struct qla_hw_data *ha = vha->hw;
+
+	qla8044_idc_lock(ha);
+	dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+
+	if (ql2xdontresethba)
+		qla8044_set_idc_dontreset(vha);
+
+	/* If device_state is NEED_RESET, go ahead with
+	 * Reset, irrespective of ql2xdontresethba. This is to allow a
+	 * non-reset-owner to force a reset. Non-reset-owner sets
+	 * the IDC_CTRL BIT0 to prevent Reset-owner from doing a Reset
+	 * and then forces a Reset by setting device_state to
+	 * NEED_RESET. */
+	if (dev_state == QLA8XXX_DEV_READY) {
+		/* If IDC_CTRL DONTRESETHBA_BIT0 is set don't do reset
+		 * recovery */
+		if (qla8044_idc_dontreset(ha) == DONTRESET_BIT0) {
+			ql_dbg(ql_dbg_p3p, vha, 0xb13e,
+			    "Reset recovery disabled\n");
+			rval = QLA_FUNCTION_FAILED;
+			goto exit_isp_reset;
+		}
+
+		ql_dbg(ql_dbg_p3p, vha, 0xb140,
+		    "HW State: NEED RESET\n");
+		qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+		    QLA8XXX_DEV_NEED_RESET);
+	}
+
+	/* For ISP8044, Reset owner is NIC, iSCSI or FCOE based on priority
+	 * and which drivers are present. Unlike ISP82XX, the function setting
+	 * NEED_RESET, may not be the Reset owner. */
+	qla83xx_reset_ownership(vha);
+
+	qla8044_idc_unlock(ha);
+	rval = qla8044_device_state_handler(vha);
+	qla8044_idc_lock(ha);
+	qla8044_clear_rst_ready(vha);
+
+exit_isp_reset:
+	qla8044_idc_unlock(ha);
+	if (rval == QLA_SUCCESS) {
+		ha->flags.isp82xx_fw_hung = 0;
+		ha->flags.nic_core_reset_hdlr_active = 0;
+		rval = qla82xx_restart_isp(vha);
+	}
+
+	return rval;
+}
+

+ 551 - 0
drivers/scsi/qla2xxx/qla_nx2.h

@@ -0,0 +1,551 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#ifndef __QLA_NX2_H
+#define __QLA_NX2_H
+
+#define QSNT_ACK_TOV				30
+#define INTENT_TO_RECOVER			0x01
+#define PROCEED_TO_RECOVER			0x02
+#define IDC_LOCK_RECOVERY_OWNER_MASK		0x3C
+#define IDC_LOCK_RECOVERY_STATE_MASK		0x3
+#define IDC_LOCK_RECOVERY_STATE_SHIFT_BITS	2
+
+#define QLA8044_DRV_LOCK_MSLEEP		200
+#define QLA8044_ADDR_DDR_NET		(0x0000000000000000ULL)
+#define QLA8044_ADDR_DDR_NET_MAX	(0x000000000fffffffULL)
+
+#define MD_MIU_TEST_AGT_WRDATA_LO		0x410000A0
+#define MD_MIU_TEST_AGT_WRDATA_HI		0x410000A4
+#define MD_MIU_TEST_AGT_WRDATA_ULO		0x410000B0
+#define MD_MIU_TEST_AGT_WRDATA_UHI		0x410000B4
+#define MD_MIU_TEST_AGT_RDDATA_LO		0x410000A8
+#define MD_MIU_TEST_AGT_RDDATA_HI		0x410000AC
+#define MD_MIU_TEST_AGT_RDDATA_ULO		0x410000B8
+#define MD_MIU_TEST_AGT_RDDATA_UHI		0x410000BC
+
+/* MIU_TEST_AGT_CTRL flags; work for SIU as well */
+#define MIU_TA_CTL_WRITE_ENABLE	(MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE)
+#define MIU_TA_CTL_WRITE_START	(MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE |	\
+				 MIU_TA_CTL_START)
+#define MIU_TA_CTL_START_ENABLE	(MIU_TA_CTL_START | MIU_TA_CTL_ENABLE)
+
+/* Imbus address bit used to indicate a host address. This bit is
+ * eliminated by the pcie bar and bar select before presentation
+ * over pcie. */
+/* host memory via IMBUS */
+#define QLA8044_P2_ADDR_PCIE	(0x0000000800000000ULL)
+#define QLA8044_P3_ADDR_PCIE	(0x0000008000000000ULL)
+#define QLA8044_ADDR_PCIE_MAX	(0x0000000FFFFFFFFFULL)
+#define QLA8044_ADDR_OCM0	(0x0000000200000000ULL)
+#define QLA8044_ADDR_OCM0_MAX	(0x00000002000fffffULL)
+#define QLA8044_ADDR_OCM1	(0x0000000200400000ULL)
+#define QLA8044_ADDR_OCM1_MAX	(0x00000002004fffffULL)
+#define QLA8044_ADDR_QDR_NET	(0x0000000300000000ULL)
+#define QLA8044_P2_ADDR_QDR_NET_MAX	(0x00000003001fffffULL)
+#define QLA8044_P3_ADDR_QDR_NET_MAX	(0x0000000303ffffffULL)
+#define QLA8044_ADDR_QDR_NET_MAX	(0x0000000307ffffffULL)
+#define QLA8044_PCI_CRBSPACE		((unsigned long)0x06000000)
+#define QLA8044_PCI_DIRECT_CRB		((unsigned long)0x04400000)
+#define QLA8044_PCI_CAMQM		((unsigned long)0x04800000)
+#define QLA8044_PCI_CAMQM_MAX		((unsigned long)0x04ffffff)
+#define QLA8044_PCI_DDR_NET		((unsigned long)0x00000000)
+#define QLA8044_PCI_QDR_NET		((unsigned long)0x04000000)
+#define QLA8044_PCI_QDR_NET_MAX		((unsigned long)0x043fffff)
+
+/*  PCI Windowing for DDR regions.  */
+#define QLA8044_ADDR_IN_RANGE(addr, low, high)		\
+	(((addr) <= (high)) && ((addr) >= (low)))
+
+/* Indirectly Mapped Registers */
+#define QLA8044_FLASH_SPI_STATUS	0x2808E010
+#define QLA8044_FLASH_SPI_CONTROL	0x2808E014
+#define QLA8044_FLASH_STATUS		0x42100004
+#define QLA8044_FLASH_CONTROL		0x42110004
+#define QLA8044_FLASH_ADDR		0x42110008
+#define QLA8044_FLASH_WRDATA		0x4211000C
+#define QLA8044_FLASH_RDDATA		0x42110018
+#define QLA8044_FLASH_DIRECT_WINDOW	0x42110030
+#define QLA8044_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF & (DATA)))
+
+/* Flash access regs */
+#define QLA8044_FLASH_LOCK		0x3850
+#define QLA8044_FLASH_UNLOCK		0x3854
+#define QLA8044_FLASH_LOCK_ID		0x3500
+
+/* Driver Lock regs */
+#define QLA8044_DRV_LOCK		0x3868
+#define QLA8044_DRV_UNLOCK		0x386C
+#define QLA8044_DRV_LOCK_ID		0x3504
+#define QLA8044_DRV_LOCKRECOVERY	0x379C
+
+/* IDC version */
+#define QLA8044_IDC_VER_MAJ_VALUE       0x1
+#define QLA8044_IDC_VER_MIN_VALUE       0x0
+
+/* IDC Registers : Driver Coexistence Defines */
+#define QLA8044_CRB_IDC_VER_MAJOR	0x3780
+#define QLA8044_CRB_IDC_VER_MINOR	0x3798
+#define QLA8044_IDC_DRV_AUDIT		0x3794
+#define QLA8044_SRE_SHIM_CONTROL	0x0D200284
+#define QLA8044_PORT0_RXB_PAUSE_THRS	0x0B2003A4
+#define QLA8044_PORT1_RXB_PAUSE_THRS	0x0B2013A4
+#define QLA8044_PORT0_RXB_TC_MAX_CELL	0x0B200388
+#define QLA8044_PORT1_RXB_TC_MAX_CELL	0x0B201388
+#define QLA8044_PORT0_RXB_TC_STATS	0x0B20039C
+#define QLA8044_PORT1_RXB_TC_STATS	0x0B20139C
+#define QLA8044_PORT2_IFB_PAUSE_THRS	0x0B200704
+#define QLA8044_PORT3_IFB_PAUSE_THRS	0x0B201704
+
+/* set value to pause threshold value */
+#define QLA8044_SET_PAUSE_VAL		0x0
+#define QLA8044_SET_TC_MAX_CELL_VAL	0x03FF03FF
+#define QLA8044_PEG_HALT_STATUS1	0x34A8
+#define QLA8044_PEG_HALT_STATUS2	0x34AC
+#define QLA8044_PEG_ALIVE_COUNTER	0x34B0 /* FW_HEARTBEAT */
+#define QLA8044_FW_CAPABILITIES		0x3528
+#define QLA8044_CRB_DRV_ACTIVE		0x3788 /* IDC_DRV_PRESENCE */
+#define QLA8044_CRB_DEV_STATE		0x3784 /* IDC_DEV_STATE */
+#define QLA8044_CRB_DRV_STATE		0x378C /* IDC_DRV_ACK */
+#define QLA8044_CRB_DRV_SCRATCH		0x3548
+#define QLA8044_CRB_DEV_PART_INFO1	0x37E0
+#define QLA8044_CRB_DEV_PART_INFO2	0x37E4
+#define QLA8044_FW_VER_MAJOR		0x3550
+#define QLA8044_FW_VER_MINOR		0x3554
+#define QLA8044_FW_VER_SUB		0x3558
+#define QLA8044_NPAR_STATE		0x359C
+#define QLA8044_FW_IMAGE_VALID		0x35FC
+#define QLA8044_CMDPEG_STATE		0x3650
+#define QLA8044_ASIC_TEMP		0x37B4
+#define QLA8044_FW_API			0x356C
+#define QLA8044_DRV_OP_MODE		0x3570
+#define QLA8044_CRB_WIN_BASE		0x3800
+#define QLA8044_CRB_WIN_FUNC(f)		(QLA8044_CRB_WIN_BASE+((f)*4))
+#define QLA8044_SEM_LOCK_BASE		0x3840
+#define QLA8044_SEM_UNLOCK_BASE		0x3844
+#define QLA8044_SEM_LOCK_FUNC(f)	(QLA8044_SEM_LOCK_BASE+((f)*8))
+#define QLA8044_SEM_UNLOCK_FUNC(f)	(QLA8044_SEM_UNLOCK_BASE+((f)*8))
+#define QLA8044_LINK_STATE(f)		(0x3698+((f) > 7 ? 4 : 0))
+#define QLA8044_LINK_SPEED(f)		(0x36E0+(((f) >> 2) * 4))
+#define QLA8044_MAX_LINK_SPEED(f)       (0x36F0+(((f) / 4) * 4))
+#define QLA8044_LINK_SPEED_FACTOR	10
+
+/* FLASH API Defines */
+#define QLA8044_FLASH_MAX_WAIT_USEC	100
+#define QLA8044_FLASH_LOCK_TIMEOUT	10000
+#define QLA8044_FLASH_SECTOR_SIZE	65536
+#define QLA8044_DRV_LOCK_TIMEOUT	2000
+#define QLA8044_FLASH_SECTOR_ERASE_CMD	0xdeadbeef
+#define QLA8044_FLASH_WRITE_CMD		0xdacdacda
+#define QLA8044_FLASH_BUFFER_WRITE_CMD	0xcadcadca
+#define QLA8044_FLASH_READ_RETRY_COUNT	2000
+#define QLA8044_FLASH_STATUS_READY	0x6
+#define QLA8044_FLASH_BUFFER_WRITE_MIN	2
+#define QLA8044_FLASH_BUFFER_WRITE_MAX	64
+#define QLA8044_FLASH_STATUS_REG_POLL_DELAY 1
+#define QLA8044_ERASE_MODE		1
+#define QLA8044_WRITE_MODE		2
+#define QLA8044_DWORD_WRITE_MODE	3
+#define QLA8044_GLOBAL_RESET		0x38CC
+#define QLA8044_WILDCARD		0x38F0
+#define QLA8044_INFORMANT		0x38FC
+#define QLA8044_HOST_MBX_CTRL		0x3038
+#define QLA8044_FW_MBX_CTRL		0x303C
+#define QLA8044_BOOTLOADER_ADDR		0x355C
+#define QLA8044_BOOTLOADER_SIZE		0x3560
+#define QLA8044_FW_IMAGE_ADDR		0x3564
+#define QLA8044_MBX_INTR_ENABLE		0x1000
+#define QLA8044_MBX_INTR_MASK		0x1200
+
+/* IDC Control Register bit defines */
+#define DONTRESET_BIT0		0x1
+#define GRACEFUL_RESET_BIT1	0x2
+
+/* ISP8044 PEG_HALT_STATUS1 bits */
+#define QLA8044_HALT_STATUS_INFORMATIONAL (0x1 << 29)
+#define QLA8044_HALT_STATUS_FW_RESET	  (0x2 << 29)
+#define QLA8044_HALT_STATUS_UNRECOVERABLE (0x4 << 29)
+
+/* Firmware image definitions */
+#define QLA8044_BOOTLOADER_FLASH_ADDR	0x10000
+#define QLA8044_BOOT_FROM_FLASH		0
+#define QLA8044_IDC_PARAM_ADDR		0x3e8020
+
+/* FLASH related definitions */
+#define QLA8044_OPTROM_BURST_SIZE		0x100
+#define QLA8044_MAX_OPTROM_BURST_DWORDS		(QLA8044_OPTROM_BURST_SIZE / 4)
+#define QLA8044_MIN_OPTROM_BURST_DWORDS		2
+#define QLA8044_SECTOR_SIZE			(64 * 1024)
+
+#define QLA8044_FLASH_SPI_CTL			0x4
+#define QLA8044_FLASH_FIRST_TEMP_VAL		0x00800000
+#define QLA8044_FLASH_SECOND_TEMP_VAL		0x00800001
+#define QLA8044_FLASH_FIRST_MS_PATTERN		0x43
+#define QLA8044_FLASH_SECOND_MS_PATTERN		0x7F
+#define QLA8044_FLASH_LAST_MS_PATTERN		0x7D
+#define QLA8044_FLASH_STATUS_WRITE_DEF_SIG	0xFD0100
+#define QLA8044_FLASH_SECOND_ERASE_MS_VAL	0x5
+#define QLA8044_FLASH_ERASE_SIG			0xFD0300
+#define QLA8044_FLASH_LAST_ERASE_MS_VAL		0x3D
+
+/* Reset template definitions */
+#define QLA8044_MAX_RESET_SEQ_ENTRIES	16
+#define QLA8044_RESTART_TEMPLATE_SIZE	0x2000
+#define QLA8044_RESET_TEMPLATE_ADDR	0x4F0000
+#define QLA8044_RESET_SEQ_VERSION	0x0101
+
+/* Reset template entry opcodes */
+#define OPCODE_NOP			0x0000
+#define OPCODE_WRITE_LIST		0x0001
+#define OPCODE_READ_WRITE_LIST		0x0002
+#define OPCODE_POLL_LIST		0x0004
+#define OPCODE_POLL_WRITE_LIST		0x0008
+#define OPCODE_READ_MODIFY_WRITE	0x0010
+#define OPCODE_SEQ_PAUSE		0x0020
+#define OPCODE_SEQ_END			0x0040
+#define OPCODE_TMPL_END			0x0080
+#define OPCODE_POLL_READ_LIST		0x0100
+
+/* Template Header */
+#define RESET_TMPLT_HDR_SIGNATURE	0xCAFE
+#define QLA8044_IDC_DRV_CTRL            0x3790
+#define AF_8044_NO_FW_DUMP              27 /* 0x08000000 */
+
+#define MINIDUMP_SIZE_36K		36864
+
+struct qla8044_reset_template_hdr {
+	uint16_t	version;
+	uint16_t	signature;
+	uint16_t	size;
+	uint16_t	entries;
+	uint16_t	hdr_size;
+	uint16_t	checksum;
+	uint16_t	init_seq_offset;
+	uint16_t	start_seq_offset;
+} __packed;
+
+/* Common Entry Header. */
+struct qla8044_reset_entry_hdr {
+	uint16_t cmd;
+	uint16_t size;
+	uint16_t count;
+	uint16_t delay;
+} __packed;
+
+/* Generic poll entry type. */
+struct qla8044_poll {
+	uint32_t  test_mask;
+	uint32_t  test_value;
+} __packed;
+
+/* Read modify write entry type. */
+struct qla8044_rmw {
+	uint32_t test_mask;
+	uint32_t xor_value;
+	uint32_t  or_value;
+	uint8_t shl;
+	uint8_t shr;
+	uint8_t index_a;
+	uint8_t rsvd;
+} __packed;
+
+/* Generic Entry Item with 2 DWords. */
+struct qla8044_entry {
+	uint32_t arg1;
+	uint32_t arg2;
+} __packed;
+
+/* Generic Entry Item with 4 DWords.*/
+struct qla8044_quad_entry {
+	uint32_t dr_addr;
+	uint32_t dr_value;
+	uint32_t ar_addr;
+	uint32_t ar_value;
+} __packed;
+
+struct qla8044_reset_template {
+	int seq_index;
+	int seq_error;
+	int array_index;
+	uint32_t array[QLA8044_MAX_RESET_SEQ_ENTRIES];
+	uint8_t *buff;
+	uint8_t *stop_offset;
+	uint8_t *start_offset;
+	uint8_t *init_offset;
+	struct qla8044_reset_template_hdr *hdr;
+	uint8_t seq_end;
+	uint8_t template_end;
+};
+
+/* driver_code is for the driver to write some info about the entry;
+ * currently not used.
+ */
+struct qla8044_minidump_entry_hdr {
+	uint32_t entry_type;
+	uint32_t entry_size;
+	uint32_t entry_capture_size;
+	struct {
+		uint8_t entry_capture_mask;
+		uint8_t entry_code;
+		uint8_t driver_code;
+		uint8_t driver_flags;
+	} d_ctrl;
+} __packed;
+
+/*  Read CRB entry header */
+struct qla8044_minidump_entry_crb {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t addr;
+	struct {
+		uint8_t addr_stride;
+		uint8_t state_index_a;
+		uint16_t poll_timeout;
+	} crb_strd;
+	uint32_t data_size;
+	uint32_t op_count;
+
+	struct {
+		uint8_t opcode;
+		uint8_t state_index_v;
+		uint8_t shl;
+		uint8_t shr;
+	} crb_ctrl;
+
+	uint32_t value_1;
+	uint32_t value_2;
+	uint32_t value_3;
+} __packed;
+
+struct qla8044_minidump_entry_cache {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t tag_reg_addr;
+	struct {
+		uint16_t tag_value_stride;
+		uint16_t init_tag_value;
+	} addr_ctrl;
+	uint32_t data_size;
+	uint32_t op_count;
+	uint32_t control_addr;
+	struct {
+		uint16_t write_value;
+		uint8_t poll_mask;
+		uint8_t poll_wait;
+	} cache_ctrl;
+	uint32_t read_addr;
+	struct {
+		uint8_t read_addr_stride;
+		uint8_t read_addr_cnt;
+		uint16_t rsvd_1;
+	} read_ctrl;
+} __packed;
+
+/* Read OCM */
+struct qla8044_minidump_entry_rdocm {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t rsvd_0;
+	uint32_t rsvd_1;
+	uint32_t data_size;
+	uint32_t op_count;
+	uint32_t rsvd_2;
+	uint32_t rsvd_3;
+	uint32_t read_addr;
+	uint32_t read_addr_stride;
+} __packed;
+
+/* Read Memory */
+struct qla8044_minidump_entry_rdmem {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t rsvd[6];
+	uint32_t read_addr;
+	uint32_t read_data_size;
+};
+
+/* Read Memory: For Pex-DMA */
+struct qla8044_minidump_entry_rdmem_pex_dma {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t desc_card_addr;
+	uint16_t dma_desc_cmd;
+	uint8_t rsvd[2];
+	uint32_t start_dma_cmd;
+	uint8_t rsvd2[12];
+	uint32_t read_addr;
+	uint32_t read_data_size;
+} __packed;
+
+/* Read ROM */
+struct qla8044_minidump_entry_rdrom {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t rsvd[6];
+	uint32_t read_addr;
+	uint32_t read_data_size;
+} __packed;
+
+/* Mux entry */
+struct qla8044_minidump_entry_mux {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t select_addr;
+	uint32_t rsvd_0;
+	uint32_t data_size;
+	uint32_t op_count;
+	uint32_t select_value;
+	uint32_t select_value_stride;
+	uint32_t read_addr;
+	uint32_t rsvd_1;
+} __packed;
+
+/* Queue entry */
+struct qla8044_minidump_entry_queue {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t select_addr;
+	struct {
+		uint16_t queue_id_stride;
+		uint16_t rsvd_0;
+	} q_strd;
+	uint32_t data_size;
+	uint32_t op_count;
+	uint32_t rsvd_1;
+	uint32_t rsvd_2;
+	uint32_t read_addr;
+	struct {
+		uint8_t read_addr_stride;
+		uint8_t read_addr_cnt;
+		uint16_t rsvd_3;
+	} rd_strd;
+} __packed;
+
+/* POLLRD Entry */
+struct qla8044_minidump_entry_pollrd {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t select_addr;
+	uint32_t read_addr;
+	uint32_t select_value;
+	uint16_t select_value_stride;
+	uint16_t op_count;
+	uint32_t poll_wait;
+	uint32_t poll_mask;
+	uint32_t data_size;
+	uint32_t rsvd_1;
+} __packed;
+
+/* RDMUX2 Entry */
+struct qla8044_minidump_entry_rdmux2 {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t select_addr_1;
+	uint32_t select_addr_2;
+	uint32_t select_value_1;
+	uint32_t select_value_2;
+	uint32_t op_count;
+	uint32_t select_value_mask;
+	uint32_t read_addr;
+	uint8_t select_value_stride;
+	uint8_t data_size;
+	uint8_t rsvd[2];
+} __packed;
+
+/* POLLRDMWR Entry */
+struct qla8044_minidump_entry_pollrdmwr {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t addr_1;
+	uint32_t addr_2;
+	uint32_t value_1;
+	uint32_t value_2;
+	uint32_t poll_wait;
+	uint32_t poll_mask;
+	uint32_t modify_mask;
+	uint32_t data_size;
+} __packed;
+
+/* IDC additional information */
+struct qla8044_idc_information {
+	uint32_t request_desc;  /* IDC request descriptor */
+	uint32_t info1; /* IDC additional info */
+	uint32_t info2; /* IDC additional info */
+	uint32_t info3; /* IDC additional info */
+} __packed;
+
+enum qla_regs {
+	QLA8044_PEG_HALT_STATUS1_INDEX = 0,
+	QLA8044_PEG_HALT_STATUS2_INDEX,
+	QLA8044_PEG_ALIVE_COUNTER_INDEX,
+	QLA8044_CRB_DRV_ACTIVE_INDEX,
+	QLA8044_CRB_DEV_STATE_INDEX,
+	QLA8044_CRB_DRV_STATE_INDEX,
+	QLA8044_CRB_DRV_SCRATCH_INDEX,
+	QLA8044_CRB_DEV_PART_INFO_INDEX,
+	QLA8044_CRB_DRV_IDC_VERSION_INDEX,
+	QLA8044_FW_VERSION_MAJOR_INDEX,
+	QLA8044_FW_VERSION_MINOR_INDEX,
+	QLA8044_FW_VERSION_SUB_INDEX,
+	QLA8044_CRB_CMDPEG_STATE_INDEX,
+	QLA8044_CRB_TEMP_STATE_INDEX,
+} __packed;
+
+#define CRB_REG_INDEX_MAX	14
+#define CRB_CMDPEG_CHECK_RETRY_COUNT    60
+#define CRB_CMDPEG_CHECK_DELAY          500
+
+static const uint32_t qla8044_reg_tbl[] = {
+	QLA8044_PEG_HALT_STATUS1,
+	QLA8044_PEG_HALT_STATUS2,
+	QLA8044_PEG_ALIVE_COUNTER,
+	QLA8044_CRB_DRV_ACTIVE,
+	QLA8044_CRB_DEV_STATE,
+	QLA8044_CRB_DRV_STATE,
+	QLA8044_CRB_DRV_SCRATCH,
+	QLA8044_CRB_DEV_PART_INFO1,
+	QLA8044_CRB_IDC_VER_MAJOR,
+	QLA8044_FW_VER_MAJOR,
+	QLA8044_FW_VER_MINOR,
+	QLA8044_FW_VER_SUB,
+	QLA8044_CMDPEG_STATE,
+	QLA8044_ASIC_TEMP,
+};
+
+/* MiniDump Structures */
+
+#define QLA8044_SS_OCM_WNDREG_INDEX             3
+#define QLA8044_DBG_STATE_ARRAY_LEN             16
+#define QLA8044_DBG_CAP_SIZE_ARRAY_LEN          8
+#define QLA8044_DBG_RSVD_ARRAY_LEN              8
+#define QLA8044_DBG_OCM_WNDREG_ARRAY_LEN        16
+#define QLA8044_SS_PCI_INDEX                    0
+
+struct qla8044_minidump_template_hdr {
+	uint32_t entry_type;
+	uint32_t first_entry_offset;
+	uint32_t size_of_template;
+	uint32_t capture_debug_level;
+	uint32_t num_of_entries;
+	uint32_t version;
+	uint32_t driver_timestamp;
+	uint32_t checksum;
+
+	uint32_t driver_capture_mask;
+	uint32_t driver_info_word2;
+	uint32_t driver_info_word3;
+	uint32_t driver_info_word4;
+
+	uint32_t saved_state_array[QLA8044_DBG_STATE_ARRAY_LEN];
+	uint32_t capture_size_array[QLA8044_DBG_CAP_SIZE_ARRAY_LEN];
+	uint32_t ocm_window_reg[QLA8044_DBG_OCM_WNDREG_ARRAY_LEN];
+};
+
+struct qla8044_pex_dma_descriptor {
+	struct {
+		uint32_t read_data_size; /* 0-23: size, 24-31: rsvd */
+		uint8_t rsvd[2];
+		uint16_t dma_desc_cmd; /* 0-3: desc-cmd, 4-7: pci-func, 8-15: desc-cmd */
+	} cmd;
+	uint64_t src_addr;
+	uint64_t dma_bus_addr; /* dma buffer address */
+	uint8_t rsvd[24];
+} __packed;
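
Tying the descriptor back to qla8044_minidump_pex_dma_read(): bits 4-7 of dma_desc_cmd carry the requesting PCI function, which the driver folds into the template command word from the minidump entry. The packing as a small helper:

#include <stdint.h>

/* Fold a PCI function number into a dma_desc_cmd template word:
 * (tmpl & 0xff0f) clears bits 4-7, then the function is shifted in. */
static uint16_t pack_desc_cmd(uint16_t tmpl, unsigned int pci_func)
{
	return (tmpl & 0xff0f) | ((pci_func & 0xf) << 4);
}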
+
+#endif
