
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull first round of SCSI updates from James Bottomley:
 "The patch set is mostly driver updates (usf, zfcp, lpfc, mpt2sas,
  megaraid_sas, bfa, ipr) and a few bug fixes.  Also of note is that the
  Buslogic driver has been rewritten to a better coding style and 64 bit
  support added.  We also removed the libsas limitation on 16 bytes for
  the command size (currently no drivers make use of this)"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (101 commits)
  [SCSI] megaraid: minor cut and paste error fixed.
  [SCSI] ufshcd-pltfrm: remove unnecessary dma_set_coherent_mask() call
  [SCSI] ufs: fix register address in UIC error interrupt handling
  [SCSI] ufshcd-pltfrm: add missing empty slot in ufs_of_match[]
  [SCSI] ufs: use devres functions for ufshcd
  [SCSI] ufs: Fix the response UPIU length setting
  [SCSI] ufs: rework link start-up process
  [SCSI] ufs: remove version check before IS reg clear
  [SCSI] ufs: amend interrupt configuration
  [SCSI] ufs: wrap the i/o access operations
  [SCSI] storvsc: Update the storage protocol to win8 level
  [SCSI] storvsc: Increase the value of scsi timeout for storvsc devices
  [SCSI] MAINTAINERS: Add myself as the maintainer for BusLogic SCSI driver
  [SCSI] BusLogic: Port driver to 64-bit.
  [SCSI] BusLogic: Fix style issues
  [SCSI] libiscsi: Added new boot entries in the session sysfs
  [SCSI] aacraid: Fix for arrays are going offline in the system. System hangs
  [SCSI] ipr: IOA Status Code(IOASC) update
  [SCSI] sd: Update WRITE SAME heuristics
  [SCSI] fnic: potential dead lock in fnic_is_abts_pending()
  ...
Linus Torvalds authored 12 years ago
commit 84cbd7222b
100 changed files with 4745 additions and 2609 deletions
  1. +16 -0  Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
  2. +22 -0  Documentation/scsi/ChangeLog.megaraid_sas
  3. +7 -0  MAINTAINERS
  4. +1 -1  drivers/s390/scsi/Makefile
  5. +5 -31  drivers/s390/scsi/zfcp_aux.c
  6. +2 -11  drivers/s390/scsi/zfcp_ccw.c
  7. +0 -446  drivers/s390/scsi/zfcp_cfdc.c
  8. +9 -2  drivers/s390/scsi/zfcp_dbf.c
  9. +0 -4  drivers/s390/scsi/zfcp_def.h
  10. +1 -2  drivers/s390/scsi/zfcp_erp.c
  11. +2 -18  drivers/s390/scsi/zfcp_ext.h
  12. +1 -1  drivers/s390/scsi/zfcp_fc.c
  13. +33 -121  drivers/s390/scsi/zfcp_fsf.c
  14. +0 -26  drivers/s390/scsi/zfcp_fsf.h
  15. +7 -3  drivers/s390/scsi/zfcp_scsi.c
  16. +11 -16  drivers/s390/scsi/zfcp_sysfs.c
  17. +2 -7  drivers/s390/scsi/zfcp_unit.c
  18. +3 -1  drivers/scsi/3w-xxxx.c
  19. +388 -345  drivers/scsi/BusLogic.c
  20. +360 -371  drivers/scsi/BusLogic.h
  21. +204 -227  drivers/scsi/FlashPoint.c
  22. +1 -1  drivers/scsi/Kconfig
  23. +3 -0  drivers/scsi/aacraid/src.c
  24. +2 -1  drivers/scsi/aic94xx/aic94xx_task.c
  25. +1 -2  drivers/scsi/bfa/bfa_core.c
  26. +89 -14  drivers/scsi/bfa/bfa_defs.h
  27. +66 -11  drivers/scsi/bfa/bfa_defs_svc.h
  28. +15 -0  drivers/scsi/bfa/bfa_fc.h
  29. +1 -1  drivers/scsi/bfa/bfa_fcpim.c
  30. +6 -56  drivers/scsi/bfa/bfa_fcs.c
  31. +23 -11  drivers/scsi/bfa/bfa_fcs.h
  32. +207 -2  drivers/scsi/bfa/bfa_fcs_lport.c
  33. +6 -5  drivers/scsi/bfa/bfa_fcs_rport.c
  34. +48 -26  drivers/scsi/bfa/bfa_ioc.c
  35. +8 -1  drivers/scsi/bfa/bfa_ioc.h
  36. +79 -7  drivers/scsi/bfa/bfa_ioc_cb.c
  37. +46 -0  drivers/scsi/bfa/bfa_ioc_ct.c
  38. +649 -51  drivers/scsi/bfa/bfa_svc.c
  39. +24 -10  drivers/scsi/bfa/bfa_svc.h
  40. +11 -3  drivers/scsi/bfa/bfad.c
  41. +7 -26  drivers/scsi/bfa/bfad_attr.c
  42. +100 -37  drivers/scsi/bfa/bfad_bsg.c
  43. +40 -12  drivers/scsi/bfa/bfad_bsg.h
  44. +1 -1  drivers/scsi/bfa/bfad_drv.h
  45. +6 -4  drivers/scsi/bfa/bfad_im.c
  46. +72 -6  drivers/scsi/bfa/bfi.h
  47. +2 -3  drivers/scsi/bfa/bfi_ms.h
  48. +0 -91  drivers/scsi/csiostor/csio_hw.c
  49. +0 -11  drivers/scsi/csiostor/csio_hw.h
  50. +0 -77  drivers/scsi/csiostor/csio_mb.c
  51. +0 -11  drivers/scsi/csiostor/csio_mb.h
  52. +2 -2  drivers/scsi/csiostor/csio_scsi.c
  53. +128 -31  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
  54. +2 -4  drivers/scsi/fnic/fnic_scsi.c
  55. +48 -9  drivers/scsi/ipr.c
  56. +2 -2  drivers/scsi/isci/request.c
  57. +18 -0  drivers/scsi/libiscsi.c
  58. +1 -1  drivers/scsi/libsas/sas_scsi_host.c
  59. +1 -1  drivers/scsi/lpfc/lpfc.h
  60. +20 -3  drivers/scsi/lpfc/lpfc_attr.c
  61. +2 -2  drivers/scsi/lpfc/lpfc_bsg.c
  62. +1 -1  drivers/scsi/lpfc/lpfc_crtn.h
  63. +1 -1  drivers/scsi/lpfc/lpfc_ct.c
  64. +1 -1  drivers/scsi/lpfc/lpfc_els.c
  65. +35 -3  drivers/scsi/lpfc/lpfc_hbadisc.c
  66. +1 -1  drivers/scsi/lpfc/lpfc_hw.h
  67. +1 -1  drivers/scsi/lpfc/lpfc_hw4.h
  68. +59 -70  drivers/scsi/lpfc/lpfc_init.c
  69. +1 -1  drivers/scsi/lpfc/lpfc_mbox.c
  70. +1 -1  drivers/scsi/lpfc/lpfc_nportdisc.c
  71. +71 -54  drivers/scsi/lpfc/lpfc_scsi.c
  72. +1 -1  drivers/scsi/lpfc/lpfc_scsi.h
  73. +20 -32  drivers/scsi/lpfc/lpfc_sli.c
  74. +1 -2  drivers/scsi/lpfc/lpfc_sli4.h
  75. +3 -3  drivers/scsi/lpfc/lpfc_version.h
  76. +177 -9  drivers/scsi/megaraid/megaraid_sas.h
  77. +166 -60  drivers/scsi/megaraid/megaraid_sas_base.c
  78. +739 -45  drivers/scsi/megaraid/megaraid_sas_fp.c
  79. +127 -21  drivers/scsi/megaraid/megaraid_sas_fusion.c
  80. +33 -2  drivers/scsi/megaraid/megaraid_sas_fusion.h
  81. +4 -2  drivers/scsi/mpt2sas/mpi/mpi2.h
  82. +3 -1  drivers/scsi/mpt2sas/mpi/mpi2_init.h
  83. +6 -2  drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
  84. +7 -2  drivers/scsi/mpt2sas/mpi/mpi2_raid.h
  85. +6 -4  drivers/scsi/mpt2sas/mpi/mpi2_tool.h
  86. +30 -29  drivers/scsi/mpt2sas/mpt2sas_base.c
  87. +5 -2  drivers/scsi/mpt2sas/mpt2sas_base.h
  88. +129 -14  drivers/scsi/mpt2sas/mpt2sas_scsih.c
  89. +2 -1  drivers/scsi/mvsas/mv_sas.c
  90. +3 -2  drivers/scsi/pm8001/pm8001_hwi.c
  91. +11 -10  drivers/scsi/pm8001/pm80xx_hwi.c
  92. +4 -4  drivers/scsi/scsi.c
  93. +1 -0  drivers/scsi/scsi_devinfo.c
  94. +3 -4  drivers/scsi/scsi_error.c
  95. +5 -0  drivers/scsi/scsi_scan.c
  96. +30 -0  drivers/scsi/scsi_sysfs.c
  97. +12 -0  drivers/scsi/scsi_transport_iscsi.c
  98. +58 -19  drivers/scsi/sd.c
  99. +1 -0  drivers/scsi/sd.h
  100. +175 -35  drivers/scsi/storvsc_drv.c

+ 16 - 0
Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt

@@ -0,0 +1,16 @@
+* Universal Flash Storage (UFS) Host Controller
+
+UFSHC nodes are defined to describe on-chip UFS host controllers.
+Each UFS controller instance should have its own node.
+
+Required properties:
+- compatible        : compatible list, contains "jedec,ufs-1.1"
+- interrupts        : <interrupt mapping for UFS host controller IRQ>
+- reg               : <registers mapping>
+
+Example:
+	ufshc@0xfc598000 {
+		compatible = "jedec,ufs-1.1";
+		reg = <0xfc598000 0x800>;
+		interrupts = <0 28 0>;
+	};
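
The ufs_of_match[] fix in the shortlog above is the driver-side counterpart of this binding: an of_device_id table must end with an empty sentinel entry, which is the "missing empty slot" that patch adds. A minimal sketch of the pattern, not the driver's actual code (the real table lives in the ufshcd-pltfrm driver):

	#include <linux/module.h>
	#include <linux/of.h>

	static const struct of_device_id ufs_of_match[] = {
		{ .compatible = "jedec,ufs-1.1" },	/* matches the binding above */
		{ },					/* sentinel: match walkers stop here */
	};
	MODULE_DEVICE_TABLE(of, ufs_of_match);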

+ 22 - 0
Documentation/scsi/ChangeLog.megaraid_sas

@@ -1,3 +1,25 @@
+Release Date    : Wed. May 15, 2013 17:00:00 PST 2013 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Adam Radford
+			Kashyap Desai
+			Sumit Saxena
+Current Version : 06.600.18.00-rc1
+Old Version     : 06.506.00.00-rc1
+    1. Return DID_ERROR for scsi io, when controller is in critical h/w error.
+    2. Fix the interrupt mask for Gen2 controller.
+    3. Update balance count in driver to be in sync of firmware.
+    4. Free event detail memory without device ID check.
+    5. Set IO request timeout value provided by OS timeout for Tape devices.
+    6. Add support for MegaRAID Fury (device ID-0x005f) 12Gb/s controllers.
+    7. Add support to display Customer branding details in syslog.
+    8. Set IoFlags to enable Fast Path for JBODs for Invader/Fury(12 Gb/s)
+    controllers.
+    9. Add support for Extended MSI-x vectors for Invader and Fury(12Gb/s
+    HBA).
+    10.Add support for Uneven Span PRL11.
+    11.Add support to differentiate between iMR and MR Firmware.
+    12.Version and Changelog update.
+-------------------------------------------------------------------------------
 Release Date    : Sat. Feb 9, 2013 17:00:00 PST 2013 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Adam Radford

+ 7 - 0
MAINTAINERS

@@ -1876,6 +1876,13 @@ S:	Odd fixes
 F:	Documentation/video4linux/bttv/
 F:	drivers/media/pci/bt8xx/bttv*
 
+BUSLOGIC SCSI DRIVER
+M:	Khalid Aziz <khalid@gonehiking.org>
+L:	linux-scsi@vger.kernel.org
+S:	Maintained
+F:	drivers/scsi/BusLogic.*
+F:	drivers/scsi/FlashPoint.*
+
 C-MEDIA CMI8788 DRIVER
 M:	Clemens Ladisch <clemens@ladisch.de>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)

+ 1 - 1
drivers/s390/scsi/Makefile

@@ -2,7 +2,7 @@
 # Makefile for the S/390 specific device drivers
 #
 
-zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_cfdc.o zfcp_dbf.o zfcp_erp.o \
+zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_dbf.o zfcp_erp.o \
 	     zfcp_fc.o zfcp_fsf.o zfcp_qdio.o zfcp_scsi.o zfcp_sysfs.o \
 	     zfcp_unit.o
 

+ 5 - 31
drivers/s390/scsi/zfcp_aux.c

@@ -3,7 +3,7 @@
  *
  * Module interface and handling of zfcp data structures.
  *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2013
  */
 
 /*
@@ -23,6 +23,7 @@
  *            Christof Schmitt
  *            Martin Petermann
  *            Sven Schuetz
+ *            Steffen Maier
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -140,13 +141,6 @@ static int __init zfcp_module_init(void)
 	scsi_transport_reserve_device(zfcp_scsi_transport_template,
 				      sizeof(struct zfcp_scsi_dev));
 
-
-	retval = misc_register(&zfcp_cfdc_misc);
-	if (retval) {
-		pr_err("Registering the misc device zfcp_cfdc failed\n");
-		goto out_misc;
-	}
-
 	retval = ccw_driver_register(&zfcp_ccw_driver);
 	if (retval) {
 		pr_err("The zfcp device driver could not register with "
@@ -159,8 +153,6 @@ static int __init zfcp_module_init(void)
 	return 0;
 
 out_ccw_register:
-	misc_deregister(&zfcp_cfdc_misc);
-out_misc:
 	fc_release_transport(zfcp_scsi_transport_template);
 out_transport:
 	kmem_cache_destroy(zfcp_fc_req_cache);
@@ -175,7 +167,6 @@ module_init(zfcp_module_init);
 static void __exit zfcp_module_exit(void)
 {
 	ccw_driver_unregister(&zfcp_ccw_driver);
-	misc_deregister(&zfcp_cfdc_misc);
 	fc_release_transport(zfcp_scsi_transport_template);
 	kmem_cache_destroy(zfcp_fc_req_cache);
 	kmem_cache_destroy(zfcp_fsf_qtcb_cache);
@@ -415,6 +406,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 	adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
 	adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
 
+	adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
+
 	if (!zfcp_scsi_adapter_register(adapter))
 		return adapter;
 
@@ -464,20 +457,6 @@ void zfcp_adapter_release(struct kref *ref)
 	put_device(&cdev->dev);
 }
 
-/**
- * zfcp_device_unregister - remove port, unit from system
- * @dev: reference to device which is to be removed
- * @grp: related reference to attribute group
- *
- * Helper function to unregister port, unit from system
- */
-void zfcp_device_unregister(struct device *dev,
-			    const struct attribute_group *grp)
-{
-	sysfs_remove_group(&dev->kobj, grp);
-	device_unregister(dev);
-}
-
 static void zfcp_port_release(struct device *dev)
 {
 	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
@@ -530,6 +509,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 	port->wwpn = wwpn;
 	port->rport_task = RPORT_NONE;
 	port->dev.parent = &adapter->ccw_device->dev;
+	port->dev.groups = zfcp_port_attr_groups;
 	port->dev.release = zfcp_port_release;
 
 	if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
@@ -543,10 +523,6 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 		goto err_out;
 	}
 
-	if (sysfs_create_group(&port->dev.kobj,
-			       &zfcp_sysfs_port_attrs))
-		goto err_out_put;
-
 	write_lock_irq(&adapter->port_list_lock);
 	list_add_tail(&port->list, &adapter->port_list);
 	write_unlock_irq(&adapter->port_list_lock);
@@ -555,8 +531,6 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 
 	return port;
 
-err_out_put:
-	device_unregister(&port->dev);
 err_out:
 	zfcp_ccw_adapter_put(adapter);
 	return ERR_PTR(retval);

+ 2 - 11
drivers/s390/scsi/zfcp_ccw.c

@@ -71,15 +71,6 @@ static struct ccw_device_id zfcp_ccw_device_id[] = {
 };
 MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
 
-/**
- * zfcp_ccw_priv_sch - check if subchannel is privileged
- * @adapter: Adapter/Subchannel to check
- */
-int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter)
-{
-	return adapter->ccw_device->id.dev_model == ZFCP_MODEL_PRIV;
-}
-
 /**
  * zfcp_ccw_probe - probe function of zfcp driver
  * @cdev: pointer to belonging ccw device
@@ -129,10 +120,10 @@ static void zfcp_ccw_remove(struct ccw_device *cdev)
 	zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */
 
 	list_for_each_entry_safe(unit, u, &unit_remove_lh, list)
-		zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
+		device_unregister(&unit->dev);
 
 	list_for_each_entry_safe(port, p, &port_remove_lh, list)
-		zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
+		device_unregister(&port->dev);
 
 	zfcp_adapter_unregister(adapter);
 }

+ 0 - 446
drivers/s390/scsi/zfcp_cfdc.c

@@ -1,446 +0,0 @@
-/*
- * zfcp device driver
- *
- * Userspace interface for accessing the
- * Access Control Lists / Control File Data Channel;
- * handling of response code and states for ports and LUNs.
- *
- * Copyright IBM Corp. 2008, 2010
- */
-
-#define KMSG_COMPONENT "zfcp"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#include <linux/compat.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/miscdevice.h>
-#include <asm/compat.h>
-#include <asm/ccwdev.h>
-#include "zfcp_def.h"
-#include "zfcp_ext.h"
-#include "zfcp_fsf.h"
-
-#define ZFCP_CFDC_CMND_DOWNLOAD_NORMAL		0x00010001
-#define ZFCP_CFDC_CMND_DOWNLOAD_FORCE		0x00010101
-#define ZFCP_CFDC_CMND_FULL_ACCESS		0x00000201
-#define ZFCP_CFDC_CMND_RESTRICTED_ACCESS	0x00000401
-#define ZFCP_CFDC_CMND_UPLOAD			0x00010002
-
-#define ZFCP_CFDC_DOWNLOAD			0x00000001
-#define ZFCP_CFDC_UPLOAD			0x00000002
-#define ZFCP_CFDC_WITH_CONTROL_FILE		0x00010000
-
-#define ZFCP_CFDC_IOC_MAGIC                     0xDD
-#define ZFCP_CFDC_IOC \
-	_IOWR(ZFCP_CFDC_IOC_MAGIC, 0, struct zfcp_cfdc_data)
-
-/**
- * struct zfcp_cfdc_data - data for ioctl cfdc interface
- * @signature: request signature
- * @devno: FCP adapter device number
- * @command: command code
- * @fsf_status: returns status of FSF command to userspace
- * @fsf_status_qual: returned to userspace
- * @payloads: access conflicts list
- * @control_file: access control table
- */
-struct zfcp_cfdc_data {
-	u32 signature;
-	u32 devno;
-	u32 command;
-	u32 fsf_status;
-	u8  fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
-	u8  payloads[256];
-	u8  control_file[0];
-};
-
-static int zfcp_cfdc_copy_from_user(struct scatterlist *sg,
-				    void __user *user_buffer)
-{
-	unsigned int length;
-	unsigned int size = ZFCP_CFDC_MAX_SIZE;
-
-	while (size) {
-		length = min((unsigned int)size, sg->length);
-		if (copy_from_user(sg_virt(sg++), user_buffer, length))
-			return -EFAULT;
-		user_buffer += length;
-		size -= length;
-	}
-	return 0;
-}
-
-static int zfcp_cfdc_copy_to_user(void __user  *user_buffer,
-				  struct scatterlist *sg)
-{
-	unsigned int length;
-	unsigned int size = ZFCP_CFDC_MAX_SIZE;
-
-	while (size) {
-		length = min((unsigned int) size, sg->length);
-		if (copy_to_user(user_buffer, sg_virt(sg++), length))
-			return -EFAULT;
-		user_buffer += length;
-		size -= length;
-	}
-	return 0;
-}
-
-static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno)
-{
-	char busid[9];
-	struct ccw_device *cdev;
-	struct zfcp_adapter *adapter;
-
-	snprintf(busid, sizeof(busid), "0.0.%04x", devno);
-	cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
-	if (!cdev)
-		return NULL;
-
-	adapter = zfcp_ccw_adapter_by_cdev(cdev);
-
-	put_device(&cdev->dev);
-	return adapter;
-}
-
-static int zfcp_cfdc_set_fsf(struct zfcp_fsf_cfdc *fsf_cfdc, int command)
-{
-	switch (command) {
-	case ZFCP_CFDC_CMND_DOWNLOAD_NORMAL:
-		fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
-		fsf_cfdc->option = FSF_CFDC_OPTION_NORMAL_MODE;
-		break;
-	case ZFCP_CFDC_CMND_DOWNLOAD_FORCE:
-		fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
-		fsf_cfdc->option = FSF_CFDC_OPTION_FORCE;
-		break;
-	case ZFCP_CFDC_CMND_FULL_ACCESS:
-		fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
-		fsf_cfdc->option = FSF_CFDC_OPTION_FULL_ACCESS;
-		break;
-	case ZFCP_CFDC_CMND_RESTRICTED_ACCESS:
-		fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
-		fsf_cfdc->option = FSF_CFDC_OPTION_RESTRICTED_ACCESS;
-		break;
-	case ZFCP_CFDC_CMND_UPLOAD:
-		fsf_cfdc->command = FSF_QTCB_UPLOAD_CONTROL_FILE;
-		fsf_cfdc->option = 0;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int zfcp_cfdc_sg_setup(int command, struct scatterlist *sg,
-			      u8 __user *control_file)
-{
-	int retval;
-	retval = zfcp_sg_setup_table(sg, ZFCP_CFDC_PAGES);
-	if (retval)
-		return retval;
-
-	sg[ZFCP_CFDC_PAGES - 1].length = ZFCP_CFDC_MAX_SIZE % PAGE_SIZE;
-
-	if (command & ZFCP_CFDC_WITH_CONTROL_FILE &&
-	    command & ZFCP_CFDC_DOWNLOAD) {
-		retval = zfcp_cfdc_copy_from_user(sg, control_file);
-		if (retval) {
-			zfcp_sg_free_table(sg, ZFCP_CFDC_PAGES);
-			return -EFAULT;
-		}
-	}
-
-	return 0;
-}
-
-static void zfcp_cfdc_req_to_sense(struct zfcp_cfdc_data *data,
-				   struct zfcp_fsf_req *req)
-{
-	data->fsf_status = req->qtcb->header.fsf_status;
-	memcpy(&data->fsf_status_qual, &req->qtcb->header.fsf_status_qual,
-	       sizeof(union fsf_status_qual));
-	memcpy(&data->payloads, &req->qtcb->bottom.support.els,
-	       sizeof(req->qtcb->bottom.support.els));
-}
-
-static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
-				unsigned long arg)
-{
-	struct zfcp_cfdc_data *data;
-	struct zfcp_cfdc_data __user *data_user;
-	struct zfcp_adapter *adapter;
-	struct zfcp_fsf_req *req;
-	struct zfcp_fsf_cfdc *fsf_cfdc;
-	int retval;
-
-	if (command != ZFCP_CFDC_IOC)
-		return -ENOTTY;
-
-	if (is_compat_task())
-		data_user = compat_ptr(arg);
-	else
-		data_user = (void __user *)arg;
-
-	if (!data_user)
-		return -EINVAL;
-
-	fsf_cfdc = kmalloc(sizeof(struct zfcp_fsf_cfdc), GFP_KERNEL);
-	if (!fsf_cfdc)
-		return -ENOMEM;
-
-	data = memdup_user(data_user, sizeof(*data_user));
-	if (IS_ERR(data)) {
-		retval = PTR_ERR(data);
-		goto no_mem_sense;
-	}
-
-	if (data->signature != 0xCFDCACDF) {
-		retval = -EINVAL;
-		goto free_buffer;
-	}
-
-	retval = zfcp_cfdc_set_fsf(fsf_cfdc, data->command);
-
-	adapter = zfcp_cfdc_get_adapter(data->devno);
-	if (!adapter) {
-		retval = -ENXIO;
-		goto free_buffer;
-	}
-
-	retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg,
-				    data_user->control_file);
-	if (retval)
-		goto adapter_put;
-	req = zfcp_fsf_control_file(adapter, fsf_cfdc);
-	if (IS_ERR(req)) {
-		retval = PTR_ERR(req);
-		goto free_sg;
-	}
-
-	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
-		retval = -ENXIO;
-		goto free_fsf;
-	}
-
-	zfcp_cfdc_req_to_sense(data, req);
-	retval = copy_to_user(data_user, data, sizeof(*data_user));
-	if (retval) {
-		retval = -EFAULT;
-		goto free_fsf;
-	}
-
-	if (data->command & ZFCP_CFDC_UPLOAD)
-		retval = zfcp_cfdc_copy_to_user(&data_user->control_file,
-						fsf_cfdc->sg);
-
- free_fsf:
-	zfcp_fsf_req_free(req);
- free_sg:
-	zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES);
- adapter_put:
-	zfcp_ccw_adapter_put(adapter);
- free_buffer:
-	kfree(data);
- no_mem_sense:
-	kfree(fsf_cfdc);
-	return retval;
-}
-
-static const struct file_operations zfcp_cfdc_fops = {
-	.open = nonseekable_open,
-	.unlocked_ioctl = zfcp_cfdc_dev_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl = zfcp_cfdc_dev_ioctl,
-#endif
-	.llseek = no_llseek,
-};
-
-struct miscdevice zfcp_cfdc_misc = {
-	.minor = MISC_DYNAMIC_MINOR,
-	.name = "zfcp_cfdc",
-	.fops = &zfcp_cfdc_fops,
-};
-
-/**
- * zfcp_cfdc_adapter_access_changed - Process change in adapter ACT
- * @adapter: Adapter where the Access Control Table (ACT) changed
- *
- * After a change in the adapter ACT, check if access to any
- * previously denied resources is now possible.
- */
-void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
-{
-	unsigned long flags;
-	struct zfcp_port *port;
-	struct scsi_device *sdev;
-	struct zfcp_scsi_dev *zfcp_sdev;
-	int status;
-
-	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
-		return;
-
-	read_lock_irqsave(&adapter->port_list_lock, flags);
-	list_for_each_entry(port, &adapter->port_list, list) {
-		status = atomic_read(&port->status);
-		if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
-		    (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
-			zfcp_erp_port_reopen(port,
-					     ZFCP_STATUS_COMMON_ERP_FAILED,
-					     "cfaac_1");
-	}
-	read_unlock_irqrestore(&adapter->port_list_lock, flags);
-
-	shost_for_each_device(sdev, adapter->scsi_host) {
-		zfcp_sdev = sdev_to_zfcp(sdev);
-		status = atomic_read(&zfcp_sdev->status);
-		if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
-		    (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
-			zfcp_erp_lun_reopen(sdev,
-					    ZFCP_STATUS_COMMON_ERP_FAILED,
-					    "cfaac_2");
-	}
-}
-
-static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
-{
-	u16 subtable = table >> 16;
-	u16 rule = table & 0xffff;
-	const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
-
-	if (subtable && subtable < ARRAY_SIZE(act_type))
-		dev_warn(&adapter->ccw_device->dev,
-			 "Access denied according to ACT rule type %s, "
-			 "rule %d\n", act_type[subtable], rule);
-}
-
-/**
- * zfcp_cfdc_port_denied - Process "access denied" for port
- * @port: The port where the access has been denied
- * @qual: The FSF status qualifier for the access denied FSF status
- */
-void zfcp_cfdc_port_denied(struct zfcp_port *port,
-			   union fsf_status_qual *qual)
-{
-	dev_warn(&port->adapter->ccw_device->dev,
-		 "Access denied to port 0x%016Lx\n",
-		 (unsigned long long)port->wwpn);
-
-	zfcp_act_eval_err(port->adapter, qual->halfword[0]);
-	zfcp_act_eval_err(port->adapter, qual->halfword[1]);
-	zfcp_erp_set_port_status(port,
-				 ZFCP_STATUS_COMMON_ERP_FAILED |
-				 ZFCP_STATUS_COMMON_ACCESS_DENIED);
-}
-
-/**
- * zfcp_cfdc_lun_denied - Process "access denied" for LUN
- * @sdev: The SCSI device / LUN where the access has been denied
- * @qual: The FSF status qualifier for the access denied FSF status
- */
-void zfcp_cfdc_lun_denied(struct scsi_device *sdev,
-			  union fsf_status_qual *qual)
-{
-	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
-
-	dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
-		 "Access denied to LUN 0x%016Lx on port 0x%016Lx\n",
-		 zfcp_scsi_dev_lun(sdev),
-		 (unsigned long long)zfcp_sdev->port->wwpn);
-	zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[0]);
-	zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[1]);
-	zfcp_erp_set_lun_status(sdev,
-				ZFCP_STATUS_COMMON_ERP_FAILED |
-				ZFCP_STATUS_COMMON_ACCESS_DENIED);
-
-	atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
-	atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
-}
-
-/**
- * zfcp_cfdc_lun_shrng_vltn - Evaluate LUN sharing violation status
- * @sdev: The LUN / SCSI device where sharing violation occurred
- * @qual: The FSF status qualifier from the LUN sharing violation
- */
-void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *sdev,
-			      union fsf_status_qual *qual)
-{
-	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
-
-	if (qual->word[0])
-		dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
-			 "LUN 0x%Lx on port 0x%Lx is already in "
-			 "use by CSS%d, MIF Image ID %x\n",
-			 zfcp_scsi_dev_lun(sdev),
-			 (unsigned long long)zfcp_sdev->port->wwpn,
-			 qual->fsf_queue_designator.cssid,
-			 qual->fsf_queue_designator.hla);
-	else
-		zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->word[2]);
-
-	zfcp_erp_set_lun_status(sdev,
-				ZFCP_STATUS_COMMON_ERP_FAILED |
-				ZFCP_STATUS_COMMON_ACCESS_DENIED);
-	atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
-	atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
-}
-
-/**
- * zfcp_cfdc_open_lun_eval - Eval access ctrl. status for successful "open lun"
- * @sdev: The SCSI device / LUN where to evaluate the status
- * @bottom: The qtcb bottom with the status from the "open lun"
- *
- * Returns: 0 if LUN is usable, -EACCES if the access control table
- *          reports an unsupported configuration.
- */
-int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev,
-			    struct fsf_qtcb_bottom_support *bottom)
-{
-	int shared, rw;
-	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
-	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
-
-	if ((adapter->connection_features & FSF_FEATURE_NPIV_MODE) ||
-	    !(adapter->adapter_features & FSF_FEATURE_LUN_SHARING) ||
-	    zfcp_ccw_priv_sch(adapter))
-		return 0;
-
-	shared = !(bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE);
-	rw = (bottom->lun_access_info & FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
-
-	if (shared)
-		atomic_set_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
-
-	if (!rw) {
-		atomic_set_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
-		dev_info(&adapter->ccw_device->dev, "SCSI device at LUN "
-			 "0x%016Lx on port 0x%016Lx opened read-only\n",
-			 zfcp_scsi_dev_lun(sdev),
-			 (unsigned long long)zfcp_sdev->port->wwpn);
-	}
-
-	if (!shared && !rw) {
-		dev_err(&adapter->ccw_device->dev, "Exclusive read-only access "
-			"not supported (LUN 0x%016Lx, port 0x%016Lx)\n",
-			zfcp_scsi_dev_lun(sdev),
-			(unsigned long long)zfcp_sdev->port->wwpn);
-		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
-		zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6");
-		return -EACCES;
-	}
-
-	if (shared && rw) {
-		dev_err(&adapter->ccw_device->dev,
-			"Shared read-write access not supported "
-			"(LUN 0x%016Lx, port 0x%016Lx)\n",
-			zfcp_scsi_dev_lun(sdev),
-			(unsigned long long)zfcp_sdev->port->wwpn);
-		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
-		zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8");
-		return -EACCES;
-	}
-
-	return 0;
-}

+ 9 - 2
drivers/s390/scsi/zfcp_dbf.c

@@ -3,7 +3,7 @@
  *
  * Debug traces for zfcp.
  *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2013
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -23,6 +23,13 @@ module_param(dbfsize, uint, 0400);
 MODULE_PARM_DESC(dbfsize,
 		 "number of pages for each debug feature area (default 4)");
 
+static u32 dbflevel = 3;
+
+module_param(dbflevel, uint, 0400);
+MODULE_PARM_DESC(dbflevel,
+		 "log level for each debug feature area "
+		 "(default 3, range 0..6)");
+
 static inline unsigned int zfcp_dbf_plen(unsigned int offset)
 {
 	return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
@@ -447,7 +454,7 @@ static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
 		return NULL;
 
 	debug_register_view(d, &debug_hex_ascii_view);
-	debug_set_level(d, 3);
+	debug_set_level(d, dbflevel);
 
 	return d;
 }
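
The new dbflevel parameter plugs into the s390 debug feature at registration time. A hedged sketch of the surrounding call sequence (s390 "debug" API; the name and record size here are illustrative, not zfcp's actual values):

	debug_info_t *d;

	/* args: name, pages per area, number of areas, record size */
	d = debug_register("zfcp_demo", dbfsize, 1, rec_size);
	debug_register_view(d, &debug_hex_ascii_view);
	debug_set_level(d, dbflevel);	/* hard-coded to 3 before this patch */

Being a 0400 module parameter, it is load-time only, e.g. "modprobe zfcp dbflevel=6" (or zfcp.dbflevel=6 on the kernel command line for a built-in driver).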

+ 0 - 4
drivers/s390/scsi/zfcp_def.h

@@ -86,10 +86,6 @@ struct zfcp_reqlist;
 #define ZFCP_STATUS_PORT_PHYS_OPEN		0x00000001
 #define ZFCP_STATUS_PORT_LINK_TEST		0x00000002
 
-/* logical unit status */
-#define ZFCP_STATUS_LUN_SHARED			0x00000004
-#define ZFCP_STATUS_LUN_READONLY		0x00000008
-
 /* FSF request status (this does not have a common part) */
 #define ZFCP_STATUS_FSFREQ_ERROR		0x00000008
 #define ZFCP_STATUS_FSFREQ_CLEANUP		0x00000010

+ 1 - 2
drivers/s390/scsi/zfcp_erp.c

@@ -950,8 +950,7 @@ static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev)
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
-	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
-			  ZFCP_STATUS_LUN_SHARED | ZFCP_STATUS_LUN_READONLY,
+	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED,
 			  &zfcp_sdev->status);
 }
 

+ 2 - 18
drivers/s390/scsi/zfcp_ext.h

@@ -21,28 +21,14 @@ extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
 					   u32);
 extern void zfcp_sg_free_table(struct scatterlist *, int);
 extern int zfcp_sg_setup_table(struct scatterlist *, int);
-extern void zfcp_device_unregister(struct device *,
-				   const struct attribute_group *);
 extern void zfcp_adapter_release(struct kref *);
 extern void zfcp_adapter_unregister(struct zfcp_adapter *);
 
 /* zfcp_ccw.c */
-extern int zfcp_ccw_priv_sch(struct zfcp_adapter *);
 extern struct ccw_driver zfcp_ccw_driver;
 extern struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *);
 extern void zfcp_ccw_adapter_put(struct zfcp_adapter *);
 
-/* zfcp_cfdc.c */
-extern struct miscdevice zfcp_cfdc_misc;
-extern void zfcp_cfdc_port_denied(struct zfcp_port *, union fsf_status_qual *);
-extern void zfcp_cfdc_lun_denied(struct scsi_device *, union fsf_status_qual *);
-extern void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *,
-				     union fsf_status_qual *);
-extern int zfcp_cfdc_open_lun_eval(struct scsi_device *,
-				   struct fsf_qtcb_bottom_support *);
-extern void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *);
-
-
 /* zfcp_dbf.c */
 extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
 extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
@@ -117,8 +103,6 @@ extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
 extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
 extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *,
 					    struct fsf_qtcb_bottom_port *);
-extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *,
-						  struct zfcp_fsf_cfdc *);
 extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
 extern int zfcp_fsf_status_read(struct zfcp_qdio *);
 extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
@@ -158,9 +142,9 @@ extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
 extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
 
 /* zfcp_sysfs.c */
-extern struct attribute_group zfcp_sysfs_unit_attrs;
+extern const struct attribute_group *zfcp_unit_attr_groups[];
 extern struct attribute_group zfcp_sysfs_adapter_attrs;
-extern struct attribute_group zfcp_sysfs_port_attrs;
+extern const struct attribute_group *zfcp_port_attr_groups[];
 extern struct mutex zfcp_sysfs_port_units_mutex;
 extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
 extern struct device_attribute *zfcp_sysfs_shost_attrs[];

+ 1 - 1
drivers/s390/scsi/zfcp_fc.c

@@ -668,7 +668,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
 
 	list_for_each_entry_safe(port, tmp, &remove_lh, list) {
 		zfcp_erp_port_shutdown(port, 0, "fcegpf2");
-		zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
+		device_unregister(&port->dev);
 	}
 
 	return ret;

+ 33 - 121
drivers/s390/scsi/zfcp_fsf.c

@@ -3,7 +3,7 @@
  *
  * Implementation of FSF commands.
  *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2013
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -254,14 +254,9 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 
 		break;
 	case FSF_STATUS_READ_NOTIFICATION_LOST:
-		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
-			zfcp_cfdc_adapter_access_changed(adapter);
 		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
 			zfcp_fc_conditional_port_scan(adapter);
 		break;
-	case FSF_STATUS_READ_CFDC_UPDATED:
-		zfcp_cfdc_adapter_access_changed(adapter);
-		break;
 	case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
 		adapter->adapter_features = sr_buf->payload.word[0];
 		break;
@@ -483,12 +478,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 
 	fc_host_port_name(shost) = nsp->fl_wwpn;
 	fc_host_node_name(shost) = nsp->fl_wwnn;
-	fc_host_port_id(shost) = ntoh24(bottom->s_id);
-	fc_host_speed(shost) =
-		zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
 	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
 
-	adapter->hydra_version = bottom->adapter_type;
 	adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
 	adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
 					 (u16)FSF_STATUS_READS_RECOM);
@@ -496,6 +487,19 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 	if (fc_host_permanent_port_name(shost) == -1)
 		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
 
+	zfcp_scsi_set_prot(adapter);
+
+	/* no error return above here, otherwise must fix call chains */
+	/* do not evaluate invalid fields */
+	if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
+		return 0;
+
+	fc_host_port_id(shost) = ntoh24(bottom->s_id);
+	fc_host_speed(shost) =
+		zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
+
+	adapter->hydra_version = bottom->adapter_type;
+
 	switch (bottom->fc_topology) {
 	case FSF_TOPO_P2P:
 		adapter->peer_d_id = ntoh24(bottom->peer_d_id);
@@ -517,8 +521,6 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 		return -EIO;
 	}
 
-	zfcp_scsi_set_prot(adapter);
-
 	return 0;
 }
 
@@ -563,8 +565,14 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
 		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
 		adapter->hydra_version = 0;
 
+		/* avoids adapter shutdown to be able to recognize
+		 * events such as LINK UP */
+		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+				&adapter->status);
 		zfcp_fsf_link_down_info_eval(req,
 			&qtcb->header.fsf_status_qual.link_down_info);
+		if (zfcp_fsf_exchange_config_evaluate(req))
+			return;
 		break;
 	default:
 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
@@ -931,8 +939,6 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
 			break;
                 }
                 break;
-	case FSF_ACCESS_DENIED:
-		break;
         case FSF_PORT_BOXED:
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
@@ -1086,7 +1092,6 @@ out:
 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
 {
 	struct zfcp_fsf_ct_els *send_els = req->data;
-	struct zfcp_port *port = send_els->port;
 	struct fsf_qtcb_header *header = &req->qtcb->header;
 
 	send_els->status = -EINVAL;
@@ -1116,12 +1121,6 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
 	case FSF_REQUEST_SIZE_TOO_LARGE:
 	case FSF_RESPONSE_SIZE_TOO_LARGE:
 		break;
-	case FSF_ACCESS_DENIED:
-		if (port) {
-			zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
-			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		}
-		break;
 	case FSF_SBAL_MISMATCH:
 		/* should never occur, avoided in zfcp_fsf_send_els */
 		/* fall through */
@@ -1209,8 +1208,6 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 
 	req->qtcb->bottom.config.feature_selection =
-			FSF_FEATURE_CFDC |
-			FSF_FEATURE_LUN_SHARING |
 			FSF_FEATURE_NOTIFICATION_LOST |
 			FSF_FEATURE_UPDATE_ALERT;
 	req->erp_action = erp_action;
@@ -1250,8 +1247,6 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
 	req->handler = zfcp_fsf_exchange_config_data_handler;
 
 	req->qtcb->bottom.config.feature_selection =
-			FSF_FEATURE_CFDC |
-			FSF_FEATURE_LUN_SHARING |
 			FSF_FEATURE_NOTIFICATION_LOST |
 			FSF_FEATURE_UPDATE_ALERT;
 
@@ -1378,10 +1373,6 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
 	switch (header->fsf_status) {
 	case FSF_PORT_ALREADY_OPEN:
 		break;
-	case FSF_ACCESS_DENIED:
-		zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
 	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
 		dev_warn(&req->adapter->ccw_device->dev,
 			 "Not enough FCP adapter resources to open "
@@ -1564,8 +1555,6 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
 		/* fall through */
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		/* fall through */
-	case FSF_ACCESS_DENIED:
 		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
 		break;
 	case FSF_GOOD:
@@ -1685,9 +1674,6 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
 		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
-	case FSF_ACCESS_DENIED:
-		zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
-		break;
 	case FSF_PORT_BOXED:
 		/* can't use generic zfcp_erp_modify_port_status because
 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
@@ -1773,7 +1759,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
 	struct scsi_device *sdev = req->data;
 	struct zfcp_scsi_dev *zfcp_sdev;
 	struct fsf_qtcb_header *header = &req->qtcb->header;
-	struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
+	union fsf_status_qual *qual = &header->fsf_status_qual;
 
 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		return;
@@ -1781,9 +1767,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
 	zfcp_sdev = sdev_to_zfcp(sdev);
 
 	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
-			  ZFCP_STATUS_COMMON_ACCESS_BOXED |
-			  ZFCP_STATUS_LUN_SHARED |
-			  ZFCP_STATUS_LUN_READONLY,
+			  ZFCP_STATUS_COMMON_ACCESS_BOXED,
 			  &zfcp_sdev->status);
 
 	switch (header->fsf_status) {
@@ -1793,10 +1777,6 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
 		/* fall through */
 	case FSF_LUN_ALREADY_OPEN:
 		break;
-	case FSF_ACCESS_DENIED:
-		zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual);
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
 	case FSF_PORT_BOXED:
 		zfcp_erp_set_port_status(zfcp_sdev->port,
 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
@@ -1805,7 +1785,17 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_LUN_SHARING_VIOLATION:
-		zfcp_cfdc_lun_shrng_vltn(sdev, &header->fsf_status_qual);
+		if (qual->word[0])
+			dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
+				 "LUN 0x%Lx on port 0x%Lx is already in "
+				 "use by CSS%d, MIF Image ID %x\n",
+				 zfcp_scsi_dev_lun(sdev),
+				 (unsigned long long)zfcp_sdev->port->wwpn,
+				 qual->fsf_queue_designator.cssid,
+				 qual->fsf_queue_designator.hla);
+		zfcp_erp_set_lun_status(sdev,
+					ZFCP_STATUS_COMMON_ERP_FAILED |
+					ZFCP_STATUS_COMMON_ACCESS_DENIED);
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
@@ -1833,7 +1823,6 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
 	case FSF_GOOD:
 		zfcp_sdev->lun_handle = header->lun_handle;
 		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
-		zfcp_cfdc_open_lun_eval(sdev, bottom);
 		break;
 	}
 }
@@ -2061,10 +2050,6 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
 		zfcp_fsf_class_not_supp(req);
 		break;
-	case FSF_ACCESS_DENIED:
-		zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual);
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
 	case FSF_DIRECTION_INDICATOR_NOT_VALID:
 		dev_err(&req->adapter->ccw_device->dev,
 			"Incorrect direction %d, LUN 0x%016Lx on port "
@@ -2365,79 +2350,6 @@ out:
 	return req;
 }
 
-static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
-{
-}
-
-/**
- * zfcp_fsf_control_file - control file upload/download
- * @adapter: pointer to struct zfcp_adapter
- * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc
- * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise
- */
-struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
-					   struct zfcp_fsf_cfdc *fsf_cfdc)
-{
-	struct zfcp_qdio *qdio = adapter->qdio;
-	struct zfcp_fsf_req *req = NULL;
-	struct fsf_qtcb_bottom_support *bottom;
-	int retval = -EIO;
-	u8 direction;
-
-	if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
-		return ERR_PTR(-EOPNOTSUPP);
-
-	switch (fsf_cfdc->command) {
-	case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
-		direction = SBAL_SFLAGS0_TYPE_WRITE;
-		break;
-	case FSF_QTCB_UPLOAD_CONTROL_FILE:
-		direction = SBAL_SFLAGS0_TYPE_READ;
-		break;
-	default:
-		return ERR_PTR(-EINVAL);
-	}
-
-	spin_lock_irq(&qdio->req_q_lock);
-	if (zfcp_qdio_sbal_get(qdio))
-		goto out;
-
-	req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, direction, NULL);
-	if (IS_ERR(req)) {
-		retval = -EPERM;
-		goto out;
-	}
-
-	req->handler = zfcp_fsf_control_file_handler;
-
-	bottom = &req->qtcb->bottom.support;
-	bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
-	bottom->option = fsf_cfdc->option;
-
-	retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg);
-
-	if (retval ||
-		(zfcp_qdio_real_bytes(fsf_cfdc->sg) != ZFCP_CFDC_MAX_SIZE)) {
-		zfcp_fsf_req_free(req);
-		retval = -EIO;
-		goto out;
-	}
-	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
-	if (zfcp_adapter_multi_buffer_active(adapter))
-		zfcp_qdio_set_scount(qdio, &req->qdio_req);
-
-	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
-	retval = zfcp_fsf_req_send(req);
-out:
-	spin_unlock_irq(&qdio->req_q_lock);
-
-	if (!retval) {
-		wait_for_completion(&req->completion);
-		return req;
-	}
-	return ERR_PTR(retval);
-}
-
 /**
  * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
  * @adapter: pointer to struct zfcp_adapter

+ 0 - 26
drivers/s390/scsi/zfcp_fsf.h

@@ -36,13 +36,6 @@
 #define FSF_CONFIG_COMMAND			0x00000003
 #define FSF_PORT_COMMAND			0x00000004
 
-/* FSF control file upload/download operations' subtype and options */
-#define FSF_CFDC_OPERATION_SUBTYPE		0x00020001
-#define FSF_CFDC_OPTION_NORMAL_MODE		0x00000000
-#define FSF_CFDC_OPTION_FORCE			0x00000001
-#define FSF_CFDC_OPTION_FULL_ACCESS		0x00000002
-#define FSF_CFDC_OPTION_RESTRICTED_ACCESS	0x00000004
-
 /* FSF protocol states */
 #define FSF_PROT_GOOD				0x00000001
 #define FSF_PROT_QTCB_VERSION_ERROR		0x00000010
@@ -64,7 +57,6 @@
 #define FSF_HANDLE_MISMATCH			0x00000005
 #define FSF_SERVICE_CLASS_NOT_SUPPORTED		0x00000006
 #define FSF_FCPLUN_NOT_VALID			0x00000009
-#define FSF_ACCESS_DENIED			0x00000010
 #define FSF_LUN_SHARING_VIOLATION               0x00000012
 #define FSF_FCP_COMMAND_DOES_NOT_EXIST		0x00000022
 #define FSF_DIRECTION_INDICATOR_NOT_VALID	0x00000030
@@ -130,7 +122,6 @@
 #define FSF_STATUS_READ_LINK_DOWN		0x00000005
 #define FSF_STATUS_READ_LINK_UP          	0x00000006
 #define FSF_STATUS_READ_NOTIFICATION_LOST	0x00000009
-#define FSF_STATUS_READ_CFDC_UPDATED		0x0000000A
 #define FSF_STATUS_READ_FEATURE_UPDATE_ALERT	0x0000000C
 
 /* status subtypes for link down */
@@ -140,7 +131,6 @@
 
 /* status subtypes for unsolicited status notification lost */
 #define FSF_STATUS_READ_SUB_INCOMING_ELS	0x00000001
-#define FSF_STATUS_READ_SUB_ACT_UPDATED		0x00000020
 
 /* topologie that is detected by the adapter */
 #define FSF_TOPO_P2P				0x00000001
@@ -166,8 +156,6 @@
 #define FSF_QTCB_LOG_SIZE			1024
 
 /* channel features */
-#define FSF_FEATURE_CFDC			0x00000002
-#define FSF_FEATURE_LUN_SHARING			0x00000004
 #define FSF_FEATURE_NOTIFICATION_LOST		0x00000008
 #define FSF_FEATURE_HBAAPI_MANAGEMENT           0x00000010
 #define FSF_FEATURE_ELS_CT_CHAINED_SBALS	0x00000020
@@ -182,20 +170,6 @@
 /* option */
 #define FSF_OPEN_LUN_SUPPRESS_BOXING		0x00000001
 
-/* open LUN access flags*/
-#define FSF_UNIT_ACCESS_EXCLUSIVE		0x02000000
-#define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER	0x10000000
-
-/* FSF interface for CFDC */
-#define ZFCP_CFDC_MAX_SIZE		127 * 1024
-#define ZFCP_CFDC_PAGES 		PFN_UP(ZFCP_CFDC_MAX_SIZE)
-
-struct zfcp_fsf_cfdc {
-	struct scatterlist sg[ZFCP_CFDC_PAGES];
-	u32 command;
-	u32 option;
-};
-
 struct fsf_queue_designator {
 	u8  cssid;
 	u8  chpid;

+ 7 - 3
drivers/s390/scsi/zfcp_scsi.c

@@ -3,7 +3,7 @@
  *
  * Interface to Linux SCSI midlayer.
  *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2013
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -311,8 +311,12 @@ static struct scsi_host_template zfcp_scsi_host_template = {
 	.proc_name		 = "zfcp",
 	.can_queue		 = 4096,
 	.this_id		 = -1,
-	.sg_tablesize		 = 1, /* adjusted later */
-	.max_sectors		 = 8, /* adjusted later */
+	.sg_tablesize		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
+				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
+				   /* GCD, adjusted later */
+	.max_sectors		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
+				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
+				   /* GCD, adjusted later */
 	.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
 	.cmd_per_lun		 = 1,
 	.use_clustering		 = 1,
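
The replacement defaults are derived from QDIO geometry rather than placeholder values. As a worked example under assumed constants (treat both as assumptions; QDIO_MAX_ELEMENTS_PER_BUFFER = 128 and ZFCP_QDIO_MAX_SBALS_PER_REQ = 36 in the s390 headers of this era):

	sg_tablesize = ((128 - 1) * 36) - 2 = 4570 scatter-gather entries
	max_sectors  = 4570 * 8             = 36560 sectors
	(the "* 8" holds because one 4 KiB SBALE covers eight 512-byte sectors)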

+ 11 - 16
drivers/s390/scsi/zfcp_sysfs.c

@@ -75,12 +75,6 @@ ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
 ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
 		 (zfcp_unit_sdev_status(unit) &
 		  ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
-ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n",
-		 (zfcp_unit_sdev_status(unit) &
-		  ZFCP_STATUS_LUN_SHARED) != 0);
-ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n",
-		 (zfcp_unit_sdev_status(unit) &
-		  ZFCP_STATUS_LUN_READONLY) != 0);
 
 static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
 					   struct device_attribute *attr,
@@ -268,7 +262,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
 	put_device(&port->dev);
 
 	zfcp_erp_port_shutdown(port, 0, "syprs_1");
-	zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
+	device_unregister(&port->dev);
  out:
 	zfcp_ccw_adapter_put(adapter);
 	return retval ? retval : (ssize_t) count;
@@ -340,27 +334,28 @@ static struct attribute *zfcp_port_attrs[] = {
 	&dev_attr_port_access_denied.attr,
 	NULL
 };
-
-/**
- * zfcp_sysfs_port_attrs - sysfs attributes for all other ports
- */
-struct attribute_group zfcp_sysfs_port_attrs = {
+static struct attribute_group zfcp_port_attr_group = {
 	.attrs = zfcp_port_attrs,
 };
+const struct attribute_group *zfcp_port_attr_groups[] = {
+	&zfcp_port_attr_group,
+	NULL,
+};
 
 static struct attribute *zfcp_unit_attrs[] = {
 	&dev_attr_unit_failed.attr,
 	&dev_attr_unit_in_recovery.attr,
 	&dev_attr_unit_status.attr,
 	&dev_attr_unit_access_denied.attr,
-	&dev_attr_unit_access_shared.attr,
-	&dev_attr_unit_access_readonly.attr,
 	NULL
 };
-
-struct attribute_group zfcp_sysfs_unit_attrs = {
+static struct attribute_group zfcp_unit_attr_group = {
 	.attrs = zfcp_unit_attrs,
 };
+const struct attribute_group *zfcp_unit_attr_groups[] = {
+	&zfcp_unit_attr_group,
+	NULL,
+};
 
 #define ZFCP_DEFINE_LATENCY_ATTR(_name) 				\
 static ssize_t								\
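
This conversion is why zfcp_device_unregister() and the explicit sysfs_create_group() calls disappear elsewhere in the series: attribute groups hung off dev->groups are created during device_register()/device_add() and torn down by device_unregister() automatically, so the device never exists without its attributes. A generic sketch of the pattern (example names, not zfcp code):

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "hello\n");
	}
	static DEVICE_ATTR(example, S_IRUGO, example_show, NULL);

	static struct attribute *example_attrs[] = {
		&dev_attr_example.attr,
		NULL,
	};
	static struct attribute_group example_attr_group = {
		.attrs = example_attrs,
	};
	static const struct attribute_group *example_attr_groups[] = {
		&example_attr_group,
		NULL,
	};

	/* in the probe/enqueue path, before registration: */
	dev->groups = example_attr_groups;	/* attrs now live and die with the device */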

+ 2 - 7
drivers/s390/scsi/zfcp_unit.c

@@ -145,6 +145,7 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
 	unit->fcp_lun = fcp_lun;
 	unit->dev.parent = &port->dev;
 	unit->dev.release = zfcp_unit_release;
+	unit->dev.groups = zfcp_unit_attr_groups;
 	INIT_WORK(&unit->scsi_work, zfcp_unit_scsi_scan_work);
 
 	if (dev_set_name(&unit->dev, "0x%016llx",
@@ -160,12 +161,6 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
 		goto out;
 	}
 
-	if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) {
-		device_unregister(&unit->dev);
-		retval = -EINVAL;
-		goto out;
-	}
-
 	atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! */
 
 	write_lock_irq(&port->unit_list_lock);
@@ -254,7 +249,7 @@ int zfcp_unit_remove(struct zfcp_port *port, u64 fcp_lun)
 
 	put_device(&unit->dev);
 
-	zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
+	device_unregister(&unit->dev);
 
 	return 0;
 }

+ 3 - 1
drivers/scsi/3w-xxxx.c

@@ -216,6 +216,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
 #include "3w-xxxx.h"
 
 /* Globals */
@@ -2009,7 +2010,8 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
 			printk(KERN_NOTICE "3w-xxxx: scsi%d: Unknown scsi opcode: 0x%x\n", tw_dev->host->host_no, *command);
 			tw_dev->state[request_id] = TW_S_COMPLETED;
 			tw_state_request_finish(tw_dev, request_id);
-			SCpnt->result = (DID_BAD_TARGET << 16);
+			SCpnt->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+			scsi_build_sense_buffer(1, SCpnt->sense_buffer, ILLEGAL_REQUEST, 0x20, 0);
 			done(SCpnt);
 			retval = 0;
 	}
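
The effect of this hunk: an unsupported opcode now completes with a check condition carrying ILLEGAL REQUEST sense rather than DID_BAD_TARGET, so the midlayer sees "invalid command operation code" (ASC 0x20, ASCQ 0x00 in SPC) for that one command instead of treating the target itself as bad. The same call, annotated as a sketch:

	SCpnt->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
	scsi_build_sense_buffer(1,			/* descriptor-format sense */
				SCpnt->sense_buffer,
				ILLEGAL_REQUEST,	/* sense key 0x05 */
				0x20,			/* ASC: invalid command op code */
				0x00);			/* ASCQ */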

File diff suppressed because it is too large
+ 388 - 345
drivers/scsi/BusLogic.c


File diff suppressed because it is too large
+ 360 - 371
drivers/scsi/BusLogic.h


File diff suppressed because it is too large
+ 204 - 227
drivers/scsi/FlashPoint.c


+ 1 - 1
drivers/scsi/Kconfig

@@ -633,7 +633,7 @@ config SCSI_BUSLOGIC
 
 config SCSI_FLASHPOINT
 	bool "FlashPoint support"
-	depends on SCSI_BUSLOGIC && PCI && X86_32
+	depends on SCSI_BUSLOGIC && PCI
 	help
 	  This option allows you to add FlashPoint support to the
 	  BusLogic SCSI driver. The FlashPoint SCCB Manager code is

+ 3 - 0
drivers/scsi/aacraid/src.c

@@ -93,6 +93,9 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
 			int send_it = 0;
 			extern int aac_sync_mode;
 
+			src_writel(dev, MUnit.ODR_C, bellbits);
+			src_readl(dev, MUnit.ODR_C);
+
 			if (!aac_sync_mode) {
 				src_writel(dev, MUnit.ODR_C, bellbits);
 				src_readl(dev, MUnit.ODR_C);

+ 2 - 1
drivers/scsi/aic94xx/aic94xx_task.c

@@ -505,7 +505,8 @@ static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task,
 		scb->ssp_task.ssp_cmd.efb_prio_attr |= EFB_MASK;
 	scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_prio << 3);
 	scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7);
-	memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cdb, 16);
+	memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cmd->cmnd,
+	       task->ssp_task.cmd->cmd_len);
 
 	scb->ssp_task.sister_scb = cpu_to_le16(0xFFFF);
 	scb->ssp_task.conn_handle = cpu_to_le16(
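
This hunk is one instance of the libsas change called out in the pull message: instead of copying a fixed 16 bytes out of the removed task->ssp_task.cdb[] array, LLDDs copy the real CDB through the embedded SCSI command. The general shape, with hw_cdb as a hypothetical destination buffer:

	struct scsi_cmnd *cmd = task->ssp_task.cmd;

	/* cmd_len may legitimately exceed 16 once larger CDBs are used;
	 * per the pull message, no driver sends them yet */
	memcpy(hw_cdb, cmd->cmnd, cmd->cmd_len);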

+ 1 - 2
drivers/scsi/bfa/bfa_core.c

@@ -1432,6 +1432,7 @@ bfa_iocfc_disable_cbfn(void *bfa_arg)
 {
 	struct bfa_s	*bfa = bfa_arg;
 
+	bfa->queue_process = BFA_FALSE;
 	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED);
 }
 
@@ -1567,7 +1568,6 @@ bfa_iocfc_start(struct bfa_s *bfa)
 void
 bfa_iocfc_stop(struct bfa_s *bfa)
 {
-	bfa->queue_process = BFA_FALSE;
 	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP);
 }
 
@@ -1674,7 +1674,6 @@ bfa_iocfc_disable(struct bfa_s *bfa)
 	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
 		     "IOC Disable");
 
-	bfa->queue_process = BFA_FALSE;
 	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE);
 }
 

+ 89 - 14
drivers/scsi/bfa/bfa_defs.h

@@ -45,6 +45,7 @@ enum {
 	BFA_MFG_TYPE_PROWLER_C = 1710,   /*  Prowler CNA only cards	*/
 	BFA_MFG_TYPE_PROWLER_D = 1860,   /*  Prowler Dual cards		*/
 	BFA_MFG_TYPE_CHINOOK   = 1867,   /*  Chinook cards		*/
+	BFA_MFG_TYPE_CHINOOK2   = 1869,	 /*!< Chinook2 cards		*/
 	BFA_MFG_TYPE_INVALID = 0,        /*  Invalid card type		*/
 };
 
@@ -59,7 +60,8 @@ enum {
 	(type) == BFA_MFG_TYPE_ASTRA || \
 	(type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
 	(type) == BFA_MFG_TYPE_LIGHTNING || \
-	(type) == BFA_MFG_TYPE_CHINOOK))
+	(type) == BFA_MFG_TYPE_CHINOOK || \
+	(type) == BFA_MFG_TYPE_CHINOOK2))
 
 /*
  * Check if the card having old wwn/mac handling
@@ -185,6 +187,8 @@ enum bfa_status {
 	BFA_STATUS_FAA_DISABLED = 198,	/* FAA is already disabled */
 	BFA_STATUS_FAA_ACQUIRED = 199,	/* FAA is already acquired */
 	BFA_STATUS_FAA_ACQ_ADDR = 200,	/* Acquiring addr */
+	BFA_STATUS_BBCR_FC_ONLY = 201, /*!< BBCredit Recovery is supported for *
+					* FC mode only */
 	BFA_STATUS_ERROR_TRUNK_ENABLED = 203,	/* Trunk enabled on adapter */
 	BFA_STATUS_MAX_ENTRY_REACHED = 212,	/* MAX entry reached */
 	BFA_STATUS_TOPOLOGY_LOOP = 230, /* Topology is set to Loop */
@@ -197,7 +201,34 @@ enum bfa_status {
 	BFA_STATUS_DPORT_DISABLED = 236, /* D-port mode is already disabled */
 	BFA_STATUS_CMD_NOTSUPP_MEZZ = 239, /* Cmd not supported for MEZZ card */
 	BFA_STATUS_FRU_NOT_PRESENT = 240, /* fru module not present */
+	BFA_STATUS_DPORT_NO_SFP = 243, /* SFP is not present.\n D-port will be
+					* enabled but it will be operational
+					* only after inserting a valid SFP. */
 	BFA_STATUS_DPORT_ERR = 245,	/* D-port mode is enabled */
+	BFA_STATUS_DPORT_ENOSYS = 254, /* Switch has no D_Port functionality */
+	BFA_STATUS_DPORT_CANT_PERF = 255, /* Switch port is not D_Port capable
+					* or D_Port is disabled */
+	BFA_STATUS_DPORT_LOGICALERR = 256, /* Switch D_Port fail */
+	BFA_STATUS_DPORT_SWBUSY = 257, /* Switch port busy */
+	BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT = 258, /*!< BB credit recovery is
+					* supported at max port speed alone */
+	BFA_STATUS_ERROR_BBCR_ENABLED  = 259, /*!< BB credit recovery
+					* is enabled */
+	BFA_STATUS_INVALID_BBSCN = 260, /*!< Invalid BBSCN value.
+					 * Valid range is [1-15] */
+	BFA_STATUS_DDPORT_ERR = 261, /* Dynamic D_Port mode is active.\n To
+					* exit dynamic mode, disable D_Port on
+					* the remote port */
+	BFA_STATUS_DPORT_SFPWRAP_ERR = 262, /* Clear e/o_wrap fail, check or
+						* replace SFP */
+	BFA_STATUS_BBCR_CFG_NO_CHANGE = 265, /*!< BBCR is operational.
+			* Disable BBCR and try this operation again. */
+	BFA_STATUS_DPORT_SW_NOTREADY = 268, /* Remote port is not ready to
+					* start dport test. Check remote
+					* port status. */
+	BFA_STATUS_DPORT_INV_SFP = 271, /* Invalid SFP for D-PORT mode. */
+	BFA_STATUS_DPORT_CMD_NOTSUPP    = 273, /* Dport is not supported by
+					* remote port */
 	BFA_STATUS_MAX_VAL		/* Unknown error code */
 };
 #define bfa_status_t enum bfa_status
@@ -234,6 +265,7 @@ enum {
 	BFA_ADAPTER_MFG_NAME_LEN    = 8,   /*  manufacturer name length */
 	BFA_ADAPTER_SYM_NAME_LEN    = 64,  /*  adapter symbolic name length */
 	BFA_ADAPTER_OS_TYPE_LEN	    = 64,  /*  adapter os type length */
+	BFA_ADAPTER_UUID_LEN	    = 16,  /* adapter uuid length */
 };
 
 struct bfa_adapter_attr_s {
@@ -267,6 +299,7 @@ struct bfa_adapter_attr_s {
 	u8		mfg_month;	/* manufacturing month */
 	u16		mfg_year;	/* manufacturing year */
 	u16		rsvd;
+	u8		uuid[BFA_ADAPTER_UUID_LEN];
 };
 
 /*
@@ -380,7 +413,8 @@ struct bfa_ioc_attr_s {
 	u8				port_mode;	/*  bfa_mode_s	*/
 	u8				cap_bm;		/*  capability	*/
 	u8				port_mode_cfg;	/*  bfa_mode_s	*/
-	u8				rsvd[4];	/*  64bit align	*/
+	u8				def_fn;		/* 1 if default fn */
+	u8				rsvd[3];	/*  64bit align	*/
 };
 
 /*
@@ -516,17 +550,6 @@ struct bfa_ioc_aen_data_s {
 	mac_t	mac;
 };
 
-/*
- *	D-port states
- *
-*/
-enum bfa_dport_state {
-	BFA_DPORT_ST_DISABLED	= 0,	/* D-port is Disabled */
-	BFA_DPORT_ST_DISABLING	= 1,	/* D-port is Disabling */
-	BFA_DPORT_ST_ENABLING	= 2,	/* D-port is Enabling */
-	BFA_DPORT_ST_ENABLED	= 3,	/* D-port is Enabled */
-};
-
 /*
  * ---------------------- mfg definitions ------------
  */
@@ -614,6 +637,7 @@ enum {
 	BFA_PCI_DEVICE_ID_CT		= 0x14,
 	BFA_PCI_DEVICE_ID_CT_FC		= 0x21,
 	BFA_PCI_DEVICE_ID_CT2		= 0x22,
+	BFA_PCI_DEVICE_ID_CT2_QUAD	= 0x23,
 };
 
 #define bfa_asic_id_cb(__d)			\
@@ -622,7 +646,9 @@ enum {
 #define bfa_asic_id_ct(__d)			\
 	((__d) == BFA_PCI_DEVICE_ID_CT ||	\
 	 (__d) == BFA_PCI_DEVICE_ID_CT_FC)
-#define bfa_asic_id_ct2(__d)	((__d) == BFA_PCI_DEVICE_ID_CT2)
+#define bfa_asic_id_ct2(__d)			\
+	((__d) == BFA_PCI_DEVICE_ID_CT2 ||	\
+	(__d) == BFA_PCI_DEVICE_ID_CT2_QUAD)
 #define bfa_asic_id_ctc(__d)	\
 	(bfa_asic_id_ct(__d) || bfa_asic_id_ct2(__d))
 
@@ -1126,6 +1152,7 @@ struct bfa_flash_attr_s {
 #define LB_PATTERN_DEFAULT	0xB5B5B5B5
 #define QTEST_CNT_DEFAULT	10
 #define QTEST_PAT_DEFAULT	LB_PATTERN_DEFAULT
+#define DPORT_ENABLE_LOOPCNT_DEFAULT (1024 * 1024)
 
 struct bfa_diag_memtest_s {
 	u8	algo;
@@ -1154,6 +1181,54 @@ struct bfa_diag_loopback_result_s {
 	u8	rsvd[3];
 };
 
+enum bfa_diag_dport_test_status {
+	DPORT_TEST_ST_IDLE	= 0,    /* the test has not started yet. */
+	DPORT_TEST_ST_FINAL	= 1,    /* the test done successfully */
+	DPORT_TEST_ST_SKIP	= 2,    /* the test skipped */
+	DPORT_TEST_ST_FAIL	= 3,    /* the test failed */
+	DPORT_TEST_ST_INPRG	= 4,    /* the testing is in progress */
+	DPORT_TEST_ST_RESPONDER	= 5,    /* test triggered from remote port */
+	DPORT_TEST_ST_STOPPED	= 6,    /* the test stopped by user. */
+	DPORT_TEST_ST_MAX
+};
+
+enum bfa_diag_dport_test_type {
+	DPORT_TEST_ELOOP	= 0,
+	DPORT_TEST_OLOOP	= 1,
+	DPORT_TEST_ROLOOP	= 2,
+	DPORT_TEST_LINK		= 3,
+	DPORT_TEST_MAX
+};
+
+enum bfa_diag_dport_test_opmode {
+	BFA_DPORT_OPMODE_AUTO	= 0,
+	BFA_DPORT_OPMODE_MANU	= 1,
+};
+
+struct bfa_diag_dport_subtest_result_s {
+	u8	status;		/* bfa_diag_dport_test_status */
+	u8	rsvd[7];	/* 64bit align */
+	u64	start_time;	/* timestamp  */
+};
+
+struct bfa_diag_dport_result_s {
+	wwn_t	rp_pwwn;	/* switch port wwn  */
+	wwn_t	rp_nwwn;	/* switch node wwn  */
+	u64	start_time;	/* user/sw start time */
+	u64	end_time;	/* timestamp  */
+	u8	status;		/* bfa_diag_dport_test_status */
+	u8	mode;		/* bfa_diag_dport_test_opmode */
+	u8	rsvd;		/* 64bit align */
+	u8	speed;		/* link speed for buf_reqd */
+	u16	buffer_required;
+	u16	frmsz;		/* frame size for buf_reqd */
+	u32	lpcnt;		/* Frame count */
+	u32	pat;		/* Pattern */
+	u32	roundtrip_latency;	/* in nano sec */
+	u32	est_cable_distance;	/* in meter */
+	struct bfa_diag_dport_subtest_result_s subtest[DPORT_TEST_MAX];
+};
+
 struct bfa_diag_ledtest_s {
 	u32	cmd;    /* bfa_led_op_t */
 	u32	color;  /* bfa_led_color_t */

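The bfa_diag_dport_result_s added above records one bfa_diag_dport_subtest_result_s per entry in bfa_diag_dport_test_type, with the status byte drawn from bfa_diag_dport_test_status. A minimal user-space sketch of how a diagnostic tool might decode that array — the struct layout is simplified and the string tables are illustrative, not driver code:

    #include <stdint.h>
    #include <stdio.h>

    /* Values mirror the enums added above and must match the driver ABI. */
    enum dport_status { ST_IDLE, ST_FINAL, ST_SKIP, ST_FAIL, ST_INPRG,
                        ST_RESPONDER, ST_STOPPED, ST_MAX };
    enum dport_test   { T_ELOOP, T_OLOOP, T_ROLOOP, T_LINK, T_MAX };

    struct subtest_result {
        uint8_t  status;        /* enum dport_status */
        uint8_t  rsvd[7];       /* 64-bit alignment, as in the driver */
        uint64_t start_time;
    };

    static const char * const status_str[ST_MAX] = {
        "idle", "passed", "skipped", "failed", "in progress",
        "responder", "stopped",
    };
    static const char * const test_str[T_MAX] = {
        "ELOOP", "OLOOP", "ROLOOP", "LINK",
    };

    int main(void)
    {
        struct subtest_result sub[T_MAX] = {
            { .status = ST_FINAL }, { .status = ST_SKIP },
            { .status = ST_SKIP },  { .status = ST_INPRG },
        };

        for (int i = 0; i < T_MAX; i++)
            printf("%-6s: %s\n", test_str[i],
                   sub[i].status < ST_MAX ? status_str[sub[i].status] : "?");
        return 0;
    }
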
+ 66 - 11
drivers/scsi/bfa/bfa_defs_svc.h

@@ -105,6 +105,9 @@ struct bfa_fw_ioim_stats_s {
 					 *  an error condition*/
 	u32	wait_for_si;		/*  FW wait for SI */
 	u32	rec_rsp_inval;		/*  REC rsp invalid */
+	u32     rec_rsp_xchg_comp;	/*  REC rsp xchg complete */
+	u32     rec_rsp_rd_si_ownd;	/*  REC rsp read si owned */
+
 	u32	seqr_io_abort;		/*  target does not know cmd so abort */
 	u32	seqr_io_retry;		/*  SEQR failed so retry IO */
 
@@ -257,8 +260,6 @@ struct bfa_fw_port_lksm_stats_s {
 	u32    nos_tx;             /*  No. of times NOS tx started         */
 	u32    hwsm_lrr_rx;        /*  No. of times LRR rx-ed by HWSM      */
 	u32    hwsm_lr_rx;         /*  No. of times LR rx-ed by HWSM       */
-	u32    bbsc_lr;		   /* LKSM LR tx for credit recovery       */
-	u32	rsvd;
 };
 
 struct bfa_fw_port_snsm_stats_s {
@@ -409,7 +410,7 @@ struct bfa_fw_trunk_stats_s {
 	u32 rsvd;		/*  padding for 64 bit alignment */
 };
 
-struct bfa_fw_advsm_stats_s {
+struct bfa_fw_aport_stats_s {
 	u32 flogi_sent;		/*  Flogi sent			*/
 	u32 flogi_acc_recvd;	/*  Flogi Acc received		*/
 	u32 flogi_rjt_recvd;	/*  Flogi rejects received	*/
@@ -419,6 +420,12 @@ struct bfa_fw_advsm_stats_s {
 	u32 elp_accepted;	/*  ELP Accepted		*/
 	u32 elp_rejected;	/*  ELP rejected		*/
 	u32 elp_dropped;	/*  ELP dropped			*/
+
+	u32 bbcr_lr_count;	/*!< BBCR Link Resets		*/
+	u32 frame_lost_intrs;	/*!< BBCR Frame loss intrs	*/
+	u32 rrdy_lost_intrs;	/*!< BBCR Rrdy loss intrs	*/
+
+	u32 rsvd;
 };
 
 /*
@@ -478,6 +485,14 @@ struct bfa_fw_ct_mod_stats_s {
 	u32	rsvd;		/*  64bit align    */
 };
 
+/*
+ * RDS mod stats
+ */
+struct bfa_fw_rds_stats_s {
+	u32	no_fid_drop_err; /* RDS no fid drop error */
+	u32	rsvd;		 /* 64bit align */
+};
+
 /*
  * IOC firmware stats
  */
@@ -489,10 +504,11 @@ struct bfa_fw_stats_s {
 	struct bfa_fw_fcxchg_stats_s	fcxchg_stats;
 	struct bfa_fw_lps_stats_s	lps_stats;
 	struct bfa_fw_trunk_stats_s	trunk_stats;
-	struct bfa_fw_advsm_stats_s	advsm_stats;
+	struct bfa_fw_aport_stats_s	aport_stats;
 	struct bfa_fw_mac_mod_stats_s	macmod_stats;
 	struct bfa_fw_ct_mod_stats_s	ctmod_stats;
 	struct bfa_fw_eth_sndrcv_stats_s	ethsndrcv_stats;
+	struct bfa_fw_rds_stats_s	rds_stats;
 };
 
 #define BFA_IOCFC_PATHTOV_MAX	60
@@ -545,6 +561,27 @@ struct bfa_qos_attr_s {
 	struct bfa_qos_bw_s qos_bw_op;	/* QOS bw operational */
 };
 
+enum bfa_bbcr_state {
+	BFA_BBCR_DISABLED,	/*!< BBCR is disabled */
+	BFA_BBCR_ONLINE,	/*!< BBCR is online  */
+	BFA_BBCR_OFFLINE,	/*!< BBCR is offline */
+};
+
+enum bfa_bbcr_err_reason {
+	BFA_BBCR_ERR_REASON_NONE, /*!< Unknown */
+	BFA_BBCR_ERR_REASON_SPEED_UNSUP, /*!< Port speed < max sup_speed */
+	BFA_BBCR_ERR_REASON_PEER_UNSUP,	/*!< BBCR is disabled on peer port */
+	BFA_BBCR_ERR_REASON_NON_BRCD_SW, /*!< Connected to non BRCD switch */
+	BFA_BBCR_ERR_REASON_FLOGI_RJT, /*!< Login rejected by the switch */
+};
+
+struct bfa_bbcr_attr_s {
+	u8	state;
+	u8	peer_bb_scn;
+	u8	reason;
+	u8	rsvd;
+};
+
 /*
  * These fields should be displayed only from the CLI.
  * There will be a separate BFAL API (get_qos_vc_attr ?)
@@ -736,6 +773,7 @@ enum bfa_port_states {
 	BFA_PORT_ST_TOGGLING_QWAIT	= 14,
 	BFA_PORT_ST_FAA_MISCONFIG	= 15,
 	BFA_PORT_ST_DPORT		= 16,
+	BFA_PORT_ST_DDPORT		= 17,
 	BFA_PORT_ST_MAX_STATE,
 };
 
@@ -857,6 +895,15 @@ enum bfa_lunmask_state_s {
 	BFA_LUNMASK_UNINITIALIZED = 0xff,
 };
 
+/**
+ * FEC states
+ */
+enum bfa_fec_state_s {
+	BFA_FEC_ONLINE = 1,		/*!< FEC is online */
+	BFA_FEC_OFFLINE = 2,		/*!< FEC is offline */
+	BFA_FEC_OFFLINE_NOT_16G = 3,	/*!< FEC is offline (speed not 16Gig) */
+};
+
 #pragma pack(1)
 /*
  * LUN mask configuration
@@ -892,6 +939,9 @@ struct bfa_defs_fcpim_throttle_s {
 	u16	rsvd;
 };
 
+#define BFA_BB_SCN_DEF 3
+#define BFA_BB_SCN_MAX 0x0F
+
 /*
  *      Physical port configuration
  */
@@ -907,8 +957,8 @@ struct bfa_port_cfg_s {
 	u8	 tx_bbcredit;	/*  transmit buffer credits	*/
 	u8	 ratelimit;	/*  ratelimit enabled or not	*/
 	u8	 trl_def_speed;	/*  ratelimit default speed	*/
-	u8	 bb_scn;	/*  BB_SCN value from FLOGI Exchg */
-	u8	 bb_scn_state;	/*  Config state of BB_SCN */
+	u8	 bb_cr_enabled; /*!< Config state of BB_SCN	*/
+	u8	 bb_scn;	/*!< BB_SCN value for FLOGI Exchg */
 	u8	 faa_state;	/*  FAA enabled/disabled        */
 	u8	 rsvd1;
 	u16	 path_tov;	/*  device path timeout	*/
@@ -950,6 +1000,7 @@ struct bfa_port_attr_s {
 	bfa_boolean_t		link_e2e_beacon; /* link beacon is on */
 	bfa_boolean_t		bbsc_op_status;	/* fc credit recovery oper
 						 * state */
+	enum bfa_fec_state_s	fec_state;	/*!< current FEC state */
 
 	/*
 	 * Dynamic field - info from FCS
@@ -961,7 +1012,7 @@ struct bfa_port_attr_s {
 
 	/* FCoE specific  */
 	u16			fcoe_vlan;
-	u8			rsvd1[6];
+	u8			rsvd1[2];
 };
 
 /*
@@ -1048,10 +1099,12 @@ struct bfa_port_link_s {
 	u8	 speed;		/*  Link speed (1/2/4/8 G) */
 	u32	 linkstate_opt; /*  Linkstate optional data (debug) */
 	u8	 trunked;	/*  Trunked or not (1 or 0) */
-	u8	 resvd[7];
+	u8	 fec_state;	/*!< State of FEC */
+	u8	 resvd[6];
 	struct bfa_qos_attr_s  qos_attr;   /* QoS Attributes */
 	union {
 		struct bfa_fcport_loop_info_s loop_info;
+		struct bfa_bbcr_attr_s bbcr_attr;
 		union {
 			struct bfa_qos_vc_attr_s qos_vc_attr;
 					/*  VC info from ELP */
@@ -1215,9 +1268,11 @@ struct bfa_port_fc_stats_s {
 	u64     bad_os_count;   /*  Invalid ordered sets        */
 	u64     err_enc_out;    /*  Encoding err nonframe_8b10b */
 	u64     err_enc;        /*  Encoding err frame_8b10b    */
-	u64	bbsc_frames_lost; /* Credit Recovery-Frames Lost  */
-	u64	bbsc_credits_lost; /* Credit Recovery-Credits Lost */
-	u64	bbsc_link_resets; /* Credit Recovery-Link Resets   */
+	u64	bbcr_frames_lost; /*!< BBCR Frames Lost */
+	u64	bbcr_rrdys_lost; /*!< BBCR RRDYs Lost */
+	u64	bbcr_link_resets; /*!< BBCR Link Resets */
+	u64	bbcr_frame_lost_intrs; /*!< BBCR Frame loss intrs */
+	u64	bbcr_rrdy_lost_intrs; /*!< BBCR Rrdy loss intrs */
 	u64	loop_timeouts;	/*  Loop timeouts		*/
 };
 

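The new bfa_bbcr_attr_s packs the BBCR state, the peer's bb_scn, and a failure reason into four bytes, and bfa_fec_state_s gains a distinct "offline because the link is not 16G" value. A hedged sketch of turning the attribute into a log line — the enum values mirror the definitions above, but the decode helper itself is illustrative, not driver code:

    #include <stdint.h>
    #include <stdio.h>

    enum bbcr_state  { BBCR_DISABLED, BBCR_ONLINE, BBCR_OFFLINE };
    enum bbcr_reason { R_NONE, R_SPEED_UNSUP, R_PEER_UNSUP,
                       R_NON_BRCD_SW, R_FLOGI_RJT, R_MAX };

    struct bbcr_attr { uint8_t state, peer_bb_scn, reason, rsvd; };

    static void bbcr_log(const struct bbcr_attr *a)
    {
        static const char * const st[]  = { "disabled", "online", "offline" };
        static const char * const why[R_MAX] = {
            "none", "speed unsupported", "disabled on peer port",
            "non-Brocade switch", "FLOGI rejected",
        };

        /* assumes the firmware only reports in-range values */
        printf("BBCR %s (peer bb_scn=%u, reason: %s)\n",
               st[a->state], a->peer_bb_scn, why[a->reason]);
    }

    int main(void)
    {
        struct bbcr_attr a = { BBCR_OFFLINE, 3, R_PEER_UNSUP, 0 };
        bbcr_log(&a);
        return 0;
    }
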
+ 15 - 0
drivers/scsi/bfa/bfa_fc.h

@@ -1531,6 +1531,12 @@ enum fdmi_hba_attribute_type {
 	FDMI_HBA_ATTRIB_FW_VERSION,	/* 0x0009 */
 	FDMI_HBA_ATTRIB_OS_NAME,	/* 0x000A */
 	FDMI_HBA_ATTRIB_MAX_CT,		/* 0x000B */
+	FDMI_HBA_ATTRIB_NODE_SYM_NAME,  /* 0x000C */
+	FDMI_HBA_ATTRIB_VENDOR_INFO,    /* 0x000D */
+	FDMI_HBA_ATTRIB_NUM_PORTS,  /* 0x000E */
+	FDMI_HBA_ATTRIB_FABRIC_NAME,    /* 0x000F */
+	FDMI_HBA_ATTRIB_BIOS_VER,   /* 0x0010 */
+	FDMI_HBA_ATTRIB_VENDOR_ID = 0x00E0,
 
 	FDMI_HBA_ATTRIB_MAX_TYPE
 };
@@ -1545,6 +1551,15 @@ enum fdmi_port_attribute_type {
 	FDMI_PORT_ATTRIB_FRAME_SIZE,	/* 0x0004 */
 	FDMI_PORT_ATTRIB_DEV_NAME,	/* 0x0005 */
 	FDMI_PORT_ATTRIB_HOST_NAME,	/* 0x0006 */
+	FDMI_PORT_ATTRIB_NODE_NAME,     /* 0x0007 */
+	FDMI_PORT_ATTRIB_PORT_NAME,     /* 0x0008 */
+	FDMI_PORT_ATTRIB_PORT_SYM_NAME, /* 0x0009 */
+	FDMI_PORT_ATTRIB_PORT_TYPE,     /* 0x000A */
+	FDMI_PORT_ATTRIB_SUPP_COS,      /* 0x000B */
+	FDMI_PORT_ATTRIB_PORT_FAB_NAME, /* 0x000C */
+	FDMI_PORT_ATTRIB_PORT_FC4_TYPE, /* 0x000D */
+	FDMI_PORT_ATTRIB_PORT_STATE = 0x101,    /* 0x0101 */
+	FDMI_PORT_ATTRIB_PORT_NUM_RPRT = 0x102, /* 0x0102 */
 
 	FDMI_PORT_ATTR_MAX_TYPE
 };

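Both FDMI enums above stop being contiguous — FDMI_HBA_ATTRIB_VENDOR_ID jumps to 0x00E0 and the last two port attributes to 0x0101/0x0102 — so the *_MAX_TYPE sentinels now track the highest assigned value rather than the attribute count. A compile-time sketch of the consequence (condensed enum, illustrative only):

    #include <assert.h>

    enum hba_attr {
        ATTRIB_BIOS_VER  = 0x0010,   /* last contiguous value        */
        ATTRIB_VENDOR_ID = 0x00E0,   /* deliberately out of sequence */
        ATTRIB_MAX_TYPE              /* = 0x00E1, not a count        */
    };

    static_assert(ATTRIB_MAX_TYPE == 0x00E1,
                  "MAX_TYPE follows the highest value, not the attribute count");
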
+ 1 - 1
drivers/scsi/bfa/bfa_fcpim.c

@@ -2882,7 +2882,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 	iotag = be16_to_cpu(rsp->io_tag);
 
 	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
-	WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
+	WARN_ON(ioim->iotag != iotag);
 
 	bfa_ioim_cb_profile_comp(fcpim, ioim);
 

+ 6 - 56
drivers/scsi/bfa/bfa_fcs.c

@@ -240,9 +240,6 @@ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
 					 u32 rsp_len,
 					 u32 resid_len,
 					 struct fchs_s *rspfchs);
-static u8 bfa_fcs_fabric_oper_bbscn(struct bfa_fcs_fabric_s *fabric);
-static bfa_boolean_t bfa_fcs_fabric_is_bbscn_enabled(
-				struct bfa_fcs_fabric_s *fabric);
 
 static void	bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
 					 enum bfa_fcs_fabric_event event);
@@ -404,8 +401,7 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
 	case BFA_FCS_FABRIC_SM_CONT_OP:
 
 		bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
-					   fabric->bb_credit,
-					   bfa_fcs_fabric_oper_bbscn(fabric));
+					   fabric->bb_credit);
 		fabric->fab_type = BFA_FCS_FABRIC_SWITCHED;
 
 		if (fabric->auth_reqd && fabric->is_auth) {
@@ -433,8 +429,7 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
 	case BFA_FCS_FABRIC_SM_NO_FABRIC:
 		fabric->fab_type = BFA_FCS_FABRIC_N2N;
 		bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
-					   fabric->bb_credit,
-					   bfa_fcs_fabric_oper_bbscn(fabric));
+					   fabric->bb_credit);
 		bfa_fcs_fabric_notify_online(fabric);
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric);
 		break;
@@ -602,8 +597,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
 	case BFA_FCS_FABRIC_SM_NO_FABRIC:
 		bfa_trc(fabric->fcs, fabric->bb_credit);
 		bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
-					   fabric->bb_credit,
-					   bfa_fcs_fabric_oper_bbscn(fabric));
+					   fabric->bb_credit);
 		break;
 
 	case BFA_FCS_FABRIC_SM_RETRY_OP:
@@ -965,10 +959,6 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
 
 	case BFA_STATUS_FABRIC_RJT:
 		fabric->stats.flogi_rejects++;
-		if (fabric->lps->lsrjt_rsn == FC_LS_RJT_RSN_LOGICAL_ERROR &&
-		    fabric->lps->lsrjt_expl == FC_LS_RJT_EXP_NO_ADDL_INFO)
-			fabric->fcs->bbscn_flogi_rjt = BFA_TRUE;
-
 		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
 		return;
 
@@ -1014,14 +1004,11 @@ bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric)
 {
 	struct bfa_s		*bfa = fabric->fcs->bfa;
 	struct bfa_lport_cfg_s	*pcfg = &fabric->bport.port_cfg;
-	u8			alpa = 0, bb_scn = 0;
+	u8			alpa = 0;
 
-	if (bfa_fcs_fabric_is_bbscn_enabled(fabric) &&
-	    (!fabric->fcs->bbscn_flogi_rjt))
-		bb_scn = BFA_FCS_PORT_DEF_BB_SCN;
 
 	bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa),
-		      pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd, bb_scn);
+		      pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd);
 
 	fabric->stats.flogi_sent++;
 }
@@ -1101,40 +1088,6 @@ bfa_fcs_fabric_stop(struct bfa_fcs_fabric_s *fabric)
 	bfa_wc_wait(&fabric->stop_wc);
 }
 
-/*
- * Computes operating BB_SCN value
- */
-static u8
-bfa_fcs_fabric_oper_bbscn(struct bfa_fcs_fabric_s *fabric)
-{
-	u8	pr_bbscn = fabric->lps->pr_bbscn;
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fabric->fcs->bfa);
-
-	if (!(fcport->cfg.bb_scn_state && pr_bbscn))
-		return 0;
-
-	/* return max of local/remote bb_scn values */
-	return ((pr_bbscn > BFA_FCS_PORT_DEF_BB_SCN) ?
-		pr_bbscn : BFA_FCS_PORT_DEF_BB_SCN);
-}
-
-/*
- * Check if BB_SCN can be enabled.
- */
-static bfa_boolean_t
-bfa_fcs_fabric_is_bbscn_enabled(struct bfa_fcs_fabric_s *fabric)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fabric->fcs->bfa);
-
-	if (bfa_ioc_get_fcmode(&fabric->fcs->bfa->ioc) &&
-			fcport->cfg.bb_scn_state &&
-			!bfa_fcport_is_qos_enabled(fabric->fcs->bfa) &&
-			!bfa_fcport_is_trunk_enabled(fabric->fcs->bfa))
-		return BFA_TRUE;
-	else
-		return BFA_FALSE;
-}
-
 /*
  * Delete all vports and wait for vport delete completions.
  */
@@ -1273,7 +1226,6 @@ void
 bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric)
 {
 	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
-	fabric->fcs->bbscn_flogi_rjt = BFA_FALSE;
 	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
 }
 
@@ -1480,7 +1432,6 @@ bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
 	}
 
 	fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred);
-	fabric->lps->pr_bbscn = (be16_to_cpu(flogi->csp.rxsz) >> 12);
 	bport->port_topo.pn2n.rem_port_wwn = flogi->port_name;
 	bport->port_topo.pn2n.reply_oxid = fchs->ox_id;
 
@@ -1513,8 +1464,7 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
 				    n2n_port->reply_oxid, pcfg->pwwn,
 				    pcfg->nwwn,
 				    bfa_fcport_get_maxfrsize(bfa),
-				    bfa_fcport_get_rx_bbcredit(bfa),
-				    bfa_fcs_fabric_oper_bbscn(fabric));
+				    bfa_fcport_get_rx_bbcredit(bfa), 0);
 
 	bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->bfa_tag,
 		      BFA_FALSE, FC_CLASS_3,

+ 23 - 11
drivers/scsi/bfa/bfa_fcs.h

@@ -243,24 +243,21 @@ struct bfa_fcs_fabric_s;
  *  Symbolic Name.
  *
  *  Physical Port's symbolic name Format : (Total 128 bytes)
- *  Adapter Model number/name : 12 bytes
+ *  Adapter Model number/name : 16 bytes
  *  Driver Version     : 10 bytes
  *  Host Machine Name  : 30 bytes
- *  Host OS Info	   : 48 bytes
+ *  Host OS Info	   : 44 bytes
  *  Host OS PATCH Info : 16 bytes
  *  ( remaining 12 bytes reserved to be used for separator)
  */
 #define BFA_FCS_PORT_SYMBNAME_SEPARATOR			" | "
 
-#define BFA_FCS_PORT_SYMBNAME_MODEL_SZ			12
+#define BFA_FCS_PORT_SYMBNAME_MODEL_SZ			16
 #define BFA_FCS_PORT_SYMBNAME_VERSION_SZ		10
 #define BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ		30
-#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ			48
+#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ			44
 #define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ		16
 
-/* bb_scn value in 2^bb_scn */
-#define BFA_FCS_PORT_DEF_BB_SCN				3
-
 /*
  * Get FC port ID for a logical port.
  */
@@ -630,6 +627,9 @@ void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
 
 #define BFA_FCS_FDMI_SUPP_SPEEDS_10G	FDMI_TRANS_SPEED_10G
 
+#define BFA_FCS_FDMI_VENDOR_INFO_LEN    8
+#define BFA_FCS_FDMI_FC4_TYPE_LEN       32
+
 /*
  * HBA Attribute Block : BFA internal representation. Note : Some variable
  * sizes have been trimmed to suit BFA For Ex : Model will be "Brocade". Based
@@ -640,25 +640,39 @@ struct bfa_fcs_fdmi_hba_attr_s {
 	u8         manufacturer[64];
 	u8         serial_num[64];
 	u8         model[16];
-	u8         model_desc[256];
+	u8         model_desc[128];
 	u8         hw_version[8];
 	u8         driver_version[BFA_VERSION_LEN];
 	u8         option_rom_ver[BFA_VERSION_LEN];
 	u8         fw_version[BFA_VERSION_LEN];
 	u8         os_name[256];
 	__be32        max_ct_pyld;
+	struct      bfa_lport_symname_s node_sym_name;
+	u8     vendor_info[BFA_FCS_FDMI_VENDOR_INFO_LEN];
+	__be32    num_ports;
+	wwn_t       fabric_name;
+	u8     bios_ver[BFA_VERSION_LEN];
 };
 
 /*
  * Port Attribute Block
  */
 struct bfa_fcs_fdmi_port_attr_s {
-	u8         supp_fc4_types[32];	/* supported FC4 types */
+	u8         supp_fc4_types[BFA_FCS_FDMI_FC4_TYPE_LEN];
 	__be32        supp_speed;	/* supported speed */
 	__be32        curr_speed;	/* current Speed */
 	__be32        max_frm_size;	/* max frame size */
 	u8         os_device_name[256];	/* OS device Name */
 	u8         host_name[256];	/* host name */
+	wwn_t       port_name;
+	wwn_t       node_name;
+	struct      bfa_lport_symname_s port_sym_name;
+	__be32    port_type;
+	enum fc_cos    scos;
+	wwn_t       port_fabric_name;
+	u8     port_act_fc4_type[BFA_FCS_FDMI_FC4_TYPE_LEN];
+	__be32    port_state;
+	__be32    num_ports;
 };
 
 struct bfa_fcs_stats_s {
@@ -683,8 +697,6 @@ struct bfa_fcs_s {
 	struct bfa_trc_mod_s  *trcmod;	/*  tracing module */
 	bfa_boolean_t	vf_enabled;	/*  VF mode is enabled */
 	bfa_boolean_t	fdmi_enabled;	/*  FDMI is enabled */
-	bfa_boolean_t	bbscn_enabled;	/*  Driver Config Parameter */
-	bfa_boolean_t	bbscn_flogi_rjt;/*  FLOGI reject due to BB_SCN */
 	bfa_boolean_t min_cfg;		/* min cfg enabled/disabled */
 	u16	port_vfid;	/*  port default VF ID */
 	struct bfa_fcs_driver_info_s driver_info;

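The re-budgeted symbolic-name comment above moves 4 bytes from the OS-info field to the model field. A quick compile-time check — sizes copied from the BFA_FCS_PORT_SYMBNAME_* macros, with the 12 reserved bytes covering the " | " separators — confirms the layout still fills exactly 128 bytes:

    #include <assert.h>

    #define MODEL_SZ        16
    #define VERSION_SZ      10
    #define MACHINENAME_SZ  30
    #define OSINFO_SZ       44
    #define OSPATCH_SZ      16
    #define SEPARATOR_RSVD  12   /* room for " | " between the five fields */

    static_assert(MODEL_SZ + VERSION_SZ + MACHINENAME_SZ +
                  OSINFO_SZ + OSPATCH_SZ + SEPARATOR_RSVD == 128,
                  "physical port symbolic name must stay within 128 bytes");
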
+ 207 - 2
drivers/scsi/bfa/bfa_fcs_lport.c

@@ -2048,10 +2048,71 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT);
 	templen = sizeof(fcs_hba_attr->max_ct_pyld);
 	memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
 	len += templen;
 	count++;
 	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
 			     sizeof(templen));
+	/*
+	 * Send extended attributes ( FOS 7.1 support )
+	 */
+	if (fdmi->retry_cnt == 0) {
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODE_SYM_NAME);
+		templen = sizeof(fcs_hba_attr->node_sym_name);
+		memcpy(attr->value, &fcs_hba_attr->node_sym_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		count++;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					sizeof(templen));
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_VENDOR_ID);
+		templen = sizeof(fcs_hba_attr->vendor_info);
+		memcpy(attr->value, &fcs_hba_attr->vendor_info, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		count++;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					sizeof(templen));
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NUM_PORTS);
+		templen = sizeof(fcs_hba_attr->num_ports);
+		memcpy(attr->value, &fcs_hba_attr->num_ports, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		count++;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					sizeof(templen));
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FABRIC_NAME);
+		templen = sizeof(fcs_hba_attr->fabric_name);
+		memcpy(attr->value, &fcs_hba_attr->fabric_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		count++;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					sizeof(templen));
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_BIOS_VER);
+		templen = sizeof(fcs_hba_attr->bios_ver);
+		memcpy(attr->value, &fcs_hba_attr->bios_ver, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		count++;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					sizeof(templen));
+	}
 
 	/*
 	 * Update size of payload
@@ -2252,6 +2313,113 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
 				sizeof(templen));
 	}
 
+	if (fdmi->retry_cnt == 0) {
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_NODE_NAME);
+		templen = sizeof(fcs_port_attr.node_name);
+		memcpy(attr->value, &fcs_port_attr.node_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		++count;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				 sizeof(templen));
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_NAME);
+		templen = sizeof(fcs_port_attr.port_name);
+		memcpy(attr->value, &fcs_port_attr.port_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		++count;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				 sizeof(templen));
+
+		if (fcs_port_attr.port_sym_name.symname[0] != '\0') {
+			attr = (struct fdmi_attr_s *) curr_ptr;
+			attr->type =
+				cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SYM_NAME);
+			templen = sizeof(fcs_port_attr.port_sym_name);
+			memcpy(attr->value,
+				&fcs_port_attr.port_sym_name, templen);
+			templen = fc_roundup(templen, sizeof(u32));
+			curr_ptr += sizeof(attr->type) +
+					sizeof(templen) + templen;
+			len += templen;
+			++count;
+			attr->len = cpu_to_be16(templen +
+				sizeof(attr->type) + sizeof(templen));
+		}
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_TYPE);
+		templen = sizeof(fcs_port_attr.port_type);
+		memcpy(attr->value, &fcs_port_attr.port_type, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		++count;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				 sizeof(templen));
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_COS);
+		templen = sizeof(fcs_port_attr.scos);
+		memcpy(attr->value, &fcs_port_attr.scos, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		++count;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				 sizeof(templen));
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_FAB_NAME);
+		templen = sizeof(fcs_port_attr.port_fabric_name);
+		memcpy(attr->value, &fcs_port_attr.port_fabric_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		++count;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				 sizeof(templen));
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_FC4_TYPE);
+		templen = sizeof(fcs_port_attr.port_act_fc4_type);
+		memcpy(attr->value, fcs_port_attr.port_act_fc4_type,
+				templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		++count;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				 sizeof(templen));
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_STATE);
+		templen = sizeof(fcs_port_attr.port_state);
+		memcpy(attr->value, &fcs_port_attr.port_state, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		++count;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				 sizeof(templen));
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_NUM_RPRT);
+		templen = sizeof(fcs_port_attr.num_ports);
+		memcpy(attr->value, &fcs_port_attr.num_ports, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		++count;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				sizeof(templen));
+	}
+
 	/*
 	 * Update size of payload
 	 */
@@ -2458,6 +2626,15 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
 	/* Retrieve the max frame size from the port attr */
 	bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
 	hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size;
+
+	strncpy(hba_attr->node_sym_name.symname,
+		port->port_cfg.node_sym_name.symname, BFA_SYMNAME_MAXLEN);
+	strcpy(hba_attr->vendor_info, "BROCADE");
+	hba_attr->num_ports =
+		cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc));
+	hba_attr->fabric_name = port->fabric->lps->pr_nwwn;
+	strncpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
+
 }
 
 static void
@@ -2467,6 +2644,7 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
 	struct bfa_fcs_lport_s *port = fdmi->ms->port;
 	struct bfa_fcs_driver_info_s  *driver_info = &port->fcs->driver_info;
 	struct bfa_port_attr_s pport_attr;
+	struct bfa_lport_attr_s lport_attr;
 
 	memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s));
 
@@ -2531,6 +2709,18 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
 	strncpy(port_attr->host_name, (char *)driver_info->host_machine_name,
 		sizeof(port_attr->host_name));
 
+	port_attr->node_name = bfa_fcs_lport_get_nwwn(port);
+	port_attr->port_name = bfa_fcs_lport_get_pwwn(port);
+
+	strncpy(port_attr->port_sym_name.symname,
+		(char *)&bfa_fcs_lport_get_psym_name(port), BFA_SYMNAME_MAXLEN);
+	bfa_fcs_lport_get_attr(port, &lport_attr);
+	port_attr->port_type = cpu_to_be32(lport_attr.port_type);
+	port_attr->scos = pport_attr.cos_supported;
+	port_attr->port_fabric_name = port->fabric->lps->pr_nwwn;
+	fc_get_fc4type_bitmask(FC_TYPE_FCP, port_attr->port_act_fc4_type);
+	port_attr->port_state = cpu_to_be32(pport_attr.port_state);
+	port_attr->num_ports = cpu_to_be32(port->num_rports);
 }
 
 /*
@@ -5798,6 +5988,7 @@ enum bfa_fcs_vport_event {
 	BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12,	/*  Dup wnn error*/
 	BFA_FCS_VPORT_SM_RSP_FAILED = 13,	/*  non-retryable failure */
 	BFA_FCS_VPORT_SM_STOPCOMP = 14,	/* vport delete completion */
+	BFA_FCS_VPORT_SM_FABRIC_MAX = 15, /* max vports on fabric */
 };
 
 static void     bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
@@ -5983,6 +6174,7 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
 		break;
 
 	case BFA_FCS_VPORT_SM_RSP_FAILED:
+	case BFA_FCS_VPORT_SM_FABRIC_MAX:
 		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
 		break;
 
@@ -6053,6 +6245,7 @@ bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport,
 	case BFA_FCS_VPORT_SM_OFFLINE:
 	case BFA_FCS_VPORT_SM_RSP_ERROR:
 	case BFA_FCS_VPORT_SM_RSP_FAILED:
+	case BFA_FCS_VPORT_SM_FABRIC_MAX:
 	case BFA_FCS_VPORT_SM_RSP_DUP_WWN:
 		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
 		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
@@ -6338,7 +6531,7 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
 		else {
 			bfa_fcs_vport_aen_post(&vport->lport,
 					BFA_LPORT_AEN_NPIV_FABRIC_MAX);
-			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
+			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_FABRIC_MAX);
 		}
 		break;
 
@@ -6724,7 +6917,19 @@ bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
 			break;
 		}
 
-		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
+		if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
+			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
+		else
+			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
+
+		break;
+
+	case BFA_STATUS_ETIMER:
+		vport->vport_stats.fdisc_timeouts++;
+		if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
+			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
+		else
+			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
 		break;
 
 	case BFA_STATUS_FABRIC_RJT:

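Each attribute block added to the RHBA and port-attribute payloads above repeats one TLV append sequence: store the type, copy the value, round the length up to a 32-bit boundary, advance the cursor past type + length + padded value, then record len as the full TLV size. A condensed sketch of that pattern — fdmi_append_attr is a hypothetical helper (the driver open-codes each block), and the cpu_to_be16() conversions the driver performs are noted but omitted:

    #include <stdint.h>
    #include <string.h>

    #define FC_ROUNDUP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    struct fdmi_attr {              /* simplified struct fdmi_attr_s */
        uint16_t type;              /* cpu_to_be16() in the driver   */
        uint16_t len;               /* cpu_to_be16() in the driver   */
        uint8_t  value[];
    };

    /* Append one attribute and return the new cursor. Buffer-size
     * checking is assumed to be done by the caller. */
    static uint8_t *fdmi_append_attr(uint8_t *curr, uint16_t type,
                                     const void *val, uint16_t vallen,
                                     uint16_t *payload_len, int *count)
    {
        struct fdmi_attr *attr = (struct fdmi_attr *)curr;
        uint16_t templen = FC_ROUNDUP(vallen, sizeof(uint32_t));

        attr->type = type;
        memcpy(attr->value, val, vallen);
        memset(attr->value + vallen, 0, templen - vallen);   /* pad */
        attr->len  = templen + sizeof(attr->type) + sizeof(attr->len);

        *payload_len += templen;
        (*count)++;
        return curr + sizeof(attr->type) + sizeof(attr->len) + templen;
    }

Factoring the sequence into one helper like this would also make length and cursor slips across the dozen near-identical blocks harder to introduce.
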
+ 6 - 5
drivers/scsi/bfa/bfa_fcs_rport.c

@@ -189,8 +189,8 @@ bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
 		break;
 
 	case RPSM_EVENT_PLOGI_RCVD:
-		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
-		bfa_fcs_rport_fcs_online_action(rport);
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+		bfa_fcs_rport_send_plogiacc(rport, NULL);
 		break;
 
 	case RPSM_EVENT_PLOGI_COMP:
@@ -2577,7 +2577,7 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
 
 		port->fabric->bb_credit = be16_to_cpu(plogi->csp.bbcred);
 		bfa_fcport_set_tx_bbcredit(port->fcs->bfa,
-					  port->fabric->bb_credit, 0);
+					  port->fabric->bb_credit);
 	}
 
 }
@@ -3430,9 +3430,10 @@ bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
 		num_ents = be16_to_cpu(rpsc2_acc->num_pids);
 		bfa_trc(rport->fcs, num_ents);
 		if (num_ents > 0) {
-			WARN_ON(rpsc2_acc->port_info[0].pid == rport->pid);
+			WARN_ON(be32_to_cpu(rpsc2_acc->port_info[0].pid) !=
+						bfa_ntoh3b(rport->pid));
 			bfa_trc(rport->fcs,
-				be16_to_cpu(rpsc2_acc->port_info[0].pid));
+				be32_to_cpu(rpsc2_acc->port_info[0].pid));
 			bfa_trc(rport->fcs,
 				be16_to_cpu(rpsc2_acc->port_info[0].speed));
 			bfa_trc(rport->fcs,

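The corrected WARN_ON above compares the RPSC2 reply's 32-bit big-endian pid with the rport's stored pid via bfa_ntoh3b(). A sketch of the 3-byte conversion involved — this follows the common bfa convention of keeping the 24-bit FC_ID in the upper three bytes of a big-endian word (id << 8); the real bfa_ntoh3b() macro may differ in detail:

    #include <stdint.h>

    /* Load a 32-bit big-endian field regardless of host endianness. */
    static inline uint32_t load_be32(const void *field)
    {
        const uint8_t *p = field;
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    /* Illustrative ntoh3b: drop the unused low byte to recover the
     * 24-bit FC address as a host-order value. */
    static inline uint32_t ntoh3b(const void *pid_field)
    {
        return load_be32(pid_field) >> 8;
    }
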
+ 48 - 26
drivers/scsi/bfa/bfa_ioc.c

@@ -67,6 +67,14 @@ BFA_TRC_FILE(CNA, IOC);
 			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
 #define bfa_ioc_sync_complete(__ioc)            \
 			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
+#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)		\
+			((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
+#define bfa_ioc_get_cur_ioc_fwstate(__ioc)		\
+			((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
+#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)		\
+		((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
+#define bfa_ioc_get_alt_ioc_fwstate(__ioc)		\
+			((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))
 
 #define bfa_ioc_mbox_cmd_pending(__ioc)		\
 			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
@@ -698,7 +706,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
 	}
 
 	/* h/w sem init */
-	fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
+	fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc);
 	if (fwstate == BFI_IOC_UNINIT) {
 		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
 		goto sem_get;
@@ -725,8 +733,8 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
 
 	bfa_trc(iocpf->ioc, fwstate);
 	bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
-	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
-	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate);
+	bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
+	bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
 
 	/*
 	 * Unlock the hw semaphore. Should be here only once per boot.
@@ -1037,7 +1045,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 		 */
 
 	case IOCPF_E_TIMEOUT:
-		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
 		break;
 
@@ -1138,7 +1146,7 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 	case IOCPF_E_SEMLOCKED:
 		bfa_ioc_notify_fail(ioc);
 		bfa_ioc_sync_leave(ioc);
-		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
 		writel(1, ioc->ioc_regs.ioc_sem_reg);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
 		break;
@@ -1227,7 +1235,7 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 		bfa_ioc_notify_fail(ioc);
 		if (!iocpf->auto_recover) {
 			bfa_ioc_sync_leave(ioc);
-			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+			bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
 			writel(1, ioc->ioc_regs.ioc_sem_reg);
 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
 		} else {
@@ -1519,7 +1527,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
 	u32 boot_type;
 	u32 boot_env;
 
-	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
 
 	if (force)
 		ioc_fwstate = BFI_IOC_UNINIT;
@@ -1850,7 +1858,7 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
 	bfa_trc(ioc, len);
 	for (i = 0; i < len; i++) {
 		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
-		buf[i] = be32_to_cpu(r32);
+		buf[i] = swab32(r32);
 		loff += sizeof(u32);
 
 		/*
@@ -2006,11 +2014,11 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
 	 * Initialize IOC state of all functions on a chip reset.
 	 */
 	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
-		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
-		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
+		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
+		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
 	} else {
-		writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
-		writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
+		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
+		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
 	}
 
 	bfa_ioc_msgflush(ioc);
@@ -2038,7 +2046,7 @@ bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
 bfa_boolean_t
 bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
 {
-	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
+	u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc);
 
 	return ((r32 != BFI_IOC_UNINIT) &&
 		(r32 != BFI_IOC_INITING) &&
@@ -2188,6 +2196,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
 		break;
 
 	case BFA_PCI_DEVICE_ID_CT2:
+	case BFA_PCI_DEVICE_ID_CT2_QUAD:
 		ioc->asic_gen = BFI_ASIC_GEN_CT2;
 		if (clscode == BFI_PCIFN_CLASS_FC &&
 		    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
@@ -2430,12 +2439,12 @@ bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
 	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
 		return BFA_FALSE;
 
-	ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
+	ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
 	if (!bfa_ioc_state_disabled(ioc_state))
 		return BFA_FALSE;
 
 	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
-		ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
+		ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
 		if (!bfa_ioc_state_disabled(ioc_state))
 			return BFA_FALSE;
 	}
@@ -2449,8 +2458,8 @@ bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
 void
 bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
 {
-	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
-	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+	bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
+	bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
 }
 
 #define BFA_MFG_NAME "Brocade"
@@ -2500,6 +2509,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
 	ad_attr->mfg_day = ioc_attr->mfg_day;
 	ad_attr->mfg_month = ioc_attr->mfg_month;
 	ad_attr->mfg_year = ioc_attr->mfg_year;
+	memcpy(ad_attr->uuid, ioc_attr->uuid, BFA_ADAPTER_UUID_LEN);
 }
 
 enum bfa_ioc_type_e
@@ -2564,13 +2574,19 @@ void
 bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
 {
 	struct bfi_ioc_attr_s	*ioc_attr;
+	u8 nports = bfa_ioc_get_nports(ioc);
 
 	WARN_ON(!model);
 	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
 
 	ioc_attr = ioc->attr;
 
-	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
+	if (bfa_asic_id_ct2(ioc->pcidev.device_id) &&
+		(!bfa_mfg_is_mezz(ioc_attr->card_type)))
+		snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u-%u%s",
+			BFA_MFG_NAME, ioc_attr->card_type, nports, "p");
+	else
+		snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
 			BFA_MFG_NAME, ioc_attr->card_type);
 }
 
@@ -2620,7 +2636,7 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
 	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
 
 	ioc_attr->state = bfa_ioc_get_state(ioc);
-	ioc_attr->port_id = ioc->port_id;
+	ioc_attr->port_id = bfa_ioc_portid(ioc);
 	ioc_attr->port_mode = ioc->port_mode;
 	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
 	ioc_attr->cap_bm = ioc->ad_cap_bm;
@@ -2629,8 +2645,9 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
 
 	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
 
-	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
-	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
+	ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
+	ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
+	ioc_attr->def_fn = (bfa_ioc_pcifn(ioc) == bfa_ioc_portid(ioc));
 	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
 }
 
@@ -2917,7 +2934,7 @@ bfa_iocpf_sem_timeout(void *ioc_arg)
 static void
 bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
 {
-	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+	u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
 
 	bfa_trc(ioc, fwstate);
 
@@ -6010,6 +6027,7 @@ bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
 	 */
 	msg->last = (len == fru->residue) ? 1 : 0;
 
+	msg->trfr_cmpl = (len == fru->residue) ? fru->trfr_cmpl : 0;
 	bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
 	bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
 
@@ -6124,13 +6142,14 @@ bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa,
  */
 bfa_status_t
 bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
-		  bfa_cb_fru_t cbfn, void *cbarg)
+		  bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl)
 {
 	bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ);
 	bfa_trc(fru, len);
 	bfa_trc(fru, offset);
 
-	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
+	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2 &&
+		fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
 		return BFA_STATUS_FRU_NOT_PRESENT;
 
 	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
@@ -6152,6 +6171,7 @@ bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
 	fru->offset = 0;
 	fru->addr_off = offset;
 	fru->ubuf = buf;
+	fru->trfr_cmpl = trfr_cmpl;
 
 	bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ);
 
@@ -6181,7 +6201,8 @@ bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
 	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
 		return BFA_STATUS_FRU_NOT_PRESENT;
 
-	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
+	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK &&
+		fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
 		return BFA_STATUS_CMD_NOTSUPP;
 
 	if (!bfa_ioc_is_operational(fru->ioc))
@@ -6222,7 +6243,8 @@ bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size)
 	if (!bfa_ioc_is_operational(fru->ioc))
 		return BFA_STATUS_IOC_NON_OP;
 
-	if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK)
+	if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK ||
+		fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK2)
 		*max_size = BFA_FRU_CHINOOK_MAX_SIZE;
 	else
 		return BFA_STATUS_CMD_NOTSUPP;

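All direct readl()/writel() accesses to ioc_fwstate and alt_ioc_fwstate above are funneled through four new ioc_hwif function pointers, letting the CB backend fold its join bits into the same register while CT keeps a plain register access. A condensed sketch of the dispatch shape, with types simplified from bfa_ioc.h:

    #include <stdint.h>

    enum fwstate { FW_UNINIT, FW_INITING, FW_OP, FW_FAIL };

    struct ioc;
    struct ioc_hwif {
        void         (*set_fwstate)(struct ioc *ioc, enum fwstate st);
        enum fwstate (*get_fwstate)(struct ioc *ioc);
    };

    struct ioc {
        const struct ioc_hwif *hwif;
        uint32_t fwstate;            /* stands in for the mapped register */
    };

    /* The bfa_ioc_set/get_cur_ioc_fwstate() macros reduce to this. */
    static inline void ioc_set_fwstate(struct ioc *ioc, enum fwstate st)
    {
        ioc->hwif->set_fwstate(ioc, st);
    }

    static inline enum fwstate ioc_get_fwstate(struct ioc *ioc)
    {
        return ioc->hwif->get_fwstate(ioc);
    }
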
+ 8 - 1
drivers/scsi/bfa/bfa_ioc.h

@@ -346,6 +346,12 @@ struct bfa_ioc_hwif_s {
 	void		(*ioc_sync_ack)		(struct bfa_ioc_s *ioc);
 	bfa_boolean_t	(*ioc_sync_complete)	(struct bfa_ioc_s *ioc);
 	bfa_boolean_t	(*ioc_lpu_read_stat)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_set_fwstate)	(struct bfa_ioc_s *ioc,
+					enum bfi_ioc_state fwstate);
+	enum bfi_ioc_state	(*ioc_get_fwstate)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_set_alt_fwstate)	(struct bfa_ioc_s *ioc,
+					enum bfi_ioc_state fwstate);
+	enum bfi_ioc_state	(*ioc_get_alt_fwstate)	(struct bfa_ioc_s *ioc);
 };
 
 /*
@@ -725,6 +731,7 @@ struct bfa_fru_s {
 	struct bfa_mbox_cmd_s mb;	/* mailbox */
 	struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */
 	struct bfa_mem_dma_s	fru_dma;
+	u8		trfr_cmpl;
 };
 
 #define BFA_FRU(__bfa)	(&(__bfa)->modules.fru)
@@ -732,7 +739,7 @@ struct bfa_fru_s {
 
 bfa_status_t bfa_fruvpd_update(struct bfa_fru_s *fru,
 			void *buf, u32 len, u32 offset,
-			bfa_cb_fru_t cbfn, void *cbarg);
+			bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl);
 bfa_status_t bfa_fruvpd_read(struct bfa_fru_s *fru,
 			void *buf, u32 len, u32 offset,
 			bfa_cb_fru_t cbfn, void *cbarg);

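The new trfr_cmpl byte in struct bfa_fru_s is latched from the extra bfa_fruvpd_update() argument and, as the bfa_fru_write_send() hunk above shows, is forwarded to the firmware only on the fragment where len == residue. A standalone sketch of that chunking rule — FRAG_MAX and the message layout are stand-ins, not the real BFI definitions:

    #include <stdint.h>
    #include <string.h>

    #define FRAG_MAX 64u             /* stand-in mailbox payload limit */

    struct fru_msg {
        uint8_t last;                /* 1 on the final fragment        */
        uint8_t trfr_cmpl;           /* only meaningful when last == 1 */
        uint8_t data[FRAG_MAX];
    };

    /* Build one fragment; residue is the number of bytes still to send. */
    static void fru_write_send(const uint8_t *buf, uint32_t residue,
                               uint8_t trfr_cmpl, struct fru_msg *msg)
    {
        uint32_t len = residue < FRAG_MAX ? residue : FRAG_MAX;

        msg->last      = (len == residue) ? 1 : 0;
        msg->trfr_cmpl = (len == residue) ? trfr_cmpl : 0;
        memcpy(msg->data, buf, len);
    }
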
+ 79 - 7
drivers/scsi/bfa/bfa_ioc_cb.c

@@ -22,6 +22,8 @@
 
 BFA_TRC_FILE(CNA, IOC_CB);
 
+#define bfa_ioc_cb_join_pos(__ioc) ((u32) (1 << BFA_IOC_CB_JOIN_SH))
+
 /*
  * forward declarations
  */
@@ -37,6 +39,12 @@ static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc);
 static bfa_boolean_t bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_set_cur_ioc_fwstate(
+			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_cb_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_set_alt_ioc_fwstate(
+			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_cb_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc);
 
 static struct bfa_ioc_hwif_s hwif_cb;
 
@@ -59,6 +67,10 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
 	hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave;
 	hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack;
 	hwif_cb.ioc_sync_complete = bfa_ioc_cb_sync_complete;
+	hwif_cb.ioc_set_fwstate = bfa_ioc_cb_set_cur_ioc_fwstate;
+	hwif_cb.ioc_get_fwstate = bfa_ioc_cb_get_cur_ioc_fwstate;
+	hwif_cb.ioc_set_alt_fwstate = bfa_ioc_cb_set_alt_ioc_fwstate;
+	hwif_cb.ioc_get_alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate;
 
 	ioc->ioc_hwif = &hwif_cb;
 }
@@ -187,6 +199,20 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
 static bfa_boolean_t
 bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc)
 {
+	u32 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+	/**
+	 * Driver load time.  If the join bit is set,
+	 * it is due to an unclean exit by the driver for this
+	 * PCI fn in the previous incarnation. Whoever comes here first
+	 * should clean it up, no matter which PCI fn.
+	 */
+	if (ioc_fwstate & BFA_IOC_CB_JOIN_MASK) {
+		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+		return BFA_TRUE;
+	}
+
 	return bfa_ioc_cb_sync_complete(ioc);
 }
 
@@ -212,24 +238,66 @@ bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
 static void
 bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc)
 {
+	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
+	u32 join_pos = bfa_ioc_cb_join_pos(ioc);
+
+	writel((r32 | join_pos), ioc->ioc_regs.ioc_fwstate);
 }
 
 static void
 bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc)
 {
+	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
+	u32 join_pos = bfa_ioc_cb_join_pos(ioc);
+
+	writel((r32 & ~join_pos), ioc->ioc_regs.ioc_fwstate);
+}
+
+static void
+bfa_ioc_cb_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc,
+			enum bfi_ioc_state fwstate)
+{
+	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
+
+	writel((fwstate | (r32 & BFA_IOC_CB_JOIN_MASK)),
+				ioc->ioc_regs.ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_cb_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc)
+{
+	return (enum bfi_ioc_state)(readl(ioc->ioc_regs.ioc_fwstate) &
+			BFA_IOC_CB_FWSTATE_MASK);
+}
+
+static void
+bfa_ioc_cb_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc,
+			enum bfi_ioc_state fwstate)
+{
+	u32 r32 = readl(ioc->ioc_regs.alt_ioc_fwstate);
+
+	writel((fwstate | (r32 & BFA_IOC_CB_JOIN_MASK)),
+				ioc->ioc_regs.alt_ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_cb_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc)
+{
+	return (enum bfi_ioc_state)(readl(ioc->ioc_regs.alt_ioc_fwstate) &
+			BFA_IOC_CB_FWSTATE_MASK);
 }
 
 static void
 bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc)
 {
-	writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+	bfa_ioc_cb_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
 }
 
 static bfa_boolean_t
 bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc)
 {
-	uint32_t fwstate, alt_fwstate;
-	fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+	u32 fwstate, alt_fwstate;
+	fwstate = bfa_ioc_cb_get_cur_ioc_fwstate(ioc);
 
 	/*
 	 * At this point, this IOC is holding the hw sem in the
@@ -257,7 +325,7 @@ bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc)
 		fwstate == BFI_IOC_OP)
 		return BFA_TRUE;
 	else {
-		alt_fwstate = readl(ioc->ioc_regs.alt_ioc_fwstate);
+		alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate(ioc);
 		if (alt_fwstate == BFI_IOC_FAIL ||
 			alt_fwstate == BFI_IOC_DISABLED ||
 			alt_fwstate == BFI_IOC_UNINIT ||
@@ -272,7 +340,7 @@ bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc)
 bfa_status_t
 bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode)
 {
-	u32	pll_sclk, pll_fclk;
+	u32	pll_sclk, pll_fclk, join_bits;
 
 	pll_sclk = __APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN |
 		__APP_PLL_SCLK_P0_1(3U) |
@@ -282,8 +350,12 @@ bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode)
 		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
 		__APP_PLL_LCLK_JITLMT0_1(3U) |
 		__APP_PLL_LCLK_CNTLMT0_1(3U);
-	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
-	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
+	join_bits = readl(rb + BFA_IOC0_STATE_REG) &
+			BFA_IOC_CB_JOIN_MASK;
+	writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC0_STATE_REG));
+	join_bits = readl(rb + BFA_IOC1_STATE_REG) &
+			BFA_IOC_CB_JOIN_MASK;
+	writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC1_STATE_REG));
 	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
 	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
 	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));

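On CB parts the per-function join bits now share the firmware-state register, so the new accessors must preserve them on every state write and mask them off on every read — including in pll_init, which previously clobbered them with a bare BFI_IOC_UNINIT store. A standalone sketch of the read-modify-write discipline; the mask values are illustrative, not the real BFA_IOC_CB_JOIN_MASK/BFA_IOC_CB_FWSTATE_MASK constants:

    #include <stdint.h>

    #define JOIN_MASK     0xc0000000u   /* illustrative */
    #define FWSTATE_MASK  0x0000ffffu   /* illustrative */

    static uint32_t fwstate_reg;        /* stands in for the mapped register */

    static void set_fwstate(uint32_t state)
    {
        /* replace the state field, keep the join bits untouched */
        fwstate_reg = (state & FWSTATE_MASK) | (fwstate_reg & JOIN_MASK);
    }

    static uint32_t get_fwstate(void)
    {
        return fwstate_reg & FWSTATE_MASK;
    }
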
+ 46 - 0
drivers/scsi/bfa/bfa_ioc_ct.c

@@ -43,6 +43,12 @@ static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
 static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_set_cur_ioc_fwstate(
+			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_set_alt_ioc_fwstate(
+			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc);
 
 static struct bfa_ioc_hwif_s hwif_ct;
 static struct bfa_ioc_hwif_s hwif_ct2;
@@ -512,6 +518,10 @@ bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
 	hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
 	hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
 	hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
+	hwif->ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate;
+	hwif->ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate;
+	hwif->ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate;
+	hwif->ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate;
 }
 
 /**
@@ -918,6 +928,16 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
 
 		}
 	}
+	/*
+	* The very first PCIe DMA Read done by LPU fails with a fatal error,
+	* when Address Translation Cache (ATC) has been enabled by system BIOS.
+	*
+	* Workaround:
+	* Disable Invalidated Tag Match Enable capability by setting the bit 26
+	* of CHIP_MISC_PRG to 0, by default it is set to 1.
+	*/
+	r32 = readl(rb + CT2_CHIP_MISC_PRG);
+	writel((r32 & 0xfbffffff), (rb + CT2_CHIP_MISC_PRG));
 
 	/*
 	 * Mask the interrupts and clear any
@@ -949,3 +969,29 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
 
 	return BFA_STATUS_OK;
 }
+
+static void
+bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc,
+		enum bfi_ioc_state fwstate)
+{
+	writel(fwstate, ioc->ioc_regs.ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc)
+{
+	return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
+}
+
+static void
+bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc,
+		enum bfi_ioc_state fwstate)
+{
+	writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc)
+{
+	return (enum bfi_ioc_state) readl(ioc->ioc_regs.alt_ioc_fwstate);
+}

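The ATC workaround above clears bit 26 of CT2_CHIP_MISC_PRG with the literal mask 0xfbffffff. The mask is simply ~(1 << 26); a one-line compile-time check makes that explicit:

    #include <assert.h>

    static_assert(~(1u << 26) == 0xfbffffffu, "mask clears exactly bit 26");
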
+ 649 - 51
drivers/scsi/bfa/bfa_svc.c

@@ -70,6 +70,8 @@ enum bfa_fcport_sm_event {
 	BFA_FCPORT_SM_DPORTENABLE = 10, /*  enable dport      */
 	BFA_FCPORT_SM_DPORTDISABLE = 11,/*  disable dport     */
+	BFA_FCPORT_SM_FAA_MISCONFIG = 12,	/* FAA misconfiguration */
+	BFA_FCPORT_SM_DDPORTENABLE  = 13,	/* enable ddport	*/
+	BFA_FCPORT_SM_DDPORTDISABLE = 14,	/* disable ddport	*/
 };
 
 /*
@@ -202,6 +204,8 @@ static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
 					enum bfa_fcport_sm_event event);
 static void	bfa_fcport_sm_dport(struct bfa_fcport_s *fcport,
 					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
 static void	bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
 					enum bfa_fcport_sm_event event);
 
@@ -234,6 +238,7 @@ static struct bfa_sm_table_s hal_port_sm_table[] = {
 	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
 	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
 	{BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT},
+	{BFA_SM(bfa_fcport_sm_ddport), BFA_PORT_ST_DDPORT},
 	{BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG},
 };
 
@@ -1276,7 +1281,6 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
 
 	switch (event) {
 	case BFA_LPS_SM_FWRSP:
-	case BFA_LPS_SM_OFFLINE:
 		if (lps->status == BFA_STATUS_OK) {
 			bfa_sm_set_state(lps, bfa_lps_sm_online);
 			if (lps->fdisc)
@@ -1305,6 +1309,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
 		bfa_lps_login_comp(lps);
 		break;
 
+	case BFA_LPS_SM_OFFLINE:
 	case BFA_LPS_SM_DELETE:
 		bfa_sm_set_state(lps, bfa_lps_sm_init);
 		break;
@@ -1614,7 +1619,6 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
 		lps->lp_mac	= rsp->lp_mac;
 		lps->brcd_switch = rsp->brcd_switch;
 		lps->fcf_mac	= rsp->fcf_mac;
-		lps->pr_bbscn	= rsp->bb_scn;
 
 		break;
 
@@ -1744,7 +1748,6 @@ bfa_lps_send_login(struct bfa_lps_s *lps)
 	m->nwwn		= lps->nwwn;
 	m->fdisc	= lps->fdisc;
 	m->auth_en	= lps->auth_en;
-	m->bb_scn	= lps->bb_scn;
 
 	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
 	list_del(&lps->qe);
@@ -1940,7 +1943,7 @@ bfa_lps_delete(struct bfa_lps_s *lps)
  */
 void
 bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
-	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en, uint8_t bb_scn)
+	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
 {
 	lps->uarg	= uarg;
 	lps->alpa	= alpa;
@@ -1949,7 +1952,6 @@ bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
 	lps->nwwn	= nwwn;
 	lps->fdisc	= BFA_FALSE;
 	lps->auth_en	= auth_en;
-	lps->bb_scn	= bb_scn;
 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
 }
 
@@ -2649,6 +2651,10 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
 		bfa_sm_set_state(fcport, bfa_fcport_sm_dport);
 		break;
 
+	case BFA_FCPORT_SM_DDPORTENABLE:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_ddport);
+		break;
+
 	default:
 		bfa_sm_fault(fcport->bfa, event);
 	}
@@ -2761,6 +2767,40 @@ bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event)
 	}
 }
 
+static void
+bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
+			enum bfa_fcport_sm_event event)
+{
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_DISABLE:
+	case BFA_FCPORT_SM_DDPORTDISABLE:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+		break;
+
+	case BFA_FCPORT_SM_DPORTENABLE:
+	case BFA_FCPORT_SM_DPORTDISABLE:
+	case BFA_FCPORT_SM_ENABLE:
+	case BFA_FCPORT_SM_START:
+		/**
+		 * Ignore event for a port that is ddport
+		 */
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
 static void
 bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
 			    enum bfa_fcport_sm_event event)
@@ -3082,6 +3122,8 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 	port_cfg->qos_bw.med = BFA_QOS_BW_MED;
 	port_cfg->qos_bw.low = BFA_QOS_BW_LOW;
 
+	fcport->fec_state = BFA_FEC_OFFLINE;
+
 	INIT_LIST_HEAD(&fcport->stats_pending_q);
 	INIT_LIST_HEAD(&fcport->statsclr_pending_q);
 
@@ -3158,6 +3200,11 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
 	fcport->qos_attr = pevent->link_state.qos_attr;
 	fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr;
 
+	if (fcport->cfg.bb_cr_enabled)
+		fcport->bbcr_attr = pevent->link_state.attr.bbcr_attr;
+
+	fcport->fec_state = pevent->link_state.fec_state;
+
 	/*
 	 * update trunk state if applicable
 	 */
@@ -3177,7 +3224,7 @@ bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
 {
 	fcport->speed = BFA_PORT_SPEED_UNKNOWN;
 	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
-	fcport->bbsc_op_state = BFA_FALSE;
+	fcport->fec_state = BFA_FEC_OFFLINE;
 }
 
 /*
@@ -3629,6 +3676,11 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
 			fcport->qos_attr.qos_bw_op =
 					i2hmsg.penable_rsp->port_cfg.qos_bw;
 
+			if (fcport->cfg.bb_cr_enabled)
+				fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
+			else
+				fcport->bbcr_attr.state = BFA_BBCR_DISABLED;
+
 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
 		}
 		break;
@@ -3639,6 +3691,11 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
 		break;
 
 	case BFI_FCPORT_I2H_EVENT:
+		if (fcport->cfg.bb_cr_enabled)
+			fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
+		else
+			fcport->bbcr_attr.state = BFA_BBCR_DISABLED;
+
 		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
 		else {
@@ -3846,6 +3903,8 @@ bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
 			return BFA_STATUS_LOOP_UNSUPP_MEZZ;
 		if (bfa_fcport_is_dport(bfa) != BFA_FALSE)
 			return BFA_STATUS_DPORT_ERR;
+		if (bfa_fcport_is_ddport(bfa) != BFA_FALSE)
+			return BFA_STATUS_DPORT_ERR;
 		break;
 
 	case BFA_PORT_TOPOLOGY_AUTO:
@@ -3964,14 +4023,11 @@ bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
 }
 
 void
-bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn)
+bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
 {
 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
 	fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
-	fcport->cfg.bb_scn = bb_scn;
-	if (bb_scn)
-		fcport->bbsc_op_state = BFA_TRUE;
 }
 
 /*
@@ -4021,7 +4077,8 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
 	attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
 	attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
 	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
-	attr->bbsc_op_status =  fcport->bbsc_op_state;
+
+	attr->fec_state = fcport->fec_state;
 
 	/* PBC Disabled State */
 	if (bfa_fcport_is_pbcdisabled(bfa))
@@ -4115,6 +4172,15 @@ bfa_fcport_is_dport(struct bfa_s *bfa)
 		BFA_PORT_ST_DPORT);
 }
 
+bfa_boolean_t
+bfa_fcport_is_ddport(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
+		BFA_PORT_ST_DDPORT);
+}
+
 bfa_status_t
 bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw)
 {
@@ -4217,6 +4283,77 @@ bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
 	return fcport->cfg.trunked;
 }
 
+bfa_status_t
+bfa_fcport_cfg_bbcr(struct bfa_s *bfa, bfa_boolean_t on_off, u8 bb_scn)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_trc(bfa, on_off);
+
+	if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
+		return BFA_STATUS_BBCR_FC_ONLY;
+
+	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type) &&
+		(bfa->ioc.attr->card_type != BFA_MFG_TYPE_CHINOOK))
+		return BFA_STATUS_CMD_NOTSUPP_MEZZ;
+
+	if (on_off) {
+		if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
+			return BFA_STATUS_TOPOLOGY_LOOP;
+
+		if (fcport->cfg.qos_enabled)
+			return BFA_STATUS_ERROR_QOS_ENABLED;
+
+		if (fcport->cfg.trunked)
+			return BFA_STATUS_TRUNK_ENABLED;
+
+		if ((fcport->cfg.speed != BFA_PORT_SPEED_AUTO) &&
+			(fcport->cfg.speed < bfa_ioc_speed_sup(&bfa->ioc)))
+			return BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT;
+
+		if (bfa_ioc_speed_sup(&bfa->ioc) < BFA_PORT_SPEED_8GBPS)
+			return BFA_STATUS_FEATURE_NOT_SUPPORTED;
+
+		if (fcport->cfg.bb_cr_enabled) {
+			if (bb_scn != fcport->cfg.bb_scn)
+				return BFA_STATUS_BBCR_CFG_NO_CHANGE;
+			else
+				return BFA_STATUS_NO_CHANGE;
+		}
+
+		if ((bb_scn == 0) || (bb_scn > BFA_BB_SCN_MAX))
+			bb_scn = BFA_BB_SCN_DEF;
+
+		fcport->cfg.bb_cr_enabled = on_off;
+		fcport->cfg.bb_scn = bb_scn;
+	} else {
+		if (!fcport->cfg.bb_cr_enabled)
+			return BFA_STATUS_NO_CHANGE;
+
+		fcport->cfg.bb_cr_enabled = on_off;
+		fcport->cfg.bb_scn = 0;
+	}
+
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcport_get_bbcr_attr(struct bfa_s *bfa,
+		struct bfa_bbcr_attr_s *bbcr_attr)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
+		return BFA_STATUS_BBCR_FC_ONLY;
+
+	if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
+		return BFA_STATUS_TOPOLOGY_LOOP;
+
+	*bbcr_attr = fcport->bbcr_attr;
+
+	return BFA_STATUS_OK;
+}
+
 void
 bfa_fcport_dportenable(struct bfa_s *bfa)
 {
@@ -4237,6 +4374,24 @@ bfa_fcport_dportdisable(struct bfa_s *bfa)
 	bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
 }
 
+void
+bfa_fcport_ddportenable(struct bfa_s *bfa)
+{
+	/*
+	 * Assume caller check for port is in disable state
+	 */
+	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE);
+}
+
+void
+bfa_fcport_ddportdisable(struct bfa_s *bfa)
+{
+	/*
+	 * Assume caller check for port is in disable state
+	 */
+	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTDISABLE);
+}
+
 /*
  * Rport State machine functions
  */
@@ -5622,6 +5777,14 @@ bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
  *	Dport forward declaration
  */
 
+enum bfa_dport_test_state_e {
+	BFA_DPORT_ST_DISABLED	= 0,	/*!< dport is disabled */
+	BFA_DPORT_ST_INP	= 1,	/*!< test in progress */
+	BFA_DPORT_ST_COMP	= 2,	/*!< test complete successfully */
+	BFA_DPORT_ST_NO_SFP	= 3,	/*!< sfp is not present */
+	BFA_DPORT_ST_NOTSTART	= 4,	/*!< test not started; dport enabled */
+};
+
 /*
  * BFA DPORT state machine events
  */
@@ -5631,6 +5794,9 @@ enum bfa_dport_sm_event {
 	BFA_DPORT_SM_FWRSP      = 3,    /* fw enable/disable rsp      */
 	BFA_DPORT_SM_QRESUME    = 4,    /* CQ space available         */
 	BFA_DPORT_SM_HWFAIL     = 5,    /* IOC h/w failure            */
+	BFA_DPORT_SM_START	= 6,	/* re-start dport test        */
+	BFA_DPORT_SM_REQFAIL	= 7,	/* request failure            */
+	BFA_DPORT_SM_SCN	= 8,	/* state change notify from fw */
 };
 
 static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
@@ -5645,9 +5811,19 @@ static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
 				 enum bfa_dport_sm_event event);
 static void bfa_dport_sm_disabling(struct bfa_dport_s *dport,
 				   enum bfa_dport_sm_event event);
+static void bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
+					enum bfa_dport_sm_event event);
+static void bfa_dport_sm_starting(struct bfa_dport_s *dport,
+				  enum bfa_dport_sm_event event);
+static void bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
+				   enum bfa_dport_sm_event event);
+static void bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
+				   enum bfa_dport_sm_event event);
 static void bfa_dport_qresume(void *cbarg);
 static void bfa_dport_req_comp(struct bfa_dport_s *dport,
-			       bfi_diag_dport_rsp_t *msg);
+				struct bfi_diag_dport_rsp_s *msg);
+static void bfa_dport_scn(struct bfa_dport_s *dport,
+				struct bfi_diag_dport_scn_s *msg);
 
 /*
  *	BFA fcdiag module
@@ -5689,6 +5865,8 @@ bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 	bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport);
 	dport->cbfn = NULL;
 	dport->cbarg = NULL;
+	dport->test_state = BFA_DPORT_ST_DISABLED;
+	memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s));
 }
 
 static void
@@ -5891,7 +6069,12 @@ bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
 		bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
 		break;
 	case BFI_DIAG_I2H_DPORT:
-		bfa_dport_req_comp(&fcdiag->dport, (bfi_diag_dport_rsp_t *)msg);
+		bfa_dport_req_comp(&fcdiag->dport,
+				(struct bfi_diag_dport_rsp_s *)msg);
+		break;
+	case BFI_DIAG_I2H_DPORT_SCN:
+		bfa_dport_scn(&fcdiag->dport,
+				(struct bfi_diag_dport_scn_s *)msg);
 		break;
 	default:
 		bfa_trc(fcdiag, msg->mhdr.msg_id);
@@ -5986,7 +6169,11 @@ bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
 				return BFA_STATUS_UNSUPP_SPEED;
 		}
 	}
-
+	/* check to see if fcport is dport */
+	if (bfa_fcport_is_dport(bfa)) {
+		bfa_trc(fcdiag, fcdiag->lb.lock);
+		return BFA_STATUS_DPORT_ENABLED;
+	}
 	/* check to see if there is another destructive diag cmd running */
 	if (fcdiag->lb.lock) {
 		bfa_trc(fcdiag, fcdiag->lb.lock);
@@ -6090,6 +6277,15 @@ bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
 /*
  *	D-port
  */
+#define bfa_dport_result_start(__dport, __mode) do {			\
+		(__dport)->result.start_time = bfa_get_log_time();	\
+		(__dport)->result.status = DPORT_TEST_ST_INPRG;		\
+		(__dport)->result.mode = (__mode);			\
+		(__dport)->result.rp_pwwn = (__dport)->rp_pwwn;		\
+		(__dport)->result.rp_nwwn = (__dport)->rp_nwwn;		\
+		(__dport)->result.lpcnt = (__dport)->lpcnt;		\
+} while (0)
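
bfa_dport_result_start() expands to six statements, so the do { ... } while (0) wrapper is the standard C idiom that keeps the macro safe in un-braced conditionals. Illustration (restart_requested is hypothetical):

	if (restart_requested)
		bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU);
	else	/* without the wrapper, this else would not even compile */
		bfa_trc(dport->bfa, 0);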
+
 static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
 					enum bfi_dport_req req);
 static void
@@ -6124,6 +6320,18 @@ bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
 		/* ignore */
 		break;
 
+	case BFA_DPORT_SM_SCN:
+		if (dport->i2hmsg.scn.state ==  BFI_DPORT_SCN_DDPORT_ENABLE) {
+			bfa_fcport_ddportenable(dport->bfa);
+			dport->dynamic = BFA_TRUE;
+			dport->test_state = BFA_DPORT_ST_NOTSTART;
+			bfa_sm_set_state(dport, bfa_dport_sm_enabled);
+		} else {
+			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
+			WARN_ON(1);
+		}
+		break;
+
 	default:
 		bfa_sm_fault(dport->bfa, event);
 	}
@@ -6159,9 +6367,23 @@ bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
 
 	switch (event) {
 	case BFA_DPORT_SM_FWRSP:
+		memset(&dport->result, 0,
+				sizeof(struct bfa_diag_dport_result_s));
+		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
+			dport->test_state = BFA_DPORT_ST_NO_SFP;
+		} else {
+			dport->test_state = BFA_DPORT_ST_INP;
+			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_AUTO);
+		}
 		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
 		break;
 
+	case BFA_DPORT_SM_REQFAIL:
+		dport->test_state = BFA_DPORT_ST_DISABLED;
+		bfa_fcport_dportdisable(dport->bfa);
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		break;
+
 	case BFA_DPORT_SM_HWFAIL:
 		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
 		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
@@ -6178,8 +6400,11 @@ bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
 	bfa_trc(dport->bfa, event);
 
 	switch (event) {
-	case BFA_DPORT_SM_ENABLE:
-		/* Already enabled */
+	case BFA_DPORT_SM_START:
+		if (bfa_dport_send_req(dport, BFI_DPORT_START))
+			bfa_sm_set_state(dport, bfa_dport_sm_starting);
+		else
+			bfa_sm_set_state(dport, bfa_dport_sm_starting_qwait);
 		break;
 
 	case BFA_DPORT_SM_DISABLE:
@@ -6194,6 +6419,48 @@ bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
 		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
 		break;
 
+	case BFA_DPORT_SM_SCN:
+		switch (dport->i2hmsg.scn.state) {
+		case BFI_DPORT_SCN_TESTCOMP:
+			dport->test_state = BFA_DPORT_ST_COMP;
+			break;
+
+		case BFI_DPORT_SCN_TESTSTART:
+			dport->test_state = BFA_DPORT_ST_INP;
+			break;
+
+		case BFI_DPORT_SCN_TESTSKIP:
+		case BFI_DPORT_SCN_SUBTESTSTART:
+			/* no state change */
+			break;
+
+		case BFI_DPORT_SCN_SFP_REMOVED:
+			dport->test_state = BFA_DPORT_ST_NO_SFP;
+			break;
+
+		case BFI_DPORT_SCN_DDPORT_DISABLE:
+			bfa_fcport_ddportdisable(dport->bfa);
+
+			if (bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE))
+				bfa_sm_set_state(dport,
+					 bfa_dport_sm_dynamic_disabling);
+			else
+				bfa_sm_set_state(dport,
+					 bfa_dport_sm_dynamic_disabling_qwait);
+			break;
+
+		case BFI_DPORT_SCN_FCPORT_DISABLE:
+			bfa_fcport_ddportdisable(dport->bfa);
+
+			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+			dport->dynamic = BFA_FALSE;
+			break;
+
+		default:
+			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
+			bfa_sm_fault(dport->bfa, event);
+		}
+		break;
 	default:
 		bfa_sm_fault(dport->bfa, event);
 	}
@@ -6217,6 +6484,10 @@ bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
 		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
 		break;
 
+	case BFA_DPORT_SM_SCN:
+		/* ignore */
+		break;
+
 	default:
 		bfa_sm_fault(dport->bfa, event);
 	}
@@ -6229,7 +6500,98 @@ bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
 
 	switch (event) {
 	case BFA_DPORT_SM_FWRSP:
+		dport->test_state = BFA_DPORT_ST_DISABLED;
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		break;
+
+	case BFA_DPORT_SM_HWFAIL:
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
+		break;
+
+	case BFA_DPORT_SM_SCN:
+		/* no state change */
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, event);
+	}
+}
+
+static void
+bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
+			    enum bfa_dport_sm_event event)
+{
+	bfa_trc(dport->bfa, event);
+
+	switch (event) {
+	case BFA_DPORT_SM_QRESUME:
+		bfa_sm_set_state(dport, bfa_dport_sm_starting);
+		bfa_dport_send_req(dport, BFI_DPORT_START);
+		break;
+
+	case BFA_DPORT_SM_HWFAIL:
+		bfa_reqq_wcancel(&dport->reqq_wait);
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, event);
+	}
+}
+
+static void
+bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
+{
+	bfa_trc(dport->bfa, event);
+
+	switch (event) {
+	case BFA_DPORT_SM_FWRSP:
+		memset(&dport->result, 0,
+				sizeof(struct bfa_diag_dport_result_s));
+		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
+			dport->test_state = BFA_DPORT_ST_NO_SFP;
+		} else {
+			dport->test_state = BFA_DPORT_ST_INP;
+			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU);
+		}
+		/* fall thru */
+
+	case BFA_DPORT_SM_REQFAIL:
+		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
+		break;
+
+	case BFA_DPORT_SM_HWFAIL:
 		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, event);
+	}
+}
+
+static void
+bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
+			       enum bfa_dport_sm_event event)
+{
+	bfa_trc(dport->bfa, event);
+
+	switch (event) {
+	case BFA_DPORT_SM_SCN:
+		switch (dport->i2hmsg.scn.state) {
+		case BFI_DPORT_SCN_DDPORT_DISABLED:
+			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+			dport->dynamic = BFA_FALSE;
+			bfa_fcport_enable(dport->bfa);
+			break;
+
+		default:
+			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
+			bfa_sm_fault(dport->bfa, event);
+
+		}
 		break;
 
 	case BFA_DPORT_SM_HWFAIL:
@@ -6242,18 +6604,38 @@ bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
 	}
 }
 
+static void
+bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
+			    enum bfa_dport_sm_event event)
+{
+	bfa_trc(dport->bfa, event);
+
+	switch (event) {
+	case BFA_DPORT_SM_QRESUME:
+		bfa_sm_set_state(dport, bfa_dport_sm_dynamic_disabling);
+		bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE);
+		break;
+
+	case BFA_DPORT_SM_HWFAIL:
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		bfa_reqq_wcancel(&dport->reqq_wait);
+		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
+		break;
+
+	case BFA_DPORT_SM_SCN:
+		/* ignore */
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, event);
+	}
+}
 
 static bfa_boolean_t
 bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
 {
 	struct bfi_diag_dport_req_s *m;
 
-	/*
-	 * Increment message tag before queue check, so that responses to old
-	 * requests are discarded.
-	 */
-	dport->msgtag++;
-
 	/*
 	 * check for room in queue to send request now
 	 */
@@ -6266,7 +6648,10 @@ bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
 	bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT,
 		    bfa_fn_lpu(dport->bfa));
 	m->req  = req;
-	m->msgtag = dport->msgtag;
+	if ((req == BFI_DPORT_ENABLE) || (req == BFI_DPORT_START)) {
+		m->lpcnt = cpu_to_be32(dport->lpcnt);
+		m->payload = cpu_to_be32(dport->payload);
+	}
 
 	/*
 	 * queue I/O message to firmware
@@ -6285,19 +6670,131 @@ bfa_dport_qresume(void *cbarg)
 }
 
 static void
-bfa_dport_req_comp(struct bfa_dport_s *dport, bfi_diag_dport_rsp_t *msg)
+bfa_dport_req_comp(struct bfa_dport_s *dport, struct bfi_diag_dport_rsp_s *msg)
 {
-	bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);
+	msg->status = cpu_to_be32(msg->status);
+	dport->i2hmsg.rsp.status = msg->status;
+	dport->rp_pwwn = msg->pwwn;
+	dport->rp_nwwn = msg->nwwn;
+
+	if ((msg->status == BFA_STATUS_OK) ||
+	    (msg->status == BFA_STATUS_DPORT_NO_SFP)) {
+		bfa_trc(dport->bfa, msg->status);
+		bfa_trc(dport->bfa, dport->rp_pwwn);
+		bfa_trc(dport->bfa, dport->rp_nwwn);
+		bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);
+
+	} else {
+		bfa_trc(dport->bfa, msg->status);
+		bfa_sm_send_event(dport, BFA_DPORT_SM_REQFAIL);
+	}
 	bfa_cb_fcdiag_dport(dport, msg->status);
 }
 
+static bfa_boolean_t
+bfa_dport_is_sending_req(struct bfa_dport_s *dport)
+{
+	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling)	||
+	    bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
+	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling)	||
+	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait) ||
+	    bfa_sm_cmp_state(dport, bfa_dport_sm_starting)	||
+	    bfa_sm_cmp_state(dport, bfa_dport_sm_starting_qwait)) {
+		return BFA_TRUE;
+	} else {
+		return BFA_FALSE;
+	}
+}
+
+static void
+bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
+{
+	int i;
+	uint8_t subtesttype;
+
+	bfa_trc(dport->bfa, msg->state);
+	dport->i2hmsg.scn.state = msg->state;
+
+	switch (dport->i2hmsg.scn.state) {
+	case BFI_DPORT_SCN_TESTCOMP:
+		dport->result.end_time = bfa_get_log_time();
+		bfa_trc(dport->bfa, dport->result.end_time);
+
+		dport->result.status = msg->info.testcomp.status;
+		bfa_trc(dport->bfa, dport->result.status);
+
+		dport->result.roundtrip_latency =
+			cpu_to_be32(msg->info.testcomp.latency);
+		dport->result.est_cable_distance =
+			cpu_to_be32(msg->info.testcomp.distance);
+		dport->result.buffer_required =
+			be16_to_cpu(msg->info.testcomp.numbuffer);
+
+		dport->result.frmsz = be16_to_cpu(msg->info.testcomp.frm_sz);
+		dport->result.speed = msg->info.testcomp.speed;
+
+		bfa_trc(dport->bfa, dport->result.roundtrip_latency);
+		bfa_trc(dport->bfa, dport->result.est_cable_distance);
+		bfa_trc(dport->bfa, dport->result.buffer_required);
+		bfa_trc(dport->bfa, dport->result.frmsz);
+		bfa_trc(dport->bfa, dport->result.speed);
+
+		for (i = DPORT_TEST_ELOOP; i < DPORT_TEST_MAX; i++) {
+			dport->result.subtest[i].status =
+				msg->info.testcomp.subtest_status[i];
+			bfa_trc(dport->bfa, dport->result.subtest[i].status);
+		}
+		break;
+
+	case BFI_DPORT_SCN_TESTSKIP:
+	case BFI_DPORT_SCN_DDPORT_ENABLE:
+		memset(&dport->result, 0,
+				sizeof(struct bfa_diag_dport_result_s));
+		break;
+
+	case BFI_DPORT_SCN_TESTSTART:
+		memset(&dport->result, 0,
+				sizeof(struct bfa_diag_dport_result_s));
+		dport->rp_pwwn = msg->info.teststart.pwwn;
+		dport->rp_nwwn = msg->info.teststart.nwwn;
+		dport->lpcnt = cpu_to_be32(msg->info.teststart.numfrm);
+		bfa_dport_result_start(dport, BFA_DPORT_OPMODE_AUTO);
+		break;
+
+	case BFI_DPORT_SCN_SUBTESTSTART:
+		subtesttype = msg->info.teststart.type;
+		dport->result.subtest[subtesttype].start_time =
+			bfa_get_log_time();
+		dport->result.subtest[subtesttype].status =
+			DPORT_TEST_ST_INPRG;
+
+		bfa_trc(dport->bfa, subtesttype);
+		bfa_trc(dport->bfa,
+			dport->result.subtest[subtesttype].start_time);
+		break;
+
+	case BFI_DPORT_SCN_SFP_REMOVED:
+	case BFI_DPORT_SCN_DDPORT_DISABLED:
+	case BFI_DPORT_SCN_DDPORT_DISABLE:
+	case BFI_DPORT_SCN_FCPORT_DISABLE:
+		dport->result.status = DPORT_TEST_ST_IDLE;
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, msg->state);
+	}
+
+	bfa_sm_send_event(dport, BFA_DPORT_SM_SCN);
+}
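
One note on the byte swapping in the handler above: a 32-bit byte swap is its own inverse, so cpu_to_be32() and be32_to_cpu() produce the same bit pattern; be32_to_cpu() on the big-endian firmware fields would read more naturally but behaves identically. Sketch of the equivalent spelling (hypothetical helper):

	static inline u32 fw32_to_host(__be32 v)
	{
		return be32_to_cpu(v);	/* same bits as cpu_to_be32() here */
	}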
+
 /*
  * Dport enable
  *
  * @param[in] *bfa            - bfa data struct
  */
 bfa_status_t
-bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
+bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat,
+				bfa_cb_diag_t cbfn, void *cbarg)
 {
 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
 	struct bfa_dport_s  *dport = &fcdiag->dport;
@@ -6310,6 +6807,14 @@ bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
 		return BFA_STATUS_CMD_NOTSUPP_MEZZ;
 	}
 
+	/*
+	 * Dport is supported in CT2 or above
+	 */
+	if (!(bfa_asic_id_ct2(dport->bfa->ioc.pcidev.device_id))) {
+		bfa_trc(dport->bfa, dport->bfa->ioc.pcidev.device_id);
+		return BFA_STATUS_FEATURE_NOT_SUPPORTED;
+	}
+
 	/*
 	 * Check to see if IOC is down
 	*/
@@ -6347,6 +6852,14 @@ bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
 		return BFA_STATUS_ERROR_TRUNK_ENABLED;
 	}
 
+	/*
+	 * Check if diag loopback is running
+	 */
+	if (bfa_fcdiag_lb_is_running(bfa)) {
+		bfa_trc(dport->bfa, 0);
+		return BFA_STATUS_DIAG_BUSY;
+	}
+
 	/*
 	 * Check to see if port is disable or in dport state
 	 */
@@ -6356,15 +6869,17 @@ bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
 		return BFA_STATUS_PORT_NOT_DISABLED;
 	}
 
+	/*
+	 * Check if dport is in dynamic mode
+	 */
+	if (dport->dynamic)
+		return BFA_STATUS_DDPORT_ERR;
+
 	/*
 	 * Check if dport is busy
 	 */
-	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
-	    bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
-	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
-	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait)) {
+	if (bfa_dport_is_sending_req(dport))
 		return BFA_STATUS_DEVBUSY;
-	}
 
 	/*
 	 * Check if dport is already enabled
@@ -6374,6 +6889,10 @@ bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
 		return BFA_STATUS_DPORT_ENABLED;
 	}
 
+	bfa_trc(dport->bfa, lpcnt);
+	bfa_trc(dport->bfa, pat);
+	dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
+	dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
 	dport->cbfn = cbfn;
 	dport->cbarg = cbarg;
 
@@ -6401,6 +6920,13 @@ bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
 		return BFA_STATUS_PBC;
 	}
 
+	/*
+	 * Check if dport is in dynamic mode
+	 */
+	if (dport->dynamic)
+		return BFA_STATUS_DDPORT_ERR;
+
 	/*
 	 * Check to see if port is disable or in dport state
 	 */
@@ -6413,10 +6939,7 @@ bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
 	/*
 	 * Check if dport is busy
 	 */
-	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
-	    bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
-	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
-	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait))
+	if (bfa_dport_is_sending_req(dport))
 		return BFA_STATUS_DEVBUSY;
 
 	/*
@@ -6435,30 +6958,105 @@ bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
 }
 
 /*
- *	Get D-port state
+ * Dport start -- restart dport test
  *
- * @param[in] *bfa            - bfa data struct
+ *   @param[in] *bfa		- bfa data struct
  */
+bfa_status_t
+bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat,
+			bfa_cb_diag_t cbfn, void *cbarg)
+{
+	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+	struct bfa_dport_s *dport = &fcdiag->dport;
+
+	/*
+	 * Check to see if IOC is down
+	 */
+	if (!bfa_iocfc_is_operational(bfa))
+		return BFA_STATUS_IOC_NON_OP;
+
+	/*
+	 * Check if dport is in dynamic mode
+	 */
+	if (dport->dynamic)
+		return BFA_STATUS_DDPORT_ERR;
+
+	/*
+	 * Check if dport is busy
+	 */
+	if (bfa_dport_is_sending_req(dport))
+		return BFA_STATUS_DEVBUSY;
 
+	/*
+	 * Check if dport is in enabled state.
+	 * The test can only be restarted when the previous test has completed
+	 */
+	if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
+		bfa_trc(dport->bfa, 0);
+		return BFA_STATUS_DPORT_DISABLED;
+
+	} else {
+		if (dport->test_state == BFA_DPORT_ST_NO_SFP)
+			return BFA_STATUS_DPORT_INV_SFP;
+
+		if (dport->test_state == BFA_DPORT_ST_INP)
+			return BFA_STATUS_DEVBUSY;
+
+		WARN_ON(dport->test_state != BFA_DPORT_ST_COMP);
+	}
+
+	bfa_trc(dport->bfa, lpcnt);
+	bfa_trc(dport->bfa, pat);
+
+	dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
+	dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
+
+	dport->cbfn = cbfn;
+	dport->cbarg = cbarg;
+
+	bfa_sm_send_event(dport, BFA_DPORT_SM_START);
+	return BFA_STATUS_OK;
+}
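
The expected caller pattern — mirrored by bfad_iocmd_diag_dport_start() in bfad_bsg.c later in this diff — is to issue the start under the driver lock and then block on the HAL completion that bfad_hcb_comp() signals (sketch, with bfad, lpcnt, pat and the locals assumed in scope):

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_dport_start(&bfad->bfa, lpcnt, pat, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (rc == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		rc = fcomp.status;
	}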
+
+/*
+ * Dport show -- return dport test result
+ *
+ *   @param[in] *bfa		- bfa data struct
+ */
 bfa_status_t
-bfa_dport_get_state(struct bfa_s *bfa, enum bfa_dport_state *state)
+bfa_dport_show(struct bfa_s *bfa, struct bfa_diag_dport_result_s *result)
 {
 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
 	struct bfa_dport_s *dport = &fcdiag->dport;
 
-	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled))
-		*state = BFA_DPORT_ST_ENABLED;
-	else if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
-		 bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait))
-		*state = BFA_DPORT_ST_ENABLING;
-	else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled))
-		*state = BFA_DPORT_ST_DISABLED;
-	else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
-		 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait))
-		*state = BFA_DPORT_ST_DISABLING;
-	else {
-		bfa_trc(dport->bfa, BFA_STATUS_EINVAL);
-		return BFA_STATUS_EINVAL;
+	/*
+	 * Check to see if IOC is down
+	 */
+	if (!bfa_iocfc_is_operational(bfa))
+		return BFA_STATUS_IOC_NON_OP;
+
+	/*
+	 * Check if dport is busy
+	 */
+	if (bfa_dport_is_sending_req(dport))
+		return BFA_STATUS_DEVBUSY;
+
+	/*
+	 * Check if dport is in enabled state.
+	 */
+	if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
+		bfa_trc(dport->bfa, 0);
+		return BFA_STATUS_DPORT_DISABLED;
+
 	}
+
+	/*
+	 * Check if there is SFP
+	 */
+	if (dport->test_state == BFA_DPORT_ST_NO_SFP)
+		return BFA_STATUS_DPORT_INV_SFP;
+
+	memcpy(result, &dport->result, sizeof(struct bfa_diag_dport_result_s));
+
 	return BFA_STATUS_OK;
 }

+ 24 - 10
drivers/scsi/bfa/bfa_svc.h

@@ -405,8 +405,6 @@ struct bfa_lps_s {
 	bfa_status_t	status;		/*  login status		*/
 	u16		pdusz;		/*  max receive PDU size	*/
 	u16		pr_bbcred;	/*  BB_CREDIT from peer		*/
-	u8		pr_bbscn;	/*  BB_SCN from peer		*/
-	u8		bb_scn;		/*  local BB_SCN		*/
 	u8		lsrjt_rsn;	/*  LSRJT reason		*/
 	u8		lsrjt_expl;	/*  LSRJT explanation		*/
 	u8		lun_mask;	/*  LUN mask flag		*/
@@ -510,11 +508,12 @@ struct bfa_fcport_s {
 	bfa_boolean_t		diag_busy; /*  diag busy status */
 	bfa_boolean_t		beacon; /*  port beacon status */
 	bfa_boolean_t		link_e2e_beacon; /*  link beacon status */
-	bfa_boolean_t		bbsc_op_state;	/* Cred recov Oper State */
 	struct bfa_fcport_trunk_s trunk;
 	u16		fcoe_vlan;
 	struct bfa_mem_dma_s	fcport_dma;
 	bfa_boolean_t		stats_dma_ready;
+	struct bfa_bbcr_attr_s	bbcr_attr;
+	enum bfa_fec_state_s	fec_state;
 };
 
 #define BFA_FCPORT_MOD(__bfa)	(&(__bfa)->modules.fcport)
@@ -552,11 +551,12 @@ void bfa_fcport_event_register(struct bfa_s *bfa,
 			enum bfa_port_linkstate event), void *event_cbarg);
 bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
 bfa_boolean_t bfa_fcport_is_dport(struct bfa_s *bfa);
+bfa_boolean_t bfa_fcport_is_ddport(struct bfa_s *bfa);
 bfa_status_t bfa_fcport_set_qos_bw(struct bfa_s *bfa,
 				   struct bfa_qos_bw_s *qos_bw);
 enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
 
-void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn);
+void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
 bfa_boolean_t     bfa_fcport_is_ratelim(struct bfa_s *bfa);
 void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
 			bfa_boolean_t link_e2e_beacon);
@@ -571,6 +571,10 @@ void bfa_fcport_dportenable(struct bfa_s *bfa);
 void bfa_fcport_dportdisable(struct bfa_s *bfa);
 bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa);
 void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state);
+bfa_status_t bfa_fcport_cfg_bbcr(struct bfa_s *bfa,
+			bfa_boolean_t on_off, u8 bb_scn);
+bfa_status_t bfa_fcport_get_bbcr_attr(struct bfa_s *bfa,
+			struct bfa_bbcr_attr_s *bbcr_attr);
 
 /*
  * bfa rport API functions
@@ -667,7 +671,7 @@ struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
 void bfa_lps_delete(struct bfa_lps_s *lps);
 void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa,
 		   u16 pdusz, wwn_t pwwn, wwn_t nwwn,
-		   bfa_boolean_t auth_en, u8 bb_scn);
+		   bfa_boolean_t auth_en);
 void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz,
 		   wwn_t pwwn, wwn_t nwwn);
 void bfa_lps_fdisclogo(struct bfa_lps_s *lps);
@@ -712,10 +716,18 @@ struct bfa_fcdiag_lb_s {
 struct bfa_dport_s {
 	struct bfa_s	*bfa;		/* Back pointer to BFA	*/
 	bfa_sm_t	sm;		/* finite state machine */
-	u32		msgtag;		/* firmware msg tag for reply */
 	struct bfa_reqq_wait_s reqq_wait;
 	bfa_cb_diag_t	cbfn;
 	void		*cbarg;
+	union bfi_diag_dport_msg_u i2hmsg;
+	u8		test_state;	/* enum dport_test_state  */
+	u8		dynamic;	/* boolean_t  */
+	u8		rsvd[2];
+	u32		lpcnt;
+	u32		payload;	/* user defined payload pattern */
+	wwn_t		rp_pwwn;
+	wwn_t		rp_nwwn;
+	struct bfa_diag_dport_result_s result;
 };
 
 struct bfa_fcdiag_s {
@@ -739,11 +751,13 @@ bfa_status_t	bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 ignore,
 			u32 queue, struct bfa_diag_qtest_result_s *result,
 			bfa_cb_diag_t cbfn, void *cbarg);
 bfa_status_t	bfa_fcdiag_lb_is_running(struct bfa_s *bfa);
-bfa_status_t	bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn,
-				 void *cbarg);
+bfa_status_t	bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat,
+					bfa_cb_diag_t cbfn, void *cbarg);
 bfa_status_t	bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn,
 				  void *cbarg);
-bfa_status_t	bfa_dport_get_state(struct bfa_s *bfa,
-				    enum bfa_dport_state *state);
+bfa_status_t	bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat,
+				bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t	bfa_dport_show(struct bfa_s *bfa,
+				struct bfa_diag_dport_result_s *result);
 
 #endif /* __BFA_SVC_H__ */

+ 11 - 3
drivers/scsi/bfa/bfad.c

@@ -63,9 +63,9 @@ int		max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
 u32	bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
 u32	*bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
 
-#define BFAD_FW_FILE_CB		"cbfw-3.1.0.0.bin"
-#define BFAD_FW_FILE_CT		"ctfw-3.1.0.0.bin"
-#define BFAD_FW_FILE_CT2	"ct2fw-3.1.0.0.bin"
+#define BFAD_FW_FILE_CB		"cbfw-3.2.1.0.bin"
+#define BFAD_FW_FILE_CT		"ctfw-3.2.1.0.bin"
+#define BFAD_FW_FILE_CT2	"ct2fw-3.2.1.0.bin"
 
 static u32 *bfad_load_fwimg(struct pci_dev *pdev);
 static void bfad_free_fwimg(void);
@@ -1720,6 +1720,14 @@ struct pci_device_id bfad_id_table[] = {
 		.class_mask = ~0,
 	},
 
+	{
+		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
+		.device = BFA_PCI_DEVICE_ID_CT2_QUAD,
+		.subvendor = PCI_ANY_ID,
+		.subdevice = PCI_ANY_ID,
+		.class = (PCI_CLASS_SERIAL_FIBER << 8),
+		.class_mask = ~0,
+	},
 	{0, 0},
 };
 

+ 7 - 26
drivers/scsi/bfa/bfad_attr.c

@@ -334,24 +334,11 @@ bfad_im_reset_stats(struct Scsi_Host *shost)
 	return;
 }
 
-/*
- * FC transport template entry, get rport loss timeout.
- */
-static void
-bfad_im_get_rport_loss_tmo(struct fc_rport *rport)
-{
-	struct bfad_itnim_data_s *itnim_data = rport->dd_data;
-	struct bfad_itnim_s   *itnim = itnim_data->itnim;
-	struct bfad_s         *bfad = itnim->im->bfad;
-	unsigned long   flags;
-
-	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	rport->dev_loss_tmo = bfa_fcpim_path_tov_get(&bfad->bfa);
-	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-}
-
 /*
  * FC transport template entry, set rport loss timeout.
+ * Update dev_loss_tmo based on the value pushed down by the stack.
+ * If it is less than the driver's path_tov, set it to path_tov + 1
+ * to ensure that the driver times out before the application.
  */
 static void
 bfad_im_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
@@ -359,15 +346,11 @@ bfad_im_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
 	struct bfad_itnim_data_s *itnim_data = rport->dd_data;
 	struct bfad_itnim_s   *itnim = itnim_data->itnim;
 	struct bfad_s         *bfad = itnim->im->bfad;
-	unsigned long   flags;
-
-	if (timeout > 0) {
-		spin_lock_irqsave(&bfad->bfad_lock, flags);
-		bfa_fcpim_path_tov_set(&bfad->bfa, timeout);
-		rport->dev_loss_tmo = bfa_fcpim_path_tov_get(&bfad->bfa);
-		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-	}
+	uint16_t path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
 
+	rport->dev_loss_tmo = timeout;
+	if (timeout < path_tov)
+		rport->dev_loss_tmo = path_tov + 1;
 }
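
Worked example of the clamp above, assuming the driver's usual 30-second path_tov default:

	/* bfad_im_set_rport_loss_tmo(rport, 20)  ->  dev_loss_tmo = 31
	 * bfad_im_set_rport_loss_tmo(rport, 60)  ->  dev_loss_tmo = 60
	 */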
 
 static int
@@ -665,7 +648,6 @@ struct fc_function_template bfad_im_fc_function_template = {
 	.show_rport_maxframe_size = 1,
 	.show_rport_supported_classes = 1,
 	.show_rport_dev_loss_tmo = 1,
-	.get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo,
 	.set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
 	.issue_fc_host_lip = bfad_im_issue_fc_host_lip,
 	.vport_create = bfad_im_vport_create,
@@ -723,7 +705,6 @@ struct fc_function_template bfad_im_vport_fc_function_template = {
 	.show_rport_maxframe_size = 1,
 	.show_rport_supported_classes = 1,
 	.show_rport_dev_loss_tmo = 1,
-	.get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo,
 	.set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
 };
 

+ 100 - 37
drivers/scsi/bfa/bfad_bsg.c

@@ -402,25 +402,43 @@ bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
 }
 
 int
-bfad_iocmd_port_cfg_bbsc(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
 {
-	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
-	unsigned long	flags;
+	struct bfa_bsg_bbcr_enable_s *iocmd =
+			(struct bfa_bsg_bbcr_enable_s *)pcmd;
+	unsigned long flags;
+	int rc;
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
-		if (v_cmd == IOCMD_PORT_BBSC_ENABLE)
-			fcport->cfg.bb_scn_state = BFA_TRUE;
-		else if (v_cmd == IOCMD_PORT_BBSC_DISABLE)
-			fcport->cfg.bb_scn_state = BFA_FALSE;
+	if (cmd == IOCMD_PORT_BBCR_ENABLE)
+		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_TRUE, iocmd->bb_scn);
+	else if (cmd == IOCMD_PORT_BBCR_DISABLE)
+		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_FALSE, 0);
+	else {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		return -EINVAL;
 	}
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
-	iocmd->status = BFA_STATUS_OK;
+	iocmd->status = rc;
+	return 0;
+}
+
+int
+bfad_iocmd_port_get_bbcr_attr(struct bfad_s *bfad, void *pcmd)
+{
+	struct bfa_bsg_bbcr_attr_s *iocmd = (struct bfa_bsg_bbcr_attr_s *) pcmd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status =
+		bfa_fcport_get_bbcr_attr(&bfad->bfa, &iocmd->attr);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
 	return 0;
 }
 
+
 static int
 bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
 {
@@ -1767,51 +1785,87 @@ bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
 }
 
 int
-bfad_iocmd_diag_cfg_dport(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+bfad_iocmd_diag_dport_enable(struct bfad_s *bfad, void *pcmd)
 {
-	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+	struct bfa_bsg_dport_enable_s *iocmd =
+				(struct bfa_bsg_dport_enable_s *)pcmd;
 	unsigned long	flags;
 	struct bfad_hal_comp fcomp;
 
 	init_completion(&fcomp.comp);
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	if (cmd == IOCMD_DIAG_DPORT_ENABLE)
-		iocmd->status = bfa_dport_enable(&bfad->bfa,
-					bfad_hcb_comp, &fcomp);
-	else if (cmd == IOCMD_DIAG_DPORT_DISABLE)
-		iocmd->status = bfa_dport_disable(&bfad->bfa,
-					bfad_hcb_comp, &fcomp);
+	iocmd->status = bfa_dport_enable(&bfad->bfa, iocmd->lpcnt,
+					iocmd->pat, bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		bfa_trc(bfad, iocmd->status);
 	else {
-		bfa_trc(bfad, 0);
-		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-		return -EINVAL;
+		wait_for_completion(&fcomp.comp);
+		iocmd->status = fcomp.status;
 	}
-	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	return 0;
+}
 
+int
+bfad_iocmd_diag_dport_disable(struct bfad_s *bfad, void *pcmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+	unsigned long	flags;
+	struct bfad_hal_comp fcomp;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_dport_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	if (iocmd->status != BFA_STATUS_OK)
 		bfa_trc(bfad, iocmd->status);
 	else {
 		wait_for_completion(&fcomp.comp);
 		iocmd->status = fcomp.status;
 	}
+	return 0;
+}
+
+int
+bfad_iocmd_diag_dport_start(struct bfad_s *bfad, void *pcmd)
+{
+	struct bfa_bsg_dport_enable_s *iocmd =
+				(struct bfa_bsg_dport_enable_s *)pcmd;
+	unsigned long   flags;
+	struct bfad_hal_comp fcomp;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_dport_start(&bfad->bfa, iocmd->lpcnt,
+					iocmd->pat, bfad_hcb_comp,
+					&fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (iocmd->status != BFA_STATUS_OK) {
+		bfa_trc(bfad, iocmd->status);
+	} else {
+		wait_for_completion(&fcomp.comp);
+		iocmd->status = fcomp.status;
+	}
 
 	return 0;
 }
 
 int
-bfad_iocmd_diag_dport_get_state(struct bfad_s *bfad, void *pcmd)
+bfad_iocmd_diag_dport_show(struct bfad_s *bfad, void *pcmd)
 {
-	struct bfa_bsg_diag_dport_get_state_s *iocmd =
-			(struct bfa_bsg_diag_dport_get_state_s *)pcmd;
-	unsigned long	flags;
+	struct bfa_bsg_diag_dport_show_s *iocmd =
+				(struct bfa_bsg_diag_dport_show_s *)pcmd;
+	unsigned long   flags;
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	iocmd->status = bfa_dport_get_state(&bfad->bfa, &iocmd->state);
+	iocmd->status = bfa_dport_show(&bfad->bfa, &iocmd->result);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
 	return 0;
 }
 
 int
 bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
 {
@@ -2662,7 +2716,7 @@ bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa),
 				&iocmd->data, iocmd->len, iocmd->offset,
-				bfad_hcb_comp, &fcomp);
+				bfad_hcb_comp, &fcomp, iocmd->trfr_cmpl);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	if (iocmd->status == BFA_STATUS_OK) {
 		wait_for_completion(&fcomp.comp);
@@ -2750,9 +2804,12 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
 	case IOCMD_PORT_CFG_MAXFRSZ:
 		rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
 		break;
-	case IOCMD_PORT_BBSC_ENABLE:
-	case IOCMD_PORT_BBSC_DISABLE:
-		rc = bfad_iocmd_port_cfg_bbsc(bfad, iocmd, cmd);
+	case IOCMD_PORT_BBCR_ENABLE:
+	case IOCMD_PORT_BBCR_DISABLE:
+		rc = bfad_iocmd_port_cfg_bbcr(bfad, cmd, iocmd);
+		break;
+	case IOCMD_PORT_BBCR_GET_ATTR:
+		rc = bfad_iocmd_port_get_bbcr_attr(bfad, iocmd);
 		break;
 	case IOCMD_LPORT_GET_ATTR:
 		rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
@@ -2913,11 +2970,16 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
 		rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
 		break;
 	case IOCMD_DIAG_DPORT_ENABLE:
+		rc = bfad_iocmd_diag_dport_enable(bfad, iocmd);
+		break;
 	case IOCMD_DIAG_DPORT_DISABLE:
-		rc = bfad_iocmd_diag_cfg_dport(bfad, cmd, iocmd);
+		rc = bfad_iocmd_diag_dport_disable(bfad, iocmd);
+		break;
+	case IOCMD_DIAG_DPORT_SHOW:
+		rc = bfad_iocmd_diag_dport_show(bfad, iocmd);
 		break;
-	case IOCMD_DIAG_DPORT_GET_STATE:
-		rc = bfad_iocmd_diag_dport_get_state(bfad, iocmd);
+	case IOCMD_DIAG_DPORT_START:
+		rc = bfad_iocmd_diag_dport_start(bfad, iocmd);
 		break;
 	case IOCMD_PHY_GET_ATTR:
 		rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
@@ -3309,7 +3371,8 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
 		goto out;
 	}
 
-	if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload,
+	if (copy_from_user((uint8_t *)bsg_fcpt,
+				(void *)(unsigned long)bsg_data->payload,
 				bsg_data->payload_len)) {
 		kfree(bsg_fcpt);
 		rc = -EIO;
@@ -3463,8 +3526,8 @@ out_free_mem:
 	kfree(rsp_kbuf);
 
 	/* Need a copy to user op */
-	if (copy_to_user(bsg_data->payload, (void *) bsg_fcpt,
-			 bsg_data->payload_len))
+	if (copy_to_user((void *)(unsigned long)bsg_data->payload,
+			(void *)bsg_fcpt, bsg_data->payload_len))
 		rc = -EIO;
 
 	kfree(bsg_fcpt);

+ 40 - 12
drivers/scsi/bfa/bfad_bsg.h

@@ -46,8 +46,9 @@ enum {
 	IOCMD_PORT_CFG_ALPA,
 	IOCMD_PORT_CFG_MAXFRSZ,
 	IOCMD_PORT_CLR_ALPA,
-	IOCMD_PORT_BBSC_ENABLE,
-	IOCMD_PORT_BBSC_DISABLE,
+	IOCMD_PORT_BBCR_ENABLE,
+	IOCMD_PORT_BBCR_DISABLE,
+	IOCMD_PORT_BBCR_GET_ATTR,
 	IOCMD_LPORT_GET_ATTR,
 	IOCMD_LPORT_GET_RPORTS,
 	IOCMD_LPORT_GET_STATS,
@@ -143,7 +144,6 @@ enum {
 	IOCMD_FCPIM_LUNMASK_DELETE,
 	IOCMD_DIAG_DPORT_ENABLE,
 	IOCMD_DIAG_DPORT_DISABLE,
-	IOCMD_DIAG_DPORT_GET_STATE,
 	IOCMD_QOS_SET_BW,
 	IOCMD_FCPIM_THROTTLE_QUERY,
 	IOCMD_FCPIM_THROTTLE_SET,
@@ -152,6 +152,8 @@ enum {
 	IOCMD_FRUVPD_READ,
 	IOCMD_FRUVPD_UPDATE,
 	IOCMD_FRUVPD_GET_MAX_SIZE,
+	IOCMD_DIAG_DPORT_SHOW,
+	IOCMD_DIAG_DPORT_START,
 };
 
 struct bfa_bsg_gen_s {
@@ -495,6 +497,20 @@ struct bfa_bsg_port_cfg_mode_s {
 	struct bfa_port_cfg_mode_s cfg;
 };
 
+struct bfa_bsg_bbcr_enable_s {
+	bfa_status_t    status;
+	u16		bfad_num;
+	u8		bb_scn;
+	u8		rsvd;
+};
+
+struct bfa_bsg_bbcr_attr_s {
+	bfa_status_t    status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_bbcr_attr_s attr;
+};
+
 struct bfa_bsg_faa_attr_s {
 	bfa_status_t		status;
 	u16			bfad_num;
@@ -578,6 +594,21 @@ struct bfa_bsg_diag_loopback_s {
 	struct bfa_diag_loopback_result_s result;
 };
 
+struct bfa_bsg_diag_dport_show_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_diag_dport_result_s result;
+};
+
+struct bfa_bsg_dport_enable_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	u16		lpcnt;
+	u16		pat;
+};
+
 struct bfa_bsg_diag_fwping_s {
 	bfa_status_t	status;
 	u16		bfad_num;
@@ -625,13 +656,6 @@ struct bfa_bsg_diag_lb_stat_s {
 	u16		rsvd;
 };
 
-struct bfa_bsg_diag_dport_get_state_s {
-	bfa_status_t	status;
-	u16		bfad_num;
-	u16		rsvd;
-	enum bfa_dport_state state;
-};
-
 struct bfa_bsg_phy_attr_s {
 	bfa_status_t	status;
 	u16	bfad_num;
@@ -770,10 +794,12 @@ struct bfa_bsg_tfru_s {
 struct bfa_bsg_fruvpd_s {
 	bfa_status_t	status;
 	u16		bfad_num;
-	u16		rsvd;
+	u16		rsvd1;
 	u32		offset;
 	u32		len;
 	u8		data[BFA_MAX_FRUVPD_TRANSFER_SIZE];
+	u8		trfr_cmpl;
+	u8		rsvd2[3];
 };
 
 struct bfa_bsg_fruvpd_max_size_s {
@@ -795,10 +821,12 @@ struct bfa_bsg_fcpt_s {
 };
 #define bfa_bsg_fcpt_t struct bfa_bsg_fcpt_s
 
+#pragma pack(1)
 struct bfa_bsg_data {
 	int payload_len;
-	void *payload;
+	u64 payload;
 };
+#pragma pack()
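
The void * to u64 change (together with the pack pragma) pins struct bfa_bsg_data to a single layout whether userspace is 32- or 64-bit; the driver then recovers the pointer with a double cast, as the bsg request path earlier in this diff already shows:

	/* 32/64-bit safe round trip of the user buffer address: */
	if (copy_from_user((uint8_t *)bsg_fcpt,
			   (void *)(unsigned long)bsg_data->payload,
			   bsg_data->payload_len))
		rc = -EIO;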
 
 #define bfad_chk_iocmd_sz(__payload_len, __hdrsz, __bufsz)	\
 	(((__payload_len) != ((__hdrsz) + (__bufsz))) ?		\

+ 1 - 1
drivers/scsi/bfa/bfad_drv.h

@@ -57,7 +57,7 @@
 #ifdef BFA_DRIVER_VERSION
 #define BFAD_DRIVER_VERSION    BFA_DRIVER_VERSION
 #else
-#define BFAD_DRIVER_VERSION    "3.1.2.1"
+#define BFAD_DRIVER_VERSION    "3.2.21.1"
 #endif
 
 #define BFAD_PROTO_NAME FCPI_NAME

+ 6 - 4
drivers/scsi/bfa/bfad_im.c

@@ -944,13 +944,15 @@ static int
 bfad_im_slave_alloc(struct scsi_device *sdev)
 {
 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
-	struct bfad_itnim_data_s *itnim_data =
-				(struct bfad_itnim_data_s *) rport->dd_data;
-	struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
+	struct bfad_itnim_data_s *itnim_data;
+	struct bfa_s *bfa;
 
 	if (!rport || fc_remote_port_chkready(rport))
 		return -ENXIO;
 
+	itnim_data = (struct bfad_itnim_data_s *) rport->dd_data;
+	bfa = itnim_data->itnim->bfa_itnim->bfa;
+
 	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) {
 		/*
 		 * We should not mask LUN 0 - since this will translate
@@ -1035,7 +1037,7 @@ bfad_fc_host_init(struct bfad_im_port_s *im_port)
 	/* For fibre channel services type 0x20 */
 	fc_host_supported_fc4s(host)[7] = 1;
 
-	strncpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
+	strlcpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
 		BFA_SYMNAME_MAXLEN);
 	sprintf(fc_host_symbolic_name(host), "%s", symname);
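
The strncpy() to strlcpy() change just above matters when the symbolic name fills the buffer: strncpy() does not guarantee a terminating NUL, so the sprintf() that follows could read past the end. Minimal illustration:

	char buf[4];

	strncpy(buf, "abcd", sizeof(buf));	/* "abcd" -- no NUL        */
	strlcpy(buf, "abcd", sizeof(buf));	/* "abc\0" -- always ended */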
 

+ 72 - 6
drivers/scsi/bfa/bfi.h

@@ -264,6 +264,7 @@ struct bfi_ioc_getattr_req_s {
 	union bfi_addr_u	attr_addr;
 };
 
+#define BFI_IOC_ATTR_UUID_SZ	16
 struct bfi_ioc_attr_s {
 	wwn_t		mfg_pwwn;	/*  Mfg port wwn	   */
 	wwn_t		mfg_nwwn;	/*  Mfg node wwn	   */
@@ -292,6 +293,7 @@ struct bfi_ioc_attr_s {
 	u8	mfg_day;	/* manufacturing day */
 	u8	mfg_month;	/* manufacturing month */
 	u16	mfg_year;	/* manufacturing year */
+	u8	uuid[BFI_IOC_ATTR_UUID_SZ];	/*!< chinook uuid */
 };
 
 /*
@@ -374,6 +376,10 @@ enum bfi_ioc_state {
 	BFI_IOC_MEMTEST		= 9,	/*  IOC is doing memtest	     */
 };
 
+#define BFA_IOC_CB_JOIN_SH	16
+#define BFA_IOC_CB_FWSTATE_MASK	0x0000ffff
+#define BFA_IOC_CB_JOIN_MASK	0xffff0000
+
 #define BFI_IOC_ENDIAN_SIG  0x12345678
 
 enum {
@@ -973,6 +979,7 @@ enum bfi_diag_i2h {
 	BFI_DIAG_I2H_LEDTEST = BFA_I2HM(BFI_DIAG_H2I_LEDTEST),
 	BFI_DIAG_I2H_QTEST      = BFA_I2HM(BFI_DIAG_H2I_QTEST),
 	BFI_DIAG_I2H_DPORT	= BFA_I2HM(BFI_DIAG_H2I_DPORT),
+	BFI_DIAG_I2H_DPORT_SCN	= BFA_I2HM(8),
 };
 
 #define BFI_DIAG_MAX_SGES	2
@@ -1064,16 +1071,73 @@ struct bfi_diag_qtest_req_s {
 enum bfi_dport_req {
 	BFI_DPORT_DISABLE	= 0,	/* disable dport request	*/
 	BFI_DPORT_ENABLE	= 1,	/* enable dport request		*/
+	BFI_DPORT_START		= 2,	/* start dport request	*/
+	BFI_DPORT_SHOW		= 3,	/* show dport request	*/
+	BFI_DPORT_DYN_DISABLE	= 4,	/* disable dynamic dport request */
+};
+
+enum bfi_dport_scn {
+	BFI_DPORT_SCN_TESTSTART		= 1,
+	BFI_DPORT_SCN_TESTCOMP		= 2,
+	BFI_DPORT_SCN_SFP_REMOVED	= 3,
+	BFI_DPORT_SCN_DDPORT_ENABLE	= 4,
+	BFI_DPORT_SCN_DDPORT_DISABLE	= 5,
+	BFI_DPORT_SCN_FCPORT_DISABLE	= 6,
+	BFI_DPORT_SCN_SUBTESTSTART	= 7,
+	BFI_DPORT_SCN_TESTSKIP		= 8,
+	BFI_DPORT_SCN_DDPORT_DISABLED	= 9,
 };
 
 struct bfi_diag_dport_req_s {
 	struct bfi_mhdr_s	mh;	/* 4 bytes                      */
-	u8			req;    /* request 1: enable 0: disable */
-	u8			status; /* reply status			*/
-	u8			rsvd[2];
-	u32			msgtag; /* msgtag for reply		*/
+	u8			req;	/* request 1: enable 0: disable	*/
+	u8			rsvd[3];
+	u32			lpcnt;
+	u32			payload;
+};
+
+struct bfi_diag_dport_rsp_s {
+	struct bfi_mhdr_s	mh;	/* header 4 bytes		*/
+	bfa_status_t		status;	/* reply status			*/
+	wwn_t			pwwn;	/* switch port wwn. 8 bytes	*/
+	wwn_t			nwwn;	/* switch node wwn. 8 bytes	*/
+};
+
+struct bfi_diag_dport_scn_teststart_s {
+	wwn_t	pwwn;	/* switch port wwn. 8 bytes */
+	wwn_t	nwwn;	/* switch node wwn. 8 bytes */
+	u8	type;	/* bfa_diag_dport_test_type_e */
+	u8	rsvd[3];
+	u32	numfrm; /* from switch, unit in 1M */
+};
+
+struct bfi_diag_dport_scn_testcomp_s {
+	u8	status; /* bfa_diag_dport_test_status_e */
+	u8	speed;  /* bfa_port_speed_t  */
+	u16	numbuffer; /* from switch  */
+	u8	subtest_status[DPORT_TEST_MAX];  /* 4 bytes */
+	u32	latency;   /* from switch  */
+	u32	distance;  /* from switch, unit in meters  */
+			/* Buffers required to saturate the link */
+	u16	frm_sz;	/* from switch for buf_reqd */
+	u8	rsvd[2];
+};
+
+struct bfi_diag_dport_scn_s {		/* max size == RDS_RMESZ	*/
+	struct bfi_mhdr_s	mh;	/* header 4 bytes		*/
+	u8			state;  /* new state			*/
+	u8			rsvd[3];
+	union {
+		struct bfi_diag_dport_scn_teststart_s teststart;
+		struct bfi_diag_dport_scn_testcomp_s testcomp;
+	} info;
+};
+
+union bfi_diag_dport_msg_u {
+	struct bfi_diag_dport_req_s	req;
+	struct bfi_diag_dport_rsp_s	rsp;
+	struct bfi_diag_dport_scn_s	scn;
 };
-#define bfi_diag_dport_rsp_t struct bfi_diag_dport_req_s
 
 /*
  *	PHY module specific
@@ -1191,7 +1255,9 @@ enum bfi_fru_i2h_msgs {
 struct bfi_fru_write_req_s {
 	struct bfi_mhdr_s	mh;	/* Common msg header */
 	u8			last;
-	u8			rsv[3];
+	u8			rsv_1[3];
+	u8			trfr_cmpl;
+	u8			rsv_2[3];
 	u32			offset;
 	u32			length;
 	struct bfi_alen_s	alen;

+ 2 - 3
drivers/scsi/bfa/bfi_ms.h

@@ -276,8 +276,7 @@ struct bfi_fcport_enable_req_s {
 struct bfi_fcport_set_svc_params_req_s {
 	struct bfi_mhdr_s  mh;		/*  msg header */
 	__be16	   tx_bbcredit;	/*  Tx credits */
-	u8	bb_scn;		/* BB_SC FC credit recovery */
-	u8	rsvd;
+	u8	rsvd[2];
 };
 
 /*
@@ -446,8 +445,8 @@ struct bfi_lps_login_rsp_s {
 	mac_t		fcf_mac;
 	u8		ext_status;
 	u8		brcd_switch;	/*  attached peer is brcd switch */
-	u8		bb_scn;		/* atatched port's bb_scn */
 	u8		bfa_tag;
+	u8		rsvd;
 };
 
 struct bfi_lps_logout_req_s {

+ 0 - 91
drivers/scsi/csiostor/csio_hw.c

@@ -1597,87 +1597,6 @@ out:
 	return rv;
 }
 
-static int
-csio_config_global_rss(struct csio_hw *hw)
-{
-	struct csio_mb	*mbp;
-	enum fw_retval retval;
-
-	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
-	if (!mbp) {
-		CSIO_INC_STATS(hw, n_err_nomem);
-		return -ENOMEM;
-	}
-
-	csio_rss_glb_config(hw, mbp, CSIO_MB_DEFAULT_TMO,
-			    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
-			    FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
-			    FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
-			    FW_RSS_GLB_CONFIG_CMD_TNLALLLKP,
-			    NULL);
-
-	if (csio_mb_issue(hw, mbp)) {
-		csio_err(hw, "Issue of FW_RSS_GLB_CONFIG_CMD failed!\n");
-		mempool_free(mbp, hw->mb_mempool);
-		return -EINVAL;
-	}
-
-	retval = csio_mb_fw_retval(mbp);
-	if (retval != FW_SUCCESS) {
-		csio_err(hw, "FW_RSS_GLB_CONFIG_CMD returned 0x%x!\n", retval);
-		mempool_free(mbp, hw->mb_mempool);
-		return -EINVAL;
-	}
-
-	mempool_free(mbp, hw->mb_mempool);
-
-	return 0;
-}
-
-/*
- * csio_config_pfvf - Configure Physical/Virtual functions settings.
- * @hw: HW module
- *
- */
-static int
-csio_config_pfvf(struct csio_hw *hw)
-{
-	struct csio_mb	*mbp;
-	enum fw_retval retval;
-
-	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
-	if (!mbp) {
-		CSIO_INC_STATS(hw, n_err_nomem);
-		return -ENOMEM;
-	}
-
-	/*
-	 * For now, allow all PFs to access to all ports using a pmask
-	 * value of 0xF (M_FW_PFVF_CMD_PMASK). Once we have VFs, we will
-	 * need to provide access based on some rule.
-	 */
-	csio_mb_pfvf(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, CSIO_NEQ,
-		     CSIO_NETH_CTRL, CSIO_NIQ_FLINT, 0, 0, CSIO_NVI, CSIO_CMASK,
-		     CSIO_PMASK, CSIO_NEXACTF, CSIO_R_CAPS, CSIO_WX_CAPS, NULL);
-
-	if (csio_mb_issue(hw, mbp)) {
-		csio_err(hw, "Issue of FW_PFVF_CMD failed!\n");
-		mempool_free(mbp, hw->mb_mempool);
-		return -EINVAL;
-	}
-
-	retval = csio_mb_fw_retval(mbp);
-	if (retval != FW_SUCCESS) {
-		csio_err(hw, "FW_PFVF_CMD returned 0x%x!\n", retval);
-		mempool_free(mbp, hw->mb_mempool);
-		return -EINVAL;
-	}
-
-	mempool_free(mbp, hw->mb_mempool);
-
-	return 0;
-}
-
 /*
  * csio_enable_ports - Bring up all available ports.
  * @hw: HW module.
@@ -2056,16 +1975,6 @@ csio_hw_no_fwconfig(struct csio_hw *hw, int reset)
 	if (rv != 0)
 		goto out;
 
-	/* Config Global RSS command */
-	rv = csio_config_global_rss(hw);
-	if (rv != 0)
-		goto out;
-
-	/* Configure PF/VF capabilities of device */
-	rv = csio_config_pfvf(hw);
-	if (rv != 0)
-		goto out;
-
 	/* device parameters */
 	rv = csio_get_device_params(hw);
 	if (rv != 0)

+ 0 - 11
drivers/scsi/csiostor/csio_hw.h

@@ -153,17 +153,6 @@ enum {
 	CSIO_SGE_INT_CNT_VAL_1		= 4,
 	CSIO_SGE_INT_CNT_VAL_2		= 8,
 	CSIO_SGE_INT_CNT_VAL_3		= 16,
-
-	/* Storage specific - used by FW_PFVF_CMD */
-	CSIO_WX_CAPS			= FW_CMD_CAP_PF, /* w/x all */
-	CSIO_R_CAPS			= FW_CMD_CAP_PF, /* r all */
-	CSIO_NVI			= 4,
-	CSIO_NIQ_FLINT			= 34,
-	CSIO_NETH_CTRL			= 32,
-	CSIO_NEQ			= 66,
-	CSIO_NEXACTF			= 32,
-	CSIO_CMASK			= FW_PFVF_CMD_CMASK_MASK,
-	CSIO_PMASK			= FW_PFVF_CMD_PMASK_MASK,
 };
 
 /* Slowpath events */

+ 0 - 77
drivers/scsi/csiostor/csio_mb.c

@@ -326,83 +326,6 @@ csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
 		cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_TARGET);
 }
 
-void
-csio_rss_glb_config(struct csio_hw *hw, struct csio_mb *mbp,
-		    uint32_t tmo, uint8_t mode, unsigned int flags,
-		    void (*cbfn)(struct csio_hw *, struct csio_mb *))
-{
-	struct fw_rss_glb_config_cmd *cmdp =
-				(struct fw_rss_glb_config_cmd *)(mbp->mb);
-
-	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
-
-	cmdp->op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
-				  FW_CMD_REQUEST | FW_CMD_WRITE);
-	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
-
-	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
-		cmdp->u.manual.mode_pkd =
-			htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
-	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
-		cmdp->u.basicvirtual.mode_pkd =
-			htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
-		cmdp->u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
-	}
-}
-
-
-/*
- * csio_mb_pfvf - FW Write PF/VF capabilities command helper.
- * @hw: The HW structure
- * @mbp: Mailbox structure
- * @pf:
- * @vf:
- * @txq:
- * @txq_eht_ctrl:
- * @rxqi:
- * @rxq:
- * @tc:
- * @vi:
- * @pmask:
- * @rcaps:
- * @wxcaps:
- * @cbfn: Callback, if any.
- *
- */
-void
-csio_mb_pfvf(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
-	     unsigned int pf, unsigned int vf, unsigned int txq,
-	     unsigned int txq_eth_ctrl, unsigned int rxqi,
-	     unsigned int rxq, unsigned int tc, unsigned int vi,
-	     unsigned int cmask, unsigned int pmask, unsigned int nexactf,
-	     unsigned int rcaps, unsigned int wxcaps,
-	     void (*cbfn) (struct csio_hw *, struct csio_mb *))
-{
-	struct fw_pfvf_cmd *cmdp = (struct fw_pfvf_cmd *)(mbp->mb);
-
-	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
-
-	cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD)			|
-				FW_CMD_REQUEST				|
-				FW_CMD_WRITE				|
-				FW_PFVF_CMD_PFN(pf)			|
-				FW_PFVF_CMD_VFN(vf));
-	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
-	cmdp->niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi)		|
-					     FW_PFVF_CMD_NIQ(rxq));
-
-	cmdp->type_to_neq = htonl(FW_PFVF_CMD_TYPE			|
-				  FW_PFVF_CMD_CMASK(cmask)		|
-				  FW_PFVF_CMD_PMASK(pmask)		|
-				  FW_PFVF_CMD_NEQ(txq));
-	cmdp->tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc)			|
-				    FW_PFVF_CMD_NVI(vi)			|
-				    FW_PFVF_CMD_NEXACTF(nexactf));
-	cmdp->r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps)	|
-					 FW_PFVF_CMD_WX_CAPS(wxcaps)	|
-					 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
-}
-
 #define CSIO_ADVERT_MASK     (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
 			      FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
 

+ 0 - 11
drivers/scsi/csiostor/csio_mb.h

@@ -183,17 +183,6 @@ void csio_mb_caps_config(struct csio_hw *, struct csio_mb *, uint32_t,
 			    bool, bool, bool, bool,
 			    void (*)(struct csio_hw *, struct csio_mb *));
 
-void csio_rss_glb_config(struct csio_hw *, struct csio_mb *,
-			 uint32_t, uint8_t, unsigned int,
-			 void (*)(struct csio_hw *, struct csio_mb *));
-
-void csio_mb_pfvf(struct csio_hw *, struct csio_mb *, uint32_t,
-		  unsigned int, unsigned int, unsigned int,
-		  unsigned int, unsigned int, unsigned int,
-		  unsigned int, unsigned int, unsigned int,
-		  unsigned int, unsigned int, unsigned int,
-		  unsigned int, void (*) (struct csio_hw *, struct csio_mb *));
-
 void csio_mb_port(struct csio_hw *, struct csio_mb *, uint32_t,
 		  uint8_t, bool, uint32_t, uint16_t,
 		  void (*) (struct csio_hw *, struct csio_mb *));

+ 2 - 2
drivers/scsi/csiostor/csio_scsi.c

@@ -1479,8 +1479,8 @@ csio_store_dbg_level(struct device *dev,
 }
 
 static DEVICE_ATTR(hw_state, S_IRUGO, csio_show_hw_state, NULL);
-static DEVICE_ATTR(device_reset, S_IRUGO | S_IWUSR, NULL, csio_device_reset);
-static DEVICE_ATTR(disable_port, S_IRUGO | S_IWUSR, NULL, csio_disable_port);
+static DEVICE_ATTR(device_reset, S_IWUSR, NULL, csio_device_reset);
+static DEVICE_ATTR(disable_port, S_IWUSR, NULL, csio_disable_port);
 static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level,
 		  csio_store_dbg_level);
 

+ 128 - 31
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c

@@ -20,6 +20,7 @@
 #include <net/dst.h>
 #include <linux/netdevice.h>
 
+#include "t4_regs.h"
 #include "t4_msg.h"
 #include "cxgb4.h"
 #include "cxgb4_uld.h"
@@ -32,13 +33,12 @@ static unsigned int dbg_level;
 #include "../libcxgbi.h"
 
 #define	DRV_MODULE_NAME		"cxgb4i"
-#define DRV_MODULE_DESC		"Chelsio T4 iSCSI Driver"
-#define	DRV_MODULE_VERSION	"0.9.1"
-#define	DRV_MODULE_RELDATE	"Aug. 2010"
+#define DRV_MODULE_DESC		"Chelsio T4/T5 iSCSI Driver"
+#define	DRV_MODULE_VERSION	"0.9.4"
 
 static char version[] =
 	DRV_MODULE_DESC " " DRV_MODULE_NAME
-	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+	" v" DRV_MODULE_VERSION "\n";
 
 MODULE_AUTHOR("Chelsio Communications, Inc.");
 MODULE_DESCRIPTION(DRV_MODULE_DESC);
@@ -175,10 +175,56 @@ static inline int is_ofld_imm(const struct sk_buff *skb)
 			sizeof(struct fw_ofld_tx_data_wr));
 }
 
+
+#define VLAN_NONE 0xfff
+#define FILTER_SEL_VLAN_NONE 0xffff
+#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
+#define FILTER_SEL_WIDTH_VIN_P_FC \
+	(6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits */
+#define FILTER_SEL_WIDTH_TAG_P_FC \
+	(3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
+#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
+
+static unsigned int select_ntuple(struct cxgbi_device *cdev,
+				struct l2t_entry *l2t)
+{
+	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+	unsigned int ntuple = 0;
+	u32 viid;
+
+	switch (lldi->filt_mode) {
+
+	/* default filter mode */
+	case HW_TPL_FR_MT_PR_IV_P_FC:
+		if (l2t->vlan == VLAN_NONE)
+			ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
+		else {
+			ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
+			ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+		}
+		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
+			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+		break;
+	case HW_TPL_FR_MT_PR_OV_P_FC: {
+		viid = cxgb4_port_viid(l2t->neigh->dev);
+
+		ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
+		ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
+		ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
+		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
+			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+		break;
+	}
+	default:
+		break;
+	}
+	return ntuple;
+}
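
The FILTER_SEL_* shift positions used in select_ntuple() follow directly from the macro arithmetic; spelled out:

	/* FILTER_SEL_WIDTH_P_FC         = 3 + 1      = 4
	 * FILTER_SEL_WIDTH_VIN_P_FC     = 6 + 7 + 4  = 17
	 * FILTER_SEL_WIDTH_TAG_P_FC     = 3 + 17     = 20
	 * FILTER_SEL_WIDTH_VLD_TAG_P_FC = 1 + 20     = 21
	 * so the VLAN selector lands at bit 4 and IPPROTO_TCP at bit 21.
	 */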
+
 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 				struct l2t_entry *e)
 {
-	struct cpl_act_open_req *req;
+	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
 	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
 	unsigned long long opt0;
 	unsigned int opt2;
@@ -195,29 +241,58 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 		RCV_BUFSIZ(cxgb4i_rcv_win >> 10);
 	opt2 = RX_CHANNEL(0) |
 		RSS_QUEUE_VALID |
-		(1 << 20) | (1 << 22) |
+		(1 << 20) |
 		RSS_QUEUE(csk->rss_qid);
 
-	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
-	req = (struct cpl_act_open_req *)skb->head;
+	if (is_t4(lldi->adapter_type)) {
+		struct cpl_act_open_req *req =
+				(struct cpl_act_open_req *)skb->head;
 
-	INIT_TP_WR(req, 0);
-	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+		req = (struct cpl_act_open_req *)skb->head;
+
+		INIT_TP_WR(req, 0);
+		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
 					qid_atid));
-	req->local_port = csk->saddr.sin_port;
-	req->peer_port = csk->daddr.sin_port;
-	req->local_ip = csk->saddr.sin_addr.s_addr;
-	req->peer_ip = csk->daddr.sin_addr.s_addr;
-	req->opt0 = cpu_to_be64(opt0);
-	req->params = 0;
-	req->opt2 = cpu_to_be32(opt2);
+		req->local_port = csk->saddr.sin_port;
+		req->peer_port = csk->daddr.sin_port;
+		req->local_ip = csk->saddr.sin_addr.s_addr;
+		req->peer_ip = csk->daddr.sin_addr.s_addr;
+		req->opt0 = cpu_to_be64(opt0);
+		req->params = cpu_to_be32(select_ntuple(csk->cdev, csk->l2t));
+		opt2 |= 1 << 22;
+		req->opt2 = cpu_to_be32(opt2);
 
-	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-		"csk 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
-		csk, &req->local_ip, ntohs(req->local_port),
-		&req->peer_ip, ntohs(req->peer_port),
-		csk->atid, csk->rss_qid);
+		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+			"csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
+			csk, &req->local_ip, ntohs(req->local_port),
+			&req->peer_ip, ntohs(req->peer_port),
+			csk->atid, csk->rss_qid);
+	} else {
+		struct cpl_t5_act_open_req *req =
+				(struct cpl_t5_act_open_req *)skb->head;
+
+		req = (struct cpl_t5_act_open_req *)skb->head;
+
+		INIT_TP_WR(req, 0);
+		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+					qid_atid));
+		req->local_port = csk->saddr.sin_port;
+		req->peer_port = csk->daddr.sin_port;
+		req->local_ip = csk->saddr.sin_addr.s_addr;
+		req->peer_ip = csk->daddr.sin_addr.s_addr;
+		req->opt0 = cpu_to_be64(opt0);
+		req->params = cpu_to_be32(select_ntuple(csk->cdev, csk->l2t));
+		opt2 |= 1 << 31;
+		req->opt2 = cpu_to_be32(opt2);
 
+		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+			"csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
+			csk, &req->local_ip, ntohs(req->local_port),
+			&req->peer_ip, ntohs(req->peer_port),
+			csk->atid, csk->rss_qid);
+	}
+
+	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
 	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
 }
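
The T4 and T5 arms of send_act_open_req() are deliberately parallel; the only real differences are summarized here:

	/* T4: struct cpl_act_open_req,    opt2 |= 1 << 22;
	 * T5: struct cpl_t5_act_open_req, opt2 |= 1 << 31;
	 * everything else -- ports, addresses, opt0, ntuple params --
	 * is filled identically from the csk. */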
 
@@ -632,6 +707,7 @@ static void csk_act_open_retry_timer(unsigned long data)
 {
 	struct sk_buff *skb;
 	struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
+	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
 
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		"csk 0x%p,%u,0x%lx,%u.\n",
@@ -639,7 +715,10 @@ static void csk_act_open_retry_timer(unsigned long data)
 
 	cxgbi_sock_get(csk);
 	spin_lock_bh(&csk->lock);
-	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC);
+	skb = alloc_wr(is_t4(lldi->adapter_type) ?
+				sizeof(struct cpl_act_open_req) :
+				sizeof(struct cpl_t5_act_open_req),
+			0, GFP_ATOMIC);
 	if (!skb)
 		cxgbi_sock_fail_act_open(csk, -ENOMEM);
 	else {
@@ -871,7 +950,7 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
 
 	if (!csk->skb_ulp_lhdr) {
 		unsigned char *bhs;
-		unsigned int hlen, dlen;
+		unsigned int hlen, dlen, plen;
 
 		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
 			"csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
@@ -890,11 +969,15 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
 		hlen = ntohs(cpl->len);
 		dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;
 
-		if ((hlen + dlen) != ISCSI_PDU_LEN(pdu_len_ddp) - 40) {
+		plen = ISCSI_PDU_LEN(pdu_len_ddp);
+		if (is_t4(lldi->adapter_type))
+			plen -= 40;
+
+		if ((hlen + dlen) != plen) {
 			pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
 				"mismatch %u != %u + %u, seq 0x%x.\n",
-				csk->tid, ISCSI_PDU_LEN(pdu_len_ddp) - 40,
-				hlen, dlen, cxgbi_skcb_tcp_seq(skb));
+				csk->tid, plen, hlen, dlen,
+				cxgbi_skcb_tcp_seq(skb));
 			goto abort_conn;
 		}
 
@@ -1154,7 +1237,10 @@ static int init_act_open(struct cxgbi_sock *csk)
 	}
 	cxgbi_sock_get(csk);
 
-	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
+	skb = alloc_wr(is_t4(lldi->adapter_type) ?
+				sizeof(struct cpl_act_open_req) :
+				sizeof(struct cpl_t5_act_open_req),
+			0, GFP_ATOMIC);
 	if (!skb)
 		goto rel_resource;
 	skb->sk = (struct sock *)csk;
@@ -1193,6 +1279,8 @@ rel_resource:
 	return -EINVAL;
 }
 
+#define CPL_ISCSI_DATA		0xB2
+#define CPL_RX_ISCSI_DDP	0x49
 cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
 	[CPL_ACT_ESTABLISH] = do_act_establish,
 	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
@@ -1202,8 +1290,10 @@ cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
 	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
 	[CPL_FW4_ACK] = do_fw4_ack,
 	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
+	[CPL_ISCSI_DATA] = do_rx_iscsi_hdr,
 	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
 	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
+	[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
 };
 
 int cxgb4i_ofld_init(struct cxgbi_device *cdev)
@@ -1234,14 +1324,20 @@ int cxgb4i_ofld_init(struct cxgbi_device *cdev)
  * functions to program the pagepod in h/w
  */
 #define ULPMEM_IDATA_MAX_NPPODS	4 /* 256/PPOD_SIZE */
-static inline void ulp_mem_io_set_hdr(struct ulp_mem_io *req,
+static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi,
+				struct ulp_mem_io *req,
 				unsigned int wr_len, unsigned int dlen,
 				unsigned int pm_addr)
 {
 	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);
 
 	INIT_ULPTX_WR(req, wr_len, 0, 0);
-	req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1 << 23));
+	if (is_t4(lldi->adapter_type))
+		req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) |
+					(ULP_MEMIO_ORDER(1)));
+	else
+		req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) |
+					(V_T5_ULP_MEMIO_IMM(1)));
 	req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5));
 	req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5));
 	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
@@ -1257,6 +1353,7 @@ static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
 				unsigned int gl_pidx)
 {
 	struct cxgbi_ddp_info *ddp = cdev->ddp;
+	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
 	struct sk_buff *skb;
 	struct ulp_mem_io *req;
 	struct ulptx_idata *idata;
@@ -1276,7 +1373,7 @@ static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
 	req = (struct ulp_mem_io *)skb->head;
 	set_queue(skb, CPL_PRIORITY_CONTROL, NULL);
 
-	ulp_mem_io_set_hdr(req, wr_len, dlen, pm_addr);
+	ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr);
 	idata = (struct ulptx_idata *)(req + 1);
 	ppod = (struct cxgbi_pagepod *)(idata + 1);
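
Both the connection-open paths and the pagepod writes above branch on is_t4() to pick the right CPL size and header flags. As a condensed illustration of that sizing pattern (the helper name act_open_req_size is hypothetical, not part of the driver):

    /* Hypothetical helper: choose the active-open WR size per chip. */
    static inline size_t act_open_req_size(const struct cxgb4_lld_info *lldi)
    {
            return is_t4(lldi->adapter_type) ?
                    sizeof(struct cpl_act_open_req) :      /* T4 CPL */
                    sizeof(struct cpl_t5_act_open_req);    /* T5 CPL */
    }

Both alloc_wr() call sites above could then share one expression instead of repeating the ternary.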
 

+ 2 - 4
drivers/scsi/fnic/fnic_scsi.c

@@ -2432,11 +2432,9 @@ int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
 			      "Found IO in %s on lun\n",
 			      fnic_ioreq_state_to_str(CMD_STATE(sc)));
 
-		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
-			spin_unlock_irqrestore(io_lock, flags);
+		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
 			ret = 1;
-			continue;
-		}
+		spin_unlock_irqrestore(io_lock, flags);
 	}
 
 	return ret;
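
The deadlock here was a lock leak: the old loop released io_lock only on the FNIC_IOREQ_ABTS_PENDING path and then re-acquired it on the next iteration while still holding it for every other state. The fix reduces to one rule, sketched below with names from the hunk (surrounding loop elided):

    spin_lock_irqsave(io_lock, flags);
    if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
            ret = 1;                /* just record it; keep going */
    /* exactly one unlock per iteration, on every path */
    spin_unlock_irqrestore(io_lock, flags);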

+ 48 - 9
drivers/scsi/ipr.c

@@ -281,12 +281,22 @@ struct ipr_error_table_t ipr_error_table[] = {
 	"FFF6: Failure prediction threshold exceeded"},
 	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
 	"8009: Impending cache battery pack failure"},
+	{0x02040100, 0, 0,
+	"Logical Unit in process of becoming ready"},
+	{0x02040200, 0, 0,
+	"Initializing command required"},
 	{0x02040400, 0, 0,
 	"34FF: Disk device format in progress"},
+	{0x02040C00, 0, 0,
+	"Logical unit not accessible, target port in unavailable state"},
 	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
 	"9070: IOA requested reset"},
 	{0x023F0000, 0, 0,
 	"Synchronization required"},
+	{0x02408500, 0, 0,
+	"IOA microcode download required"},
+	{0x02408600, 0, 0,
+	"Device bus connection is prohibited by host"},
 	{0x024E0000, 0, 0,
 	"No ready, IOA shutdown"},
 	{0x025A0000, 0, 0,
@@ -385,6 +395,8 @@ struct ipr_error_table_t ipr_error_table[] = {
 	"4030: Incorrect multipath connection"},
 	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
 	"4110: Unsupported enclosure function"},
+	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
+	"4120: SAS cable VPD cannot be read"},
 	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
 	"FFF4: Command to logical unit failed"},
 	{0x05240000, 1, 0,
@@ -407,10 +419,18 @@ struct ipr_error_table_t ipr_error_table[] = {
 	"Illegal request, command sequence error"},
 	{0x052C8000, 1, 0,
 	"Illegal request, dual adapter support not enabled"},
+	{0x052C8100, 1, 0,
+	"Illegal request, another cable connector was physically disabled"},
+	{0x054E8000, 1, 0,
+	"Illegal request, inconsistent group id/group count"},
 	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
 	"9031: Array protection temporarily suspended, protection resuming"},
 	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
 	"9040: Array protection temporarily suspended, protection resuming"},
+	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
+	"4080: IOA exceeded maximum operating temperature"},
+	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
+	"4085: Service required"},
 	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
 	"3140: Device bus not ready to ready transition"},
 	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
@@ -423,6 +443,8 @@ struct ipr_error_table_t ipr_error_table[] = {
 	"FFFB: SCSI bus was reset by another initiator"},
 	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
 	"3029: A device replacement has occurred"},
+	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
+	"4102: Device bus fabric performance degradation"},
 	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
 	"9051: IOA cache data exists for a missing or failed device"},
 	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
@@ -445,6 +467,14 @@ struct ipr_error_table_t ipr_error_table[] = {
 	"9076: Configuration error, missing remote IOA"},
 	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
 	"4050: Enclosure does not support a required multipath function"},
+	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
+	"4121: Configuration error, required cable is missing"},
+	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
+	"4122: Cable is not plugged into the correct location on remote IOA"},
+	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
+	"4123: Configuration error, invalid cable vital product data"},
+	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
+	"4124: Configuration error, both cable ends are plugged into the same IOA"},
 	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
 	"4070: Logically bad block written on device"},
 	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
@@ -507,10 +537,18 @@ struct ipr_error_table_t ipr_error_table[] = {
 	"9062: One or more disks are missing from an array"},
 	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
 	"9063: Maximum number of functional arrays has been exceeded"},
+	{0x07279A00, 0, 0,
+	"Data protect, other volume set problem"},
 	{0x0B260000, 0, 0,
 	"Aborted command, invalid descriptor"},
+	{0x0B3F9000, 0, 0,
+	"Target operating conditions have changed, dual adapter takeover"},
+	{0x0B530200, 0, 0,
+	"Aborted command, medium removal prevented"},
 	{0x0B5A0000, 0, 0,
-	"Command terminated by host"}
+	"Command terminated by host"},
+	{0x0B5B8000, 0, 0,
+	"Aborted command, command terminated by host"}
 };
 
 static const struct ipr_ses_table_entry ipr_ses_table[] = {
@@ -9391,7 +9429,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 	void __iomem *ipr_regs;
 	int rc = PCIBIOS_SUCCESSFUL;
 	volatile u32 mask, uproc, interrupts;
-	unsigned long lock_flags;
+	unsigned long lock_flags, driver_lock_flags;
 
 	ENTER;
 
@@ -9614,9 +9652,9 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 	} else
 		ioa_cfg->reset = ipr_reset_start_bist;
 
-	spin_lock(&ipr_driver_lock);
+	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
 	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
-	spin_unlock(&ipr_driver_lock);
+	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
 
 	LEAVE;
 out:
@@ -9699,6 +9737,7 @@ static void __ipr_remove(struct pci_dev *pdev)
 	unsigned long host_lock_flags = 0;
 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
 	int i;
+	unsigned long driver_lock_flags;
 	ENTER;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
@@ -9722,9 +9761,9 @@ static void __ipr_remove(struct pci_dev *pdev)
 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
 
-	spin_lock(&ipr_driver_lock);
+	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
 	list_del(&ioa_cfg->queue);
-	spin_unlock(&ipr_driver_lock);
+	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
 
 	if (ioa_cfg->sdt_state == ABORT_DUMP)
 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
@@ -9990,12 +10029,12 @@ static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
 {
 	struct ipr_cmnd *ipr_cmd;
 	struct ipr_ioa_cfg *ioa_cfg;
-	unsigned long flags = 0;
+	unsigned long flags = 0, driver_lock_flags;
 
 	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
 		return NOTIFY_DONE;
 
-	spin_lock(&ipr_driver_lock);
+	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
 
 	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
@@ -10013,7 +10052,7 @@ static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
 		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 	}
-	spin_unlock(&ipr_driver_lock);
+	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
 
 	return NOTIFY_OK;
 }
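
The locking change in ipr is the usual irq-safety upgrade: plain spin_lock() on ipr_driver_lock was only safe while every taker ran in process context with interrupts enabled, whereas ipr_halt() runs from a reboot notifier and nests host_lock acquisitions inside it. spin_lock_irqsave() saves and restores the interrupt state, so the idiom is uniform at every site:

    unsigned long driver_lock_flags;

    /* Safe regardless of the caller's interrupt state. */
    spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
    list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
    spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);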

+ 2 - 2
drivers/scsi/isci/request.c

@@ -184,8 +184,8 @@ static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
 	cmd_iu->task_attr = task->ssp_task.task_attr;
 	cmd_iu->_r_c = 0;
 
-	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
-		       sizeof(task->ssp_task.cdb) / sizeof(u32));
+	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd,
+		       task->ssp_task.cmd->cmd_len / sizeof(u32));
 }
 
 static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)

+ 18 - 0
drivers/scsi/libiscsi.c

@@ -2808,6 +2808,9 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
 	kfree(session->targetname);
 	kfree(session->targetalias);
 	kfree(session->initiatorname);
+	kfree(session->boot_root);
+	kfree(session->boot_nic);
+	kfree(session->boot_target);
 	kfree(session->ifacename);
 
 	iscsi_destroy_session(cls_session);
@@ -3248,6 +3251,12 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
 		return iscsi_switch_str_param(&session->ifacename, buf);
 	case ISCSI_PARAM_INITIATOR_NAME:
 		return iscsi_switch_str_param(&session->initiatorname, buf);
+	case ISCSI_PARAM_BOOT_ROOT:
+		return iscsi_switch_str_param(&session->boot_root, buf);
+	case ISCSI_PARAM_BOOT_NIC:
+		return iscsi_switch_str_param(&session->boot_nic, buf);
+	case ISCSI_PARAM_BOOT_TARGET:
+		return iscsi_switch_str_param(&session->boot_target, buf);
 	default:
 		return -ENOSYS;
 	}
@@ -3326,6 +3335,15 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
 	case ISCSI_PARAM_INITIATOR_NAME:
 		len = sprintf(buf, "%s\n", session->initiatorname);
 		break;
+	case ISCSI_PARAM_BOOT_ROOT:
+		len = sprintf(buf, "%s\n", session->boot_root);
+		break;
+	case ISCSI_PARAM_BOOT_NIC:
+		len = sprintf(buf, "%s\n", session->boot_nic);
+		break;
+	case ISCSI_PARAM_BOOT_TARGET:
+		len = sprintf(buf, "%s\n", session->boot_target);
+		break;
 	default:
 		return -ENOSYS;
 	}
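
The three boot entries plug into libiscsi's standard string-parameter plumbing: iscsi_switch_str_param() installs a new value, the sprintf cases above read it back through sysfs, and the teardown path frees it. A rough sketch of what the switch helper amounts to (kfree/kstrdup semantics assumed here; this is an approximation, not the function's actual body):

    /* Approximation of iscsi_switch_str_param(): replace the old string. */
    static int switch_str_param(char **param, const char *new_val_buf)
    {
            char *new_val = kstrdup(new_val_buf, GFP_KERNEL);

            if (!new_val)
                    return -ENOMEM;
            kfree(*param);          /* kfree(NULL) is a no-op */
            *param = new_val;
            return 0;
    }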

+ 1 - 1
drivers/scsi/libsas/sas_scsi_host.c

@@ -167,7 +167,7 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
 	int_to_scsilun(cmd->device->lun, &lun);
 	memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
 	task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
-	memcpy(task->ssp_task.cdb, cmd->cmnd, 16);
+	task->ssp_task.cmd = cmd;
 
 	task->scatter = scsi_sglist(cmd);
 	task->num_scatter = scsi_sg_count(cmd);
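
This one-line substitution is the change the merge summary calls out: libsas no longer copies a fixed 16 bytes into ssp_task.cdb but hands LLDDs the scsi_cmnd itself, so the CDB is read at its true length (see the isci hunk above, which now copies cmd_len bytes). The consumer-side pattern, sketched with a hypothetical destination buffer iu_cdb:

    /* Size the copy from the command, not from a 16-byte assumption. */
    memcpy(iu_cdb, task->ssp_task.cmd->cmnd,
           task->ssp_task.cmd->cmd_len);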

+ 1 - 1
drivers/scsi/lpfc/lpfc.h

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *

+ 20 - 3
drivers/scsi/lpfc/lpfc_attr.c

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -4070,11 +4070,28 @@ LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
 		 "during discovery");
 
 /*
-# lpfc_max_luns: maximum allowed LUN.
+# lpfc_max_luns: maximum allowed LUN ID. This is the highest LUN ID that
+#    will be scanned by the SCSI midlayer when sequential scanning is
+#    used, and is also the highest LUN ID allowed when the SCSI midlayer
+#    parses REPORT_LUN responses. The lpfc driver has no LUN count or
+#    LUN ID limit, but the SCSI midlayer requires this field for the uses
+#    above. The lpfc driver limits the default value to 255 for two reasons.
+#    First, because it bounds the sequential scan loop, scanning for
+#    thousands of LUNs on a target can take minutes of wall clock time.
+#    Second, there are FC targets, such as JBODs, that only recognize 8 bits
+#    of LUN ID. When they receive a value wider than 8 bits, they chop off
+#    the high-order bits; in other words, they see LUN IDs 0, 256, 512,
+#    and so on all as LUN ID 0. This causes the Linux kernel, which sees
+#    valid responses at each of these LUN IDs, to believe there are multiple
+#    devices present when, in fact, there is only one.
+#    A customer that is aware of their targets' behavior, and of the results
+#    described above, is welcome to increase the lpfc_max_luns value.
+#    As mentioned, this value is not used by the lpfc driver, only the
+#    SCSI midlayer.
 # Value range is [0,65535]. Default value is 255.
 # NOTE: The SCSI layer might probe all allowed LUNs on some old targets.
 */
-LPFC_VPORT_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN");
+LPFC_VPORT_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID");
 
 /*
 # lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring.

+ 2 - 2
drivers/scsi/lpfc/lpfc_bsg.c

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2009-2012 Emulex.  All rights reserved.           *
+ * Copyright (C) 2009-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -3392,6 +3392,7 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
 	case MBX_DOWN_LOAD:
 	case MBX_UPDATE_CFG:
 	case MBX_KILL_BOARD:
+	case MBX_READ_TOPOLOGY:
 	case MBX_LOAD_AREA:
 	case MBX_LOAD_EXP_ROM:
 	case MBX_BEACON:
@@ -3422,7 +3423,6 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
 		}
 		break;
 	case MBX_READ_SPARM64:
-	case MBX_READ_TOPOLOGY:
 	case MBX_REG_LOGIN:
 	case MBX_REG_LOGIN64:
 	case MBX_CONFIG_PORT:

+ 1 - 1
drivers/scsi/lpfc/lpfc_crtn.h

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *

+ 1 - 1
drivers/scsi/lpfc/lpfc_ct.c

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *

+ 1 - 1
drivers/scsi/lpfc/lpfc_els.c

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *

+ 35 - 3
drivers/scsi/lpfc/lpfc_hbadisc.c

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -6158,12 +6158,44 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
 		memcpy(&conn_entry->conn_rec, &conn_rec[i],
 			sizeof(struct lpfc_fcf_conn_rec));
 		conn_entry->conn_rec.vlan_tag =
-			le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
+			conn_entry->conn_rec.vlan_tag;
 		conn_entry->conn_rec.flags =
-			le16_to_cpu(conn_entry->conn_rec.flags);
+			conn_entry->conn_rec.flags;
 		list_add_tail(&conn_entry->list,
 			&phba->fcf_conn_rec_list);
 	}
+
+	if (!list_empty(&phba->fcf_conn_rec_list)) {
+		i = 0;
+		list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
+				    list) {
+			conn_rec = &conn_entry->conn_rec;
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"3345 FCF connection list rec[%02d]: "
+					"flags:x%04x, vtag:x%04x, "
+					"fabric_name:x%02x:%02x:%02x:%02x:"
+					"%02x:%02x:%02x:%02x, "
+					"switch_name:x%02x:%02x:%02x:%02x:"
+					"%02x:%02x:%02x:%02x\n", i++,
+					conn_rec->flags, conn_rec->vlan_tag,
+					conn_rec->fabric_name[0],
+					conn_rec->fabric_name[1],
+					conn_rec->fabric_name[2],
+					conn_rec->fabric_name[3],
+					conn_rec->fabric_name[4],
+					conn_rec->fabric_name[5],
+					conn_rec->fabric_name[6],
+					conn_rec->fabric_name[7],
+					conn_rec->switch_name[0],
+					conn_rec->switch_name[1],
+					conn_rec->switch_name[2],
+					conn_rec->switch_name[3],
+					conn_rec->switch_name[4],
+					conn_rec->switch_name[5],
+					conn_rec->switch_name[6],
+					conn_rec->switch_name[7]);
+		}
+	}
 }
 
 /**

+ 1 - 1
drivers/scsi/lpfc/lpfc_hw.h

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *

+ 1 - 1
drivers/scsi/lpfc/lpfc_hw4.h

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2009-2012 Emulex.  All rights reserved.                *
+ * Copyright (C) 2009-2013 Emulex.  All rights reserved.                *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *

+ 59 - 70
drivers/scsi/lpfc/lpfc_init.c

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -60,7 +60,8 @@ unsigned long _dump_buf_dif_order;
 spinlock_t _dump_buf_lock;
 
 /* Used when mapping IRQ vectors in a driver centric manner */
-uint16_t lpfc_used_cpu[LPFC_MAX_CPU];
+uint16_t *lpfc_used_cpu;
+uint32_t lpfc_present_cpu;
 
 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
 static int lpfc_post_rcv_buf(struct lpfc_hba *);
@@ -4048,52 +4049,6 @@ lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
 	lpfc_destroy_vport_work_array(phba, vports);
 }
 
-/**
- * lpfc_sli4_perform_inuse_fcf_recovery - Perform inuse fcf recovery
- * @vport: pointer to lpfc hba data structure.
- *
- * This routine is to perform FCF recovery when the in-use FCF either dead or
- * got modified.
- **/
-static void
-lpfc_sli4_perform_inuse_fcf_recovery(struct lpfc_hba *phba,
-				     struct lpfc_acqe_fip *acqe_fip)
-{
-	int rc;
-
-	spin_lock_irq(&phba->hbalock);
-	/* Mark the fast failover process in progress */
-	phba->fcf.fcf_flag |= FCF_DEAD_DISC;
-	spin_unlock_irq(&phba->hbalock);
-
-	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
-			"2771 Start FCF fast failover process due to in-use "
-			"FCF DEAD/MODIFIED event: evt_tag:x%x, index:x%x\n",
-			acqe_fip->event_tag, acqe_fip->index);
-	rc = lpfc_sli4_redisc_fcf_table(phba);
-	if (rc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
-				"2772 Issue FCF rediscover mabilbox command "
-				"failed, fail through to FCF dead event\n");
-		spin_lock_irq(&phba->hbalock);
-		phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
-		spin_unlock_irq(&phba->hbalock);
-		/*
-		 * Last resort will fail over by treating this as a link
-		 * down to FCF registration.
-		 */
-		lpfc_sli4_fcf_dead_failthrough(phba);
-	} else {
-		/* Reset FCF roundrobin bmask for new discovery */
-		lpfc_sli4_clear_fcf_rr_bmask(phba);
-		/*
-		 * Handling fast FCF failover to a DEAD FCF event is
-		 * considered equalivant to receiving CVL to all vports.
-		 */
-		lpfc_sli4_perform_all_vport_cvl(phba);
-	}
-}
-
 /**
  * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
  * @phba: pointer to lpfc hba data structure.
@@ -4159,22 +4114,9 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
 			break;
 		}
 
-		/* If FCF has been in discovered state, perform rediscovery
-		 * only if the FCF with the same index of the in-use FCF got
-		 * modified during normal operation. Otherwise, do nothing.
-		 */
-		if (phba->pport->port_state > LPFC_FLOGI) {
+		/* If the FCF has been in discovered state, do nothing. */
+		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
 			spin_unlock_irq(&phba->hbalock);
-			if (phba->fcf.current_rec.fcf_indx ==
-			    acqe_fip->index) {
-				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
-						"3300 In-use FCF (%d) "
-						"modified, perform FCF "
-						"rediscovery\n",
-						acqe_fip->index);
-				lpfc_sli4_perform_inuse_fcf_recovery(phba,
-								     acqe_fip);
-			}
 			break;
 		}
 		spin_unlock_irq(&phba->hbalock);
@@ -4227,7 +4169,39 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
 		 * is no longer valid as we are not in the middle of FCF
 		 * failover process already.
 		 */
-		lpfc_sli4_perform_inuse_fcf_recovery(phba, acqe_fip);
+		spin_lock_irq(&phba->hbalock);
+		/* Mark the fast failover process in progress */
+		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
+		spin_unlock_irq(&phba->hbalock);
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2771 Start FCF fast failover process due to "
+				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
+				"\n", acqe_fip->event_tag, acqe_fip->index);
+		rc = lpfc_sli4_redisc_fcf_table(phba);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+					LOG_DISCOVERY,
+					"2772 Issue FCF rediscover mailbox "
+					"command failed, fail through to FCF "
+					"dead event\n");
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			/*
+			 * Last resort will fail over by treating this
+			 * as a link down to FCF registration.
+			 */
+			lpfc_sli4_fcf_dead_failthrough(phba);
+		} else {
+			/* Reset FCF roundrobin bmask for new discovery */
+			lpfc_sli4_clear_fcf_rr_bmask(phba);
+			/*
+			 * Handling fast FCF failover to a DEAD FCF event is
+			 * considered equivalent to receiving CVL to all vports.
+			 */
+			lpfc_sli4_perform_all_vport_cvl(phba);
+		}
 		break;
 	case LPFC_FIP_EVENT_TYPE_CVL:
 		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
@@ -5213,6 +5187,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		rc = -ENOMEM;
 		goto out_free_msix;
 	}
+	if (lpfc_used_cpu == NULL) {
+		lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu),
+					 GFP_KERNEL);
+		if (!lpfc_used_cpu) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3335 Failed allocate memory for msi-x "
+					"interrupt vector mapping\n");
+			kfree(phba->sli4_hba.cpu_map);
+			rc = -ENOMEM;
+			goto out_free_msix;
+		}
+		for (i = 0; i < lpfc_present_cpu; i++)
+			lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
+	}
+
 	/* Initialize io channels for round robin */
 	cpup = phba->sli4_hba.cpu_map;
 	rc = 0;
@@ -6824,8 +6813,6 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
 	int cfg_fcp_io_channel;
 	uint32_t cpu;
 	uint32_t i = 0;
-	uint32_t j = 0;
-
 
 	/*
 	 * Sanity check for configured queue parameters against the run-time
@@ -6839,10 +6826,9 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
 	for_each_present_cpu(cpu) {
 		if (cpu_online(cpu))
 			i++;
-		j++;
 	}
 	phba->sli4_hba.num_online_cpu = i;
-	phba->sli4_hba.num_present_cpu = j;
+	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
 
 	if (i < cfg_fcp_io_channel) {
 		lpfc_printf_log(phba,
@@ -10967,8 +10953,10 @@ lpfc_init(void)
 	}
 
 	/* Initialize in case vector mapping is needed */
-	for (cpu = 0; cpu < LPFC_MAX_CPU; cpu++)
-		lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
+	lpfc_used_cpu = NULL;
+	lpfc_present_cpu = 0;
+	for_each_present_cpu(cpu)
+		lpfc_present_cpu++;
 
 	error = pci_register_driver(&lpfc_driver);
 	if (error) {
@@ -11008,6 +10996,7 @@ lpfc_exit(void)
 				(1L << _dump_buf_dif_order), _dump_buf_dif);
 		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
 	}
+	kfree(lpfc_used_cpu);
 }
 
 module_init(lpfc_init);
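
Dropping the static LPFC_MAX_CPU array removes lpfc's hard 256-CPU ceiling: module init counts present CPUs once, and the first HBA to reach driver-resource setup allocates the vector map at that size. Condensed from the hunks above (error handling elided):

    /* module init: size from the running system, not a constant */
    lpfc_present_cpu = 0;
    for_each_present_cpu(cpu)
            lpfc_present_cpu++;

    /* first HBA setup: allocate lazily, mark every slot unused */
    if (!lpfc_used_cpu) {
            lpfc_used_cpu = kzalloc(sizeof(uint16_t) * lpfc_present_cpu,
                                    GFP_KERNEL);
            for (i = 0; i < lpfc_present_cpu; i++)
                    lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
    }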

+ 1 - 1
drivers/scsi/lpfc/lpfc_mbox.c

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *

+ 1 - 1
drivers/scsi/lpfc/lpfc_nportdisc.c

@@ -1,7 +1,7 @@
  /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *

+ 71 - 54
drivers/scsi/lpfc/lpfc_scsi.c

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -68,14 +68,12 @@ struct scsi_dif_tuple {
 	__be32 ref_tag;         /* Target LBA or indirect LBA */
 };
 
-#if !defined(SCSI_PROT_GUARD_CHECK) || !defined(SCSI_PROT_REF_CHECK)
-#define scsi_prot_flagged(sc, flg)	sc
-#endif
-
 static void
 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
 static void
 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
+static int
+lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
 
 static void
 lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
@@ -134,6 +132,30 @@ lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
 	}
 }
 
+static inline unsigned
+lpfc_cmd_blksize(struct scsi_cmnd *sc)
+{
+	return sc->device->sector_size;
+}
+
+#define LPFC_CHECK_PROTECT_GUARD	1
+#define LPFC_CHECK_PROTECT_REF		2
+static inline unsigned
+lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
+{
+	return 1;
+}
+
+static inline unsigned
+lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
+{
+	if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
+		return 0;
+	if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
+		return 1;
+	return 0;
+}
+
 /**
  * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
  * @phba: Pointer to HBA object.
@@ -1144,13 +1166,14 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 static struct lpfc_scsi_buf*
 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
-	struct lpfc_scsi_buf *lpfc_cmd ;
+	struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
 	unsigned long gflag = 0;
 	unsigned long pflag = 0;
 	int found = 0;
 
 	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
-	list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get, list) {
+	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
+				 &phba->lpfc_scsi_buf_list_get, list) {
 		if (lpfc_test_rrq_active(phba, ndlp,
 					 lpfc_cmd->cur_iocbq.sli4_lxritag))
 			continue;
@@ -1164,8 +1187,8 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 			    &phba->lpfc_scsi_buf_list_get);
 		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
 		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
-		list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get,
-				    list) {
+		list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
+					 &phba->lpfc_scsi_buf_list_get, list) {
 			if (lpfc_test_rrq_active(
 				phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
 				continue;
@@ -1409,12 +1432,6 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 	return 0;
 }
 
-static inline unsigned
-lpfc_cmd_blksize(struct scsi_cmnd *sc)
-{
-	return sc->device->sector_size;
-}
-
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 
 /* Return if error injection is detected by Initiator */
@@ -1847,10 +1864,9 @@ static int
 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		uint8_t *txop, uint8_t *rxop)
 {
-	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
 	uint8_t ret = 0;
 
-	if (guard_type == SHOST_DIX_GUARD_IP) {
+	if (lpfc_cmd_guard_csum(sc)) {
 		switch (scsi_get_prot_op(sc)) {
 		case SCSI_PROT_READ_INSERT:
 		case SCSI_PROT_WRITE_STRIP:
@@ -1928,10 +1944,9 @@ static int
 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		uint8_t *txop, uint8_t *rxop)
 {
-	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
 	uint8_t ret = 0;
 
-	if (guard_type == SHOST_DIX_GUARD_IP) {
+	if (lpfc_cmd_guard_csum(sc)) {
 		switch (scsi_get_prot_op(sc)) {
 		case SCSI_PROT_READ_INSERT:
 		case SCSI_PROT_WRITE_STRIP:
@@ -2078,12 +2093,12 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 	 * protection data is automatically generated, not checked.
 	 */
 	if (datadir == DMA_FROM_DEVICE) {
-		if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
+		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
 			bf_set(pde6_ce, pde6, checking);
 		else
 			bf_set(pde6_ce, pde6, 0);
 
-		if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
 			bf_set(pde6_re, pde6, checking);
 		else
 			bf_set(pde6_re, pde6, 0);
@@ -2240,12 +2255,12 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		bf_set(pde6_optx, pde6, txop);
 		bf_set(pde6_oprx, pde6, rxop);
 
-		if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
+		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
 			bf_set(pde6_ce, pde6, checking);
 		else
 			bf_set(pde6_ce, pde6, 0);
 
-		if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
 			bf_set(pde6_re, pde6, checking);
 		else
 			bf_set(pde6_re, pde6, 0);
@@ -2454,12 +2469,12 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 	 * protection data is automatically generated, not checked.
 	 */
 	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
-		if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
+		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
 			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
 		else
 			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
 
-		if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
 			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
 		else
 			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
@@ -2610,7 +2625,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		diseed->ref_tag = cpu_to_le32(reftag);
 		diseed->ref_tag_tran = diseed->ref_tag;
 
-		if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) {
+		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
 			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
 
 		} else {
@@ -2629,7 +2644,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		}
 
 
-		if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
 			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
 		else
 			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
@@ -2792,11 +2807,12 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
 		ret = LPFC_PG_TYPE_DIF_BUF;
 		break;
 	default:
-		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-				"9021 Unsupported protection op:%d\n", op);
+		if (phba)
+			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+					"9021 Unsupported protection op:%d\n",
+					op);
 		break;
 	}
-
 	return ret;
 }
 
@@ -2821,22 +2837,22 @@ lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
 
 	/* Check if there is protection data on the wire */
 	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
-		/* Read */
+		/* Read check for protection data */
 		if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
 			return fcpdl;
 
 	} else {
-		/* Write */
+		/* Write check for protection data */
 		if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
 			return fcpdl;
 	}
 
 	/*
 	 * If we are in DIF Type 1 mode every data block has a 8 byte
-	 * DIF (trailer) attached to it. Must ajust FCP data length.
+	 * DIF (trailer) attached to it. Must ajust FCP data length
+	 * to account for the protection data.
 	 */
-	if (scsi_prot_flagged(sc, SCSI_PROT_TRANSFER_PI))
-		fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
+	fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
 
 	return fcpdl;
 }
@@ -3073,9 +3089,9 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 			chk_guard = 1;
 		guard_type = scsi_host_get_guard(cmd->device->host);
 
+		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
 		start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
 		start_app_tag = src->app_tag;
-		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
 		len = sgpe->length;
 		while (src && protsegcnt) {
 			while (len) {
@@ -3090,25 +3106,10 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 					goto skipit;
 				}
 
-				/* App Tag checking */
-				app_tag = src->app_tag;
-				if (chk_app && (app_tag != start_app_tag)) {
-					err_type = BGS_APPTAG_ERR_MASK;
-					goto out;
-				}
-
-				/* Reference Tag checking */
-				ref_tag = be32_to_cpu(src->ref_tag);
-				if (chk_ref && (ref_tag != start_ref_tag)) {
-					err_type = BGS_REFTAG_ERR_MASK;
-					goto out;
-				}
-				start_ref_tag++;
-
-				/* Guard Tag checking */
+				/* First Guard Tag checking */
 				if (chk_guard) {
 					guard_tag = src->guard_tag;
-					if (guard_type == SHOST_DIX_GUARD_IP)
+					if (lpfc_cmd_guard_csum(cmd))
 						sum = lpfc_bg_csum(data_src,
 								   blksize);
 					else
@@ -3119,6 +3120,21 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 						goto out;
 					}
 				}
+
+				/* Reference Tag checking */
+				ref_tag = be32_to_cpu(src->ref_tag);
+				if (chk_ref && (ref_tag != start_ref_tag)) {
+					err_type = BGS_REFTAG_ERR_MASK;
+					goto out;
+				}
+				start_ref_tag++;
+
+				/* App Tag checking */
+				app_tag = src->app_tag;
+				if (chk_app && (app_tag != start_app_tag)) {
+					err_type = BGS_APPTAG_ERR_MASK;
+					goto out;
+				}
 skipit:
 				len -= sizeof(struct scsi_dif_tuple);
 				if (len < 0)
@@ -4074,7 +4090,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 			 cmd->device ? cmd->device->id : 0xffff,
 			 cmd->device ? cmd->device->lun : 0xffff,
 			 lpfc_cmd->status, lpfc_cmd->result,
-			 vport->fc_myDID, pnode->nlp_DID,
+			 vport->fc_myDID,
+			 (pnode) ? pnode->nlp_DID : 0,
 			 phba->sli_rev == LPFC_SLI_REV4 ?
 			     lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
 			 pIocbOut->iocb.ulpContext,
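
Two things in the BlockGuard rework above deserve a note: the per-sector checks are reordered so the guard tag is verified first, and the guard algorithm is now asked per command through lpfc_cmd_guard_csum() instead of re-reading the host's guard type at each site. The resulting selection in the verification loop, simplified from the hunk (lpfc_bg_crc is assumed here as the driver's CRC counterpart to lpfc_bg_csum):

    /* Guard check: IP checksum for DIX, otherwise the T10 CRC. */
    if (lpfc_cmd_guard_csum(cmd))
            sum = lpfc_bg_csum(data_src, blksize);
    else
            sum = lpfc_bg_crc(data_src, blksize);
    if (sum != guard_tag) {
            err_type = BGS_GUARD_ERR_MASK;
            goto out;
    }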

+ 1 - 1
drivers/scsi/lpfc/lpfc_scsi.h

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *

+ 20 - 32
drivers/scsi/lpfc/lpfc_sli.c

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -1011,17 +1011,6 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 	else
 		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
 
-	/*
-	** This should have been removed from the txcmplq before calling
-	** iocbq_release. The normal completion
-	** path should have already done the list_del_init.
-	*/
-	if (unlikely(!list_empty(&iocbq->list))) {
-		if (iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)
-			iocbq->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
-		list_del_init(&iocbq->list);
-	}
-
 
 	if (sglq)  {
 		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
@@ -1070,13 +1059,6 @@ __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 {
 	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
 
-	/*
-	** This should have been removed from the txcmplq before calling
-	** iocbq_release. The normal completion
-	** path should have already done the list_del_init.
-	*/
-	if (unlikely(!list_empty(&iocbq->list)))
-		list_del_init(&iocbq->list);
 
 	/*
 	 * Clean all volatile data fields, preserve iotag and node struct.
@@ -3279,7 +3261,7 @@ lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		if (free_saveq) {
 			list_for_each_entry_safe(rspiocbp, next_iocb,
 						 &saveq->list, list) {
-				list_del(&rspiocbp->list);
+				list_del_init(&rspiocbp->list);
 				__lpfc_sli_release_iocbq(phba, rspiocbp);
 			}
 			__lpfc_sli_release_iocbq(phba, saveq);
@@ -4584,7 +4566,8 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
 		} else {
 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 					"2708 This device does not support "
-					"Advanced Error Reporting (AER)\n");
+					"Advanced Error Reporting (AER): %d\n",
+					rc);
 			phba->cfg_aer_support = 0;
 		}
 	}
@@ -8731,7 +8714,7 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 			"3116 Port generated FCP XRI ABORT event on "
 			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
-			ndlp->vport->vpi, ndlp->nlp_rpi,
+			ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
 			bf_get(lpfc_wcqe_xa_xri, axri),
 			bf_get(lpfc_wcqe_xa_status, axri),
 			axri->parameter);
@@ -9787,7 +9770,7 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			struct lpfc_iocbq *rspiocb)
 {
 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-			"3096 ABORT_XRI_CN completing on xri x%x "
+			"3096 ABORT_XRI_CN completing on rpi x%x "
 			"original iotag x%x, abort cmd iotag x%x "
 			"status 0x%x, reason 0x%x\n",
 			cmdiocb->iocb.un.acxri.abortContextTag,
@@ -10109,12 +10092,13 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
 			 uint32_t timeout)
 {
 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
+	MAILBOX_t *mb = NULL;
 	int retval;
 	unsigned long flag;
 
-	/* The caller must leave context1 empty. */
+	/* The caller might set context1 for an extended buffer */
 	if (pmboxq->context1)
-		return MBX_NOT_FINISHED;
+		mb = (MAILBOX_t *)pmboxq->context1;
 
 	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
 	/* setup wake call as IOCB callback */
@@ -10130,7 +10114,8 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
 				msecs_to_jiffies(timeout * 1000));
 
 		spin_lock_irqsave(&phba->hbalock, flag);
-		pmboxq->context1 = NULL;
+		/* restore the extended buffer, if any, so it can be freed */
+		pmboxq->context1 = (uint8_t *)mb;
 		/*
 		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
 		 * else do not free the resources.
@@ -10143,6 +10128,9 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
 			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 		}
 		spin_unlock_irqrestore(&phba->hbalock, flag);
+	} else {
+		/* restore the extended buffer, if any, so it can be freed */
+		pmboxq->context1 = (uint8_t *)mb;
 	}
 
 	return retval;
@@ -16304,7 +16292,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
 	union lpfc_wqe wqe;
 	int txq_cnt = 0;
 
-	spin_lock_irqsave(&phba->hbalock, iflags);
+	spin_lock_irqsave(&pring->ring_lock, iflags);
 	list_for_each_entry(piocbq, &pring->txq, list) {
 		txq_cnt++;
 	}
@@ -16312,14 +16300,14 @@ lpfc_drain_txq(struct lpfc_hba *phba)
 	if (txq_cnt > pring->txq_max)
 		pring->txq_max = txq_cnt;
 
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	spin_unlock_irqrestore(&pring->ring_lock, iflags);
 
 	while (!list_empty(&pring->txq)) {
-		spin_lock_irqsave(&phba->hbalock, iflags);
+		spin_lock_irqsave(&pring->ring_lock, iflags);
 
 		piocbq = lpfc_sli_ringtx_get(phba, pring);
 		if (!piocbq) {
-			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			spin_unlock_irqrestore(&pring->ring_lock, iflags);
 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"2823 txq empty and txq_cnt is %d\n ",
 				txq_cnt);
@@ -16328,7 +16316,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
 		sglq = __lpfc_sli_get_sglq(phba, piocbq);
 		if (!sglq) {
 			__lpfc_sli_ringtx_put(phba, pring, piocbq);
-			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			spin_unlock_irqrestore(&pring->ring_lock, iflags);
 			break;
 		}
 		txq_cnt--;
@@ -16356,7 +16344,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
 					piocbq->iotag, piocbq->sli4_xritag);
 			list_add_tail(&piocbq->list, &completions);
 		}
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		spin_unlock_irqrestore(&pring->ring_lock, iflags);
 	}
 
 	/* Cancel all the IOCBs that cannot be issued */
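
The lpfc_drain_txq() change is a lock-splitting move: the txq of the ring being drained is now guarded by that ring's own ring_lock rather than the adapter-wide hbalock, so draining one queue no longer serializes against unrelated HBA state. The invariant is simply that every txq access in this path takes the same, narrower lock:

    spin_lock_irqsave(&pring->ring_lock, iflags);
    piocbq = lpfc_sli_ringtx_get(phba, pring);  /* pop under ring_lock */
    spin_unlock_irqrestore(&pring->ring_lock, iflags);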

+ 1 - 2
drivers/scsi/lpfc/lpfc_sli4.h

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2009-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2009-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -444,7 +444,6 @@ struct lpfc_vector_map_info {
 	struct cpumask	maskbits;
 };
 #define LPFC_VECTOR_MAP_EMPTY	0xffff
-#define LPFC_MAX_CPU		256
 
 /* SLI4 HBA data structure entries */
 struct lpfc_sli4_hba {

+ 3 - 3
drivers/scsi/lpfc/lpfc_version.h

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.39"
+#define LPFC_DRIVER_VERSION "8.3.40"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
@@ -30,4 +30,4 @@
 
 #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
 		LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright(c) 2004-2009 Emulex.  All rights reserved."
+#define LPFC_COPYRIGHT "Copyright(c) 2004-2013 Emulex.  All rights reserved."

+ 177 - 9
drivers/scsi/megaraid/megaraid_sas.h

@@ -33,9 +33,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"06.506.00.00-rc1"
-#define MEGASAS_RELDATE				"Feb. 9, 2013"
-#define MEGASAS_EXT_VERSION			"Sat. Feb. 9 17:00:00 PDT 2013"
+#define MEGASAS_VERSION				"06.600.18.00-rc1"
+#define MEGASAS_RELDATE				"May. 15, 2013"
+#define MEGASAS_EXT_VERSION			"Wed. May. 15 17:00:00 PDT 2013"
 
 /*
  * Device IDs
@@ -49,6 +49,33 @@
 #define	PCI_DEVICE_ID_LSI_SAS0071SKINNY		0x0071
 #define	PCI_DEVICE_ID_LSI_FUSION		0x005b
 #define PCI_DEVICE_ID_LSI_INVADER		0x005d
+#define PCI_DEVICE_ID_LSI_FURY			0x005f
+
+/*
+ * Intel HBA SSDIDs
+ */
+#define MEGARAID_INTEL_RS3DC080_SSDID		0x9360
+#define MEGARAID_INTEL_RS3DC040_SSDID		0x9362
+#define MEGARAID_INTEL_RS3SC008_SSDID		0x9380
+#define MEGARAID_INTEL_RS3MC044_SSDID		0x9381
+#define MEGARAID_INTEL_RS3WC080_SSDID		0x9341
+#define MEGARAID_INTEL_RS3WC040_SSDID		0x9343
+
+/*
+ * Intel HBA branding
+ */
+#define MEGARAID_INTEL_RS3DC080_BRANDING	\
+	"Intel(R) RAID Controller RS3DC080"
+#define MEGARAID_INTEL_RS3DC040_BRANDING	\
+	"Intel(R) RAID Controller RS3DC040"
+#define MEGARAID_INTEL_RS3SC008_BRANDING	\
+	"Intel(R) RAID Controller RS3SC008"
+#define MEGARAID_INTEL_RS3MC044_BRANDING	\
+	"Intel(R) RAID Controller RS3MC044"
+#define MEGARAID_INTEL_RS3WC080_BRANDING	\
+	"Intel(R) RAID Controller RS3WC080"
+#define MEGARAID_INTEL_RS3WC040_BRANDING	\
+	"Intel(R) RAID Controller RS3WC040"
 
 /*
  * =====================================
@@ -162,6 +189,12 @@
 #define MR_DCMD_CLUSTER_RESET_LD		0x08010200
 #define MR_DCMD_PD_LIST_QUERY                   0x02010100
 
+/*
+ * Global functions
+ */
+extern u8 MR_ValidateMapInfo(struct megasas_instance *instance);
+
+
 /*
  * MFI command completion codes
  */
@@ -702,8 +735,126 @@ struct megasas_ctrl_info {
 	 */
 	char package_version[0x60];
 
-	u8 pad[0x800 - 0x6a0];
 
+	/*
+	* If adapterOperations.supportMoreThan8Phys is set,
+	* and deviceInterface.portCount is greater than 8,
+	* SAS Addrs for first 8 ports shall be populated in
+	* deviceInterface.portAddr, and the rest shall be
+	* populated in deviceInterfacePortAddr2.
+	*/
+	u64         deviceInterfacePortAddr2[8]; /*6a0h */
+	u8          reserved3[128];              /*6e0h */
+
+	struct {                                /*760h */
+		u16 minPdRaidLevel_0:4;
+		u16 maxPdRaidLevel_0:12;
+
+		u16 minPdRaidLevel_1:4;
+		u16 maxPdRaidLevel_1:12;
+
+		u16 minPdRaidLevel_5:4;
+		u16 maxPdRaidLevel_5:12;
+
+		u16 minPdRaidLevel_1E:4;
+		u16 maxPdRaidLevel_1E:12;
+
+		u16 minPdRaidLevel_6:4;
+		u16 maxPdRaidLevel_6:12;
+
+		u16 minPdRaidLevel_10:4;
+		u16 maxPdRaidLevel_10:12;
+
+		u16 minPdRaidLevel_50:4;
+		u16 maxPdRaidLevel_50:12;
+
+		u16 minPdRaidLevel_60:4;
+		u16 maxPdRaidLevel_60:12;
+
+		u16 minPdRaidLevel_1E_RLQ0:4;
+		u16 maxPdRaidLevel_1E_RLQ0:12;
+
+		u16 minPdRaidLevel_1E0_RLQ0:4;
+		u16 maxPdRaidLevel_1E0_RLQ0:12;
+
+		u16 reserved[6];
+	} pdsForRaidLevels;
+
+	u16 maxPds;                             /*780h */
+	u16 maxDedHSPs;                         /*782h */
+	u16 maxGlobalHSPs;                      /*784h */
+	u16 ddfSize;                            /*786h */
+	u8  maxLdsPerArray;                     /*788h */
+	u8  partitionsInDDF;                    /*789h */
+	u8  lockKeyBinding;                     /*78ah */
+	u8  maxPITsPerLd;                       /*78bh */
+	u8  maxViewsPerLd;                      /*78ch */
+	u8  maxTargetId;                        /*78dh */
+	u16 maxBvlVdSize;                       /*78eh */
+
+	u16 maxConfigurableSSCSize;             /*790h */
+	u16 currentSSCsize;                     /*792h */
+
+	char    expanderFwVersion[12];          /*794h */
+
+	u16 PFKTrialTimeRemaining;              /*7A0h */
+
+	u16 cacheMemorySize;                    /*7A2h */
+
+	struct {                                /*7A4h */
+		u32     supportPIcontroller:1;
+		u32     supportLdPIType1:1;
+		u32     supportLdPIType2:1;
+		u32     supportLdPIType3:1;
+		u32     supportLdBBMInfo:1;
+		u32     supportShieldState:1;
+		u32     blockSSDWriteCacheChange:1;
+		u32     supportSuspendResumeBGops:1;
+		u32     supportEmergencySpares:1;
+		u32     supportSetLinkSpeed:1;
+		u32     supportBootTimePFKChange:1;
+		u32     supportJBOD:1;
+		u32     disableOnlinePFKChange:1;
+		u32     supportPerfTuning:1;
+		u32     supportSSDPatrolRead:1;
+		u32     realTimeScheduler:1;
+
+		u32     supportResetNow:1;
+		u32     supportEmulatedDrives:1;
+		u32     headlessMode:1;
+		u32     dedicatedHotSparesLimited:1;
+
+
+		u32     supportUnevenSpans:1;
+		u32     reserved:11;
+	} adapterOperations2;
+
+	u8  driverVersion[32];                  /*7A8h */
+	u8  maxDAPdCountSpinup60;               /*7C8h */
+	u8  temperatureROC;                     /*7C9h */
+	u8  temperatureCtrl;                    /*7CAh */
+	u8  reserved4;                          /*7CBh */
+	u16 maxConfigurablePds;                 /*7CCh */
+
+
+	u8  reserved5[2];                       /*0x7CDh */
+
+	/*
+	* HA cluster information
+	*/
+	struct {
+		u32     peerIsPresent:1;
+		u32     peerIsIncompatible:1;
+		u32     hwIncompatible:1;
+		u32     fwVersionMismatch:1;
+		u32     ctrlPropIncompatible:1;
+		u32     premiumFeatureMismatch:1;
+		u32     reserved:26;
+	} cluster;
+
+	char clusterId[16];                     /*7D4h */
+
+	u8          pad[0x800-0x7E4];           /*7E4 */
 } __packed;
 
 /*
@@ -759,7 +910,7 @@ struct megasas_ctrl_info {
 #define MEGASAS_INT_CMDS			32
 #define MEGASAS_SKINNY_INT_CMDS			5
 
-#define MEGASAS_MAX_MSIX_QUEUES			16
+#define MEGASAS_MAX_MSIX_QUEUES			128
 /*
  * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit
  * SGLs based on the size of dma_addr_t
@@ -784,6 +935,11 @@ struct megasas_ctrl_info {
 #define MFI_1068_PCSR_OFFSET			0x84
 #define MFI_1068_FW_HANDSHAKE_OFFSET		0x64
 #define MFI_1068_FW_READY			0xDDDD0000
+
+#define MR_MAX_REPLY_QUEUES_OFFSET              0X0000001F
+#define MR_MAX_REPLY_QUEUES_EXT_OFFSET          0X003FC000
+#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT    14
+#define MR_MAX_MSIX_REG_ARRAY                   16
 /*
 * register set for both 1068 and 1078 controllers
 * structure extended for 1078 registers
@@ -893,6 +1049,15 @@ union megasas_sgl_frame {
 
 } __attribute__ ((packed));
 
+typedef union _MFI_CAPABILITIES {
+	struct {
+		u32     support_fp_remote_lun:1;
+		u32     support_additional_msix:1;
+		u32     reserved:30;
+	} mfi_capabilities;
+	u32     reg;
+} MFI_CAPABILITIES;
+
 struct megasas_init_frame {
 
 	u8 cmd;			/*00h */
@@ -900,7 +1065,7 @@ struct megasas_init_frame {
 	u8 cmd_status;		/*02h */
 
 	u8 reserved_1;		/*03h */
-	u32 reserved_2;		/*04h */
+	MFI_CAPABILITIES driver_operations; /*04h*/
 
 	u32 context;		/*08h */
 	u32 pad_0;		/*0Ch */
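
The capabilities union turns a formerly reserved word of the init frame into a driver-to-firmware feature handshake. A hedged example of how a driver might advertise a bit (field names from the union above; the surrounding init-frame setup is omitted):

    MFI_CAPABILITIES caps;

    caps.reg = 0;
    caps.mfi_capabilities.support_additional_msix = 1;  /* >16 MSI-X */
    init_frame->driver_operations.reg = caps.reg;
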
@@ -1297,7 +1462,7 @@ struct megasas_instance {
 
 	unsigned long base_addr;
 	struct megasas_register_set __iomem *reg_set;
-
+	u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
 	struct megasas_pd_list          pd_list[MEGASAS_MAX_PD];
 	u8     ld_ids[MEGASAS_MAX_LD_IDS];
 	s8 init_id;
@@ -1348,6 +1513,7 @@ struct megasas_instance {
 	u8 flag_ieee;
 	u8 issuepend_done;
 	u8 disableOnlineCtrlReset;
+	u8 UnevenSpanSupport;
 	u8 adprecovery;
 	unsigned long last_time;
 	u32 mfiStatus;
@@ -1366,6 +1532,8 @@ struct megasas_instance {
 	long reset_flags;
 	struct mutex reset_mutex;
 	int throttlequeuedepth;
+	u8 mask_interrupts;
+	u8 is_imr;
 };
 
 enum {
@@ -1381,8 +1549,8 @@ struct megasas_instance_template {
 	void (*fire_cmd)(struct megasas_instance *, dma_addr_t, \
 		u32, struct megasas_register_set __iomem *);
 
-	void (*enable_intr)(struct megasas_register_set __iomem *) ;
-	void (*disable_intr)(struct megasas_register_set __iomem *);
+	void (*enable_intr)(struct megasas_instance *);
+	void (*disable_intr)(struct megasas_instance *);
 
 	int (*clear_intr)(struct megasas_register_set __iomem *);
 

+ 166 - 60
drivers/scsi/megaraid/megaraid_sas_base.c

@@ -18,7 +18,7 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  *  FILE: megaraid_sas_base.c
- *  Version : v06.506.00.00-rc1
+ *  Version : 06.600.18.00-rc1
  *
  *  Authors: LSI Corporation
  *           Sreenivas Bagalkote
@@ -122,6 +122,8 @@ static struct pci_device_id megasas_pci_table[] = {
 	/* Fusion */
 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
 	/* Invader */
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
+	/* Fury */
 	{}
 };
 
@@ -169,8 +171,6 @@ megasas_sync_map_info(struct megasas_instance *instance);
 int
 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd);
 void megasas_reset_reply_desc(struct megasas_instance *instance);
-u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map,
-		      struct LD_LOAD_BALANCE_INFO *lbInfo);
 int megasas_reset_fusion(struct Scsi_Host *shost);
 void megasas_fusion_ocr_wq(struct work_struct *work);
 
@@ -223,6 +223,7 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
 	cmd->frame_count = 0;
 	if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
 	    (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
+	    (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
 	    (reset_devices))
 		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
 	list_add_tail(&cmd->list, &instance->cmd_pool);
@@ -241,8 +242,10 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
  * @regs:			MFI register set
  */
 static inline void
-megasas_enable_intr_xscale(struct megasas_register_set __iomem * regs)
+megasas_enable_intr_xscale(struct megasas_instance *instance)
 {
+	struct megasas_register_set __iomem *regs;
+	regs = instance->reg_set;
 	writel(0, &(regs)->outbound_intr_mask);
 
 	/* Dummy readl to force pci flush */
@@ -254,9 +257,11 @@ megasas_enable_intr_xscale(struct megasas_register_set __iomem * regs)
 * @instance:			Adapter soft state
  */
 static inline void
-megasas_disable_intr_xscale(struct megasas_register_set __iomem * regs)
+megasas_disable_intr_xscale(struct megasas_instance *instance)
 {
+	struct megasas_register_set __iomem *regs;
 	u32 mask = 0x1f;
+	regs = instance->reg_set;
 	writel(mask, &regs->outbound_intr_mask);
 	/* Dummy readl to force pci flush */
 	readl(&regs->outbound_intr_mask);
@@ -410,8 +415,10 @@ static struct megasas_instance_template megasas_instance_template_xscale = {
  * @regs:			MFI register set
  */
 static inline void
-megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs)
+megasas_enable_intr_ppc(struct megasas_instance *instance)
 {
+	struct megasas_register_set __iomem *regs;
+	regs = instance->reg_set;
 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
 
 	writel(~0x80000000, &(regs)->outbound_intr_mask);
@@ -425,9 +432,11 @@ megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs)
 * @instance:			Adapter soft state
  */
 static inline void
-megasas_disable_intr_ppc(struct megasas_register_set __iomem * regs)
+megasas_disable_intr_ppc(struct megasas_instance *instance)
 {
+	struct megasas_register_set __iomem *regs;
 	u32 mask = 0xFFFFFFFF;
+	regs = instance->reg_set;
 	writel(mask, &regs->outbound_intr_mask);
 	/* Dummy readl to force pci flush */
 	readl(&regs->outbound_intr_mask);
@@ -528,8 +537,10 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
 * @instance:			Adapter soft state
  */
 static inline void
-megasas_enable_intr_skinny(struct megasas_register_set __iomem *regs)
+megasas_enable_intr_skinny(struct megasas_instance *instance)
 {
+	struct megasas_register_set __iomem *regs;
+	regs = instance->reg_set;
 	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
 
 	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
@@ -543,9 +554,11 @@ megasas_enable_intr_skinny(struct megasas_register_set __iomem *regs)
 * @instance:			Adapter soft state
  */
 static inline void
-megasas_disable_intr_skinny(struct megasas_register_set __iomem *regs)
+megasas_disable_intr_skinny(struct megasas_instance *instance)
 {
+	struct megasas_register_set __iomem *regs;
 	u32 mask = 0xFFFFFFFF;
+	regs = instance->reg_set;
 	writel(mask, &regs->outbound_intr_mask);
 	/* Dummy readl to force pci flush */
 	readl(&regs->outbound_intr_mask);
@@ -583,7 +596,7 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
 	/*
 	 * Check if it is our interrupt
 	 */
-	if ((megasas_read_fw_status_reg_gen2(regs) & MFI_STATE_MASK) ==
+	if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
 	    MFI_STATE_FAULT) {
 		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
 	} else
@@ -663,8 +676,10 @@ static struct megasas_instance_template megasas_instance_template_skinny = {
 * @instance:                  Adapter soft state
  */
 static inline void
-megasas_enable_intr_gen2(struct megasas_register_set __iomem *regs)
+megasas_enable_intr_gen2(struct megasas_instance *instance)
 {
+	struct megasas_register_set __iomem *regs;
+	regs = instance->reg_set;
 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
 
 	/* write ~0x00000005 (4 & 1) to the intr mask*/
@@ -679,9 +694,11 @@ megasas_enable_intr_gen2(struct megasas_register_set __iomem *regs)
 * @instance:                  Adapter soft state
  */
 static inline void
-megasas_disable_intr_gen2(struct megasas_register_set __iomem *regs)
+megasas_disable_intr_gen2(struct megasas_instance *instance)
 {
+	struct megasas_register_set __iomem *regs;
 	u32 mask = 0xFFFFFFFF;
+	regs = instance->reg_set;
 	writel(mask, &regs->outbound_intr_mask);
 	/* Dummy readl to force pci flush */
 	readl(&regs->outbound_intr_mask);
@@ -711,7 +728,7 @@ megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
 	 */
 	status = readl(&regs->outbound_intr_status);
 
-	if (status & MFI_GEN2_ENABLE_INTERRUPT_MASK) {
+	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
 	}
 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
@@ -1471,6 +1488,14 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
 		return SCSI_MLQUEUE_HOST_BUSY;
 
 	spin_lock_irqsave(&instance->hba_lock, flags);
+
+	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+		spin_unlock_irqrestore(&instance->hba_lock, flags);
+		scmd->result = DID_ERROR << 16;
+		done(scmd);
+		return 0;
+	}
+
 	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
 		spin_unlock_irqrestore(&instance->hba_lock, flags);
 		return SCSI_MLQUEUE_HOST_BUSY;
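
Completing the command with DID_ERROR, rather than returning SCSI_MLQUEUE_HOST_BUSY as the operational-state check below does, matters here: a busy return makes the midlayer requeue and retry, which would spin forever against an HBA flagged MEGASAS_HW_CRITICAL_ERROR. The host byte occupies bits 23:16 of scmd->result, hence the shift:

	scmd->result = DID_ERROR << 16;	/* host byte = DID_ERROR, status byte = 0 */
	done(scmd);			/* complete immediately; no retry */
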
@@ -1591,7 +1616,8 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance)
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
-	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)) {
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
 		writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
 	} else {
 		writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell);
@@ -1615,10 +1641,7 @@ megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
 
 		spin_lock_irqsave(instance->host->host_lock, flags);
 		instance->flag &= ~MEGASAS_FW_BUSY;
-		if ((instance->pdev->device ==
-			PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
-			(instance->pdev->device ==
-			PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+		if (instance->is_imr) {
 			instance->host->can_queue =
 				instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
 		} else
@@ -1695,7 +1718,7 @@ void megasas_do_ocr(struct megasas_instance *instance)
 	(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
 		*instance->consumer     = MEGASAS_ADPRESET_INPROG_SIGN;
 	}
-	instance->instancet->disable_intr(instance->reg_set);
+	instance->instancet->disable_intr(instance);
 	instance->adprecovery   = MEGASAS_ADPRESET_SM_INFAULT;
 	instance->issuepend_done = 0;
 
@@ -1966,7 +1989,8 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
 	 * First wait for all commands to complete
 	 */
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
-	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER))
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
 		ret = megasas_reset_fusion(scmd->device->host);
 	else
 		ret = megasas_generic_reset(scmd);
@@ -2266,6 +2290,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
 		/* Check for LD map update */
 		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
 		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
+			fusion->fast_path_io = 0;
 			spin_lock_irqsave(instance->host->host_lock, flags);
 			if (cmd->frame->hdr.cmd_status != 0) {
 				if (cmd->frame->hdr.cmd_status !=
@@ -2283,9 +2308,13 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
 			} else
 				instance->map_id++;
 			megasas_return_cmd(instance, cmd);
-			if (MR_ValidateMapInfo(
-				    fusion->ld_map[(instance->map_id & 1)],
-				    fusion->load_balance_info))
+
+			/*
+			 * Set fast path IO to ZERO.
+			 * Validate Map will set proper value.
+			 * Meanwhile all IOs will go as LD IO.
+			 */
+			if (MR_ValidateMapInfo(instance))
 				fusion->fast_path_io = 1;
 			else
 				fusion->fast_path_io = 0;
@@ -2477,7 +2506,7 @@ process_fw_state_change_wq(struct work_struct *work)
 		printk(KERN_NOTICE "megaraid_sas: FW detected to be in fault "
 					"state, restarting it...\n");
 
-		instance->instancet->disable_intr(instance->reg_set);
+		instance->instancet->disable_intr(instance);
 		atomic_set(&instance->fw_outstanding, 0);
 
 		atomic_set(&instance->fw_reset_no_pci_access, 1);
@@ -2518,7 +2547,7 @@ process_fw_state_change_wq(struct work_struct *work)
 		spin_lock_irqsave(&instance->hba_lock, flags);
 		instance->adprecovery	= MEGASAS_HBA_OPERATIONAL;
 		spin_unlock_irqrestore(&instance->hba_lock, flags);
-		instance->instancet->enable_intr(instance->reg_set);
+		instance->instancet->enable_intr(instance);
 
 		megasas_issue_pending_cmds_again(instance);
 		instance->issuepend_done = 1;
@@ -2581,7 +2610,7 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
 			}
 
 
-			instance->instancet->disable_intr(instance->reg_set);
+			instance->instancet->disable_intr(instance);
 			instance->adprecovery	= MEGASAS_ADPRESET_SM_INFAULT;
 			instance->issuepend_done = 0;
 
@@ -2672,9 +2701,11 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
 				(instance->pdev->device ==
 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
 				(instance->pdev->device ==
-				 PCI_DEVICE_ID_LSI_FUSION) ||
+				PCI_DEVICE_ID_LSI_FUSION) ||
 				(instance->pdev->device ==
-				PCI_DEVICE_ID_LSI_INVADER)) {
+				PCI_DEVICE_ID_LSI_INVADER) ||
+				(instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_FURY)) {
 				writel(
 				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
 				  &instance->reg_set->doorbell);
@@ -2696,7 +2727,9 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
 			    (instance->pdev->device ==
 			     PCI_DEVICE_ID_LSI_FUSION) ||
 			    (instance->pdev->device ==
-			     PCI_DEVICE_ID_LSI_INVADER)) {
+			     PCI_DEVICE_ID_LSI_INVADER) ||
+			    (instance->pdev->device ==
+			     PCI_DEVICE_ID_LSI_FURY)) {
 				writel(MFI_INIT_HOTPLUG,
 				       &instance->reg_set->doorbell);
 			} else
@@ -2711,7 +2744,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
 			/*
 			 * Bring it to READY state; assuming max wait 10 secs
 			 */
-			instance->instancet->disable_intr(instance->reg_set);
+			instance->instancet->disable_intr(instance);
 			if ((instance->pdev->device ==
 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
 				(instance->pdev->device ==
@@ -2719,13 +2752,17 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
 				(instance->pdev->device
 					== PCI_DEVICE_ID_LSI_FUSION) ||
 				(instance->pdev->device
-					== PCI_DEVICE_ID_LSI_INVADER)) {
+					== PCI_DEVICE_ID_LSI_INVADER) ||
+				(instance->pdev->device
+					== PCI_DEVICE_ID_LSI_FURY)) {
 				writel(MFI_RESET_FLAGS,
 					&instance->reg_set->doorbell);
 				if ((instance->pdev->device ==
-				    PCI_DEVICE_ID_LSI_FUSION) ||
-				    (instance->pdev->device ==
-				     PCI_DEVICE_ID_LSI_INVADER)) {
+					PCI_DEVICE_ID_LSI_FUSION) ||
+					(instance->pdev->device ==
+					PCI_DEVICE_ID_LSI_INVADER) ||
+					(instance->pdev->device ==
+					PCI_DEVICE_ID_LSI_FURY)) {
 					for (i = 0; i < (10 * 1000); i += 20) {
 						if (readl(
 							    &instance->
@@ -2950,6 +2987,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
 		cmd->frame->io.pad_0 = 0;
 		if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
 		    (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
+			(instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
 		    (reset_devices))
 			cmd->frame->hdr.cmd = MFI_CMD_INVALID;
 	}
@@ -3352,7 +3390,7 @@ megasas_issue_init_mfi(struct megasas_instance *instance)
 	/*
 	 * disable the intr before firing the init frame to FW
 	 */
-	instance->instancet->disable_intr(instance->reg_set);
+	instance->instancet->disable_intr(instance);
 
 	/*
 	 * Issue the init frame in polled mode
@@ -3459,11 +3497,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
 {
 	u32 max_sectors_1;
 	u32 max_sectors_2;
-	u32 tmp_sectors, msix_enable;
+	u32 tmp_sectors, msix_enable, scratch_pad_2;
 	struct megasas_register_set __iomem *reg_set;
 	struct megasas_ctrl_info *ctrl_info;
 	unsigned long bar_list;
-	int i;
+	int i, loop, fw_msix_count = 0;
 
 	/* Find first memory bar */
 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
@@ -3487,6 +3525,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 	switch (instance->pdev->device) {
 	case PCI_DEVICE_ID_LSI_FUSION:
 	case PCI_DEVICE_ID_LSI_INVADER:
+	case PCI_DEVICE_ID_LSI_FURY:
 		instance->instancet = &megasas_instance_template_fusion;
 		break;
 	case PCI_DEVICE_ID_LSI_SAS1078R:
@@ -3514,20 +3553,49 @@ static int megasas_init_fw(struct megasas_instance *instance)
 	if (megasas_transition_to_ready(instance, 0))
 		goto fail_ready_state;
 
+	/*
+	 * MSI-X host index 0 is common to all adapters.
+	 * It is used for all MPT-based adapters.
+	 */
+	instance->reply_post_host_index_addr[0] =
+		(u32 *)((u8 *)instance->reg_set +
+		MPI2_REPLY_POST_HOST_INDEX_OFFSET);
+
 	/* Check if MSI-X is supported while in ready state */
 	msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
 		       0x4000000) >> 0x1a;
 	if (msix_enable && !msix_disable) {
+		scratch_pad_2 = readl
+			(&instance->reg_set->outbound_scratch_pad_2);
 		/* Check max MSI-X vectors */
-		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
-		    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)) {
-			instance->msix_vectors = (readl(&instance->reg_set->
-							outbound_scratch_pad_2
-							  ) & 0x1F) + 1;
+		if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) {
+			instance->msix_vectors = (scratch_pad_2
+				& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
+			fw_msix_count = instance->msix_vectors;
 			if (msix_vectors)
 				instance->msix_vectors =
 					min(msix_vectors,
 					    instance->msix_vectors);
+		} else if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)
+			|| (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+			/* Invader/Fury supports more than 8 MSI-X */
+			instance->msix_vectors = ((scratch_pad_2
+				& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
+				>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
+			fw_msix_count = instance->msix_vectors;
+			/* Save reply post host index addresses 1-15 to local
+			 * memory. Index 0 is already saved from reg offset
+			 * MPI2_REPLY_POST_HOST_INDEX_OFFSET.
+			 */
+			for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
+				instance->reply_post_host_index_addr[loop] =
+					(u32 *)((u8 *)instance->reg_set +
+					MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
+					+ (loop * 0x10));
+			}
+			if (msix_vectors)
+				instance->msix_vectors = min(msix_vectors,
+					instance->msix_vectors);
 		} else
 			instance->msix_vectors = 1;
 		/* Don't bother allocating more MSI-X vectors than cpus */
@@ -3547,6 +3615,12 @@ static int megasas_init_fw(struct megasas_instance *instance)
 			}
 		} else
 			instance->msix_vectors = 0;
+
+		dev_info(&instance->pdev->dev, "[scsi%d]: FW supports"
+			"<%d> MSIX vector,Online CPUs: <%d>,"
+			"Current MSIX <%d>\n", instance->host->host_no,
+			fw_msix_count, (unsigned int)num_online_cpus(),
+			instance->msix_vectors);
 	}
 
 	/* Get operational params, sge flags, send init cmd to controller */
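
The hunk above reads the outbound_scratch_pad_2 register to learn how many reply queues firmware exposes, and pre-computes the per-queue host index register addresses (index 0 at MPI2_REPLY_POST_HOST_INDEX_OFFSET, indices 1-15 spaced 0x10 apart from MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET). A sketch of the decode, assuming MR_MAX_REPLY_QUEUES_OFFSET is the 0x1F mask the old inline code used and the extended field starts at bit 14 per MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT:

	u32 scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);

	/* Fusion: low bits encode (queues - 1), e.g. 0x07 -> 8 vectors */
	u32 fusion_vectors  = (scratch_pad_2 & 0x1F) + 1;

	/* Invader/Fury: wider field at bit 14, e.g. field value 0xF -> 16 vectors */
	u32 invader_vectors = ((scratch_pad_2 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
			>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
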
@@ -3585,8 +3659,32 @@ static int megasas_init_fw(struct megasas_instance *instance)
 		max_sectors_2 = ctrl_info->max_request_size;
 
 		tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
+
+		/* Check whether controller is iMR or MR */
+		if (ctrl_info->memory_size) {
+			instance->is_imr = 0;
+			dev_info(&instance->pdev->dev, "Controller type: MR, "
+				"Memory size is: %dMB\n",
+				ctrl_info->memory_size);
+		} else {
+			instance->is_imr = 1;
+			dev_info(&instance->pdev->dev,
+				"Controller type: iMR\n");
+		}
 		instance->disableOnlineCtrlReset =
 		ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+		instance->UnevenSpanSupport =
+			ctrl_info->adapterOperations2.supportUnevenSpans;
+		if (instance->UnevenSpanSupport) {
+			struct fusion_context *fusion = instance->ctrl_context;
+			dev_info(&instance->pdev->dev, "FW supports: "
+			"UnevenSpanSupport=%x\n", instance->UnevenSpanSupport);
+			if (MR_ValidateMapInfo(instance))
+				fusion->fast_path_io = 1;
+			else
+				fusion->fast_path_io = 0;
+
+		}
 	}
 
 	instance->max_sectors_per_req = instance->max_num_sge *
@@ -3597,8 +3695,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 	kfree(ctrl_info);
 
 	/* Check for valid throttlequeuedepth module parameter */
-	if (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY ||
-	    instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) {
+	if (instance->is_imr) {
 		if (throttlequeuedepth > (instance->max_fw_cmds -
 					  MEGASAS_SKINNY_INT_CMDS))
 			instance->throttlequeuedepth =
@@ -3882,8 +3979,7 @@ static int megasas_io_attach(struct megasas_instance *instance)
 	 */
 	host->irq = instance->pdev->irq;
 	host->unique_id = instance->unique_id;
-	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
-		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+	if (instance->is_imr) {
 		host->can_queue =
 			instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
 	} else
@@ -3925,7 +4021,8 @@ static int megasas_io_attach(struct megasas_instance *instance)
 
 	/* Fusion only supports host reset */
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
-	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)) {
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
 		host->hostt->eh_device_reset_handler = NULL;
 		host->hostt->eh_bus_reset_handler = NULL;
 	}
@@ -4036,6 +4133,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
 	switch (instance->pdev->device) {
 	case PCI_DEVICE_ID_LSI_FUSION:
 	case PCI_DEVICE_ID_LSI_INVADER:
+	case PCI_DEVICE_ID_LSI_FURY:
 	{
 		struct fusion_context *fusion;
 
@@ -4076,6 +4174,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
 	instance->ev = NULL;
 	instance->issuepend_done = 1;
 	instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+	instance->is_imr = 0;
 	megasas_poll_wait_aen = 0;
 
 	instance->evt_detail = pci_alloc_consistent(pdev,
@@ -4126,9 +4225,11 @@ static int megasas_probe_one(struct pci_dev *pdev,
 	instance->unload = 1;
 	instance->last_time = 0;
 	instance->disableOnlineCtrlReset = 1;
+	instance->UnevenSpanSupport = 0;
 
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
-	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER))
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
 		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
 	else
 		INIT_WORK(&instance->work_init, process_fw_state_change_wq);
@@ -4139,6 +4240,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
 	if (megasas_init_fw(instance))
 		goto fail_init_mfi;
 
+retry_irq_register:
 	/*
 	 * Register IRQ
 	 */
@@ -4156,7 +4258,9 @@ static int megasas_probe_one(struct pci_dev *pdev,
 					free_irq(
 						instance->msixentry[j].vector,
 						&instance->irq_context[j]);
-				goto fail_irq;
+				/* Retry irq register for IO_APIC */
+				instance->msix_vectors = 0;
+				goto retry_irq_register;
 			}
 		}
 	} else {
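
The probe path now degrades gracefully instead of failing: if any per-vector request_irq() fails, the vectors already registered are freed, msix_vectors is zeroed, and control jumps back to retry_irq_register so the same registration block runs again in legacy INTx mode. A condensed sketch of the retry shape, not the verbatim driver code:

	retry_irq_register:
		if (instance->msix_vectors) {
			for (i = 0; i < instance->msix_vectors; i++) {
				if (request_irq(instance->msixentry[i].vector,
						/* handler, flags, name, context */ ...)) {
					/* unwind vectors 0..i-1, then fall back */
					instance->msix_vectors = 0;
					goto retry_irq_register;
				}
			}
		} else {
			/* single shared legacy interrupt on pdev->irq */
		}
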
@@ -4170,7 +4274,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
 		}
 	}
 
-	instance->instancet->enable_intr(instance->reg_set);
+	instance->instancet->enable_intr(instance);
 
 	/*
 	 * Store instance in PCI softstate
@@ -4210,7 +4314,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
 	megasas_mgmt_info.max_index--;
 
 	pci_set_drvdata(pdev, NULL);
-	instance->instancet->disable_intr(instance->reg_set);
+	instance->instancet->disable_intr(instance);
 	if (instance->msix_vectors)
 		for (i = 0 ; i < instance->msix_vectors; i++)
 			free_irq(instance->msixentry[i].vector,
@@ -4219,7 +4323,8 @@ static int megasas_probe_one(struct pci_dev *pdev,
 		free_irq(instance->pdev->irq, &instance->irq_context[0]);
 fail_irq:
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
-	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER))
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
 		megasas_release_fusion(instance);
 	else
 		megasas_release_mfi(instance);
@@ -4359,7 +4464,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
 	tasklet_kill(&instance->isr_tasklet);
 
 	pci_set_drvdata(instance->pdev, instance);
-	instance->instancet->disable_intr(instance->reg_set);
+	instance->instancet->disable_intr(instance);
 
 	if (instance->msix_vectors)
 		for (i = 0 ; i < instance->msix_vectors; i++)
@@ -4430,6 +4535,7 @@ megasas_resume(struct pci_dev *pdev)
 	switch (instance->pdev->device) {
 	case PCI_DEVICE_ID_LSI_FUSION:
 	case PCI_DEVICE_ID_LSI_INVADER:
+	case PCI_DEVICE_ID_LSI_FURY:
 	{
 		megasas_reset_reply_desc(instance);
 		if (megasas_ioc_init_fusion(instance)) {
@@ -4483,7 +4589,7 @@ megasas_resume(struct pci_dev *pdev)
 		}
 	}
 
-	instance->instancet->enable_intr(instance->reg_set);
+	instance->instancet->enable_intr(instance);
 	instance->unload = 0;
 
 	/*
@@ -4565,7 +4671,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
 
 	pci_set_drvdata(instance->pdev, NULL);
 
-	instance->instancet->disable_intr(instance->reg_set);
+	instance->instancet->disable_intr(instance);
 
 	if (instance->msix_vectors)
 		for (i = 0 ; i < instance->msix_vectors; i++)
@@ -4579,6 +4685,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
 	switch (instance->pdev->device) {
 	case PCI_DEVICE_ID_LSI_FUSION:
 	case PCI_DEVICE_ID_LSI_INVADER:
+	case PCI_DEVICE_ID_LSI_FURY:
 		megasas_release_fusion(instance);
 		for (i = 0; i < 2 ; i++)
 			if (fusion->ld_map[i])
@@ -4591,10 +4698,6 @@ static void megasas_detach_one(struct pci_dev *pdev)
 		break;
 	default:
 		megasas_release_mfi(instance);
-		pci_free_consistent(pdev,
-				    sizeof(struct megasas_evt_detail),
-				    instance->evt_detail,
-				    instance->evt_detail_h);
 		pci_free_consistent(pdev, sizeof(u32),
 				    instance->producer,
 				    instance->producer_h);
@@ -4604,6 +4707,9 @@ static void megasas_detach_one(struct pci_dev *pdev)
 		break;
 	}
 
+	if (instance->evt_detail)
+		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+				instance->evt_detail, instance->evt_detail_h);
 	scsi_host_put(host);
 
 	pci_set_drvdata(pdev, NULL);
@@ -4625,7 +4731,7 @@ static void megasas_shutdown(struct pci_dev *pdev)
 	instance->unload = 1;
 	megasas_flush_cache(instance);
 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
-	instance->instancet->disable_intr(instance->reg_set);
+	instance->instancet->disable_intr(instance);
 	if (instance->msix_vectors)
 		for (i = 0 ; i < instance->msix_vectors; i++)
 			free_irq(instance->msixentry[i].vector,

+ 739 - 45
drivers/scsi/megaraid/megaraid_sas_fp.c

@@ -60,10 +60,22 @@
 #define FALSE 0
 #define TRUE 1
 
+#define SPAN_DEBUG 0
+#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
+#define SPAN_ROW_DATA_SIZE(map_, ld, index_)   (MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize)
+#define SPAN_INVALID  0xff
+
 /* Prototypes */
-void
-mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
-			      struct LD_LOAD_BALANCE_INFO *lbInfo);
+void mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
+	struct LD_LOAD_BALANCE_INFO *lbInfo);
+
+static void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
+	PLD_SPAN_INFO ldSpanInfo);
+static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
+	u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
+	struct RAID_CONTEXT *pRAID_Context, struct MR_FW_RAID_MAP_ALL *map);
+static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
+	u64 strip, struct MR_FW_RAID_MAP_ALL *map);
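
The span-set math below leans on mega_mod64() and mega_div64_32(), whose signatures appear in the trailing context. On 32-bit kernels a plain 64-bit '/' or '%' is not available, so such helpers are typically built on do_div(); a minimal sketch under that assumption, with a hypothetical name:

	#include <asm/div64.h>

	u32 mega_mod64_sketch(u64 dividend, u32 divisor)
	{
		u64 d = dividend;
		u32 remainder;

		/* do_div() divides d in place and returns the remainder */
		remainder = do_div(d, divisor);
		return remainder;
	}
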
 
 u32 mega_mod64(u64 dividend, u32 divisor)
 {
@@ -148,9 +160,12 @@ static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
 /*
  * This function will validate Map info data provided by FW
  */
-u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map,
-		      struct LD_LOAD_BALANCE_INFO *lbInfo)
+u8 MR_ValidateMapInfo(struct megasas_instance *instance)
 {
+	struct fusion_context *fusion = instance->ctrl_context;
+	struct MR_FW_RAID_MAP_ALL *map = fusion->ld_map[(instance->map_id & 1)];
+	struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info;
+	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
 	struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
 
 	if (pFwRaidMap->totalSize !=
@@ -167,13 +182,16 @@ u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map,
 		return 0;
 	}
 
+	if (instance->UnevenSpanSupport)
+		mr_update_span_set(map, ldSpanInfo);
+
 	mr_update_load_balance_params(map, lbInfo);
 
 	return 1;
 }
 
 u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
-		    struct MR_FW_RAID_MAP_ALL *map, int *div_error)
+		    struct MR_FW_RAID_MAP_ALL *map)
 {
 	struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
 	struct MR_QUAD_ELEMENT    *quad;
@@ -185,10 +203,8 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
 		for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
 			quad = &pSpanBlock->block_span_info.quad[j];
 
-			if (quad->diff == 0) {
-				*div_error = 1;
-				return span;
-			}
+			if (quad->diff == 0)
+				return SPAN_INVALID;
 			if (quad->logStart <= row  &&  row <= quad->logEnd  &&
 			    (mega_mod64(row-quad->logStart, quad->diff)) == 0) {
 				if (span_blk != NULL) {
@@ -207,7 +223,456 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
 			}
 		}
 	}
-	return span;
+	return SPAN_INVALID;
+}
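
MR_GetSpanBlock() previously reported quad divide errors through a *div_error out-parameter; it now returns the SPAN_INVALID sentinel (0xff), a value a real span number can never take. The caller pattern, as adopted by MR_GetPhyParams() later in this file:

	span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
	if (span == SPAN_INVALID)
		return FALSE;	/* quad->diff == 0 or row not covered by any quad */
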
+
+/*
+******************************************************************************
+*
+* Function to print info about span set created in driver from FW raid map
+*
+* Inputs :
+* map    - LD map
+* ldSpanInfo - ldSpanInfo per HBA instance
+*/
+#if SPAN_DEBUG
+static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
+{
+
+	u8   span;
+	u32    element;
+	struct MR_LD_RAID *raid;
+	LD_SPAN_SET *span_set;
+	struct MR_QUAD_ELEMENT    *quad;
+	int ldCount;
+	u16 ld;
+
+	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
+		ld = MR_TargetIdToLdGet(ldCount, map);
+		if (ld >= MAX_LOGICAL_DRIVES)
+			continue;
+		raid = MR_LdRaidGet(ld, map);
+		dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
+			ld, raid->spanDepth);
+		for (span = 0; span < raid->spanDepth; span++)
+			dev_dbg(&instance->pdev->dev, "Span=%x,"
+			" number of quads=%x\n", span,
+			map->raidMap.ldSpanMap[ld].spanBlock[span].
+			block_span_info.noElements);
+		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
+			span_set = &(ldSpanInfo[ld].span_set[element]);
+			if (span_set->span_row_data_width == 0)
+				break;
+
+			dev_dbg(&instance->pdev->dev, "Span Set %x:"
+				"width=%x, diff=%x\n", element,
+				(unsigned int)span_set->span_row_data_width,
+				(unsigned int)span_set->diff);
+			dev_dbg(&instance->pdev->dev, "logical LBA"
+				"start=0x%08lx, end=0x%08lx\n",
+				(long unsigned int)span_set->log_start_lba,
+				(long unsigned int)span_set->log_end_lba);
+			dev_dbg(&instance->pdev->dev, "span row start=0x%08lx,"
+				" end=0x%08lx\n",
+				(long unsigned int)span_set->span_row_start,
+				(long unsigned int)span_set->span_row_end);
+			dev_dbg(&instance->pdev->dev, "data row start=0x%08lx,"
+				" end=0x%08lx\n",
+				(long unsigned int)span_set->data_row_start,
+				(long unsigned int)span_set->data_row_end);
+			dev_dbg(&instance->pdev->dev, "data strip start=0x%08lx,"
+				" end=0x%08lx\n",
+				(long unsigned int)span_set->data_strip_start,
+				(long unsigned int)span_set->data_strip_end);
+
+			for (span = 0; span < raid->spanDepth; span++) {
+				if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+					block_span_info.noElements >=
+					element + 1) {
+					quad = &map->raidMap.ldSpanMap[ld].
+						spanBlock[span].block_span_info.
+						quad[element];
+				dev_dbg(&instance->pdev->dev, "Span=%x,"
+					"Quad=%x, diff=%x\n", span,
+					element, quad->diff);
+				dev_dbg(&instance->pdev->dev,
+					"offset_in_span=0x%08lx\n",
+					(long unsigned int)quad->offsetInSpan);
+				dev_dbg(&instance->pdev->dev,
+					"logical start=0x%08lx, end=0x%08lx\n",
+					(long unsigned int)quad->logStart,
+					(long unsigned int)quad->logEnd);
+				}
+			}
+		}
+	}
+	return 0;
+}
+#endif
+
+/*
+******************************************************************************
+*
+* This routine calculates the Span block for given row using spanset.
+*
+* Inputs :
+*    instance - HBA instance
+*    ld   - Logical drive number
+*    row        - Row number
+*    map    - LD map
+*
+* Outputs :
+*
+*    span          - Span number
+*    block         - Absolute Block number in the physical disk
+*    (SPAN_INVALID is returned on a divide error or an unresolved row)
+*/
+
+u32 mr_spanset_get_span_block(struct megasas_instance *instance,
+		u32 ld, u64 row, u64 *span_blk, struct MR_FW_RAID_MAP_ALL *map)
+{
+	struct fusion_context *fusion = instance->ctrl_context;
+	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
+	LD_SPAN_SET *span_set;
+	struct MR_QUAD_ELEMENT    *quad;
+	u32    span, info;
+	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+
+	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+		span_set = &(ldSpanInfo[ld].span_set[info]);
+
+		if (span_set->span_row_data_width == 0)
+			break;
+
+		if (row > span_set->data_row_end)
+			continue;
+
+		for (span = 0; span < raid->spanDepth; span++)
+			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+				block_span_info.noElements >= info+1) {
+				quad = &map->raidMap.ldSpanMap[ld].
+					spanBlock[span].
+					block_span_info.quad[info];
+				if (quad->diff == 0)
+					return SPAN_INVALID;
+				if (quad->logStart <= row  &&
+					row <= quad->logEnd  &&
+					(mega_mod64(row - quad->logStart,
+						quad->diff)) == 0) {
+					if (span_blk != NULL) {
+						u64  blk;
+						blk = mega_div64_32
+						    ((row - quad->logStart),
+						    quad->diff);
+						blk = (blk + quad->offsetInSpan)
+							 << raid->stripeShift;
+						*span_blk = blk;
+					}
+					return span;
+				}
+			}
+	}
+	return SPAN_INVALID;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the row for given strip using spanset.
+*
+* Inputs :
+*    instance - HBA instance
+*    ld   - Logical drive number
+*    Strip        - Strip
+*    map    - LD map
+*
+* Outputs :
+*
+*    row         - row associated with strip
+*/
+
+static u64  get_row_from_strip(struct megasas_instance *instance,
+	u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map)
+{
+	struct fusion_context *fusion = instance->ctrl_context;
+	struct MR_LD_RAID	*raid = MR_LdRaidGet(ld, map);
+	LD_SPAN_SET	*span_set;
+	PLD_SPAN_INFO	ldSpanInfo = fusion->log_to_span;
+	u32		info, strip_offset, span, span_offset;
+	u64		span_set_Strip, span_set_Row, retval;
+
+	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+		span_set = &(ldSpanInfo[ld].span_set[info]);
+
+		if (span_set->span_row_data_width == 0)
+			break;
+		if (strip > span_set->data_strip_end)
+			continue;
+
+		span_set_Strip = strip - span_set->data_strip_start;
+		strip_offset = mega_mod64(span_set_Strip,
+				span_set->span_row_data_width);
+		span_set_Row = mega_div64_32(span_set_Strip,
+				span_set->span_row_data_width) * span_set->diff;
+		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
+			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+				block_span_info.noElements >= info+1) {
+				if (strip_offset >=
+					span_set->strip_offset[span])
+					span_offset++;
+				else
+					break;
+			}
+#if SPAN_DEBUG
+		dev_info(&instance->pdev->dev, "Strip 0x%llx,"
+			"span_set_Strip 0x%llx, span_set_Row 0x%llx"
+			"data width 0x%llx span offset 0x%x\n", strip,
+			(unsigned long long)span_set_Strip,
+			(unsigned long long)span_set_Row,
+			(unsigned long long)span_set->span_row_data_width,
+			span_offset);
+		dev_info(&instance->pdev->dev, "For strip 0x%llx"
+			"row is 0x%llx\n", strip,
+			(unsigned long long) span_set->data_row_start +
+			(unsigned long long) span_set_Row + (span_offset - 1));
+#endif
+		retval = (span_set->data_row_start + span_set_Row +
+				(span_offset - 1));
+		return retval;
+	}
+	return -1LLU;
+}
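
To make the strip-to-row mapping concrete, a worked trace of the routine above with invented span-set values:

	/*
	 * Assume: span_row_data_width = 4, strip_offset[] = {0, 2, 3},
	 * diff = 1, data_strip_start = 0, data_row_start = 0, spanDepth = 3.
	 *
	 * For strip = 6:
	 *   span_set_Strip = 6 - 0 = 6
	 *   strip_offset   = 6 mod 4 = 2
	 *   span_set_Row   = (6 / 4) * 1 = 1
	 *   span_offset    = 2   (offsets 0 and 2 are <= 2; offset 3 is not)
	 *   row            = 0 + 1 + (2 - 1) = 2
	 */
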
+
+
+/*
+******************************************************************************
+*
+* This routine calculates the Start Strip for given row using spanset.
+*
+* Inputs :
+*    instance - HBA instance
+*    ld   - Logical drive number
+*    row        - Row number
+*    map    - LD map
+*
+* Outputs :
+*
+*    Strip         - Start strip associated with row
+*/
+
+static u64 get_strip_from_row(struct megasas_instance *instance,
+		u32 ld, u64 row, struct MR_FW_RAID_MAP_ALL *map)
+{
+	struct fusion_context *fusion = instance->ctrl_context;
+	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
+	LD_SPAN_SET *span_set;
+	struct MR_QUAD_ELEMENT    *quad;
+	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+	u32    span, info;
+	u64  strip;
+
+	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+		span_set = &(ldSpanInfo[ld].span_set[info]);
+
+		if (span_set->span_row_data_width == 0)
+			break;
+		if (row > span_set->data_row_end)
+			continue;
+
+		for (span = 0; span < raid->spanDepth; span++)
+			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+				block_span_info.noElements >= info+1) {
+				quad = &map->raidMap.ldSpanMap[ld].
+					spanBlock[span].block_span_info.quad[info];
+				if (quad->logStart <= row  &&
+					row <= quad->logEnd  &&
+					mega_mod64((row - quad->logStart),
+					quad->diff) == 0) {
+					strip = mega_div64_32
+						(((row - span_set->data_row_start)
+							- quad->logStart),
+							quad->diff);
+					strip *= span_set->span_row_data_width;
+					strip += span_set->data_strip_start;
+					strip += span_set->strip_offset[span];
+					return strip;
+				}
+			}
+	}
+	dev_err(&instance->pdev->dev, "get_strip_from_row"
+		"returns invalid strip for ld=%x, row=%lx\n",
+		ld, (long unsigned int)row);
+	return -1;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the Physical Arm for given strip using spanset.
+*
+* Inputs :
+*    instance - HBA instance
+*    ld   - Logical drive number
+*    strip      - Strip
+*    map    - LD map
+*
+* Outputs :
+*
+*    Phys Arm         - Phys Arm associated with strip
+*/
+
+static u32 get_arm_from_strip(struct megasas_instance *instance,
+	u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map)
+{
+	struct fusion_context *fusion = instance->ctrl_context;
+	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
+	LD_SPAN_SET *span_set;
+	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+	u32    info, strip_offset, span, span_offset, retval;
+
+	for (info = 0 ; info < MAX_QUAD_DEPTH; info++) {
+		span_set = &(ldSpanInfo[ld].span_set[info]);
+
+		if (span_set->span_row_data_width == 0)
+			break;
+		if (strip > span_set->data_strip_end)
+			continue;
+
+		strip_offset = (uint)mega_mod64
+				((strip - span_set->data_strip_start),
+				span_set->span_row_data_width);
+
+		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
+			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+				block_span_info.noElements >= info+1) {
+				if (strip_offset >=
+					span_set->strip_offset[span])
+					span_offset =
+						span_set->strip_offset[span];
+				else
+					break;
+			}
+#if SPAN_DEBUG
+		dev_info(&instance->pdev->dev, "get_arm_from_strip:"
+			"for ld=0x%x strip=0x%lx arm is  0x%x\n", ld,
+			(long unsigned int)strip, (strip_offset - span_offset));
+#endif
+		retval = (strip_offset - span_offset);
+		return retval;
+	}
+
+	dev_err(&instance->pdev->dev, "get_arm_from_strip"
+		"returns invalid arm for ld=%x strip=%lx\n",
+		ld, (long unsigned int)strip);
+
+	return -1;
+}
+
+/* This Function will return Phys arm */
+u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
+		struct MR_FW_RAID_MAP_ALL *map)
+{
+	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
+	/* Need to check correct default value */
+	u32    arm = 0;
+
+	switch (raid->level) {
+	case 0:
+	case 5:
+	case 6:
+		arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
+		break;
+	case 1:
+		/* start with logical arm */
+		arm = get_arm_from_strip(instance, ld, stripe, map);
+		if (arm != 0xffffffff)
+			arm *= 2;
+		break;
+	}
+
+	return arm;
+}
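
For RAID 0/5/6 the physical arm is simply the stripe modulo the span row size; for RAID 1 the logical arm is doubled because data and mirror arms interleave. An illustration under an assumed layout:

	/*
	 * Assumed physical layout: {data0, mirror0, data1, mirror1}.
	 * get_arm_from_strip() returns logical arm 1 -> physical arm
	 * 1 * 2 = 2; the RAID-1 fallback in the routines below then
	 * tries the mirror at physArm + 1 = 3.
	 */
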
+
+
+/*
+******************************************************************************
+*
+* This routine calculates the arm, span and block for the specified stripe and
+* reference in stripe using spanset
+*
+* Inputs :
+*
+*    ld   - Logical drive number
+*    stripRow        - Stripe number
+*    stripRef    - Reference in stripe
+*
+* Outputs :
+*
+*    span          - Span number
+*    block         - Absolute Block number in the physical disk
+*/
+static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
+		u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
+		struct RAID_CONTEXT *pRAID_Context,
+		struct MR_FW_RAID_MAP_ALL *map)
+{
+	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
+	u32     pd, arRef;
+	u8      physArm, span;
+	u64     row;
+	u8	retval = TRUE;
+	u8	do_invader = 0;
+	u64	*pdBlock = &io_info->pdBlock;
+	u16	*pDevHandle = &io_info->devHandle;
+	u32	logArm, rowMod, armQ, arm;
+
+	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
+		instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+		do_invader = 1;
+
+	/* Get row and span from io_info for uneven span IO. */
+	row	    = io_info->start_row;
+	span	    = io_info->start_span;
+
+
+	if (raid->level == 6) {
+		logArm = get_arm_from_strip(instance, ld, stripRow, map);
+		if (logArm == 0xffffffff)
+			return FALSE;
+		rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
+		armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
+		arm = armQ + 1 + logArm;
+		if (arm >= SPAN_ROW_SIZE(map, ld, span))
+			arm -= SPAN_ROW_SIZE(map, ld, span);
+		physArm = (u8)arm;
+	} else
+		/* Calculate the arm */
+		physArm = get_arm(instance, ld, span, stripRow, map);
+	if (physArm == 0xFF)
+		return FALSE;
+
+	arRef       = MR_LdSpanArrayGet(ld, span, map);
+	pd          = MR_ArPdGet(arRef, physArm, map);
+
+	if (pd != MR_PD_INVALID)
+		*pDevHandle = MR_PdDevHandleGet(pd, map);
+	else {
+		*pDevHandle = MR_PD_INVALID;
+		if ((raid->level >= 5) &&
+			(!do_invader ||
+			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
+			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+		else if (raid->level == 1) {
+			pd = MR_ArPdGet(arRef, physArm + 1, map);
+			if (pd != MR_PD_INVALID)
+				*pDevHandle = MR_PdDevHandleGet(pd, map);
+		}
+	}
+
+	*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
+	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
+					physArm;
+	return retval;
 }
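
A worked trace of the RAID-6 arm rotation above, with invented values (armQ is presumably the Q-parity arm position for the row):

	/*
	 * Assume: SPAN_ROW_SIZE = 5, row = 3, logArm = 0.
	 *   rowMod = 3 mod 5 = 3
	 *   armQ   = 5 - 1 - 3 = 1
	 *   arm    = 1 + 1 + 0 = 2      (skips past the rotated parity)
	 *   arm < 5, so no wrap; physArm = 2
	 */
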
 
 /*
@@ -228,16 +693,22 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
 *    block         - Absolute Block number in the physical disk
 */
 u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
-		   u16 stripRef, u64 *pdBlock, u16 *pDevHandle,
-		   struct RAID_CONTEXT *pRAID_Context,
-		   struct MR_FW_RAID_MAP_ALL *map)
+		u16 stripRef, struct IO_REQUEST_INFO *io_info,
+		struct RAID_CONTEXT *pRAID_Context,
+		struct MR_FW_RAID_MAP_ALL *map)
 {
 	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
 	u32         pd, arRef;
 	u8          physArm, span;
 	u64         row;
 	u8	    retval = TRUE;
-	int	    error_code = 0;
+	u8          do_invader = 0;
+	u64	    *pdBlock = &io_info->pdBlock;
+	u16	    *pDevHandle = &io_info->devHandle;
+
+	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
+		instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+		do_invader = 1;
 
 	row =  mega_div64_32(stripRow, raid->rowDataSize);
 
@@ -267,8 +738,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
 		span = 0;
 		*pdBlock = row << raid->stripeShift;
 	} else {
-		span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);
-		if (error_code == 1)
+		span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
+		if (span == SPAN_INVALID)
 			return FALSE;
 	}
 
@@ -282,9 +753,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
 	else {
 		*pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
 		if ((raid->level >= 5) &&
-		    ((instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) ||
-		     (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER &&
-		      raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
+			(!do_invader ||
+			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
 			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
 		else if (raid->level == 1) {
 			/* Get alternate Pd. */
@@ -327,17 +797,42 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 	u32         numBlocks, ldTgtId;
 	u8          isRead;
 	u8	    retval = 0;
+	u8	    startlba_span = SPAN_INVALID;
+	u64 *pdBlock = &io_info->pdBlock;
 
 	ldStartBlock = io_info->ldStartBlock;
 	numBlocks = io_info->numBlocks;
 	ldTgtId = io_info->ldTgtId;
 	isRead = io_info->isRead;
+	io_info->IoforUnevenSpan = 0;
+	io_info->start_span	= SPAN_INVALID;
 
 	ld = MR_TargetIdToLdGet(ldTgtId, map);
 	raid = MR_LdRaidGet(ld, map);
 
+	/*
+	 * If rowDataSize in the RAID map and spanRowDataSize in the span
+	 * info are both zero, return FALSE.
+	 */
+	if (raid->rowDataSize == 0) {
+		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
+			return FALSE;
+		else if (instance->UnevenSpanSupport) {
+			io_info->IoforUnevenSpan = 1;
+		} else {
+			dev_info(&instance->pdev->dev,
+				"raid->rowDataSize is 0, but has SPAN[0]"
+				"rowDataSize = 0x%0x,"
+				"but there is _NO_ UnevenSpanSupport\n",
+				MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
+			return FALSE;
+		}
+	}
+
 	stripSize = 1 << raid->stripeShift;
 	stripe_mask = stripSize-1;
+
+
 	/*
 	 * calculate starting row and stripe, and number of strips and rows
 	 */
@@ -347,11 +842,50 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 	ref_in_end_stripe   = (u16)(endLba & stripe_mask);
 	endStrip            = endLba >> raid->stripeShift;
 	num_strips          = (u8)(endStrip - start_strip + 1); /* End strip */
-	if (raid->rowDataSize == 0)
-		return FALSE;
-	start_row           =  mega_div64_32(start_strip, raid->rowDataSize);
-	endRow              =  mega_div64_32(endStrip, raid->rowDataSize);
-	numRows             = (u8)(endRow - start_row + 1);
+
+	if (io_info->IoforUnevenSpan) {
+		start_row = get_row_from_strip(instance, ld, start_strip, map);
+		endRow	  = get_row_from_strip(instance, ld, endStrip, map);
+		if (start_row == -1ULL || endRow == -1ULL) {
+			dev_info(&instance->pdev->dev, "return from %s %d."
+				"Send IO w/o region lock.\n",
+				__func__, __LINE__);
+			return FALSE;
+		}
+
+		if (raid->spanDepth == 1) {
+			startlba_span = 0;
+			*pdBlock = start_row << raid->stripeShift;
+		} else
+			startlba_span = (u8)mr_spanset_get_span_block(instance,
+						ld, start_row, pdBlock, map);
+		if (startlba_span == SPAN_INVALID) {
+			dev_info(&instance->pdev->dev, "return from %s %d"
+				"for row 0x%llx,start strip %llx"
+				"endSrip %llx\n", __func__, __LINE__,
+				(unsigned long long)start_row,
+				(unsigned long long)start_strip,
+				(unsigned long long)endStrip);
+			return FALSE;
+		}
+		io_info->start_span	= startlba_span;
+		io_info->start_row	= start_row;
+#if SPAN_DEBUG
+		dev_dbg(&instance->pdev->dev, "Check Span number from %s %d"
+			"for row 0x%llx, start strip 0x%llx end strip 0x%llx"
+			" span 0x%x\n", __func__, __LINE__,
+			(unsigned long long)start_row,
+			(unsigned long long)start_strip,
+			(unsigned long long)endStrip, startlba_span);
+		dev_dbg(&instance->pdev->dev, "start_row 0x%llx endRow 0x%llx"
+			"Start span 0x%x\n", (unsigned long long)start_row,
+			(unsigned long long)endRow, startlba_span);
+#endif
+	} else {
+		start_row = mega_div64_32(start_strip, raid->rowDataSize);
+		endRow    = mega_div64_32(endStrip, raid->rowDataSize);
+	}
+	numRows = (u8)(endRow - start_row + 1);
 
 	/*
 	 * calculate region info.
@@ -384,28 +918,56 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 			regSize = numBlocks;
 		}
 		/* multi-strip IOs always need the full stripe locked */
-	} else {
+	} else if (io_info->IoforUnevenSpan == 0) {
+		/*
+		 * For Even span region lock optimization.
+		 * If the start strip is the last in the start row
+		 */
 		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
-			/* If the start strip is the last in the start row */
 			regStart += ref_in_start_stripe;
-			regSize = stripSize - ref_in_start_stripe;
 			/* initialize count to sectors from startref to end
 			   of strip */
+			regSize = stripSize - ref_in_start_stripe;
 		}
 
+		/* add complete rows in the middle of the transfer */
 		if (numRows > 2)
-			/* Add complete rows in the middle of the transfer */
 			regSize += (numRows-2) << raid->stripeShift;
 
-		/* if IO ends within first strip of last row */
+		/* if IO ends within first strip of last row*/
 		if (endStrip == endRow*raid->rowDataSize)
 			regSize += ref_in_end_stripe+1;
 		else
 			regSize += stripSize;
+	} else {
+		/*
+		 * For Uneven span region lock optimization.
+		 * If the start strip is the last in the start row
+		 */
+		if (start_strip == (get_strip_from_row(instance, ld, start_row, map) +
+				SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
+			regStart += ref_in_start_stripe;
+			/* initialize count to sectors from
+			 * startRef to end of strip
+			 */
+			regSize = stripSize - ref_in_start_stripe;
+		}
+		/* Add complete rows in the middle of the transfer */
+		if (numRows > 2)
+			regSize += (numRows-2) << raid->stripeShift;
+
+		/* if IO ends within first strip of last row */
+		if (endStrip == get_strip_from_row(instance, ld, endRow, map))
+			regSize += ref_in_end_stripe + 1;
+		else
+			regSize += stripSize;
 	}
 
 	pRAID_Context->timeoutValue     = map->raidMap.fpPdIoTimeoutSec;
-	if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)
+	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+		(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
 		pRAID_Context->regLockFlags = (isRead) ?
 			raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
 	else
@@ -419,30 +981,161 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 	/*Get Phy Params only if FP capable, or else leave it to MR firmware
 	  to do the calculation.*/
 	if (io_info->fpOkForIo) {
-		retval = MR_GetPhyParams(instance, ld, start_strip,
-					 ref_in_start_stripe,
-					 &io_info->pdBlock,
-					 &io_info->devHandle, pRAID_Context,
-					 map);
-		/* If IO on an invalid Pd, then FP i snot possible */
+		retval = io_info->IoforUnevenSpan ?
+				mr_spanset_get_phy_params(instance, ld,
+					start_strip, ref_in_start_stripe,
+					io_info, pRAID_Context, map) :
+				MR_GetPhyParams(instance, ld, start_strip,
+					ref_in_start_stripe, io_info,
+					pRAID_Context, map);
+		/* If IO on an invalid Pd, then FP is not possible. */
 		if (io_info->devHandle == MR_PD_INVALID)
 			io_info->fpOkForIo = FALSE;
 		return retval;
 	} else if (isRead) {
 		uint stripIdx;
 		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
-			if (!MR_GetPhyParams(instance, ld,
-					     start_strip + stripIdx,
-					     ref_in_start_stripe,
-					     &io_info->pdBlock,
-					     &io_info->devHandle,
-					     pRAID_Context, map))
+			retval = io_info->IoforUnevenSpan ?
+				mr_spanset_get_phy_params(instance, ld,
+				    start_strip + stripIdx,
+				    ref_in_start_stripe, io_info,
+				    pRAID_Context, map) :
+				MR_GetPhyParams(instance, ld,
+				    start_strip + stripIdx, ref_in_start_stripe,
+				    io_info, pRAID_Context, map);
+			if (!retval)
 				return TRUE;
 		}
 	}
+
+#if SPAN_DEBUG
+	/* Just for testing what arm we get for strip.*/
+	if (io_info->IoforUnevenSpan)
+		get_arm_from_strip(instance, ld, start_strip, map);
+#endif
 	return TRUE;
 }
 
+/*
+******************************************************************************
+*
+* This routine prepares span set info from a valid RAID map and stores it
+* in the local copy of ldSpanInfo in the per-instance data structure.
+*
+* Inputs :
+* map    - LD map
+* ldSpanInfo - ldSpanInfo per HBA instance
+*
+*/
+void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
+			PLD_SPAN_INFO ldSpanInfo)
+{
+	u8   span, count;
+	u32  element, span_row_width;
+	u64  span_row;
+	struct MR_LD_RAID *raid;
+	LD_SPAN_SET *span_set, *span_set_prev;
+	struct MR_QUAD_ELEMENT    *quad;
+	int ldCount;
+	u16 ld;
+
+
+	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
+		ld = MR_TargetIdToLdGet(ldCount, map);
+		if (ld >= MAX_LOGICAL_DRIVES)
+			continue;
+		raid = MR_LdRaidGet(ld, map);
+		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
+			for (span = 0; span < raid->spanDepth; span++) {
+				if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+					block_span_info.noElements <
+					element + 1)
+					continue;
+				span_set = &(ldSpanInfo[ld].span_set[element]);
+				quad = &map->raidMap.ldSpanMap[ld].
+					spanBlock[span].block_span_info.
+					quad[element];
+
+				span_set->diff = quad->diff;
+
+				for (count = 0, span_row_width = 0;
+					count < raid->spanDepth; count++) {
+					if (map->raidMap.ldSpanMap[ld].
+						spanBlock[count].
+						block_span_info.
+						noElements >= element + 1) {
+						span_set->strip_offset[count] =
+							span_row_width;
+						span_row_width +=
+							MR_LdSpanPtrGet
+							(ld, count, map)->spanRowDataSize;
+						printk(KERN_INFO "megasas: "
+							"span %x rowDataSize %x\n",
+							count, MR_LdSpanPtrGet
+							(ld, count, map)->spanRowDataSize);
+					}
+				}
+
+				span_set->span_row_data_width = span_row_width;
+				span_row = mega_div64_32(((quad->logEnd -
+					quad->logStart) + quad->diff),
+					quad->diff);
+
+				if (element == 0) {
+					span_set->log_start_lba = 0;
+					span_set->log_end_lba =
+						((span_row << raid->stripeShift)
+						* span_row_width) - 1;
+
+					span_set->span_row_start = 0;
+					span_set->span_row_end = span_row - 1;
+
+					span_set->data_strip_start = 0;
+					span_set->data_strip_end =
+						(span_row * span_row_width) - 1;
+
+					span_set->data_row_start = 0;
+					span_set->data_row_end =
+						(span_row * quad->diff) - 1;
+				} else {
+					span_set_prev = &(ldSpanInfo[ld].
+							span_set[element - 1]);
+					span_set->log_start_lba =
+						span_set_prev->log_end_lba + 1;
+					span_set->log_end_lba =
+						span_set->log_start_lba +
+						((span_row << raid->stripeShift)
+						* span_row_width) - 1;
+
+					span_set->span_row_start =
+						span_set_prev->span_row_end + 1;
+					span_set->span_row_end =
+					span_set->span_row_start + span_row - 1;
+
+					span_set->data_strip_start =
+					span_set_prev->data_strip_end + 1;
+					span_set->data_strip_end =
+						span_set->data_strip_start +
+						(span_row * span_row_width) - 1;
+
+					span_set->data_row_start =
+						span_set_prev->data_row_end + 1;
+					span_set->data_row_end =
+						span_set->data_row_start +
+						(span_row * quad->diff) - 1;
+				}
+				break;
+			}
+			if (span == raid->spanDepth)
+				break;
+		}
+	}
+#if SPAN_DEBUG
+	getSpanInfo(map, ldSpanInfo);
+#endif
+
+}
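
A worked example of the element-0 range arithmetic above, with invented values:

	/*
	 * Assume quad: logStart = 0, logEnd = 99, diff = 1 -> span_row = 100;
	 * span_row_data_width = 2, stripeShift = 3 (8-block strips).
	 *
	 *   log_end_lba    = (100 << 3) * 2 - 1 = 1599
	 *   span_row_end   = 100 - 1            = 99
	 *   data_strip_end = 100 * 2 - 1        = 199
	 *   data_row_end   = 100 * 1 - 1        = 99
	 *
	 * Element 1, if present, starts each range one past these values.
	 */
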
+
 void
 mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
 			      struct LD_LOAD_BALANCE_INFO *lbInfo)
@@ -503,8 +1196,9 @@ u8 megasas_get_best_arm(struct LD_LOAD_BALANCE_INFO *lbInfo, u8 arm, u64 block,
 	diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
 	bestArm = (diff0 <= diff1 ? 0 : 1);
 
-	if ((bestArm == arm && pend0 > pend1 + 16)  ||
-	    (bestArm != arm && pend1 > pend0 + 16))
+	/* Make balance count 4 instead of 16 to keep driver in sync with firmware */
+	if ((bestArm == arm && pend0 > pend1 + 4)  ||
+	    (bestArm != arm && pend1 > pend0 + 4))
 		bestArm ^= 1;
 
 	/* Update the last accessed block on the correct pd */
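
An example of the heuristic above with the new threshold: block distance prefers arm 0 (diff0 <= diff1), but if arm 0 was also the last arm used and has pend0 = 9 outstanding commands vs pend1 = 4, then 9 > 4 + 4 flips the choice to arm 1. With the old threshold of 16 that flip would have needed pend0 > 20, tolerating a much deeper queue skew.
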

+ 127 - 21
drivers/scsi/megaraid/megaraid_sas_fusion.c

@@ -86,8 +86,6 @@ u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
 void
 megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
 
-u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map,
-		      struct LD_LOAD_BALANCE_INFO *lbInfo);
 u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
 			   struct IO_REQUEST_INFO *in_info);
 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
@@ -101,8 +99,10 @@ extern int resetwaittime;
  * @instance:			Adapter soft state
  */
 void
-megasas_enable_intr_fusion(struct megasas_register_set __iomem *regs)
+megasas_enable_intr_fusion(struct megasas_instance *instance)
 {
+	struct megasas_register_set __iomem *regs;
+	regs = instance->reg_set;
 	/* For Thunderbolt/Invader also clear intr on enable */
 	writel(~0, &regs->outbound_intr_status);
 	readl(&regs->outbound_intr_status);
@@ -111,6 +111,7 @@ megasas_enable_intr_fusion(struct megasas_register_set __iomem *regs)
 
 	/* Dummy readl to force pci flush */
 	readl(&regs->outbound_intr_mask);
+	instance->mask_interrupts = 0;
 }
 
 /**
@@ -118,10 +119,13 @@ megasas_enable_intr_fusion(struct megasas_register_set __iomem *regs)
  * @instance:			 Adapter soft state
  */
 void
-megasas_disable_intr_fusion(struct megasas_register_set __iomem *regs)
+megasas_disable_intr_fusion(struct megasas_instance *instance)
 {
 	u32 mask = 0xFFFFFFFF;
 	u32 status;
+	struct megasas_register_set __iomem *regs;
+	regs = instance->reg_set;
+	instance->mask_interrupts = 1;
 
 	writel(mask, &regs->outbound_intr_mask);
 	/* Dummy readl to force pci flush */
@@ -643,6 +647,12 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
 	init_frame->cmd	= MFI_CMD_INIT;
 	init_frame->cmd_status = 0xFF;
 
+	/* driver supports extended MSI-X */
+	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+		(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+		init_frame->driver_operations.
+			mfi_capabilities.support_additional_msix = 1;
+
 	init_frame->queue_info_new_phys_addr_lo = ioc_init_handle;
 	init_frame->data_xfer_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
 
@@ -657,7 +667,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
 	/*
 	 * disable the intr before firing the init frame
 	 */
-	instance->instancet->disable_intr(instance->reg_set);
+	instance->instancet->disable_intr(instance);
 
 	for (i = 0; i < (10 * 1000); i += 20) {
 		if (readl(&instance->reg_set->doorbell) & 1)
@@ -770,8 +780,7 @@ megasas_get_map_info(struct megasas_instance *instance)
 
 	fusion->fast_path_io = 0;
 	if (!megasas_get_ld_map_info(instance)) {
-		if (MR_ValidateMapInfo(fusion->ld_map[(instance->map_id & 1)],
-				       fusion->load_balance_info)) {
+		if (MR_ValidateMapInfo(instance)) {
 			fusion->fast_path_io = 1;
 			return 0;
 		}
@@ -864,6 +873,66 @@ megasas_sync_map_info(struct megasas_instance *instance)
 	return ret;
 }
 
+/*
+ * megasas_display_intel_branding - Display branding string
+ * @instance: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+megasas_display_intel_branding(struct megasas_instance *instance)
+{
+	if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
+		return;
+
+	switch (instance->pdev->device) {
+	case PCI_DEVICE_ID_LSI_INVADER:
+		switch (instance->pdev->subsystem_device) {
+		case MEGARAID_INTEL_RS3DC080_SSDID:
+			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+				instance->host->host_no,
+				MEGARAID_INTEL_RS3DC080_BRANDING);
+			break;
+		case MEGARAID_INTEL_RS3DC040_SSDID:
+			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+				instance->host->host_no,
+				MEGARAID_INTEL_RS3DC040_BRANDING);
+			break;
+		case MEGARAID_INTEL_RS3SC008_SSDID:
+			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+				instance->host->host_no,
+				MEGARAID_INTEL_RS3SC008_BRANDING);
+			break;
+		case MEGARAID_INTEL_RS3MC044_SSDID:
+			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+				instance->host->host_no,
+				MEGARAID_INTEL_RS3MC044_BRANDING);
+			break;
+		default:
+			break;
+		}
+		break;
+	case PCI_DEVICE_ID_LSI_FURY:
+		switch (instance->pdev->subsystem_device) {
+		case MEGARAID_INTEL_RS3WC080_SSDID:
+			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+				instance->host->host_no,
+				MEGARAID_INTEL_RS3WC080_BRANDING);
+			break;
+		case MEGARAID_INTEL_RS3WC040_SSDID:
+			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+				instance->host->host_no,
+				MEGARAID_INTEL_RS3WC040_BRANDING);
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
 /**
  * megasas_init_adapter_fusion -	Initializes the FW
  * @instance:		Adapter soft state
@@ -944,6 +1013,8 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
 	if (megasas_ioc_init_fusion(instance))
 		goto fail_ioc_init;
 
+	megasas_display_intel_branding(instance);
+
 	instance->flag_ieee = 1;
 
 	fusion->map_sz =  sizeof(struct MR_FW_RAID_MAP) +
@@ -1071,7 +1142,8 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
 
 	fusion = instance->ctrl_context;
 
-	if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) {
+	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+		(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
 		struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
 		sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
 		sgl_ptr_end->Flags = 0;
@@ -1088,7 +1160,8 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
 		sgl_ptr->Length = sg_dma_len(os_sgl);
 		sgl_ptr->Address = sg_dma_address(os_sgl);
 		sgl_ptr->Flags = 0;
-		if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) {
+		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+			(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
 			if (i == sge_count - 1)
 				sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
 		}
@@ -1100,8 +1173,10 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
 		    (sge_count > fusion->max_sge_in_main_msg)) {
 
 			struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
-			if (instance->pdev->device ==
-			    PCI_DEVICE_ID_LSI_INVADER) {
+			if ((instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_INVADER) ||
+				(instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_FURY)) {
 				if ((cmd->io_request->IoFlags &
 				MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
 				MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
@@ -1117,8 +1192,10 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
 			sg_chain = sgl_ptr;
 			/* Prepare chain element */
 			sg_chain->NextChainOffset = 0;
-			if (instance->pdev->device ==
-			    PCI_DEVICE_ID_LSI_INVADER)
+			if ((instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_INVADER) ||
+				(instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_FURY))
 				sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
 			else
 				sg_chain->Flags =
@@ -1434,7 +1511,8 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
 		cmd->request_desc->SCSIIO.RequestFlags =
 			(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
 			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-		if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) {
+		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+			(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
 			if (io_request->RaidContext.regLockFlags ==
 			    REGION_TYPE_UNUSED)
 				cmd->request_desc->SCSIIO.RequestFlags =
@@ -1465,7 +1543,8 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
 		cmd->request_desc->SCSIIO.RequestFlags =
 			(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
 			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-		if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) {
+		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+			(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
 			if (io_request->RaidContext.regLockFlags ==
 			    REGION_TYPE_UNUSED)
 				cmd->request_desc->SCSIIO.RequestFlags =
@@ -1522,11 +1601,27 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
 		io_request->RaidContext.RAIDFlags =
 			MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
 			MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
+		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+			(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+			io_request->IoFlags |=
+				MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
 		cmd->request_desc->SCSIIO.RequestFlags =
 			(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
 			 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
 		cmd->request_desc->SCSIIO.DevHandle =
 			local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+		/*
+		 * If the command is for the tape device, set the
+		 * FP timeout to the os layer timeout value.
+		 */
+		if (scmd->device->type == TYPE_TAPE) {
+			if ((scmd->request->timeout / HZ) > 0xFFFF)
+				io_request->RaidContext.timeoutValue =
+					0xFFFF;
+			else
+				io_request->RaidContext.timeoutValue =
+					scmd->request->timeout / HZ;
+		}
 	} else {
 		io_request->Function  = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
 		io_request->DevHandle = device_id;
@@ -1825,8 +1920,15 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
 		return IRQ_NONE;
 
 	wmb();
-	writel((MSIxIndex << 24) | fusion->last_reply_idx[MSIxIndex],
-	       &instance->reg_set->reply_post_host_index);
+	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+		(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+		writel(((MSIxIndex & 0x7) << 24) |
+			fusion->last_reply_idx[MSIxIndex],
+			instance->reply_post_host_index_addr[MSIxIndex/8]);
+	else
+		writel((MSIxIndex << 24) |
+			fusion->last_reply_idx[MSIxIndex],
+			instance->reply_post_host_index_addr[0]);
 	megasas_check_and_restore_queue_depth(instance);
 	return IRQ_HANDLED;
 }
@@ -1868,6 +1970,9 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
 	struct megasas_instance *instance = irq_context->instance;
 	u32 mfiStatus, fw_state;
 
+	if (instance->mask_interrupts)
+		return IRQ_NONE;
+
 	if (!instance->msix_vectors) {
 		mfiStatus = instance->instancet->clear_intr(instance->reg_set);
 		if (!mfiStatus)
@@ -1929,7 +2034,8 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
 	fusion = instance->ctrl_context;
 	io_req = cmd->io_request;
 
-	if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) {
+	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+		(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
 		struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
 			(struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
 		sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
@@ -2132,7 +2238,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
 	mutex_lock(&instance->reset_mutex);
 	set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
 	instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
-	instance->instancet->disable_intr(instance->reg_set);
+	instance->instancet->disable_intr(instance);
 	msleep(1000);
 
 	/* First try waiting for commands to complete */
@@ -2256,7 +2362,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
 
 			clear_bit(MEGASAS_FUSION_IN_RESET,
 				  &instance->reset_flags);
-			instance->instancet->enable_intr(instance->reg_set);
+			instance->instancet->enable_intr(instance);
 			instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
 
 			/* Re-fire management commands */
@@ -2318,7 +2424,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
 		retval = FAILED;
 	} else {
 		clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
-		instance->instancet->enable_intr(instance->reg_set);
+		instance->instancet->enable_intr(instance);
 		instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
 	}
 out:

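Editorial note: the Invader/Fury branch in complete_cmd_fusion() above exists because these controllers expose more reply queues than the legacy limit (MAX_MSIX_QUEUES_FUSION grows from 16 to 128 in the header below), so the reply index must be posted to the supplemental register serving that vector's group of eight, with bits 26:24 selecting the vector within the group. A minimal sketch of the addressing scheme, assuming reply_post_host_index_addr[] was populated from MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET at init time; the helper itself is illustrative, not driver code:

/* Illustrative: post a new reply index for MSI-X vector `msix`. */
static inline void fusion_post_reply_index(void __iomem *const *bank,
					   unsigned int msix, u32 reply_idx)
{
	/* One supplemental register per group of 8 vectors; bits 26:24
	 * carry the vector number within its group. */
	writel(((msix & 0x7) << 24) | reply_idx, bank[msix / 8]);
}
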
+ 33 - 2
drivers/scsi/megaraid/megaraid_sas_fusion.h

@@ -43,7 +43,7 @@
 #define HOST_DIAG_WRITE_ENABLE			    0x80
 #define HOST_DIAG_RESET_ADAPTER			    0x4
 #define MEGASAS_FUSION_MAX_RESET_TRIES		    3
-#define MAX_MSIX_QUEUES_FUSION			    16
+#define MAX_MSIX_QUEUES_FUSION			    128
 
 /* Invader defines */
 #define MPI2_TYPE_CUDA				    0x2
@@ -62,6 +62,9 @@
 #define MEGASAS_RD_WR_PROTECT_CHECK_ALL		    0x20
 #define MEGASAS_RD_WR_PROTECT_CHECK_NONE	    0x60
 
+#define MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET   (0x0000030C)
+#define MPI2_REPLY_POST_HOST_INDEX_OFFSET	(0x0000006C)
+
 /*
  * Raid context flags
  */
@@ -460,6 +463,7 @@ struct MPI2_IOC_INIT_REQUEST {
 /* mrpriv defines */
 #define MR_PD_INVALID 0xFFFF
 #define MAX_SPAN_DEPTH 8
+#define MAX_QUAD_DEPTH	MAX_SPAN_DEPTH
 #define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
 #define MAX_ROW_SIZE 32
 #define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
@@ -501,7 +505,9 @@ struct MR_LD_SPAN {
 	u64      startBlk;
 	u64      numBlks;
 	u16      arrayRef;
-	u8       reserved[6];
+	u8       spanRowSize;
+	u8       spanRowDataSize;
+	u8       reserved[4];
 };
 
 struct MR_SPAN_BLOCK_INFO {
@@ -587,6 +593,10 @@ struct IO_REQUEST_INFO {
 	u16 devHandle;
 	u64 pdBlock;
 	u8 fpOkForIo;
+	u8 IoforUnevenSpan;
+	u8 start_span;
+	u8 reserved;
+	u64 start_row;
 };
 
 struct MR_LD_TARGET_SYNC {
@@ -648,6 +658,26 @@ struct LD_LOAD_BALANCE_INFO {
 	u64     last_accessed_block[2];
 };
 
+/* SPAN_SET is info calculated from the span info in the RAID map, per LD */
+typedef struct _LD_SPAN_SET {
+	u64  log_start_lba;
+	u64  log_end_lba;
+	u64  span_row_start;
+	u64  span_row_end;
+	u64  data_strip_start;
+	u64  data_strip_end;
+	u64  data_row_start;
+	u64  data_row_end;
+	u8   strip_offset[MAX_SPAN_DEPTH];
+	u32    span_row_data_width;
+	u32    diff;
+	u32    reserved[2];
+} LD_SPAN_SET, *PLD_SPAN_SET;
+
+typedef struct LOG_BLOCK_SPAN_INFO {
+	LD_SPAN_SET  span_set[MAX_SPAN_DEPTH];
+} LD_SPAN_INFO, *PLD_SPAN_INFO;
+
 struct MR_FW_RAID_MAP_ALL {
 	struct MR_FW_RAID_MAP raidMap;
 	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
@@ -692,6 +722,7 @@ struct fusion_context {
 	u32 map_sz;
 	u8 fast_path_io;
 	struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES];
+	LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES];
 };
 
 union desc_value {

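Editorial note: the new LD_SPAN_SET/LD_SPAN_INFO bookkeeping gives the driver a per-LD table it can search instead of re-deriving uneven-span geometry from the raw RAID map on every I/O. A hedged sketch of such a lookup, using only the fields declared above; the helper and its empty-slot heuristic are illustrative, not the driver's code:

/* Illustrative only: return the index of the span set whose logical
 * LBA window contains `lba`, or -1 if no populated set covers it. */
static int find_span_set(const LD_SPAN_INFO *info, u64 lba)
{
	int i;

	for (i = 0; i < MAX_SPAN_DEPTH; i++) {
		const LD_SPAN_SET *set = &info->span_set[i];

		if (!set->span_row_data_width)
			continue;	/* treat zero width as an unused slot */
		if (lba >= set->log_start_lba && lba <= set->log_end_lba)
			return i;
	}
	return -1;
}
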
+ 4 - 2
drivers/scsi/mpt2sas/mpi/mpi2.h

@@ -8,7 +8,7 @@
  *                  scatter/gather formats.
  *  Creation Date:  June 21, 2006
  *
- *  mpi2.h Version:  02.00.25
+ *  mpi2.h Version:  02.00.27
  *
  *  Version History
  *  ---------------
@@ -75,6 +75,8 @@
  *  02-06-12  02.00.24  Bumped MPI2_HEADER_VERSION_UNIT.
  *  03-29-12  02.00.25  Bumped MPI2_HEADER_VERSION_UNIT.
  *                      Added Hard Reset delay timings.
+ *  07-10-12  02.00.26  Bumped MPI2_HEADER_VERSION_UNIT.
+ *  07-26-12  02.00.27  Bumped MPI2_HEADER_VERSION_UNIT.
  *  --------------------------------------------------------------------------
  */
 
@@ -100,7 +102,7 @@
 #define MPI2_VERSION_02_00                  (0x0200)
 
 /* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x19)
+#define MPI2_HEADER_VERSION_UNIT            (0x1B)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)

+ 3 - 1
drivers/scsi/mpt2sas/mpi/mpi2_init.h

@@ -6,7 +6,7 @@
  *          Title:  MPI SCSI initiator mode messages and structures
  *  Creation Date:  June 23, 2006
  *
- *    mpi2_init.h Version:  02.00.13
+ *    mpi2_init.h Version:  02.00.14
  *
  *  Version History
  *  ---------------
@@ -36,6 +36,7 @@
  *  11-10-10  02.00.11  Added MPI2_SCSIIO_NUM_SGLOFFSETS define.
  *  02-06-12  02.00.13  Added alternate defines for Task Priority / Command
  *                      Priority to match SAM-4.
+ *  07-10-12  02.00.14  Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION.
  *  --------------------------------------------------------------------------
  */
 
@@ -189,6 +190,7 @@ typedef struct _MPI2_SCSI_IO_REQUEST
 #define MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT     (26)
 
 #define MPI2_SCSIIO_CONTROL_DATADIRECTION_MASK  (0x03000000)
+#define MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION (24)
 #define MPI2_SCSIIO_CONTROL_NODATATRANSFER      (0x00000000)
 #define MPI2_SCSIIO_CONTROL_WRITE               (0x01000000)
 #define MPI2_SCSIIO_CONTROL_READ                (0x02000000)

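Editorial note: the new MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION pairs with the existing mask, so the two-bit direction field can be extracted numerically rather than compared against the full 32-bit constants. A small sketch; the helper name is made up, while the typedef and defines come from this header:

/* Sketch: decode the direction field (0 none, 1 write, 2 read). */
static u32 mpi2_data_direction(const Mpi2SCSIIORequest_t *req)
{
	return (le32_to_cpu(req->Control) &
		MPI2_SCSIIO_CONTROL_DATADIRECTION_MASK) >>
		MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION;
}
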
+ 6 - 2
drivers/scsi/mpt2sas/mpi/mpi2_ioc.h

@@ -6,7 +6,7 @@
  *          Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
  *  Creation Date:  October 11, 2006
  *
- *  mpi2_ioc.h Version:  02.00.21
+ *  mpi2_ioc.h Version:  02.00.22
  *
  *  Version History
  *  ---------------
@@ -118,6 +118,9 @@
  *                      MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
  *                      Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
  *  03-29-12  02.00.21  Added a product specific range to event values.
+ *  07-26-12  02.00.22  Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE.
+ *                      Added ElapsedSeconds field to
+ *                      MPI2_EVENT_DATA_IR_OPERATION_STATUS.
  *  --------------------------------------------------------------------------
  */
 
@@ -284,6 +287,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY
 #define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT              (0)
 
 /* IOCExceptions */
+#define MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE     (0x0200)
 #define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX      (0x0100)
 
 #define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_MASK              (0x00E0)
@@ -624,7 +628,7 @@ typedef struct _MPI2_EVENT_DATA_IR_OPERATION_STATUS
     U8                      RAIDOperation;              /* 0x04 */
     U8                      PercentComplete;            /* 0x05 */
     U16                     Reserved2;                  /* 0x06 */
-    U32                     Resereved3;                 /* 0x08 */
+    U32                     ElapsedSeconds;             /* 0x08 */
 } MPI2_EVENT_DATA_IR_OPERATION_STATUS,
   MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_OPERATION_STATUS,
   Mpi2EventDataIrOperationStatus_t,

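Editorial note: the new IOCExceptions bit lets a driver notice that the IOC came up with only part of its memory usable instead of failing silently. A hedged sketch of the check after the IOC FACTS exchange, assuming `facts` points at a Mpi2IOCFactsReply_t already fetched by the driver:

/* Sketch: flag a degraded IOC after IOC FACTS. */
u16 exceptions = le16_to_cpu(facts->IOCExceptions);

if (exceptions & MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE)
	pr_warn("IOC reports a partial memory failure; "
		"some capabilities may be unavailable\n");
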
+ 7 - 2
drivers/scsi/mpt2sas/mpi/mpi2_raid.h

@@ -6,7 +6,7 @@
  *          Title:  MPI Integrated RAID messages and structures
  *  Creation Date:  April 26, 2007
  *
- *    mpi2_raid.h Version:  02.00.08
+ *    mpi2_raid.h Version:  02.00.09
  *
  *  Version History
  *  ---------------
@@ -27,6 +27,8 @@
  *                      related structures and defines.
  *                      Added product-specific range to RAID Action values.
  *  02-06-12  02.00.08  Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN.
+ *  07-26-12  02.00.09  Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR.
+ *                      Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define.
  *  --------------------------------------------------------------------------
  */
 
@@ -276,10 +278,13 @@ typedef struct _MPI2_RAID_VOL_INDICATOR
     U64                     TotalBlocks;                    /* 0x00 */
     U64                     BlocksRemaining;                /* 0x08 */
     U32                     Flags;                          /* 0x10 */
+    U32                     ElapsedSeconds;                 /* 0x14 */
 } MPI2_RAID_VOL_INDICATOR, MPI2_POINTER PTR_MPI2_RAID_VOL_INDICATOR,
   Mpi2RaidVolIndicator_t, MPI2_POINTER pMpi2RaidVolIndicator_t;
 
 /* defines for RAID Volume Indicator Flags field */
+#define MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID   (0x80000000)
+
 #define MPI2_RAID_VOL_FLAGS_OP_MASK                 (0x0000000F)
 #define MPI2_RAID_VOL_FLAGS_OP_BACKGROUND_INIT      (0x00000000)
 #define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001)
@@ -320,7 +325,7 @@ MPI2_POINTER pMpi2RaidCompatibilityResultStruct_t;
 /* RAID Action Reply ActionData union */
 typedef union _MPI2_RAID_ACTION_REPLY_DATA
 {
-	U32                                     Word[5];
+	U32                                     Word[6];
 	MPI2_RAID_VOL_INDICATOR                 RaidVolumeIndicator;
 	U16                                     VolDevHandle;
 	U8                                      VolumeState;

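Editorial note: ElapsedSeconds widens MPI2_RAID_VOL_INDICATOR, which is why the reply union's Word[] grows from 5 to 6 above; older firmware never fills the field, so the new flag gates its use. A sketch, assuming `vi` points at a Mpi2RaidVolIndicator_t taken from a RAID ACTION reply:

/* Sketch: only report ElapsedSeconds when firmware marks it valid. */
if (le32_to_cpu(vi->Flags) & MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID)
	pr_info("RAID operation running for %u seconds\n",
		le32_to_cpu(vi->ElapsedSeconds));
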
+ 6 - 4
drivers/scsi/mpt2sas/mpi/mpi2_tool.h

@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2010 LSI Corporation.
+ *  Copyright (c) 2000-2012 LSI Corporation.
  *
  *
  *           Name:  mpi2_tool.h
  *          Title:  MPI diagnostic tool structures and definitions
  *  Creation Date:  March 26, 2007
  *
- *    mpi2_tool.h Version:  02.00.07
+ *    mpi2_tool.h Version:  02.00.10
  *
  *  Version History
  *  ---------------
@@ -27,6 +27,8 @@
  *                      Post Request.
  *  05-25-11  02.00.07  Added Flags field and related defines to
  *                      MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST.
+ *  07-26-12  02.00.10  Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that
+ *			it uses MPI Chain SGE as well as MPI Simple SGE.
  *  --------------------------------------------------------------------------
  */
 
@@ -270,7 +272,7 @@ typedef struct _MPI2_TOOLBOX_BEACON_REQUEST
 
 #define MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH    (0x5C)
 
-/* Toolbox Diagnostic CLI Tool request message */
+/* MPI v2.0 Toolbox Diagnostic CLI Tool request message */
 typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST {
     U8                      Tool;                       /* 0x00 */
     U8                      Reserved1;                  /* 0x01 */
@@ -288,7 +290,7 @@ typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST {
     U32                     DataLength;                 /* 0x10 */
     U8                      DiagnosticCliCommand
 		[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];     /* 0x14 */
-    MPI2_SGE_SIMPLE_UNION   SGL;                        /* 0x70 */
+    MPI2_MPI_SGE_IO_UNION   SGL;                        /* 0x70 */
 } MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
   MPI2_POINTER PTR_MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
   Mpi2ToolboxDiagnosticCliRequest_t,

+ 30 - 29
drivers/scsi/mpt2sas/mpt2sas_base.c

@@ -80,10 +80,6 @@ static int msix_disable = -1;
 module_param(msix_disable, int, 0);
 MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
 
-static int missing_delay[2] = {-1, -1};
-module_param_array(missing_delay, int, NULL, 0);
-MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
-
 static int mpt2sas_fwfault_debug;
 MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
 	"and halt firmware - (default=0)");
@@ -2199,7 +2195,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
 }
 
 /**
- * _base_update_missing_delay - change the missing delay timers
+ * mpt2sas_base_update_missing_delay - change the missing delay timers
  * @ioc: per adapter object
  * @device_missing_delay: amount of time till device is reported missing
  * @io_missing_delay: interval IO is returned when there is a missing device
@@ -2210,8 +2206,8 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
  * delay, as well as the io missing delay. This should be called at driver
  * load time.
  */
-static void
-_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
+void
+mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
 	u16 device_missing_delay, u8 io_missing_delay)
 {
 	u16 dmd, dmd_new, dmd_orignal;
@@ -2507,23 +2503,25 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
 	/* reply free queue sizing - taking into account for 64 FW events */
 	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
 
+	/* calculate reply descriptor post queue depth */
+	ioc->reply_post_queue_depth = ioc->hba_queue_depth +
+					ioc->reply_free_queue_depth +  1;
 	/* align the reply post queue on the next 16 count boundary */
-	if (!ioc->reply_free_queue_depth % 16)
-		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
-	else
-		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
-				32 - (ioc->reply_free_queue_depth % 16);
+	if (ioc->reply_post_queue_depth % 16)
+		ioc->reply_post_queue_depth += 16 -
+			(ioc->reply_post_queue_depth % 16);
+
 	if (ioc->reply_post_queue_depth >
 	    facts->MaxReplyDescriptorPostQueueDepth) {
-		ioc->reply_post_queue_depth = min_t(u16,
-		    (facts->MaxReplyDescriptorPostQueueDepth -
-		    (facts->MaxReplyDescriptorPostQueueDepth % 16)),
-		    (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
-		ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
-		ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
+		ioc->reply_post_queue_depth =
+			facts->MaxReplyDescriptorPostQueueDepth -
+		    (facts->MaxReplyDescriptorPostQueueDepth % 16);
+		ioc->hba_queue_depth =
+			((ioc->reply_post_queue_depth - 64) / 2) - 1;
+		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
 	}
 
-
 	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
 	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
 	    "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
@@ -3940,11 +3938,15 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
 	     &ioc->chip->HostDiagnostic);
 
-	/* don't access any registers for 50 milliseconds */
-	msleep(50);
+	/* This delay allows the chip PCIe hardware time to finish reset tasks */
+	if (sleep_flag == CAN_SLEEP)
+		msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
+	else
+		mdelay(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
 
-	/* 300 second max wait */
-	for (count = 0; count < 3000000 ; count++) {
+	/* Approximately 300 second max wait */
+	for (count = 0; count < (300000000 /
+	    MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
 
 		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
 
@@ -3953,11 +3955,13 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
 			break;
 
-		/* wait 100 msec */
+		/* Wait to pass the second read delay window */
 		if (sleep_flag == CAN_SLEEP)
-			msleep(1);
+			msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
+			       /1000);
 		else
-			mdelay(1);
+			mdelay(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
+			       /1000);
 	}
 
 	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
@@ -4407,9 +4411,6 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
 	if (r)
 		goto out_free_resources;
 
-	if (missing_delay[0] != -1 && missing_delay[1] != -1)
-		_base_update_missing_delay(ioc, missing_delay[0],
-		    missing_delay[1]);
 	ioc->non_operational_loop = 0;
 
 	return 0;

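Editorial note: the reworked sizing in _base_allocate_memory_pools() first derives the reply descriptor post queue depth from both producers (HBA queue plus reply free queue, plus one), rounds up to a 16-entry boundary, and only then clamps against the firmware limit, shrinking the other two queues to keep the invariant. A standalone worked example of the same arithmetic; all values are illustrative:

#include <stdio.h>

int main(void)
{
	unsigned hba = 500;			/* example hba_queue_depth */
	unsigned fw_max = 1024;			/* example firmware limit */
	unsigned reply_free = hba + 64;		/* 564: +64 for FW events */
	unsigned post = hba + reply_free + 1;	/* 1065 */

	if (post % 16)
		post += 16 - (post % 16);	/* 1072: 16-aligned */

	if (post > fw_max) {
		post = fw_max - (fw_max % 16);	/* 1024 */
		hba = ((post - 64) / 2) - 1;	/* 479 */
		reply_free = hba + 64;		/* 543 */
	}
	printf("post=%u hba=%u free=%u\n", post, hba, reply_free);
	return 0;
}
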
+ 5 - 2
drivers/scsi/mpt2sas/mpt2sas_base.h

@@ -69,8 +69,8 @@
 #define MPT2SAS_DRIVER_NAME		"mpt2sas"
 #define MPT2SAS_AUTHOR	"LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION	"LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION		"14.100.00.00"
-#define MPT2SAS_MAJOR_VERSION		14
+#define MPT2SAS_DRIVER_VERSION		"15.100.00.00"
+#define MPT2SAS_MAJOR_VERSION		15
 #define MPT2SAS_MINOR_VERSION		100
 #define MPT2SAS_BUILD_VERSION		00
 #define MPT2SAS_RELEASE_VERSION		00
@@ -1055,6 +1055,9 @@ void mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_ty
 
 void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc);
 
+void mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
+	u16 device_missing_delay, u8 io_missing_delay);
+
 int mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc);
 
 /* scsih shared API */

+ 129 - 14
drivers/scsi/mpt2sas/mpt2sas_scsih.c

@@ -101,6 +101,10 @@ static ushort max_sectors = 0xFFFF;
 module_param(max_sectors, ushort, 0);
 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767  default=32767");
 
+static int missing_delay[2] = {-1, -1};
+module_param_array(missing_delay, int, NULL, 0);
+MODULE_PARM_DESC(missing_delay, " device missing delay, io missing delay");
+
 /* scsi-mid layer global parmeter is max_report_luns, which is 511 */
 #define MPT2SAS_MAX_LUN (16895)
 static int max_lun = MPT2SAS_MAX_LUN;
@@ -3994,11 +3998,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
 			else
 				mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
 		} else
-/* MPI Revision I (UNIT = 0xA) - removed MPI2_SCSIIO_CONTROL_UNTAGGED */
-/*			mpi_control |= MPI2_SCSIIO_CONTROL_UNTAGGED;
- */
-			mpi_control |= (0x500);
-
+			mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
 	} else
 		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
 	/* Make sure Device is not raid volume.
@@ -5815,9 +5815,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT2SAS_ADAPTER *ioc,
 	u8 task_abort_retries;
 
 	mutex_lock(&ioc->tm_cmds.mutex);
-	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: phy number(%d), "
-	    "width(%d)\n", ioc->name, __func__, event_data->PhyNum,
-	     event_data->PortWidth));
+	pr_info(MPT2SAS_FMT
+		"%s: enter: phy number(%d), width(%d)\n",
+		ioc->name, __func__, event_data->PhyNum,
+		event_data->PortWidth);
 
 	_scsih_block_io_all_device(ioc);
 
@@ -7093,12 +7094,15 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
 	struct _sas_device *sas_device;
 	struct _sas_node *expander_device;
 	static struct _raid_device *raid_device;
+	u8 retry_count;
 	unsigned long flags;
 
 	printk(MPT2SAS_INFO_FMT "scan devices: start\n", ioc->name);
 
 	_scsih_sas_host_refresh(ioc);
 
+	printk(MPT2SAS_INFO_FMT "\tscan devices: expanders start\n",
+		ioc->name);
 	/* expanders */
 	handle = 0xFFFF;
 	while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
@@ -7107,6 +7111,13 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
 		    MPI2_IOCSTATUS_MASK;
 		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
 			break;
+		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+			printk(MPT2SAS_INFO_FMT "\tbreak from expander scan: "
+				"ioc_status(0x%04x), loginfo(0x%08x)\n",
+				ioc->name, ioc_status,
+				le32_to_cpu(mpi_reply.IOCLogInfo));
+			break;
+		}
 		handle = le16_to_cpu(expander_pg0.DevHandle);
 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
 		expander_device = mpt2sas_scsih_expander_find_by_sas_address(
@@ -7115,13 +7126,26 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
 		if (expander_device)
 			_scsih_refresh_expander_links(ioc, expander_device,
 			    handle);
-		else
+		else {
+			printk(MPT2SAS_INFO_FMT "\tBEFORE adding expander: "
+				"handle (0x%04x), sas_addr(0x%016llx)\n",
+				ioc->name, handle, (unsigned long long)
+				le64_to_cpu(expander_pg0.SASAddress));
 			_scsih_expander_add(ioc, handle);
+			printk(MPT2SAS_INFO_FMT "\tAFTER adding expander: "
+				"handle (0x%04x), sas_addr(0x%016llx)\n",
+				ioc->name, handle, (unsigned long long)
+				le64_to_cpu(expander_pg0.SASAddress));
+		}
 	}
 
+	printk(MPT2SAS_INFO_FMT "\tscan devices: expanders complete\n",
+		ioc->name);
+
 	if (!ioc->ir_firmware)
 		goto skip_to_sas;
 
+	printk(MPT2SAS_INFO_FMT "\tscan devices: phys disk start\n", ioc->name);
 	/* phys disk */
 	phys_disk_num = 0xFF;
 	while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
@@ -7131,6 +7155,13 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
 		    MPI2_IOCSTATUS_MASK;
 		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
 			break;
+		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+			printk(MPT2SAS_INFO_FMT "\tbreak from phys disk scan: "
+				"ioc_status(0x%04x), loginfo(0x%08x)\n",
+				ioc->name, ioc_status,
+				le32_to_cpu(mpi_reply.IOCLogInfo));
+			break;
+		}
 		phys_disk_num = pd_pg0.PhysDiskNum;
 		handle = le16_to_cpu(pd_pg0.DevHandle);
 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
@@ -7142,17 +7173,46 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
 		    handle) != 0)
 			continue;
+		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+			MPI2_IOCSTATUS_MASK;
+		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+			printk(MPT2SAS_INFO_FMT "\tbreak from phys disk scan: "
+				"ioc_status(0x%04x), loginfo(0x%08x)\n",
+				ioc->name, ioc_status,
+				le32_to_cpu(mpi_reply.IOCLogInfo));
+			break;
+		}
 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
 		if (!_scsih_get_sas_address(ioc, parent_handle,
 		    &sas_address)) {
+			printk(MPT2SAS_INFO_FMT "\tBEFORE adding phys disk: "
+				"handle (0x%04x), sas_addr(0x%016llx)\n",
+				ioc->name, handle, (unsigned long long)
+				le64_to_cpu(sas_device_pg0.SASAddress));
 			mpt2sas_transport_update_links(ioc, sas_address,
 			    handle, sas_device_pg0.PhyNum,
 			    MPI2_SAS_NEG_LINK_RATE_1_5);
 			set_bit(handle, ioc->pd_handles);
-			_scsih_add_device(ioc, handle, 0, 1);
+			retry_count = 0;
+			/* This will retry adding the end device.
+			 * _scsih_add_device() will decide on retries and
+			 * return "1" when it should be retried.
+			 */
+			while (_scsih_add_device(ioc, handle, retry_count++,
+				1)) {
+				ssleep(1);
+			}
+			printk(MPT2SAS_INFO_FMT "\tAFTER adding phys disk: "
+				"handle (0x%04x), sas_addr(0x%016llx)\n",
+				ioc->name, handle, (unsigned long long)
+				le64_to_cpu(sas_device_pg0.SASAddress));
 		}
 	}
 
+	printk(MPT2SAS_INFO_FMT "\tscan devices: phys disk complete\n",
+		ioc->name);
+
+	printk(MPT2SAS_INFO_FMT "\tscan devices: volumes start\n", ioc->name);
 	/* volumes */
 	handle = 0xFFFF;
 	while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
@@ -7161,6 +7221,13 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
 		    MPI2_IOCSTATUS_MASK;
 		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
 			break;
+		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+			printk(MPT2SAS_INFO_FMT "\tbreak from volume scan: "
+				"ioc_status(0x%04x), loginfo(0x%08x)\n",
+				ioc->name, ioc_status,
+				le32_to_cpu(mpi_reply.IOCLogInfo));
+			break;
+		}
 		handle = le16_to_cpu(volume_pg1.DevHandle);
 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
 		raid_device = _scsih_raid_device_find_by_wwid(ioc,
@@ -7172,18 +7239,38 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
 		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
 		     sizeof(Mpi2RaidVolPage0_t)))
 			continue;
+		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+			MPI2_IOCSTATUS_MASK;
+		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+			printk(MPT2SAS_INFO_FMT "\tbreak from volume scan: "
+				"ioc_status(0x%04x), loginfo(0x%08x)\n",
+				ioc->name, ioc_status,
+				le32_to_cpu(mpi_reply.IOCLogInfo));
+			break;
+		}
 		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
 			memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
 			element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
 			element.VolDevHandle = volume_pg1.DevHandle;
+			printk(MPT2SAS_INFO_FMT "\tBEFORE adding volume: "
+				"handle (0x%04x)\n", ioc->name,
+				volume_pg1.DevHandle);
 			_scsih_sas_volume_add(ioc, &element);
+			printk(MPT2SAS_INFO_FMT "\tAFTER adding volume: "
+				"handle (0x%04x)\n", ioc->name,
+				volume_pg1.DevHandle);
 		}
 	}
 
+	printk(MPT2SAS_INFO_FMT "\tscan devices: volumes complete\n",
+		ioc->name);
+
  skip_to_sas:
 
+	printk(MPT2SAS_INFO_FMT "\tscan devices: end devices start\n",
+		ioc->name);
 	/* sas devices */
 	handle = 0xFFFF;
 	while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
@@ -7193,6 +7280,13 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
 		    MPI2_IOCSTATUS_MASK;
 		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
 			break;
+		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+			printk(MPT2SAS_INFO_FMT "\tbreak from end device scan:"
+				" ioc_status(0x%04x), loginfo(0x%08x)\n",
+				ioc->name, ioc_status,
+				le32_to_cpu(mpi_reply.IOCLogInfo));
+			break;
+		}
 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
 		if (!(_scsih_is_end_device(
 		    le32_to_cpu(sas_device_pg0.DeviceInfo))))
@@ -7205,12 +7299,31 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
 			continue;
 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
 		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
+			printk(MPT2SAS_INFO_FMT "\tBEFORE adding end device: "
+				"handle (0x%04x), sas_addr(0x%016llx)\n",
+				ioc->name, handle, (unsigned long long)
+				le64_to_cpu(sas_device_pg0.SASAddress));
 			mpt2sas_transport_update_links(ioc, sas_address, handle,
 			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
-			_scsih_add_device(ioc, handle, 0, 0);
+			retry_count = 0;
+			/* This will retry adding the end device.
+			 * _scsih_add_device() will decide on retries and
+			 * return "1" when it should be retried
+			 */
+			while (_scsih_add_device(ioc, handle, retry_count++,
+				0)) {
+				ssleep(1);
+			}
+			printk(MPT2SAS_INFO_FMT "\tAFTER adding end device: "
+				"handle (0x%04x), sas_addr(0x%016llx)\n",
+				ioc->name, handle, (unsigned long long)
+				le64_to_cpu(sas_device_pg0.SASAddress));
 		}
 	}
 
+	printk(MPT2SAS_INFO_FMT "\tscan devices: end devices complete\n",
+		ioc->name);
+
 	printk(MPT2SAS_INFO_FMT "scan devices: complete\n", ioc->name);
 }
 
@@ -7303,7 +7416,9 @@ _firmware_event_work(struct work_struct *work)
 	case MPT2SAS_PORT_ENABLE_COMPLETE:
 		ioc->start_scan = 0;
 
-
+		if (missing_delay[0] != -1 && missing_delay[1] != -1)
+			mpt2sas_base_update_missing_delay(ioc, missing_delay[0],
+				missing_delay[1]);
 
 		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "port enable: complete "
 		    "from worker thread\n", ioc->name));
@@ -8070,8 +8185,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (max_sectors != 0xFFFF) {
 		if (max_sectors < 64) {
 			shost->max_sectors = 64;
-			printk(MPT2SAS_WARN_FMT "Invalid value %d passed "\
-			    "for max_sectors, range is 64 to 32767. Assigning "\
+			printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
+			    "for max_sectors, range is 64 to 32767. Assigning "
 			    "value of 64.\n", ioc->name, max_sectors);
 		} else if (max_sectors > 32767) {
 			shost->max_sectors = 32767;

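Editorial note: moving missing_delay from mpt2sas_base into the scsih module and deferring the call to mpt2sas_base_update_missing_delay() until the MPT2SAS_PORT_ENABLE_COMPLETE event means the delays are programmed only once the IOC is fully operational. The parameter format itself is unchanged: for example (values illustrative), loading with `modprobe mpt2sas missing_delay=15,15` requests a 15 second device missing delay and a 15 second IO missing delay.
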
+ 2 - 1
drivers/scsi/mvsas/mv_sas.c

@@ -686,7 +686,8 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
 	if (ssp_hdr->frame_type != SSP_TASK) {
 		buf_cmd[9] = fburst | task->ssp_task.task_attr |
 				(task->ssp_task.task_prio << 3);
-		memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
+		memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
+		       task->ssp_task.cmd->cmd_len);
 	} else{
 		buf_cmd[10] = tmf->tmf;
 		switch (tmf->tmf) {

+ 3 - 2
drivers/scsi/pm8001/pm8001_hwi.c

@@ -3740,7 +3740,7 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 	pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 	mb();
 
-	if ((pm8001_dev->id & NCQ_ABORT_ALL_FLAG) && t)	{
+	if (pm8001_dev->id & NCQ_ABORT_ALL_FLAG) {
 		pm8001_tag_free(pm8001_ha, tag);
 		sas_free_task(t);
 		/* clear the flag */
@@ -4291,7 +4291,8 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
 		ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
 	ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
 	ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
-	memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cdb, 16);
+	memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
+	       task->ssp_task.cmd->cmd_len);
 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
 
 	/* fill in PRD (scatter/gather) table, if any */

+ 11 - 10
drivers/scsi/pm8001/pm80xx_hwi.c

@@ -3559,9 +3559,9 @@ err_out:
 
 static int check_enc_sas_cmd(struct sas_task *task)
 {
-	if ((task->ssp_task.cdb[0] == READ_10)
-		|| (task->ssp_task.cdb[0] == WRITE_10)
-		|| (task->ssp_task.cdb[0] == WRITE_VERIFY))
+	u8 cmd = task->ssp_task.cmd->cmnd[0];
+
+	if (cmd == READ_10 || cmd == WRITE_10 || cmd == WRITE_VERIFY)
 		return 1;
 	else
 		return 0;
@@ -3624,7 +3624,8 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
 		ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
 	ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
 	ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
-	memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cdb, 16);
+	memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
+		       task->ssp_task.cmd->cmd_len);
 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
 
 	/* Check if encryption is set */
@@ -3632,7 +3633,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
 		!(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) {
 		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
 			"Encryption enabled.Sending Encrypt SAS command 0x%x\n",
-			task->ssp_task.cdb[0]));
+			task->ssp_task.cmd->cmnd[0]));
 		opc = OPC_INB_SSP_INI_DIF_ENC_IO;
 		/* enable encryption. 0 for SAS 1.1 and SAS 2.0 compatible TLR*/
 		ssp_cmd.dad_dir_m_tlr =	cpu_to_le32
@@ -3666,14 +3667,14 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
 		/* XTS mode. All other fields are 0 */
 		ssp_cmd.key_cmode = 0x6 << 4;
 		/* set tweak values. Should be the start lba */
-		ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cdb[2] << 24) |
-						(task->ssp_task.cdb[3] << 16) |
-						(task->ssp_task.cdb[4] << 8) |
-						(task->ssp_task.cdb[5]));
+		ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cmd->cmnd[2] << 24) |
+						(task->ssp_task.cmd->cmnd[3] << 16) |
+						(task->ssp_task.cmd->cmnd[4] << 8) |
+						(task->ssp_task.cmd->cmnd[5]));
 	} else {
 		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
 			"Sending Normal SAS command 0x%x inb q %x\n",
-			task->ssp_task.cdb[0], inb));
+			task->ssp_task.cmd->cmnd[0], inb));
 		/* fill in PRD (scatter/gather) table, if any */
 		if (task->num_scatter > 1) {
 			pm8001_chip_make_sg(task->scatter, ccb->n_elem,

+ 4 - 4
drivers/scsi/scsi.c

@@ -1070,8 +1070,8 @@ EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
  * @opcode:	opcode for command to look up
  *
  * Uses the REPORT SUPPORTED OPERATION CODES to look up the given
- * opcode. Returns 0 if RSOC fails or if the command opcode is
- * unsupported. Returns 1 if the device claims to support the command.
+ * opcode. Returns -EINVAL if RSOC fails, 0 if the command opcode is
+ * unsupported and 1 if the device claims to support the command.
  */
 int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
 		       unsigned int len, unsigned char opcode)
@@ -1081,7 +1081,7 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
 	int result;
 
 	if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
-		return 0;
+		return -EINVAL;
 
 	memset(cmd, 0, 16);
 	cmd[0] = MAINTENANCE_IN;
@@ -1097,7 +1097,7 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
 	if (result && scsi_sense_valid(&sshdr) &&
 	    sshdr.sense_key == ILLEGAL_REQUEST &&
 	    (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
-		return 0;
+		return -EINVAL;
 
 	if ((buffer[1] & 3) == 3) /* Command supported */
 		return 1;

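Editorial note: callers must now treat scsi_report_opcode() as tri-state: a negative value means RSOC itself was unusable, 0 means the device explicitly reported the opcode unsupported, and 1 means supported. sd_read_write_same() below depends on exactly this distinction. An illustrative calling pattern; the helper is hypothetical:

/* Hypothetical helper: log what RSOC says about WRITE SAME(16). */
static void note_ws16(struct scsi_device *sdev, unsigned char *buf, int len)
{
	int res = scsi_report_opcode(sdev, buf, len, WRITE_SAME_16);

	if (res < 0)
		pr_debug("RSOC failed: WRITE SAME(16) support unknown\n");
	else if (res == 1)
		pr_debug("device claims WRITE SAME(16) support\n");
	else
		pr_debug("device reports WRITE SAME(16) unsupported\n");
}
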
+ 1 - 0
drivers/scsi/scsi_devinfo.c

@@ -228,6 +228,7 @@ static struct {
 	{"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
 	{"SEAGATE", "ST34555N", "0930", BLIST_NOTQ},	/* Chokes on tagged INQUIRY */
 	{"SEAGATE", "ST3390N", "9546", BLIST_NOTQ},
+	{"SEAGATE", "ST900MM0006", NULL, BLIST_SKIP_VPD_PAGES},
 	{"SGI", "RAID3", "*", BLIST_SPARSELUN},
 	{"SGI", "RAID5", "*", BLIST_SPARSELUN},
 	{"SGI", "TP9100", "*", BLIST_REPORTLUN2},

+ 3 - 4
drivers/scsi/scsi_error.c

@@ -45,8 +45,6 @@
 
 static void scsi_eh_done(struct scsi_cmnd *scmd);
 
-#define SENSE_TIMEOUT		(10*HZ)
-
 /*
  * These should *probably* be handled by the host itself.
  * Since it is allowed to sleep, it probably should.
@@ -881,7 +879,7 @@ retry:
  */
 static int scsi_request_sense(struct scsi_cmnd *scmd)
 {
-	return scsi_send_eh_cmnd(scmd, NULL, 0, SENSE_TIMEOUT, ~0);
+	return scsi_send_eh_cmnd(scmd, NULL, 0, scmd->device->eh_timeout, ~0);
 }
 
 /**
@@ -982,7 +980,8 @@ static int scsi_eh_tur(struct scsi_cmnd *scmd)
 	int retry_cnt = 1, rtn;
 
 retry_tur:
-	rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0);
+	rtn = scsi_send_eh_cmnd(scmd, tur_command, 6,
+				scmd->device->eh_timeout, 0);
 
 	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
 		__func__, scmd, rtn));

+ 5 - 0
drivers/scsi/scsi_scan.c

@@ -924,6 +924,11 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
 	if (*bflags & BLIST_NO_DIF)
 		sdev->no_dif = 1;
 
+	sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
+
+	if (*bflags & BLIST_SKIP_VPD_PAGES)
+		sdev->skip_vpd_pages = 1;
+
 	transport_configure_device(&sdev->sdev_gendev);
 
 	if (sdev->host->hostt->slave_configure) {

+ 30 - 0
drivers/scsi/scsi_sysfs.c

@@ -559,6 +559,35 @@ sdev_store_timeout (struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout);
 
+static ssize_t
+sdev_show_eh_timeout(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev;
+	sdev = to_scsi_device(dev);
+	return snprintf(buf, 20, "%u\n", sdev->eh_timeout / HZ);
+}
+
+static ssize_t
+sdev_store_eh_timeout(struct device *dev, struct device_attribute *attr,
+		    const char *buf, size_t count)
+{
+	struct scsi_device *sdev;
+	unsigned int eh_timeout;
+	int err;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	sdev = to_scsi_device(dev);
+	err = kstrtouint(buf, 10, &eh_timeout);
+	if (err)
+		return err;
+	sdev->eh_timeout = eh_timeout * HZ;
+
+	return count;
+}
+static DEVICE_ATTR(eh_timeout, S_IRUGO | S_IWUSR, sdev_show_eh_timeout, sdev_store_eh_timeout);
+
 static ssize_t
 store_rescan_field (struct device *dev, struct device_attribute *attr,
 		    const char *buf, size_t count)
@@ -723,6 +752,7 @@ static struct attribute *scsi_sdev_attrs[] = {
 	&dev_attr_delete.attr,
 	&dev_attr_state.attr,
 	&dev_attr_timeout.attr,
+	&dev_attr_eh_timeout.attr,
 	&dev_attr_iocounterbits.attr,
 	&dev_attr_iorequest_cnt.attr,
 	&dev_attr_iodone_cnt.attr,

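Editorial note: with the attribute registered, the per-device timeout used by the error handler (scsi_error.c above replaces the fixed SENSE_TIMEOUT with sdev->eh_timeout) becomes tunable from userspace in whole seconds. A hypothetical userspace sketch; the sysfs path is an example and depends on the device's H:C:T:L address:

/* Hypothetical sketch: set a 30 second EH timeout on one device. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/scsi/devices/0:0:0:0/eh_timeout"; /* example */
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "30\n");	/* seconds; the store hook multiplies by HZ */
	return fclose(f) ? 1 : 0;
}
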
+ 12 - 0
drivers/scsi/scsi_transport_iscsi.c

@@ -3473,6 +3473,9 @@ iscsi_session_attr(tgt_reset_tmo, ISCSI_PARAM_TGT_RESET_TMO, 0);
 iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
 iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0);
 iscsi_session_attr(targetalias, ISCSI_PARAM_TARGET_ALIAS, 0);
+iscsi_session_attr(boot_root, ISCSI_PARAM_BOOT_ROOT, 0);
+iscsi_session_attr(boot_nic, ISCSI_PARAM_BOOT_NIC, 0);
+iscsi_session_attr(boot_target, ISCSI_PARAM_BOOT_TARGET, 0);
 
 static ssize_t
 show_priv_session_state(struct device *dev, struct device_attribute *attr,
@@ -3568,6 +3571,9 @@ static struct attribute *iscsi_session_attrs[] = {
 	&dev_attr_sess_ifacename.attr,
 	&dev_attr_sess_initiatorname.attr,
 	&dev_attr_sess_targetalias.attr,
+	&dev_attr_sess_boot_root.attr,
+	&dev_attr_sess_boot_nic.attr,
+	&dev_attr_sess_boot_target.attr,
 	&dev_attr_priv_sess_recovery_tmo.attr,
 	&dev_attr_priv_sess_state.attr,
 	&dev_attr_priv_sess_creator.attr,
@@ -3631,6 +3637,12 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
 		param = ISCSI_PARAM_INITIATOR_NAME;
 	else if (attr == &dev_attr_sess_targetalias.attr)
 		param = ISCSI_PARAM_TARGET_ALIAS;
+	else if (attr == &dev_attr_sess_boot_root.attr)
+		param = ISCSI_PARAM_BOOT_ROOT;
+	else if (attr == &dev_attr_sess_boot_nic.attr)
+		param = ISCSI_PARAM_BOOT_NIC;
+	else if (attr == &dev_attr_sess_boot_target.attr)
+		param = ISCSI_PARAM_BOOT_TARGET;
 	else if (attr == &dev_attr_priv_sess_recovery_tmo.attr)
 		return S_IRUGO | S_IWUSR;
 	else if (attr == &dev_attr_priv_sess_state.attr)

+ 58 - 19
drivers/scsi/sd.c

@@ -142,7 +142,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
 	char *buffer_data;
 	struct scsi_mode_data data;
 	struct scsi_sense_hdr sshdr;
-	const char *temp = "temporary ";
+	static const char temp[] = "temporary ";
 	int len;
 
 	if (sdp->type != TYPE_DISK)
@@ -442,8 +442,10 @@ sd_store_write_same_blocks(struct device *dev, struct device_attribute *attr,
 
 	if (max == 0)
 		sdp->no_write_same = 1;
-	else if (max <= SD_MAX_WS16_BLOCKS)
+	else if (max <= SD_MAX_WS16_BLOCKS) {
+		sdp->no_write_same = 0;
 		sdkp->max_ws_blocks = max;
+	}
 
 	sd_config_write_same(sdkp);
 
@@ -503,6 +505,16 @@ static struct scsi_driver sd_template = {
 	.eh_action		= sd_eh_action,
 };
 
+/*
+ * Dummy kobj_map->probe function.
+ * The default ->probe function will call modprobe, which is
+ * pointless as this module is already loaded.
+ */
+static struct kobject *sd_default_probe(dev_t devt, int *partno, void *data)
+{
+	return NULL;
+}
+
 /*
  * Device no to disk mapping:
  * 
@@ -740,7 +752,6 @@ static void sd_config_write_same(struct scsi_disk *sdkp)
 {
 	struct request_queue *q = sdkp->disk->queue;
 	unsigned int logical_block_size = sdkp->device->sector_size;
-	unsigned int blocks = 0;
 
 	if (sdkp->device->no_write_same) {
 		sdkp->max_ws_blocks = 0;
@@ -752,18 +763,20 @@ static void sd_config_write_same(struct scsi_disk *sdkp)
 	 * blocks per I/O unless the device explicitly advertises a
 	 * bigger limit.
 	 */
-	if (sdkp->max_ws_blocks == 0)
-		sdkp->max_ws_blocks = SD_MAX_WS10_BLOCKS;
-
-	if (sdkp->ws16 || sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
-		blocks = min_not_zero(sdkp->max_ws_blocks,
-				      (u32)SD_MAX_WS16_BLOCKS);
-	else
-		blocks = min_not_zero(sdkp->max_ws_blocks,
-				      (u32)SD_MAX_WS10_BLOCKS);
+	if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
+		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
+						   (u32)SD_MAX_WS16_BLOCKS);
+	else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes)
+		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
+						   (u32)SD_MAX_WS10_BLOCKS);
+	else {
+		sdkp->device->no_write_same = 1;
+		sdkp->max_ws_blocks = 0;
+	}
 
 out:
-	blk_queue_max_write_same_sectors(q, blocks * (logical_block_size >> 9));
+	blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
+					 (logical_block_size >> 9));
 }
 
 /**
@@ -2635,9 +2648,24 @@ static void sd_read_block_provisioning(struct scsi_disk *sdkp)
 
 static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
 {
-	if (scsi_report_opcode(sdkp->device, buffer, SD_BUF_SIZE,
-			       WRITE_SAME_16))
+	struct scsi_device *sdev = sdkp->device;
+
+	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
+		sdev->no_report_opcodes = 1;
+
+		/* Disable WRITE SAME if REPORT SUPPORTED OPERATION
+		 * CODES is unsupported and the device has an ATA
+		 * Information VPD page (SAT).
+		 */
+		if (!scsi_get_vpd_page(sdev, 0x89, buffer, SD_BUF_SIZE))
+			sdev->no_write_same = 1;
+	}
+
+	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1)
 		sdkp->ws16 = 1;
+
+	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1)
+		sdkp->ws10 = 1;
 }
 
 static int sd_try_extended_inquiry(struct scsi_device *sdp)
@@ -2970,8 +2998,10 @@ static int sd_probe(struct device *dev)
 static int sd_remove(struct device *dev)
 {
 	struct scsi_disk *sdkp;
+	dev_t devt;
 
 	sdkp = dev_get_drvdata(dev);
+	devt = disk_devt(sdkp->disk);
 	scsi_autopm_get_device(sdkp->device);
 
 	async_synchronize_full_domain(&scsi_sd_probe_domain);
@@ -2981,6 +3011,9 @@ static int sd_remove(struct device *dev)
 	del_gendisk(sdkp->disk);
 	sd_shutdown(dev);
 
+	blk_register_region(devt, SD_MINORS, NULL,
+			    sd_default_probe, NULL, NULL);
+
 	mutex_lock(&sd_ref_mutex);
 	dev_set_drvdata(dev, NULL);
 	put_device(&sdkp->dev);
@@ -3124,9 +3157,13 @@ static int __init init_sd(void)
 
 	SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));
 
-	for (i = 0; i < SD_MAJORS; i++)
-		if (register_blkdev(sd_major(i), "sd") == 0)
-			majors++;
+	for (i = 0; i < SD_MAJORS; i++) {
+		if (register_blkdev(sd_major(i), "sd") != 0)
+			continue;
+		majors++;
+		blk_register_region(sd_major(i), SD_MINORS, NULL,
+				    sd_default_probe, NULL, NULL);
+	}
 
 	if (!majors)
 		return -ENODEV;
@@ -3185,8 +3222,10 @@ static void __exit exit_sd(void)
 
 	class_unregister(&sd_disk_class);
 
-	for (i = 0; i < SD_MAJORS; i++)
+	for (i = 0; i < SD_MAJORS; i++) {
+		blk_unregister_region(sd_major(i), SD_MINORS);
 		unregister_blkdev(sd_major(i), "sd");
+	}
 }
 
 module_init(init_sd);

+ 1 - 0
drivers/scsi/sd.h

@@ -84,6 +84,7 @@ struct scsi_disk {
 	unsigned	lbpws : 1;
 	unsigned	lbpws10 : 1;
 	unsigned	lbpvpd : 1;
+	unsigned	ws10 : 1;
 	unsigned	ws16 : 1;
 };
 #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)

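Editorial note: taken together, sd_read_write_same() and the reworked sd_config_write_same() implement the updated WRITE SAME heuristic: probe RSOC for both WRITE SAME(16) and WRITE SAME(10), treat a SAT/ATA device without RSOC as having no WRITE SAME support at all, and fall back to WRITE SAME(10) limits only when there is some positive signal. A condensed decision sketch, illustrative rather than the driver's exact control flow:

/* Condensed sketch of the reworked WRITE SAME sizing decision. */
static u32 ws_max_blocks(struct scsi_disk *sdkp)
{
	struct scsi_device *sdev = sdkp->device;

	if (sdev->no_write_same)
		return 0;			/* disabled outright */
	if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
		return min_not_zero(sdkp->max_ws_blocks,
				    (u32)SD_MAX_WS16_BLOCKS);
	if (sdkp->ws16 || sdkp->ws10 || sdev->no_report_opcodes)
		return min_not_zero(sdkp->max_ws_blocks,
				    (u32)SD_MAX_WS10_BLOCKS);
	return 0;	/* no evidence of support: disable */
}
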
+ 175 - 35
drivers/scsi/storvsc_drv.c

@@ -55,10 +55,15 @@
  * V1 RC < 2008/1/31: 1.0
  * V1 RC > 2008/1/31:  2.0
  * Win7: 4.2
+ * Win8: 5.1
  */
 
-#define VMSTOR_CURRENT_MAJOR  4
-#define VMSTOR_CURRENT_MINOR  2
+
+#define VMSTOR_WIN7_MAJOR 4
+#define VMSTOR_WIN7_MINOR 2
+
+#define VMSTOR_WIN8_MAJOR 5
+#define VMSTOR_WIN8_MINOR 1
 
 
 /*  Packet structure describing virtual storage requests. */
@@ -74,18 +79,103 @@ enum vstor_packet_operation {
 	VSTOR_OPERATION_QUERY_PROTOCOL_VERSION	= 9,
 	VSTOR_OPERATION_QUERY_PROPERTIES	= 10,
 	VSTOR_OPERATION_ENUMERATE_BUS		= 11,
-	VSTOR_OPERATION_MAXIMUM			= 11
+	VSTOR_OPERATION_FCHBA_DATA              = 12,
+	VSTOR_OPERATION_CREATE_SUB_CHANNELS     = 13,
+	VSTOR_OPERATION_MAXIMUM                 = 13
+};
+
+/*
+ * WWN packet for Fibre Channel HBA
+ */
+
+struct hv_fc_wwn_packet {
+	bool	primary_active;
+	u8	reserved1;
+	u8	reserved2;
+	u8	primary_port_wwn[8];
+	u8	primary_node_wwn[8];
+	u8	secondary_port_wwn[8];
+	u8	secondary_node_wwn[8];
 };
 
+/*
+ * SRB Flag Bits
+ */
+
+#define SRB_FLAGS_QUEUE_ACTION_ENABLE		0x00000002
+#define SRB_FLAGS_DISABLE_DISCONNECT		0x00000004
+#define SRB_FLAGS_DISABLE_SYNCH_TRANSFER	0x00000008
+#define SRB_FLAGS_BYPASS_FROZEN_QUEUE		0x00000010
+#define SRB_FLAGS_DISABLE_AUTOSENSE		0x00000020
+#define SRB_FLAGS_DATA_IN			0x00000040
+#define SRB_FLAGS_DATA_OUT			0x00000080
+#define SRB_FLAGS_NO_DATA_TRANSFER		0x00000000
+#define SRB_FLAGS_UNSPECIFIED_DIRECTION	(SRB_FLAGS_DATA_IN | SRB_FLAGS_DATA_OUT)
+#define SRB_FLAGS_NO_QUEUE_FREEZE		0x00000100
+#define SRB_FLAGS_ADAPTER_CACHE_ENABLE		0x00000200
+#define SRB_FLAGS_FREE_SENSE_BUFFER		0x00000400
+
+/*
+ * This flag indicates the request is part of the workflow for processing a D3.
+ */
+#define SRB_FLAGS_D3_PROCESSING			0x00000800
+#define SRB_FLAGS_IS_ACTIVE			0x00010000
+#define SRB_FLAGS_ALLOCATED_FROM_ZONE		0x00020000
+#define SRB_FLAGS_SGLIST_FROM_POOL		0x00040000
+#define SRB_FLAGS_BYPASS_LOCKED_QUEUE		0x00080000
+#define SRB_FLAGS_NO_KEEP_AWAKE			0x00100000
+#define SRB_FLAGS_PORT_DRIVER_ALLOCSENSE	0x00200000
+#define SRB_FLAGS_PORT_DRIVER_SENSEHASPORT	0x00400000
+#define SRB_FLAGS_DONT_START_NEXT_PACKET	0x00800000
+#define SRB_FLAGS_PORT_DRIVER_RESERVED		0x0F000000
+#define SRB_FLAGS_CLASS_DRIVER_RESERVED		0xF0000000
+
+
 /*
  * Platform neutral description of a scsi request -
  * this remains the same across the write regardless of 32/64 bit
  * note: it's patterned off the SCSI_PASS_THROUGH structure
  */
 #define STORVSC_MAX_CMD_LEN			0x10
-#define STORVSC_SENSE_BUFFER_SIZE		0x12
+
+#define POST_WIN7_STORVSC_SENSE_BUFFER_SIZE	0x14
+#define PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE	0x12
+
+#define STORVSC_SENSE_BUFFER_SIZE		0x14
 #define STORVSC_MAX_BUF_LEN_WITH_PADDING	0x14
 
+/*
+ * Sense buffer size changed in win8; have a run-time
+ * variable to track the size we should use.
+ */
+static int sense_buffer_size;
+
+/*
+ * The size of the vmscsi_request has changed in win8. The
+ * additional size is because of new elements added to the
+ * structure. These elements are valid only when we are talking
+ * to a win8 host.
+ * Track the correction to size we need to apply.
+ */
+
+static int vmscsi_size_delta;
+static int vmstor_current_major;
+static int vmstor_current_minor;
+
+struct vmscsi_win8_extension {
+	/*
+	 * The following were added in Windows 8
+	 */
+	u16 reserve;
+	u8  queue_tag;
+	u8  queue_action;
+	u32 srb_flags;
+	u32 time_out_value;
+	u32 queue_sort_ey;
+} __packed;
+
 struct vmscsi_request {
 	u16 length;
 	u8 srb_status;
@@ -108,6 +198,11 @@ struct vmscsi_request {
 		u8 sense_data[STORVSC_SENSE_BUFFER_SIZE];
 		u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING];
 	};
+	/*
+	 * The following was added in win8.
+	 */
+	struct vmscsi_win8_extension win8_extension;
+
 } __attribute((packed));
 
 
@@ -115,22 +210,18 @@ struct vmscsi_request {
  * This structure is sent during the intialization phase to get the different
  * properties of the channel.
  */
+
+#define STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL		0x1
+
 struct vmstorage_channel_properties {
-	u16 protocol_version;
-	u8  path_id;
-	u8 target_id;
+	u32 reserved;
+	u16 max_channel_cnt;
+	u16 reserved1;
 
-	/* Note: port number is only really known on the client side */
-	u32  port_number;
-	u32  flags;
+	u32 flags;
 	u32   max_transfer_bytes;
 
-	/*
-	 * This id is unique for each channel and will correspond with
-	 * vendor specific data in the inquiry data.
-	 */
-
-	u64  unique_id;
+	u64  reserved2;
 } __packed;
 
 /*  This structure is sent during the storage protocol negotiations. */
@@ -175,6 +266,15 @@ struct vstor_packet {
 
 		/* Used during version negotiations. */
 		struct vmstorage_protocol_version version;
+
+		/* Fibre channel address packet */
+		struct hv_fc_wwn_packet wwn_packet;
+
+		/* Number of sub-channels to create */
+		u16 sub_channel_count;
+
+		/* This will be the maximum of the union members */
+		u8  buffer[0x34];
 	};
 } __packed;
 
@@ -221,6 +321,11 @@ static int storvsc_ringbuffer_size = (20 * PAGE_SIZE);
 module_param(storvsc_ringbuffer_size, int, S_IRUGO);
 MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
 
+/*
+ * Timeout in seconds for all devices managed by this driver.
+ */
+static int storvsc_timeout = 180;
+
 #define STORVSC_MAX_IO_REQUESTS				128
 
 /*
@@ -674,7 +779,8 @@ static int storvsc_channel_init(struct hv_device *device)
 	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
 
 	ret = vmbus_sendpacket(device->channel, vstor_packet,
-			       sizeof(struct vstor_packet),
+			       (sizeof(struct vstor_packet) -
+			       vmscsi_size_delta),
 			       (unsigned long)request,
 			       VM_PKT_DATA_INBAND,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -698,7 +804,7 @@ static int storvsc_channel_init(struct hv_device *device)
 	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
 
 	vstor_packet->version.major_minor =
-		storvsc_get_version(VMSTOR_CURRENT_MAJOR, VMSTOR_CURRENT_MINOR);
+		storvsc_get_version(vmstor_current_major, vmstor_current_minor);
 
 	/*
 	 * The revision number is only used in Windows; set it to 0.
@@ -706,7 +812,8 @@ static int storvsc_channel_init(struct hv_device *device)
 	vstor_packet->version.revision = 0;
 
 	ret = vmbus_sendpacket(device->channel, vstor_packet,
-			       sizeof(struct vstor_packet),
+			       (sizeof(struct vstor_packet) -
+				vmscsi_size_delta),
 			       (unsigned long)request,
 			       VM_PKT_DATA_INBAND,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -727,11 +834,10 @@ static int storvsc_channel_init(struct hv_device *device)
 	memset(vstor_packet, 0, sizeof(struct vstor_packet));
 	vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
 	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
-	vstor_packet->storage_channel_properties.port_number =
-					stor_device->port_number;
 
 	ret = vmbus_sendpacket(device->channel, vstor_packet,
-			       sizeof(struct vstor_packet),
+			       (sizeof(struct vstor_packet) -
+				vmscsi_size_delta),
 			       (unsigned long)request,
 			       VM_PKT_DATA_INBAND,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -749,16 +855,13 @@ static int storvsc_channel_init(struct hv_device *device)
 	    vstor_packet->status != 0)
 		goto cleanup;
 
-	stor_device->path_id = vstor_packet->storage_channel_properties.path_id;
-	stor_device->target_id
-		= vstor_packet->storage_channel_properties.target_id;
-
 	memset(vstor_packet, 0, sizeof(struct vstor_packet));
 	vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
 	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
 
 	ret = vmbus_sendpacket(device->channel, vstor_packet,
-			       sizeof(struct vstor_packet),
+			       (sizeof(struct vstor_packet) -
+				vmscsi_size_delta),
 			       (unsigned long)request,
 			       VM_PKT_DATA_INBAND,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -1012,7 +1115,8 @@ static void storvsc_on_channel_callback(void *context)
 
 	do {
 		ret = vmbus_recvpacket(device->channel, packet,
-				       ALIGN(sizeof(struct vstor_packet), 8),
+				       ALIGN((sizeof(struct vstor_packet) -
+					     vmscsi_size_delta), 8),
 				       &bytes_recvd, &request_id);
 		if (ret == 0 && bytes_recvd > 0) {
 
@@ -1023,7 +1127,8 @@ static void storvsc_on_channel_callback(void *context)
 			    (request == &stor_device->reset_request)) {
 
 				memcpy(&request->vstor_packet, packet,
-				       sizeof(struct vstor_packet));
+				       (sizeof(struct vstor_packet) -
+					vmscsi_size_delta));
 				complete(&request->wait_event);
 			} else {
 				storvsc_on_receive(device,
@@ -1116,10 +1221,11 @@ static int storvsc_do_io(struct hv_device *device,
 
 	vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
 
-	vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);
+	vstor_packet->vm_srb.length = (sizeof(struct vmscsi_request) -
+					vmscsi_size_delta);
 
 
-	vstor_packet->vm_srb.sense_info_length = STORVSC_SENSE_BUFFER_SIZE;
+	vstor_packet->vm_srb.sense_info_length = sense_buffer_size;
 
 
 	vstor_packet->vm_srb.data_transfer_length =
@@ -1131,11 +1237,13 @@ static int storvsc_do_io(struct hv_device *device,
 		ret = vmbus_sendpacket_multipagebuffer(device->channel,
 				&request->data_buffer,
 				vstor_packet,
-				sizeof(struct vstor_packet),
+				(sizeof(struct vstor_packet) -
+				vmscsi_size_delta),
 				(unsigned long)request);
 	} else {
 		ret = vmbus_sendpacket(device->channel, vstor_packet,
-			       sizeof(struct vstor_packet),
+			       (sizeof(struct vstor_packet) -
+				vmscsi_size_delta),
 			       (unsigned long)request,
 			       VM_PKT_DATA_INBAND,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -1204,6 +1312,8 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
 
 	blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
 
+	blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
+
 	sdevice->no_write_same = 1;
 
 	return 0;
@@ -1257,7 +1367,8 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
 	vstor_packet->vm_srb.path_id = stor_device->path_id;
 
 	ret = vmbus_sendpacket(device->channel, vstor_packet,
-			       sizeof(struct vstor_packet),
+			       (sizeof(struct vstor_packet) -
+				vmscsi_size_delta),
 			       (unsigned long)&stor_device->reset_request,
 			       VM_PKT_DATA_INBAND,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -1342,18 +1453,28 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	scmnd->host_scribble = (unsigned char *)cmd_request;
 
 	vm_srb = &cmd_request->vstor_packet.vm_srb;
+	vm_srb->win8_extension.time_out_value = 60;
 
 
 	/* Build the SRB */
 	switch (scmnd->sc_data_direction) {
 	case DMA_TO_DEVICE:
 		vm_srb->data_in = WRITE_TYPE;
+		vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT;
+		vm_srb->win8_extension.srb_flags |=
+			(SRB_FLAGS_QUEUE_ACTION_ENABLE |
+			SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
 		break;
 	case DMA_FROM_DEVICE:
 		vm_srb->data_in = READ_TYPE;
+		vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
+		vm_srb->win8_extension.srb_flags |=
+			(SRB_FLAGS_QUEUE_ACTION_ENABLE |
+			SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
 		break;
 	default:
 		vm_srb->data_in = UNKNOWN_TYPE;
+		vm_srb->win8_extension.srb_flags = 0;
 		break;
 	}
 
@@ -1485,6 +1606,24 @@ static int storvsc_probe(struct hv_device *device,
 	int target = 0;
 	struct storvsc_device *stor_device;
 
+	/*
+	 * Based on the windows host we are running on,
+	 * set state to properly communicate with the host.
+	 */
+
+	if (vmbus_proto_version == VERSION_WIN8) {
+		sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
+		vmscsi_size_delta = 0;
+		vmstor_current_major = VMSTOR_WIN8_MAJOR;
+		vmstor_current_minor = VMSTOR_WIN8_MINOR;
+	} else {
+		sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
+		vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
+		vmstor_current_major = VMSTOR_WIN7_MAJOR;
+		vmstor_current_minor = VMSTOR_WIN7_MINOR;
+	}
+
 	host = scsi_host_alloc(&scsi_driver,
 			       sizeof(struct hv_host_device));
 	if (!host)
@@ -1594,7 +1733,8 @@ static int __init storvsc_drv_init(void)
 	max_outstanding_req_per_channel =
 		((storvsc_ringbuffer_size - PAGE_SIZE) /
 		ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
-		sizeof(struct vstor_packet) + sizeof(u64),
+		sizeof(struct vstor_packet) + sizeof(u64) -
+		vmscsi_size_delta,
 		sizeof(u64)));
 
 	if (max_outstanding_req_per_channel <

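Editorial note: because struct vmscsi_request now carries the win8 extension unconditionally, vmscsi_size_delta is the single correction applied to every send and receive so that pre-win8 hosts never see the trailing fields. A sketch of the resulting sizing rule, mirroring the probe-time selection above; the helper names are illustrative:

/* Sketch: protocol-adjusted wire sizes, fixed once at probe time. */
static inline size_t storvsc_packet_wire_size(void)
{
	return sizeof(struct vstor_packet) - vmscsi_size_delta;
}

static inline size_t vmscsi_wire_size(void)
{
	return sizeof(struct vmscsi_request) - vmscsi_size_delta;
}
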
Some files were not shown because too many files changed in this diff