Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (154 commits)
  [SCSI] osd: Remove out-of-tree left overs
  [SCSI] libosd: Use REQ_QUIET requests.
  [SCSI] osduld: use filp_open() when looking up an osd-device
  [SCSI] libosd: Define an osd_dev wrapper to retrieve the request_queue
  [SCSI] libosd: osd_req_{read,write} takes a length parameter
  [SCSI] libosd: Let _osd_req_finalize_data_integrity receive number of out_bytes
  [SCSI] libosd: osd_req_{read,write}_kern new API
  [SCSI] libosd: Better printout of OSD target system information
  [SCSI] libosd: OSD2r05: Attribute definitions
  [SCSI] libosd: OSD2r05: Additional command enums
  [SCSI] mpt fusion: fix up doc book comments
  [SCSI] mpt fusion: Added support for Broadcast primitives Event handling
  [SCSI] mpt fusion: Queue full event handling
  [SCSI] mpt fusion: RAID device handling and Dual port Raid support is added
  [SCSI] mpt fusion: Put IOC into ready state if it not already in ready state
  [SCSI] mpt fusion: Code Cleanup patch
  [SCSI] mpt fusion: Rescan SAS topology added
  [SCSI] mpt fusion: SAS topology scan changes, expander events
  [SCSI] mpt fusion: Firmware event implementation using seperate WorkQueue
  [SCSI] mpt fusion: rewrite of ioctl_cmds internal generated function
  ...
Linus Torvalds, 16 years ago
parent commit c9b8af00ff
100 files changed: 28513 additions, 6018 deletions
  1. drivers/infiniband/ulp/iser/iscsi_iser.c (+4, -6)
  2. drivers/message/fusion/mptbase.c (+434, -215)
  3. drivers/message/fusion/mptbase.h (+110, -70)
  4. drivers/message/fusion/mptctl.c (+350, -282)
  5. drivers/message/fusion/mptdebug.h (+3, -0)
  6. drivers/message/fusion/mptfc.c (+4, -11)
  7. drivers/message/fusion/mptsas.c (+933, -115)
  8. drivers/message/fusion/mptsas.h (+37, -4)
  9. drivers/message/fusion/mptscsih.c (+271, -432)
  10. drivers/message/fusion/mptscsih.h (+6, -1)
  11. drivers/message/fusion/mptspi.c (+38, -33)
  12. drivers/net/Kconfig (+11, -0)
  13. drivers/net/Makefile (+1, -0)
  14. drivers/net/bnx2.c (+190, -3)
  15. drivers/net/bnx2.h (+18, -0)
  16. drivers/net/cnic.c (+2711, -0)
  17. drivers/net/cnic.h (+299, -0)
  18. drivers/net/cnic_defs.h (+580, -0)
  19. drivers/net/cnic_if.h (+299, -0)
  20. drivers/s390/scsi/zfcp_ccw.c (+20, -10)
  21. drivers/s390/scsi/zfcp_dbf.c (+5, -5)
  22. drivers/s390/scsi/zfcp_def.h (+0, -7)
  23. drivers/s390/scsi/zfcp_erp.c (+7, -1)
  24. drivers/s390/scsi/zfcp_ext.h (+1, -0)
  25. drivers/s390/scsi/zfcp_fc.c (+6, -1)
  26. drivers/s390/scsi/zfcp_fsf.c (+17, -12)
  27. drivers/s390/scsi/zfcp_scsi.c (+12, -1)
  28. drivers/scsi/Kconfig (+11, -20)
  29. drivers/scsi/Makefile (+2, -1)
  30. drivers/scsi/NCR_D700.c (+1, -1)
  31. drivers/scsi/bnx2i/57xx_iscsi_constants.h (+155, -0)
  32. drivers/scsi/bnx2i/57xx_iscsi_hsi.h (+1509, -0)
  33. drivers/scsi/bnx2i/Kconfig (+7, -0)
  34. drivers/scsi/bnx2i/Makefile (+3, -0)
  35. drivers/scsi/bnx2i/bnx2i.h (+771, -0)
  36. drivers/scsi/bnx2i/bnx2i_hwi.c (+2405, -0)
  37. drivers/scsi/bnx2i/bnx2i_init.c (+438, -0)
  38. drivers/scsi/bnx2i/bnx2i_iscsi.c (+2064, -0)
  39. drivers/scsi/bnx2i/bnx2i_sysfs.c (+142, -0)
  40. drivers/scsi/cxgb3i/cxgb3i.h (+0, -1)
  41. drivers/scsi/cxgb3i/cxgb3i_iscsi.c (+22, -4)
  42. drivers/scsi/cxgb3i/cxgb3i_offload.c (+14, -9)
  43. drivers/scsi/cxgb3i/cxgb3i_offload.h (+2, -1)
  44. drivers/scsi/device_handler/scsi_dh_rdac.c (+6, -0)
  45. drivers/scsi/fcoe/fcoe.c (+36, -59)
  46. drivers/scsi/fcoe/fcoe.h (+1, -0)
  47. drivers/scsi/fcoe/libfcoe.c (+13, -8)
  48. drivers/scsi/fnic/fnic_main.c (+1, -0)
  49. drivers/scsi/gdth_proc.c (+3, -2)
  50. drivers/scsi/ibmvscsi/ibmvfc.c (+304, -130)
  51. drivers/scsi/ibmvscsi/ibmvfc.h (+33, -7)
  52. drivers/scsi/ibmvscsi/ibmvscsi.c (+346, -117)
  53. drivers/scsi/ibmvscsi/ibmvscsi.h (+4, -0)
  54. drivers/scsi/ibmvscsi/viosrp.h (+67, -1)
  55. drivers/scsi/ipr.c (+3, -2)
  56. drivers/scsi/libfc/fc_exch.c (+4, -0)
  57. drivers/scsi/libfc/fc_fcp.c (+1, -1)
  58. drivers/scsi/libfc/fc_rport.c (+3, -3)
  59. drivers/scsi/libiscsi.c (+241, -227)
  60. drivers/scsi/libiscsi_tcp.c (+15, -3)
  61. drivers/scsi/lpfc/lpfc.h (+114, -9)
  62. drivers/scsi/lpfc/lpfc_attr.c (+167, -83)
  63. drivers/scsi/lpfc/lpfc_crtn.h (+52, -11)
  64. drivers/scsi/lpfc/lpfc_ct.c (+9, -6)
  65. drivers/scsi/lpfc/lpfc_debugfs.c (+14, -7)
  66. drivers/scsi/lpfc/lpfc_disc.h (+1, -0)
  67. drivers/scsi/lpfc/lpfc_els.c (+190, -85)
  68. drivers/scsi/lpfc/lpfc_hbadisc.c (+830, -45)
  69. drivers/scsi/lpfc/lpfc_hw.h (+99, -43)
  70. drivers/scsi/lpfc/lpfc_hw4.h (+2141, -0)
  71. drivers/scsi/lpfc/lpfc_init.c (+758, -199)
  72. drivers/scsi/lpfc/lpfc_logmsg.h (+30, -24)
  73. drivers/scsi/lpfc/lpfc_mbox.c (+631, -43)
  74. drivers/scsi/lpfc/lpfc_mem.c (+162, -44)
  75. drivers/scsi/lpfc/lpfc_nportdisc.c (+37, -14)
  76. drivers/scsi/lpfc/lpfc_scsi.c (+766, -118)
  77. drivers/scsi/lpfc/lpfc_scsi.h (+2, -0)
  78. drivers/scsi/lpfc/lpfc_sli.c (+686, -74)
  79. drivers/scsi/lpfc/lpfc_sli.h (+23, -6)
  80. drivers/scsi/lpfc/lpfc_sli4.h (+467, -0)
  81. drivers/scsi/lpfc/lpfc_version.h (+1, -1)
  82. drivers/scsi/lpfc/lpfc_vport.c (+46, -16)
  83. drivers/scsi/mpt2sas/mpt2sas_base.h (+3, -2)
  84. drivers/scsi/mpt2sas/mpt2sas_ctl.c (+21, -11)
  85. drivers/scsi/mpt2sas/mpt2sas_scsih.c (+294, -69)
  86. drivers/scsi/mpt2sas/mpt2sas_transport.c (+18, -18)
  87. drivers/scsi/mvsas.c (+0, -3222)
  88. drivers/scsi/mvsas/Kconfig (+42, -0)
  89. drivers/scsi/mvsas/Makefile (+32, -0)
  90. drivers/scsi/mvsas/mv_64xx.c (+793, -0)
  91. drivers/scsi/mvsas/mv_64xx.h (+151, -0)
  92. drivers/scsi/mvsas/mv_94xx.c (+672, -0)
  93. drivers/scsi/mvsas/mv_94xx.h (+222, -0)
  94. drivers/scsi/mvsas/mv_chips.h (+280, -0)
  95. drivers/scsi/mvsas/mv_defs.h (+502, -0)
  96. drivers/scsi/mvsas/mv_init.c (+703, -0)
  97. drivers/scsi/mvsas/mv_sas.c (+2154, -0)
  98. drivers/scsi/mvsas/mv_sas.h (+406, -0)
  99. drivers/scsi/osd/Kbuild (+0, -25)
  100. drivers/scsi/osd/Makefile (+0, -37)

+ 4 - 6
drivers/infiniband/ulp/iser/iscsi_iser.c

@@ -257,11 +257,8 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
 {
 	struct iscsi_iser_task *iser_task = task->dd_data;
 
-	/*
-	 * mgmt tasks do not need special cleanup and we do not
-	 * allocate anything in the init task callout
-	 */
-	if (!task->sc || task->state == ISCSI_TASK_PENDING)
+	/* mgmt tasks do not need special cleanup */
+	if (!task->sc)
 		return;
 
 	if (iser_task->status == ISER_TASK_STATUS_STARTED) {
@@ -517,7 +514,8 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
 }
 
 static struct iscsi_endpoint *
-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
+iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
+		      int non_blocking)
 {
 	int err;
 	struct iser_conn *ib_conn;

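Editor's note: the second hunk threads the Scsi_Host into ep_connect so the transport knows which host the endpoint is created for. A hedged sketch of how a transport wires up the new callback signature — assuming the 2.6.31-era struct iscsi_transport layout; only a few fields are shown:

/* Sketch only: assumes struct iscsi_transport has .owner/.name/.ep_connect. */
static struct iscsi_transport iscsi_iser_transport_example = {
	.owner		= THIS_MODULE,
	.name		= "iser",
	/* ep_connect now also receives the Scsi_Host */
	.ep_connect	= iscsi_iser_ep_connect,
	/* ... remaining callbacks elided ... */
};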
+ 434 - 215
drivers/message/fusion/mptbase.c
(File diff suppressed because it is too large)


+ 110 - 70
drivers/message/fusion/mptbase.h

@@ -76,8 +76,8 @@
 #define COPYRIGHT	"Copyright (c) 1999-2008 " MODULEAUTHOR
 #endif
 
-#define MPT_LINUX_VERSION_COMMON	"3.04.07"
-#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.07"
+#define MPT_LINUX_VERSION_COMMON	"3.04.10"
+#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.09"
 #define WHAT_MAGIC_STRING		"@" "(" "#" ")"
 
 #define show_mptmod_ver(s,ver)  \
@@ -104,6 +104,7 @@
 #endif
 
 #define MPT_NAME_LENGTH			32
+#define MPT_KOBJ_NAME_LEN		20
 
 #define MPT_PROCFS_MPTBASEDIR		"mpt"
 						/* chg it to "driver/fusion" ? */
@@ -134,6 +135,7 @@
 
 #define MPT_COALESCING_TIMEOUT		0x10
 
+
 /*
  * SCSI transfer rate defines.
  */
@@ -161,10 +163,10 @@
 /*
  * Set the MAX_SGE value based on user input.
  */
-#ifdef  CONFIG_FUSION_MAX_SGE
-#if     CONFIG_FUSION_MAX_SGE  < 16
+#ifdef CONFIG_FUSION_MAX_SGE
+#if CONFIG_FUSION_MAX_SGE  < 16
 #define MPT_SCSI_SG_DEPTH	16
-#elif   CONFIG_FUSION_MAX_SGE  > 128
+#elif CONFIG_FUSION_MAX_SGE  > 128
 #define MPT_SCSI_SG_DEPTH	128
 #else
 #define MPT_SCSI_SG_DEPTH	CONFIG_FUSION_MAX_SGE
@@ -173,6 +175,18 @@
 #define MPT_SCSI_SG_DEPTH	40
 #endif
 
+#ifdef CONFIG_FUSION_MAX_FC_SGE
+#if CONFIG_FUSION_MAX_FC_SGE  < 16
+#define MPT_SCSI_FC_SG_DEPTH	16
+#elif CONFIG_FUSION_MAX_FC_SGE  > 256
+#define MPT_SCSI_FC_SG_DEPTH	256
+#else
+#define MPT_SCSI_FC_SG_DEPTH	CONFIG_FUSION_MAX_FC_SGE
+#endif
+#else
+#define MPT_SCSI_FC_SG_DEPTH	40
+#endif
+
 /* debug print string length used for events and iocstatus */
 # define EVENT_DESCR_STR_SZ             100
 
@@ -431,38 +445,36 @@ do { \
  *	IOCTL structure and associated defines
  */
 
-#define MPT_IOCTL_STATUS_DID_IOCRESET	0x01	/* IOC Reset occurred on the current*/
-#define MPT_IOCTL_STATUS_RF_VALID	0x02	/* The Reply Frame is VALID */
-#define MPT_IOCTL_STATUS_TIMER_ACTIVE	0x04	/* The timer is running */
-#define MPT_IOCTL_STATUS_SENSE_VALID	0x08	/* Sense data is valid */
-#define MPT_IOCTL_STATUS_COMMAND_GOOD	0x10	/* Command Status GOOD */
-#define MPT_IOCTL_STATUS_TMTIMER_ACTIVE	0x20	/* The TM timer is running */
-#define MPT_IOCTL_STATUS_TM_FAILED	0x40	/* User TM request failed */
-
 #define MPTCTL_RESET_OK			0x01	/* Issue Bus Reset */
 
-typedef struct _MPT_IOCTL {
-	struct _MPT_ADAPTER	*ioc;
-	u8			 ReplyFrame[MPT_DEFAULT_FRAME_SIZE];	/* reply frame data */
-	u8			 sense[MPT_SENSE_BUFFER_ALLOC];
-	int			 wait_done;	/* wake-up value for this ioc */
-	u8			 rsvd;
-	u8			 status;	/* current command status */
-	u8			 reset;		/* 1 if bus reset allowed */
-	u8			 id;		/* target for reset */
-	struct mutex		 ioctl_mutex;
-} MPT_IOCTL;
-
-#define MPT_SAS_MGMT_STATUS_RF_VALID	0x02	/* The Reply Frame is VALID */
-#define MPT_SAS_MGMT_STATUS_COMMAND_GOOD	0x10	/* Command Status GOOD */
-#define MPT_SAS_MGMT_STATUS_TM_FAILED	0x40	/* User TM request failed */
-
-typedef struct _MPT_SAS_MGMT {
+#define MPT_MGMT_STATUS_RF_VALID	0x01	/* The Reply Frame is VALID */
+#define MPT_MGMT_STATUS_COMMAND_GOOD	0x02	/* Command Status GOOD */
+#define MPT_MGMT_STATUS_PENDING		0x04	/* command is pending */
+#define MPT_MGMT_STATUS_DID_IOCRESET	0x08	/* IOC Reset occurred
+						   on the current*/
+#define MPT_MGMT_STATUS_SENSE_VALID	0x10	/* valid sense info */
+#define MPT_MGMT_STATUS_TIMER_ACTIVE	0x20	/* obsolete */
+#define MPT_MGMT_STATUS_FREE_MF		0x40	/* free the mf from
+						   complete routine */
+
+#define INITIALIZE_MGMT_STATUS(status) \
+	status = MPT_MGMT_STATUS_PENDING;
+#define CLEAR_MGMT_STATUS(status) \
+	status = 0;
+#define CLEAR_MGMT_PENDING_STATUS(status) \
+	status &= ~MPT_MGMT_STATUS_PENDING;
+#define SET_MGMT_MSG_CONTEXT(msg_context, value) \
+	msg_context = value;
+
+typedef struct _MPT_MGMT {
 	struct mutex		 mutex;
 	struct completion	 done;
 	u8			 reply[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */
+	u8			 sense[MPT_SENSE_BUFFER_ALLOC];
 	u8			 status;	/* current command status */
-}MPT_SAS_MGMT;
+	int			 completion_code;
+	u32			 msg_context;
+} MPT_MGMT;
 
 /*
  *  Event Structure and define
@@ -564,6 +576,10 @@ struct mptfc_rport_info
 	u8		flags;
 };
 
+typedef void (*MPT_ADD_SGE)(void *pAddr, u32 flagslength, dma_addr_t dma_addr);
+typedef void (*MPT_ADD_CHAIN)(void *pAddr, u8 next, u16 length,
+		dma_addr_t dma_addr);
+
 /*
  *  Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
  */
@@ -573,6 +589,10 @@ typedef struct _MPT_ADAPTER
 	int			 pci_irq;	/* This irq           */
 	char			 name[MPT_NAME_LENGTH];	/* "iocN"             */
 	char			 prod_name[MPT_NAME_LENGTH];	/* "LSIFC9x9"         */
+#ifdef CONFIG_FUSION_LOGGING
+	/* used in mpt_display_event_info */
+	char			 evStr[EVENT_DESCR_STR_SZ];
+#endif
 	char			 board_name[16];
 	char			 board_assembly[16];
 	char			 board_tracer[16];
@@ -600,6 +620,10 @@ typedef struct _MPT_ADAPTER
 	int			 reply_depth;	/* Num Allocated reply frames */
 	int			 reply_sz;	/* Reply frame size */
 	int			 num_chain;	/* Number of chain buffers */
+	MPT_ADD_SGE              add_sge;       /* Pointer to add_sge
+						   function */
+	MPT_ADD_CHAIN		 add_chain;	/* Pointer to add_chain
+						   function */
 		/* Pool of buffers for chaining. ReqToChain
 		 * and ChainToChain track index of chain buffers.
 		 * ChainBuffer (DMA) virt/phys addresses.
@@ -640,11 +664,8 @@ typedef struct _MPT_ADAPTER
 	RaidCfgData		raid_data;	/* Raid config. data */
 	SasCfgData		sas_data;	/* Sas config. data */
 	FcCfgData		fc_data;	/* Fc config. data */
-	MPT_IOCTL		*ioctl;		/* ioctl data pointer */
 	struct proc_dir_entry	*ioc_dentry;
 	struct _MPT_ADAPTER	*alt_ioc;	/* ptr to 929 bound adapter port */
-	spinlock_t		 diagLock;	/* diagnostic reset lock */
-	int			 diagPending;
 	u32			 biosVersion;	/* BIOS version from IO Unit Page 2 */
 	int			 eventTypes;	/* Event logging parameters */
 	int			 eventContext;	/* Next event context */
@@ -652,7 +673,6 @@ typedef struct _MPT_ADAPTER
 	struct _mpt_ioctl_events *events;	/* pointer to event log */
 	u8			*cached_fw;	/* Pointer to FW */
 	dma_addr_t	 	cached_fw_dma;
-	struct list_head	 configQ;	/* linked list of config. requests */
 	int			 hs_reply_idx;
 #ifndef MFCNT
 	u32			 pad0;
@@ -665,9 +685,6 @@ typedef struct _MPT_ADAPTER
 	IOCFactsReply_t		 facts;
 	PortFactsReply_t	 pfacts[2];
 	FCPortPage0_t		 fc_port_page0[2];
-	struct timer_list	 persist_timer;	/* persist table timer */
-	int			 persist_wait_done; /* persist completion flag */
-	u8			 persist_reply_frame[MPT_DEFAULT_FRAME_SIZE]; /* persist reply */
 	LANPage0_t		 lan_cnfg_page0;
 	LANPage1_t		 lan_cnfg_page1;
 
@@ -682,23 +699,44 @@ typedef struct _MPT_ADAPTER
 	int			 aen_event_read_flag; /* flag to indicate event log was read*/
 	u8			 FirstWhoInit;
 	u8			 upload_fw;	/* If set, do a fw upload */
-	u8			 reload_fw;	/* Force a FW Reload on next reset */
 	u8			 NBShiftFactor;  /* NB Shift Factor based on Block Size (Facts)  */
 	u8			 pad1[4];
 	u8			 DoneCtx;
 	u8			 TaskCtx;
 	u8			 InternalCtx;
-	spinlock_t		 initializing_hba_lock;
-	int 	 		 initializing_hba_lock_flag;
 	struct list_head	 list;
 	struct net_device	*netdev;
 	struct list_head	 sas_topology;
 	struct mutex		 sas_topology_mutex;
+
+	struct workqueue_struct	*fw_event_q;
+	struct list_head	 fw_event_list;
+	spinlock_t		 fw_event_lock;
+	u8			 fw_events_off; /* if '1', then ignore events */
+	char 			 fw_event_q_name[MPT_KOBJ_NAME_LEN];
+
 	struct mutex		 sas_discovery_mutex;
 	u8			 sas_discovery_runtime;
 	u8			 sas_discovery_ignore_events;
+
+	/* port_info object for the host */
+	struct mptsas_portinfo	*hba_port_info;
+	u64			 hba_port_sas_addr;
+	u16			 hba_port_num_phy;
+	struct list_head	 sas_device_info_list;
+	struct mutex		 sas_device_info_mutex;
+	u8			 old_sas_discovery_protocal;
+	u8			 sas_discovery_quiesce_io;
 	int			 sas_index; /* index refrencing */
-	MPT_SAS_MGMT		 sas_mgmt;
+	MPT_MGMT		 sas_mgmt;
+	MPT_MGMT		 mptbase_cmds; /* for sending config pages */
+	MPT_MGMT		 internal_cmds;
+	MPT_MGMT		 taskmgmt_cmds;
+	MPT_MGMT		 ioctl_cmds;
+	spinlock_t		 taskmgmt_lock; /* diagnostic reset lock */
+	int			 taskmgmt_in_progress;
+	u8			 taskmgmt_quiesce_io;
+	u8			 ioc_reset_in_progress;
 	struct work_struct	 sas_persist_task;
 
 	struct work_struct	 fc_setup_reset_work;
@@ -707,15 +745,27 @@ typedef struct _MPT_ADAPTER
 	u8			 fc_link_speed[2];
 	spinlock_t		 fc_rescan_work_lock;
 	struct work_struct	 fc_rescan_work;
-	char			 fc_rescan_work_q_name[20];
+	char			 fc_rescan_work_q_name[MPT_KOBJ_NAME_LEN];
 	struct workqueue_struct *fc_rescan_work_q;
+
+	/* driver forced bus resets count */
+	unsigned long		  hard_resets;
+	/* fw/external bus resets count */
+	unsigned long		  soft_resets;
+	/* cmd timeouts */
+	unsigned long		  timeouts;
+
 	struct scsi_cmnd	**ScsiLookup;
 	spinlock_t		  scsi_lookup_lock;
-
-	char			 reset_work_q_name[20];
+	u64			dma_mask;
+	u32			  broadcast_aen_busy;
+	char			 reset_work_q_name[MPT_KOBJ_NAME_LEN];
 	struct workqueue_struct *reset_work_q;
 	struct delayed_work	 fault_reset_work;
-	spinlock_t		 fault_reset_work_lock;
+
+	u8			sg_addr_size;
+	u8			in_rescan;
+	u8			SGE_size;
 
 } MPT_ADAPTER;
 
@@ -753,13 +803,14 @@ typedef struct _mpt_sge {
 	dma_addr_t	Address;
 } MptSge_t;
 
-#define mpt_addr_size() \
-	((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SGE_FLAGS_64_BIT_ADDRESSING : \
-		MPI_SGE_FLAGS_32_BIT_ADDRESSING)
 
-#define mpt_msg_flags() \
-	((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \
-		MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32)
+#define mpt_msg_flags(ioc) \
+	(ioc->sg_addr_size == sizeof(u64)) ?		\
+	MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : 		\
+	MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32
+
+#define MPT_SGE_FLAGS_64_BIT_ADDRESSING \
+	(MPI_SGE_FLAGS_64_BIT_ADDRESSING << MPI_SGE_FLAGS_SHIFT)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*
@@ -835,22 +886,14 @@ typedef struct _MPT_SCSI_HOST {
 		/* Pool of memory for holding SCpnts before doing
 		 * OS callbacks. freeQ is the free pool.
 		 */
-	u8			  tmPending;
-	u8			  resetPending;
 	u8			  negoNvram;		/* DV disabled, nego NVRAM */
 	u8			  pad1;
-	u8                        tmState;
 	u8			  rsvd[2];
 	MPT_FRAME_HDR		 *cmdPtr;		/* Ptr to nonOS request */
 	struct scsi_cmnd	 *abortSCpnt;
 	MPT_LOCAL_REPLY		  localReply;		/* internal cmd reply struct */
-	unsigned long		  hard_resets;		/* driver forced bus resets count */
-	unsigned long		  soft_resets;		/* fw/external bus resets count */
-	unsigned long		  timeouts;		/* cmd timeouts */
 	ushort			  sel_timeout[MPT_MAX_FC_DEVICES];
 	char 			  *info_kbuf;
-	wait_queue_head_t	  scandv_waitq;
-	int			  scandv_wait_done;
 	long			  last_queue_full;
 	u16			  tm_iocstatus;
 	u16			  spi_pending;
@@ -870,21 +913,16 @@ struct scsi_cmnd;
  * Generic structure passed to the base mpt_config function.
  */
 typedef struct _x_config_parms {
-	struct list_head	 linkage;	/* linked list */
-	struct timer_list	 timer;		/* timer function for this request  */
 	union {
 		ConfigExtendedPageHeader_t	*ehdr;
 		ConfigPageHeader_t	*hdr;
 	} cfghdr;
 	dma_addr_t		 physAddr;
-	int			 wait_done;	/* wait for this request */
 	u32			 pageAddr;	/* properly formatted */
+	u16			 status;
 	u8			 action;
 	u8			 dir;
 	u8			 timeout;	/* seconds */
-	u8			 pad1;
-	u16			 status;
-	u16			 pad2;
 } CONFIGPARMS;
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -909,7 +947,6 @@ extern MPT_FRAME_HDR	*mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc);
 extern void	 mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
 extern void	 mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
 extern void	 mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
-extern void	 mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr);
 
 extern int	 mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag);
 extern int	 mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp);
@@ -922,6 +959,12 @@ extern void	 mpt_free_fw_memory(MPT_ADAPTER *ioc);
 extern int	 mpt_findImVolumes(MPT_ADAPTER *ioc);
 extern int	 mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
 extern int	 mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk);
+extern int	mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
+		pRaidPhysDiskPage1_t phys_disk);
+extern int	mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc,
+		u8 phys_disk_num);
+extern int	 mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc);
+extern void	 mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc);
 extern void     mpt_halt_firmware(MPT_ADAPTER *ioc);
 
 
@@ -959,7 +1002,6 @@ extern int mpt_fwfault_debug;
 #define MPT_SGE_FLAGS_END_OF_BUFFER		(0x40000000)
 #define MPT_SGE_FLAGS_LOCAL_ADDRESS		(0x08000000)
 #define MPT_SGE_FLAGS_DIRECTION			(0x04000000)
-#define MPT_SGE_FLAGS_ADDRESSING		(mpt_addr_size() << MPI_SGE_FLAGS_SHIFT)
 #define MPT_SGE_FLAGS_END_OF_LIST		(0x01000000)
 
 #define MPT_SGE_FLAGS_TRANSACTION_ELEMENT	(0x00000000)
@@ -972,14 +1014,12 @@ extern int mpt_fwfault_debug;
 	 MPT_SGE_FLAGS_END_OF_BUFFER |	\
 	 MPT_SGE_FLAGS_END_OF_LIST |	\
 	 MPT_SGE_FLAGS_SIMPLE_ELEMENT |	\
-	 MPT_SGE_FLAGS_ADDRESSING | \
 	 MPT_TRANSFER_IOC_TO_HOST)
 #define MPT_SGE_FLAGS_SSIMPLE_WRITE \
	(MPT_SGE_FLAGS_LAST_ELEMENT |	\
 	 MPT_SGE_FLAGS_END_OF_BUFFER |	\
 	 MPT_SGE_FLAGS_END_OF_LIST |	\
 	 MPT_SGE_FLAGS_SIMPLE_ELEMENT |	\
-	 MPT_SGE_FLAGS_ADDRESSING | \
 	 MPT_TRANSFER_HOST_TO_IOC)
 
 /*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

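Editor's note: the old MPT_IOCTL and MPT_SAS_MGMT bookkeeping collapses here into a single MPT_MGMT type driven by a mutex/completion pair. A minimal sketch of the request lifecycle the new macros imply, modeled on the mptscsih_quiesce_raid() rewrite in the mptspi.c hunk further down (my_build_request() is a hypothetical frame-builder, not part of this patch):

/* Sketch of the MPT_MGMT request lifecycle; assumes an MPT_ADAPTER *ioc. */
static int mpt_internal_request_example(MPT_ADAPTER *ioc)
{
	MPT_FRAME_HDR *mf;
	unsigned long timeleft;
	int ret = 0;

	mutex_lock(&ioc->internal_cmds.mutex);

	mf = mpt_get_msg_frame(ioc->InternalCtx, ioc);
	if (!mf) {
		ret = -EAGAIN;
		goto out;
	}
	my_build_request(mf);			/* hypothetical helper */

	INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
	mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);

	/* The reply handler is expected to set MPT_MGMT_STATUS_COMMAND_GOOD
	 * and call complete(&ioc->internal_cmds.done). */
	timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done,
					       10 * HZ);
	if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD))
		ret = timeleft ? -EFAULT : -ETIME;
 out:
	CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
	mutex_unlock(&ioc->internal_cmds.mutex);
	return ret;
}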
+ 350 - 282
drivers/message/fusion/mptctl.c
(File diff suppressed because it is too large)


+ 3 - 0
drivers/message/fusion/mptdebug.h

@@ -58,6 +58,7 @@
 #define MPT_DEBUG_FC			0x00080000
 #define MPT_DEBUG_SAS			0x00100000
 #define MPT_DEBUG_SAS_WIDE		0x00200000
+#define MPT_DEBUG_36GB_MEM              0x00400000
 
 /*
  * CONFIG_FUSION_LOGGING - enabled in Kconfig
@@ -135,6 +136,8 @@
 #define dsaswideprintk(IOC, CMD)		\
 	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE)
 
+#define d36memprintk(IOC, CMD)		\
+	MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_36GB_MEM)
 
 
 /*

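Editor's note: like the other dNNNprintk wrappers in this header, d36memprintk() expands through MPT_CHECK_LOGGING, so the printk statement is only emitted when the corresponding bit is set in ioc->debug_level. A representative call (the message text is illustrative, not from the patch):

/* Emitted only when ioc->debug_level has MPT_DEBUG_36GB_MEM set. */
d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
	"applying >4GB memory workaround\n", ioc->name));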
+ 4 - 11
drivers/message/fusion/mptfc.c

@@ -1251,17 +1251,15 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	 * A slightly different algorithm is required for
 	 * 64bit SGEs.
 	 */
-	scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
-	if (sizeof(dma_addr_t) == sizeof(u64)) {
+	scale = ioc->req_sz/ioc->SGE_size;
+	if (ioc->sg_addr_size == sizeof(u64)) {
 		numSGE = (scale - 1) *
 		  (ioc->facts.MaxChainDepth-1) + scale +
-		  (ioc->req_sz - 60) / (sizeof(dma_addr_t) +
-		  sizeof(u32));
+		  (ioc->req_sz - 60) / ioc->SGE_size;
 	} else {
 		numSGE = 1 + (scale - 1) *
 		  (ioc->facts.MaxChainDepth-1) + scale +
-		  (ioc->req_sz - 64) / (sizeof(dma_addr_t) +
-		  sizeof(u32));
+		  (ioc->req_sz - 64) / ioc->SGE_size;
 	}
 
 	if (numSGE < sh->sg_tablesize) {
@@ -1292,9 +1290,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	/* Clear the TM flags
 	 */
-	hd->tmPending = 0;
-	hd->tmState = TM_STATE_NONE;
-	hd->resetPending = 0;
 	hd->abortSCpnt = NULL;
 
 	/* Clear the pointer used to store
@@ -1312,8 +1307,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	hd->timer.data = (unsigned long) hd;
 	hd->timer.function = mptscsih_timer_expired;
 
-	init_waitqueue_head(&hd->scandv_waitq);
-	hd->scandv_wait_done = 0;
 	hd->last_queue_full = 0;
 
 	sh->transportt = mptfc_transport_template;

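Editor's note: the probe paths now derive the scatter-gather depth from the per-IOC SGE_size instead of the compile-time sizeof(dma_addr_t). A self-contained worked example of the 64-bit branch, with assumed illustrative values for req_sz, SGE_size, and MaxChainDepth:

#include <stdio.h>

/* Worked example of the numSGE computation above. Assumed values:
 * req_sz = 128 bytes, 64-bit SGE_size = 12, facts.MaxChainDepth = 8. */
int main(void)
{
	int req_sz = 128, sge_size = 12, max_chain_depth = 8;
	int scale  = req_sz / sge_size;                 /* = 10 SGEs/frame */
	int numSGE = (scale - 1) * (max_chain_depth - 1) /* chain segs: 63 */
		   + scale                               /* first frame: 10 */
		   + (req_sz - 60) / sge_size;           /* last chain:   5 */

	printf("numSGE = %d\n", numSGE);                 /* prints 78 */
	return 0;
}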
+ 933 - 115
drivers/message/fusion/mptsas.c
(File diff suppressed because it is too large)


+ 37 - 4
drivers/message/fusion/mptsas.h

@@ -53,6 +53,7 @@ struct mptsas_target_reset_event {
 	struct list_head 	list;
 	EVENT_DATA_SAS_DEVICE_STATUS_CHANGE sas_event_data;
 	u8	target_reset_issued;
+	unsigned long	 time_count;
 };
 
 enum mptsas_hotplug_action {
@@ -60,12 +61,37 @@ enum mptsas_hotplug_action {
 	MPTSAS_DEL_DEVICE,
 	MPTSAS_ADD_RAID,
 	MPTSAS_DEL_RAID,
+	MPTSAS_ADD_PHYSDISK,
+	MPTSAS_ADD_PHYSDISK_REPROBE,
+	MPTSAS_DEL_PHYSDISK,
+	MPTSAS_DEL_PHYSDISK_REPROBE,
 	MPTSAS_ADD_INACTIVE_VOLUME,
 	MPTSAS_IGNORE_EVENT,
 };
 
+struct mptsas_mapping{
+	u8			id;
+	u8			channel;
+};
+
+struct mptsas_device_info {
+	struct list_head 	list;
+	struct mptsas_mapping	os;	/* operating system mapping*/
+	struct mptsas_mapping	fw;	/* firmware mapping */
+	u64			sas_address;
+	u32			device_info; /* specific bits for devices */
+	u16			slot;		/* enclosure slot id */
+	u64			enclosure_logical_id; /*enclosure address */
+	u8			is_logical_volume; /* is this logical volume */
+	/* this belongs to volume */
+	u8			is_hidden_raid_component;
+	/* this valid when is_hidden_raid_component set */
+	u8			volume_id;
+	/* cached data for a removed device */
+	u8			is_cached;
+};
+
 struct mptsas_hotplug_event {
-	struct work_struct	work;
 	MPT_ADAPTER		*ioc;
 	enum mptsas_hotplug_action event_type;
 	u64			sas_address;
@@ -73,11 +99,18 @@ struct mptsas_hotplug_event {
 	u8			id;
 	u32			device_info;
 	u16			handle;
-	u16			parent_handle;
 	u8			phy_id;
-	u8			phys_disk_num_valid;	/* hrc (hidden raid component) */
 	u8			phys_disk_num;		/* hrc - unique index*/
-	u8			hidden_raid_component;	/* hrc - don't expose*/
+	struct scsi_device	*sdev;
+};
+
+struct fw_event_work {
+	struct list_head 	list;
+	struct delayed_work	 work;
+	MPT_ADAPTER	*ioc;
+	u32			event;
+	u8			retries;
+	u8			event_data[1];
+};
 
 struct mptsas_discovery_event {

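Editor's note: struct fw_event_work carries a trailing event_data[] payload and a delayed_work that is queued on the per-IOC fw_event_q added in mptbase.h above. A hedged sketch of how such an event could be queued — my_fw_event_handler() is a hypothetical work function; the real mptsas.c helpers may differ in detail:

/* Sketch: allocate a fw_event_work with its payload and queue it. */
static void my_fw_event_add(MPT_ADAPTER *ioc, u32 event,
			    void *event_data, int data_len)
{
	struct fw_event_work *fw_event;
	unsigned long flags;

	fw_event = kzalloc(sizeof(*fw_event) + data_len, GFP_ATOMIC);
	if (!fw_event)
		return;

	fw_event->ioc = ioc;
	fw_event->event = event;
	memcpy(fw_event->event_data, event_data, data_len);

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_add_tail(&fw_event->list, &ioc->fw_event_list);
	INIT_DELAYED_WORK(&fw_event->work, my_fw_event_handler); /* hypothetical */
	queue_delayed_work(ioc->fw_event_q, &fw_event->work, 0);
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}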
+ 271 - 432
drivers/message/fusion/mptscsih.c
(File diff suppressed because it is too large)


+ 6 - 1
drivers/message/fusion/mptscsih.h

@@ -60,6 +60,7 @@
 #define MPT_SCANDV_SELECTION_TIMEOUT	(0x00000008)
 #define MPT_SCANDV_ISSUE_SENSE		(0x00000010)
 #define MPT_SCANDV_FALLBACK		(0x00000020)
+#define MPT_SCANDV_BUSY			(0x00000040)
 
 #define MPT_SCANDV_MAX_RETRIES		(10)
 
@@ -89,6 +90,7 @@
 
 #endif
 
+
 typedef struct _internal_cmd {
 	char		*data;		/* data pointer */
 	dma_addr_t	data_dma;	/* data dma address */
@@ -112,6 +114,8 @@ extern int mptscsih_resume(struct pci_dev *pdev);
 extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func);
 extern const char * mptscsih_info(struct Scsi_Host *SChost);
 extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *));
+extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel,
+	u8 id, int lun, int ctx2abort, ulong timeout);
 extern void mptscsih_slave_destroy(struct scsi_device *device);
 extern int mptscsih_slave_configure(struct scsi_device *device);
 extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
@@ -126,7 +130,8 @@ extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pE
 extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
 extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
 extern void mptscsih_timer_expired(unsigned long data);
-extern int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout);
 extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
 extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
 extern struct device_attribute *mptscsih_host_attrs[];
+extern struct scsi_cmnd	*mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
+extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);

+ 38 - 33
drivers/message/fusion/mptspi.c

@@ -300,7 +300,7 @@ mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, u8 channel , u8 id)
 	flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE |
 		(IOCPage4Ptr->Header.PageLength + ii) * 4;
 
-	mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
+	ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
 
 	ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 		"writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n",
@@ -614,19 +614,24 @@ static void mptspi_read_parameters(struct scsi_target *starget)
 	spi_width(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WIDE) ? 1 : 0;
 }
 
-static int
+int
 mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
 {
+	MPT_ADAPTER	*ioc = hd->ioc;
 	MpiRaidActionRequest_t	*pReq;
 	MPT_FRAME_HDR		*mf;
-	MPT_ADAPTER *ioc = hd->ioc;
+	int			ret;
+	unsigned long 	 	timeleft;
+
+	mutex_lock(&ioc->internal_cmds.mutex);
 
 	/* Get and Populate a free Frame
 	 */
 	if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
-		ddvprintk(ioc, printk(MYIOC_s_WARN_FMT "_do_raid: no msg frames!\n",
-					ioc->name));
-		return -EAGAIN;
+		dfailprintk(hd->ioc, printk(MYIOC_s_WARN_FMT
+			"%s: no msg frames!\n", ioc->name, __func__));
+		ret = -EAGAIN;
+		goto out;
 	}
 	pReq = (MpiRaidActionRequest_t *)mf;
 	if (quiesce)
@@ -643,29 +648,36 @@ mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
 	pReq->Reserved2 = 0;
 	pReq->ActionDataWord = 0; /* Reserved for this action */
 
-	mpt_add_sge((char *)&pReq->ActionDataSGE,
+	ioc->add_sge((char *)&pReq->ActionDataSGE,
 		MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1);
 
 	ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n",
 			ioc->name, pReq->Action, channel, id));
 
-	hd->pLocal = NULL;
-	hd->timer.expires = jiffies + HZ*10; /* 10 second timeout */
-	hd->scandv_wait_done = 0;
-
-	/* Save cmd pointer, for resource free if timeout or
-	 * FW reload occurs
-	 */
-	hd->cmdPtr = mf;
-
-	add_timer(&hd->timer);
+	INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
 	mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
-	wait_event(hd->scandv_waitq, hd->scandv_wait_done);
+	timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done, 10*HZ);
+	if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+		ret = -ETIME;
+		dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: TIMED OUT!\n",
+		    ioc->name, __func__));
+		if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+			goto out;
+		if (!timeleft) {
+			printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
+			    ioc->name, __func__);
+			mpt_HardResetHandler(ioc, CAN_SLEEP);
+			mpt_free_msg_frame(ioc, mf);
+		}
+		goto out;
+	}
 
-	if ((hd->pLocal == NULL) || (hd->pLocal->completion != 0))
-		return -1;
+	ret = ioc->internal_cmds.completion_code;
 
-	return 0;
+ out:
+	CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
+	mutex_unlock(&ioc->internal_cmds.mutex);
+	return ret;
 }
 
 static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd,
@@ -1423,17 +1435,15 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	 * A slightly different algorithm is required for
 	 * 64bit SGEs.
 	 */
-	scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
-	if (sizeof(dma_addr_t) == sizeof(u64)) {
+	scale = ioc->req_sz/ioc->SGE_size;
+	if (ioc->sg_addr_size == sizeof(u64)) {
 		numSGE = (scale - 1) *
 		  (ioc->facts.MaxChainDepth-1) + scale +
-		  (ioc->req_sz - 60) / (sizeof(dma_addr_t) +
-		  sizeof(u32));
+		  (ioc->req_sz - 60) / ioc->SGE_size;
 	} else {
 		numSGE = 1 + (scale - 1) *
 		  (ioc->facts.MaxChainDepth-1) + scale +
-		  (ioc->req_sz - 64) / (sizeof(dma_addr_t) +
-		  sizeof(u32));
+		  (ioc->req_sz - 64) / ioc->SGE_size;
 	}
 
 	if (numSGE < sh->sg_tablesize) {
@@ -1464,9 +1474,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	/* Clear the TM flags
 	 */
-	hd->tmPending = 0;
-	hd->tmState = TM_STATE_NONE;
-	hd->resetPending = 0;
 	hd->abortSCpnt = NULL;
 
 	/* Clear the pointer used to store
@@ -1493,8 +1500,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		mpt_saf_te));
 	ioc->spi_data.noQas = 0;
 
-	init_waitqueue_head(&hd->scandv_waitq);
-	hd->scandv_wait_done = 0;
 	hd->last_queue_full = 0;
 	hd->spi_pending = 0;
 
@@ -1514,7 +1519,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	 * issue internal bus reset
 	 */
 	if (ioc->spi_data.bus_reset)
-		mptscsih_TMHandler(hd,
+		mptscsih_IssueTaskMgmt(hd,
 		    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
 		    0, 0, 0, 0, 5);
 

+ 11 - 0
drivers/net/Kconfig

@@ -2264,6 +2264,17 @@ config BNX2
 	  To compile this driver as a module, choose M here: the module
 	  will be called bnx2.  This is recommended.
 
+config CNIC
+	tristate "Broadcom CNIC support"
+	depends on BNX2
+	depends on UIO
+	help
+	  This driver supports offload features of Broadcom NetXtremeII
+	  gigabit Ethernet cards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called cnic.  This is recommended.
+
 config SPIDER_NET
 	tristate "Spider Gigabit Ethernet driver"
 	depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)

+ 1 - 0
drivers/net/Makefile

@@ -73,6 +73,7 @@ obj-$(CONFIG_STNIC) += stnic.o 8390.o
 obj-$(CONFIG_FEALNX) += fealnx.o
 obj-$(CONFIG_TIGON3) += tg3.o
 obj-$(CONFIG_BNX2) += bnx2.o
+obj-$(CONFIG_CNIC) += cnic.o
 obj-$(CONFIG_BNX2X) += bnx2x.o
 bnx2x-objs := bnx2x_main.o bnx2x_link.o
 spidernet-y += spider_net.o spider_net_ethtool.o

+ 190 - 3
drivers/net/bnx2.c

@@ -49,6 +49,10 @@
 #include <linux/firmware.h>
 #include <linux/log2.h>
 
+#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
+#define BCM_CNIC 1
+#include "cnic_if.h"
+#endif
 #include "bnx2.h"
 #include "bnx2_fw.h"
 
@@ -315,6 +319,158 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
 	spin_unlock_bh(&bp->indirect_lock);
 }
 
+#ifdef BCM_CNIC
+static int
+bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	struct drv_ctl_io *io = &info->data.io;
+
+	switch (info->cmd) {
+	case DRV_CTL_IO_WR_CMD:
+		bnx2_reg_wr_ind(bp, io->offset, io->data);
+		break;
+	case DRV_CTL_IO_RD_CMD:
+		io->data = bnx2_reg_rd_ind(bp, io->offset);
+		break;
+	case DRV_CTL_CTX_WR_CMD:
+		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
+{
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+	int sb_id;
+
+	if (bp->flags & BNX2_FLAG_USING_MSIX) {
+		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
+		bnapi->cnic_present = 0;
+		sb_id = bp->irq_nvecs;
+		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
+	} else {
+		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
+		bnapi->cnic_tag = bnapi->last_status_idx;
+		bnapi->cnic_present = 1;
+		sb_id = 0;
+		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
+	}
+
+	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
+	cp->irq_arr[0].status_blk = (void *)
+		((unsigned long) bnapi->status_blk.msi +
+		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
+	cp->irq_arr[0].status_blk_num = sb_id;
+	cp->num_irq = 1;
+}
+
+static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
+			      void *data)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	if (ops == NULL)
+		return -EINVAL;
+
+	if (cp->drv_state & CNIC_DRV_STATE_REGD)
+		return -EBUSY;
+
+	bp->cnic_data = data;
+	rcu_assign_pointer(bp->cnic_ops, ops);
+
+	cp->num_irq = 0;
+	cp->drv_state = CNIC_DRV_STATE_REGD;
+
+	bnx2_setup_cnic_irq_info(bp);
+
+	return 0;
+}
+
+static int bnx2_unregister_cnic(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	cp->drv_state = 0;
+	bnapi->cnic_present = 0;
+	rcu_assign_pointer(bp->cnic_ops, NULL);
+	synchronize_rcu();
+	return 0;
+}
+
+struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	cp->drv_owner = THIS_MODULE;
+	cp->chip_id = bp->chip_id;
+	cp->pdev = bp->pdev;
+	cp->io_base = bp->regview;
+	cp->drv_ctl = bnx2_drv_ctl;
+	cp->drv_register_cnic = bnx2_register_cnic;
+	cp->drv_unregister_cnic = bnx2_unregister_cnic;
+
+	return cp;
+}
+EXPORT_SYMBOL(bnx2_cnic_probe);
+
+static void
+bnx2_cnic_stop(struct bnx2 *bp)
+{
+	struct cnic_ops *c_ops;
+	struct cnic_ctl_info info;
+
+	rcu_read_lock();
+	c_ops = rcu_dereference(bp->cnic_ops);
+	if (c_ops) {
+		info.cmd = CNIC_CTL_STOP_CMD;
+		c_ops->cnic_ctl(bp->cnic_data, &info);
+	}
+	rcu_read_unlock();
+}
+
+static void
+bnx2_cnic_start(struct bnx2 *bp)
+{
+	struct cnic_ops *c_ops;
+	struct cnic_ctl_info info;
+
+	rcu_read_lock();
+	c_ops = rcu_dereference(bp->cnic_ops);
+	if (c_ops) {
+		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
+			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+
+			bnapi->cnic_tag = bnapi->last_status_idx;
+		}
+		info.cmd = CNIC_CTL_START_CMD;
+		c_ops->cnic_ctl(bp->cnic_data, &info);
+	}
+	rcu_read_unlock();
+}
+
+#else
+
+static void
+bnx2_cnic_stop(struct bnx2 *bp)
+{
+}
+
+static void
+bnx2_cnic_start(struct bnx2 *bp)
+{
+}
+
+#endif
+
 static int
 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
 {
@@ -488,6 +644,7 @@ bnx2_napi_enable(struct bnx2 *bp)
 static void
 bnx2_netif_stop(struct bnx2 *bp)
 {
+	bnx2_cnic_stop(bp);
 	bnx2_disable_int_sync(bp);
 	if (netif_running(bp->dev)) {
 		bnx2_napi_disable(bp);
@@ -504,6 +661,7 @@ bnx2_netif_start(struct bnx2 *bp)
 			netif_tx_wake_all_queues(bp->dev);
 			bnx2_napi_enable(bp);
 			bnx2_enable_int(bp);
+			bnx2_cnic_start(bp);
 		}
 	}
 }
@@ -3164,6 +3322,11 @@ bnx2_has_work(struct bnx2_napi *bnapi)
 	if (bnx2_has_fast_work(bnapi))
 		return 1;
 
+#ifdef BCM_CNIC
+	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
+		return 1;
+#endif
+
 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
 		return 1;
@@ -3193,6 +3356,23 @@ bnx2_chk_missed_msi(struct bnx2 *bp)
 	bp->idle_chk_status_idx = bnapi->last_status_idx;
 }
 
+#ifdef BCM_CNIC
+static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
+{
+	struct cnic_ops *c_ops;
+
+	if (!bnapi->cnic_present)
+		return;
+
+	rcu_read_lock();
+	c_ops = rcu_dereference(bp->cnic_ops);
+	if (c_ops)
+		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
+						      bnapi->status_blk.msi);
+	rcu_read_unlock();
+}
+#endif
+
 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
 {
 	struct status_block *sblk = bnapi->status_blk.msi;
@@ -3267,6 +3447,10 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
 
 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
 
+#ifdef BCM_CNIC
+		bnx2_poll_cnic(bp, bnapi);
+#endif
+
 		/* bnapi->last_status_idx is used below to tell the hw how
 		 * much work has been processed, so we must read it before
 		 * checking for more work.
@@ -4632,8 +4816,11 @@ bnx2_init_chip(struct bnx2 *bp)
 	val = REG_RD(bp, BNX2_MQ_CONFIG);
 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
-	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
-		val |= BNX2_MQ_CONFIG_HALT_DIS;
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
+		if (CHIP_REV(bp) == CHIP_REV_Ax)
+			val |= BNX2_MQ_CONFIG_HALT_DIS;
+	}
 
 	REG_WR(bp, BNX2_MQ_CONFIG, val);
 
@@ -7471,7 +7658,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
 	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
-	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
+	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
 	dev->mem_end = dev->mem_start + mem_len;
 	dev->irq = pdev->irq;
 

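Editor's note: bnx2 now exports bnx2_cnic_probe(), which hands the cnic core a cnic_eth_dev holding drv_ctl and the register/unregister hooks; the ops pointer is published with rcu_assign_pointer() and read under rcu_read_lock() from the NAPI path. A hedged sketch of the attach side, using only the interfaces visible in this diff (my_attach_cnic() itself is hypothetical and error handling is trimmed):

/* Sketch: how the cnic core could bind to bnx2 via cnic_eth_dev. */
static int my_attach_cnic(struct cnic_dev *cdev, struct net_device *netdev)
{
	struct cnic_local *cp = cdev->cnic_priv;
	struct cnic_eth_dev *ethdev = bnx2_cnic_probe(netdev);

	if (!ethdev)
		return -ENODEV;

	cp->ethdev = ethdev;
	/* bnx2 publishes the ops with rcu_assign_pointer() and starts
	 * calling cnic_handler from its NAPI poll loop. */
	return ethdev->drv_register_cnic(netdev, &cnic_bnx2_ops, cdev);
}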
+ 18 - 0
drivers/net/bnx2.h

@@ -361,6 +361,9 @@ struct l2_fhdr {
 #define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE	 (1<<28)
 
 #define BNX2_L2CTX_HOST_BDIDX				0x00000004
+#define BNX2_L2CTX_STATUSB_NUM_SHIFT			 16
+#define BNX2_L2CTX_STATUSB_NUM(sb_id)			 \
+	(((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_STATUSB_NUM_SHIFT) : 0)
 #define BNX2_L2CTX_HOST_BSEQ				0x00000008
 #define BNX2_L2CTX_NX_BSEQ				0x0000000c
 #define BNX2_L2CTX_NX_BDHADDR_HI			0x00000010
@@ -5900,6 +5903,7 @@ struct l2_fhdr {
 #define BNX2_RXP_FTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
 
 #define BNX2_RXP_SCRATCH				0x000e0000
+#define BNX2_RXP_SCRATCH_RXP_FLOOD			 0x000e0024
 #define BNX2_RXP_SCRATCH_RSS_TBL_SZ			 0x000e0038
 #define BNX2_RXP_SCRATCH_RSS_TBL			 0x000e003c
 #define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES		 128
@@ -6678,6 +6682,11 @@ struct bnx2_napi {
 	u32 			last_status_idx;
 	u32			int_num;
 
+#ifdef BCM_CNIC
+	u32			cnic_tag;
+	int			cnic_present;
+#endif
+
 	struct bnx2_rx_ring_info	rx_ring;
 	struct bnx2_tx_ring_info	tx_ring;
 };
@@ -6727,6 +6736,11 @@ struct bnx2 {
 	int		tx_ring_size;
 	u32		tx_wake_thresh;
 
+#ifdef BCM_CNIC
+	struct cnic_ops		*cnic_ops;
+	void			*cnic_data;
+#endif
+
 	/* End of fields used in the performance code paths. */
 
 	unsigned int		current_interval;
@@ -6885,6 +6899,10 @@ struct bnx2 {
 
 	u32			idle_chk_status_idx;
 
+#ifdef BCM_CNIC
+	struct cnic_eth_dev	cnic_eth_dev;
+#endif
+
 	const struct firmware	*mips_firmware;
 	const struct firmware	*rv2p_firmware;
 };

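Editor's note: the new BNX2_L2CTX_STATUSB_NUM macro encodes which status block a context uses: vector 0 maps to 0, and any MSI-X vector n > 0 maps to (n + 7) in bits 23:16. A self-contained worked example of the encoding:

#include <stdio.h>

#define BNX2_L2CTX_STATUSB_NUM_SHIFT	16
#define BNX2_L2CTX_STATUSB_NUM(sb_id)	\
	(((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_STATUSB_NUM_SHIFT) : 0)

int main(void)
{
	printf("sb_id 0 -> 0x%08x\n", BNX2_L2CTX_STATUSB_NUM(0)); /* 0x00000000 */
	printf("sb_id 1 -> 0x%08x\n", BNX2_L2CTX_STATUSB_NUM(1)); /* 0x00080000 */
	return 0;
}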
+ 2711 - 0
drivers/net/cnic.c

@@ -0,0 +1,2711 @@
+/* cnic.c: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
+ * Modified and maintained by: Michael Chan <mchan@broadcom.com>
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/uio_driver.h>
+#include <linux/in.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define BCM_VLAN 1
+#endif
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/route.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <scsi/iscsi_if.h>
+
+#include "cnic_if.h"
+#include "bnx2.h"
+#include "cnic.h"
+#include "cnic_defs.h"
+
+#define DRV_MODULE_NAME		"cnic"
+#define PFX DRV_MODULE_NAME	": "
+
+static char version[] __devinitdata =
+	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
+	      "Chen (zongxi@broadcom.com");
+MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(CNIC_MODULE_VERSION);
+
+static LIST_HEAD(cnic_dev_list);
+static DEFINE_RWLOCK(cnic_dev_lock);
+static DEFINE_MUTEX(cnic_lock);
+
+static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+
+static int cnic_service_bnx2(void *, void *);
+static int cnic_ctl(void *, struct cnic_ctl_info *);
+
+static struct cnic_ops cnic_bnx2_ops = {
+	.cnic_owner	= THIS_MODULE,
+	.cnic_handler	= cnic_service_bnx2,
+	.cnic_ctl	= cnic_ctl,
+};
+
+static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *);
+static void cnic_init_bnx2_tx_ring(struct cnic_dev *);
+static void cnic_init_bnx2_rx_ring(struct cnic_dev *);
+static int cnic_cm_set_pg(struct cnic_sock *);
+
+static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
+{
+	struct cnic_dev *dev = uinfo->priv;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (cp->uio_dev != -1)
+		return -EBUSY;
+
+	cp->uio_dev = iminor(inode);
+
+	cnic_shutdown_bnx2_rx_ring(dev);
+
+	cnic_init_bnx2_tx_ring(dev);
+	cnic_init_bnx2_rx_ring(dev);
+
+	return 0;
+}
+
+static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
+{
+	struct cnic_dev *dev = uinfo->priv;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	cp->uio_dev = -1;
+	return 0;
+}
+
+static inline void cnic_hold(struct cnic_dev *dev)
+{
+	atomic_inc(&dev->ref_count);
+}
+
+static inline void cnic_put(struct cnic_dev *dev)
+{
+	atomic_dec(&dev->ref_count);
+}
+
+static inline void csk_hold(struct cnic_sock *csk)
+{
+	atomic_inc(&csk->ref_count);
+}
+
+static inline void csk_put(struct cnic_sock *csk)
+{
+	atomic_dec(&csk->ref_count);
+}
+
+static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
+{
+	struct cnic_dev *cdev;
+
+	read_lock(&cnic_dev_lock);
+	list_for_each_entry(cdev, &cnic_dev_list, list) {
+		if (netdev == cdev->netdev) {
+			cnic_hold(cdev);
+			read_unlock(&cnic_dev_lock);
+			return cdev;
+		}
+	}
+	read_unlock(&cnic_dev_lock);
+	return NULL;
+}
+
+static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_io *io = &info.data.io;
+
+	info.cmd = DRV_CTL_CTX_WR_CMD;
+	io->cid_addr = cid_addr;
+	io->offset = off;
+	io->data = val;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_io *io = &info.data.io;
+
+	info.cmd = DRV_CTL_IO_WR_CMD;
+	io->offset = off;
+	io->data = val;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_io *io = &info.data.io;
+
+	info.cmd = DRV_CTL_IO_RD_CMD;
+	io->offset = off;
+	ethdev->drv_ctl(dev->netdev, &info);
+	return io->data;
+}
+
+static int cnic_in_use(struct cnic_sock *csk)
+{
+	return test_bit(SK_F_INUSE, &csk->flags);
+}
+
+static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+
+	info.cmd = DRV_CTL_COMPLETION_CMD;
+	info.data.comp.comp_count = count;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
+			   struct cnic_sock *csk)
+{
+	struct iscsi_path path_req;
+	char *buf = NULL;
+	u16 len = 0;
+	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
+	struct cnic_ulp_ops *ulp_ops;
+
+	if (cp->uio_dev == -1)
+		return -ENODEV;
+
+	if (csk) {
+		len = sizeof(path_req);
+		buf = (char *) &path_req;
+		memset(&path_req, 0, len);
+
+		msg_type = ISCSI_KEVENT_PATH_REQ;
+		path_req.handle = (u64) csk->l5_cid;
+		if (test_bit(SK_F_IPV6, &csk->flags)) {
+			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
+			       sizeof(struct in6_addr));
+			path_req.ip_addr_len = 16;
+		} else {
+			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
+			       sizeof(struct in_addr));
+			path_req.ip_addr_len = 4;
+		}
+		path_req.vlan_id = csk->vlan_id;
+		path_req.pmtu = csk->mtu;
+	}
+
+	rcu_read_lock();
+	ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
+	if (ulp_ops)
+		ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
+	rcu_read_unlock();
+	return 0;
+}
+
+static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
+				  char *buf, u16 len)
+{
+	int rc = -EINVAL;
+
+	switch (msg_type) {
+	case ISCSI_UEVENT_PATH_UPDATE: {
+		struct cnic_local *cp;
+		u32 l5_cid;
+		struct cnic_sock *csk;
+		struct iscsi_path *path_resp;
+
+		if (len < sizeof(*path_resp))
+			break;
+
+		path_resp = (struct iscsi_path *) buf;
+		cp = dev->cnic_priv;
+		l5_cid = (u32) path_resp->handle;
+		if (l5_cid >= MAX_CM_SK_TBL_SZ)
+			break;
+
+		csk = &cp->csk_tbl[l5_cid];
+		csk_hold(csk);
+		if (cnic_in_use(csk)) {
+			memcpy(csk->ha, path_resp->mac_addr, 6);
+			if (test_bit(SK_F_IPV6, &csk->flags))
+				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
+				       sizeof(struct in6_addr));
+			else
+				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
+				       sizeof(struct in_addr));
+			if (is_valid_ether_addr(csk->ha))
+				cnic_cm_set_pg(csk);
+		}
+		csk_put(csk);
+		rc = 0;
+	}
+	}
+
+	return rc;
+}
+
+static int cnic_offld_prep(struct cnic_sock *csk)
+{
+	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+		return 0;
+
+	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
+		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+		return 0;
+	}
+
+	return 1;
+}
+
+static int cnic_close_prep(struct cnic_sock *csk)
+{
+	clear_bit(SK_F_CONNECT_START, &csk->flags);
+	smp_mb__after_clear_bit();
+
+	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+			msleep(1);
+
+		return 1;
+	}
+	return 0;
+}
+
+static int cnic_abort_prep(struct cnic_sock *csk)
+{
+	clear_bit(SK_F_CONNECT_START, &csk->flags);
+	smp_mb__after_clear_bit();
+
+	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+		msleep(1);
+
+	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+		return 1;
+	}
+
+	return 0;
+}
+
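+/* Register a ULP (e.g. iSCSI) ops table for the given type and call its
+ * cnic_init() on every CNIC device already in the global list.
+ */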
+int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
+{
+	struct cnic_dev *dev;
+
+	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+		printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
+		       ulp_type);
+		return -EINVAL;
+	}
+	mutex_lock(&cnic_lock);
+	if (cnic_ulp_tbl[ulp_type]) {
+		printk(KERN_ERR PFX "cnic_register_driver: Type %d has already "
+				    "been registered\n", ulp_type);
+		mutex_unlock(&cnic_lock);
+		return -EBUSY;
+	}
+
+	read_lock(&cnic_dev_lock);
+	list_for_each_entry(dev, &cnic_dev_list, list) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
+	}
+	read_unlock(&cnic_dev_lock);
+
+	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
+	mutex_unlock(&cnic_lock);
+
+	/* Prevent race conditions with netdev_event */
+	rtnl_lock();
+	read_lock(&cnic_dev_lock);
+	list_for_each_entry(dev, &cnic_dev_list, list) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
+			ulp_ops->cnic_init(dev);
+	}
+	read_unlock(&cnic_dev_lock);
+	rtnl_unlock();
+
+	return 0;
+}
+
+int cnic_unregister_driver(int ulp_type)
+{
+	struct cnic_dev *dev;
+
+	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+		printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
+		       ulp_type);
+		return -EINVAL;
+	}
+	mutex_lock(&cnic_lock);
+	if (!cnic_ulp_tbl[ulp_type]) {
+		printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
+				    "been registered\n", ulp_type);
+		goto out_unlock;
+	}
+	read_lock(&cnic_dev_lock);
+	list_for_each_entry(dev, &cnic_dev_list, list) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+			printk(KERN_ERR PFX "cnic_unregister_driver: Type %d "
+			       "still has devices registered\n", ulp_type);
+			read_unlock(&cnic_dev_lock);
+			goto out_unlock;
+		}
+	}
+	read_unlock(&cnic_dev_lock);
+
+	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
+
+	mutex_unlock(&cnic_lock);
+	synchronize_rcu();
+	return 0;
+
+out_unlock:
+	mutex_unlock(&cnic_lock);
+	return -EINVAL;
+}
+
+static int cnic_start_hw(struct cnic_dev *);
+static void cnic_stop_hw(struct cnic_dev *);
+
+static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
+				void *ulp_ctx)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_ulp_ops *ulp_ops;
+
+	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+		printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
+		       ulp_type);
+		return -EINVAL;
+	}
+	mutex_lock(&cnic_lock);
+	if (cnic_ulp_tbl[ulp_type] == NULL) {
+		printk(KERN_ERR PFX "cnic_register_device: Driver with type %d "
+				    "has not been registered\n", ulp_type);
+		mutex_unlock(&cnic_lock);
+		return -EAGAIN;
+	}
+	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+		printk(KERN_ERR PFX "cnic_register_device: Type %d has already "
+		       "been registered to this device\n", ulp_type);
+		mutex_unlock(&cnic_lock);
+		return -EBUSY;
+	}
+
+	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
+	cp->ulp_handle[ulp_type] = ulp_ctx;
+	ulp_ops = cnic_ulp_tbl[ulp_type];
+	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
+	cnic_hold(dev);
+
+	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
+			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
+
+	mutex_unlock(&cnic_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(cnic_register_driver);
+
+static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+		printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
+		       ulp_type);
+		return -EINVAL;
+	}
+	mutex_lock(&cnic_lock);
+	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
+		cnic_put(dev);
+	} else {
+		printk(KERN_ERR PFX "cnic_unregister_device: device not "
+		       "registered to this ulp type %d\n", ulp_type);
+		mutex_unlock(&cnic_lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&cnic_lock);
+
+	synchronize_rcu();
+
+	return 0;
+}
+EXPORT_SYMBOL(cnic_unregister_driver);
+
+static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
+{
+	id_tbl->start = start_id;
+	id_tbl->max = size;
+	id_tbl->next = 0;
+	spin_lock_init(&id_tbl->lock);
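+	/* The id table is a bitmap with one bit per id, rounded up to
+	 * whole 32-bit words (DIV_ROUND_UP(size, 32) words of 4 bytes).
+	 */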
+	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
+	if (!id_tbl->table)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
+{
+	kfree(id_tbl->table);
+	id_tbl->table = NULL;
+}
+
+static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
+{
+	int ret = -1;
+
+	id -= id_tbl->start;
+	if (id >= id_tbl->max)
+		return ret;
+
+	spin_lock(&id_tbl->lock);
+	if (!test_bit(id, id_tbl->table)) {
+		set_bit(id, id_tbl->table);
+		ret = 0;
+	}
+	spin_unlock(&id_tbl->lock);
+	return ret;
+}
+
+/* Returns -1 if not successful */
+static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
+{
+	u32 id;
+
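+	/* Round-robin search: scan from the last allocation point to the
+	 * end of the bitmap, then wrap and scan from the start.  Note that
+	 * the "& (id_tbl->max - 1)" wrap of ->next below assumes that max
+	 * is a power of 2.
+	 */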
+	spin_lock(&id_tbl->lock);
+	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
+	if (id >= id_tbl->max) {
+		id = -1;
+		if (id_tbl->next != 0) {
+			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
+			if (id >= id_tbl->next)
+				id = -1;
+		}
+	}
+
+	if (id < id_tbl->max) {
+		set_bit(id, id_tbl->table);
+		id_tbl->next = (id + 1) & (id_tbl->max - 1);
+		id += id_tbl->start;
+	}
+
+	spin_unlock(&id_tbl->lock);
+
+	return id;
+}
+
+static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
+{
+	if (id == -1)
+		return;
+
+	id -= id_tbl->start;
+	if (id >= id_tbl->max)
+		return;
+
+	clear_bit(id, id_tbl->table);
+}
+
+static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
+{
+	int i;
+
+	if (!dma->pg_arr)
+		return;
+
+	for (i = 0; i < dma->num_pages; i++) {
+		if (dma->pg_arr[i]) {
+			pci_free_consistent(dev->pcidev, BCM_PAGE_SIZE,
+					    dma->pg_arr[i], dma->pg_map_arr[i]);
+			dma->pg_arr[i] = NULL;
+		}
+	}
+	if (dma->pgtbl) {
+		pci_free_consistent(dev->pcidev, dma->pgtbl_size,
+				    dma->pgtbl, dma->pgtbl_map);
+		dma->pgtbl = NULL;
+	}
+	kfree(dma->pg_arr);
+	dma->pg_arr = NULL;
+	dma->num_pages = 0;
+}
+
+static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
+{
+	int i;
+	u32 *page_table = dma->pgtbl;
+
+	for (i = 0; i < dma->num_pages; i++) {
+		/* Each entry needs to be in big-endian format. */
+		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+		page_table++;
+		*page_table = (u32) dma->pg_map_arr[i];
+		page_table++;
+	}
+}
+
+static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
+			  int pages, int use_pg_tbl)
+{
+	int i, size;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
+	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
+	if (dma->pg_arr == NULL)
+		return -ENOMEM;
+
+	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
+	dma->num_pages = pages;
+
+	for (i = 0; i < pages; i++) {
+		dma->pg_arr[i] = pci_alloc_consistent(dev->pcidev,
+						      BCM_PAGE_SIZE,
+						      &dma->pg_map_arr[i]);
+		if (dma->pg_arr[i] == NULL)
+			goto error;
+	}
+	if (!use_pg_tbl)
+		return 0;
+
+	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
+			  ~(BCM_PAGE_SIZE - 1);
+	dma->pgtbl = pci_alloc_consistent(dev->pcidev, dma->pgtbl_size,
+					  &dma->pgtbl_map);
+	if (dma->pgtbl == NULL)
+		goto error;
+
+	cp->setup_pgtbl(dev, dma);
+
+	return 0;
+
+error:
+	cnic_free_dma(dev, dma);
+	return -ENOMEM;
+}
+
+static void cnic_free_resc(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i = 0;
+
+	if (cp->cnic_uinfo) {
+		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
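+		/* Give the userspace client up to ~1.5 seconds to close
+		 * the UIO device before unregistering it.
+		 */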
+		while (cp->uio_dev != -1 && i < 15) {
+			msleep(100);
+			i++;
+		}
+		uio_unregister_device(cp->cnic_uinfo);
+		kfree(cp->cnic_uinfo);
+		cp->cnic_uinfo = NULL;
+	}
+
+	if (cp->l2_buf) {
+		pci_free_consistent(dev->pcidev, cp->l2_buf_size,
+				    cp->l2_buf, cp->l2_buf_map);
+		cp->l2_buf = NULL;
+	}
+
+	if (cp->l2_ring) {
+		pci_free_consistent(dev->pcidev, cp->l2_ring_size,
+				    cp->l2_ring, cp->l2_ring_map);
+		cp->l2_ring = NULL;
+	}
+
+	for (i = 0; i < cp->ctx_blks; i++) {
+		if (cp->ctx_arr[i].ctx) {
+			pci_free_consistent(dev->pcidev, cp->ctx_blk_size,
+					    cp->ctx_arr[i].ctx,
+					    cp->ctx_arr[i].mapping);
+			cp->ctx_arr[i].ctx = NULL;
+		}
+	}
+	kfree(cp->ctx_arr);
+	cp->ctx_arr = NULL;
+	cp->ctx_blks = 0;
+
+	cnic_free_dma(dev, &cp->gbl_buf_info);
+	cnic_free_dma(dev, &cp->conn_buf_info);
+	cnic_free_dma(dev, &cp->kwq_info);
+	cnic_free_dma(dev, &cp->kcq_info);
+	kfree(cp->iscsi_tbl);
+	cp->iscsi_tbl = NULL;
+	kfree(cp->ctx_tbl);
+	cp->ctx_tbl = NULL;
+
+	cnic_free_id_tbl(&cp->cid_tbl);
+}
+
+static int cnic_alloc_context(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
+		int i, k, arr_size;
+
+		cp->ctx_blk_size = BCM_PAGE_SIZE;
+		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
+		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
+			   sizeof(struct cnic_ctx);
+		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
+		if (cp->ctx_arr == NULL)
+			return -ENOMEM;
+
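+		/* The 5709 keeps its context memory in host RAM.  Read
+		 * the CID ranges used for the PG and iSCSI contexts from
+		 * the chip and allocate one host page per context block.
+		 */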
+		k = 0;
+		for (i = 0; i < 2; i++) {
+			u32 j, reg, off, lo, hi;
+
+			if (i == 0)
+				off = BNX2_PG_CTX_MAP;
+			else
+				off = BNX2_ISCSI_CTX_MAP;
+
+			reg = cnic_reg_rd_ind(dev, off);
+			lo = reg >> 16;
+			hi = reg & 0xffff;
+			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
+				cp->ctx_arr[k].cid = j;
+		}
+
+		cp->ctx_blks = k;
+		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
+			cp->ctx_blks = 0;
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < cp->ctx_blks; i++) {
+			cp->ctx_arr[i].ctx =
+				pci_alloc_consistent(dev->pcidev, BCM_PAGE_SIZE,
+						     &cp->ctx_arr[i].mapping);
+			if (cp->ctx_arr[i].ctx == NULL)
+				return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct uio_info *uinfo;
+	int ret;
+
+	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
+	if (ret)
+		goto error;
+	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
+
+	ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
+	if (ret)
+		goto error;
+	cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
+
+	ret = cnic_alloc_context(dev);
+	if (ret)
+		goto error;
+
+	cp->l2_ring_size = 2 * BCM_PAGE_SIZE;
+	cp->l2_ring = pci_alloc_consistent(dev->pcidev, cp->l2_ring_size,
+					   &cp->l2_ring_map);
+	if (!cp->l2_ring)
+		goto error;
+
+	cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
+	cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
+	cp->l2_buf = pci_alloc_consistent(dev->pcidev, cp->l2_buf_size,
+					   &cp->l2_buf_map);
+	if (!cp->l2_buf)
+		goto error;
+
+	uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
+	if (!uinfo)
+		goto error;
+
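+	/* Export four memory regions to userspace through UIO:
+	 * [0] the device register window, [1] the status block,
+	 * [2] the L2 rings and [3] the L2 buffers.
+	 */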
+	uinfo->mem[0].addr = dev->netdev->base_addr;
+	uinfo->mem[0].internal_addr = dev->regview;
+	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
+	uinfo->mem[0].memtype = UIO_MEM_PHYS;
+
+	uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
+	if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
+		uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
+	else
+		uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
+	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
+
+	uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
+	uinfo->mem[2].size = cp->l2_ring_size;
+	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
+
+	uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
+	uinfo->mem[3].size = cp->l2_buf_size;
+	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
+
+	uinfo->name = "bnx2_cnic";
+	uinfo->version = CNIC_MODULE_VERSION;
+	uinfo->irq = UIO_IRQ_CUSTOM;
+
+	uinfo->open = cnic_uio_open;
+	uinfo->release = cnic_uio_close;
+
+	uinfo->priv = dev;
+
+	ret = uio_register_device(&dev->pcidev->dev, uinfo);
+	if (ret) {
+		kfree(uinfo);
+		goto error;
+	}
+
+	cp->cnic_uinfo = uinfo;
+
+	return 0;
+
+error:
+	cnic_free_resc(dev);
+	return ret;
+}
+
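+/* Free slots in the kernel work queue.  The producer and consumer
+ * indexes are free-running; their difference masked by max_kwq_idx is
+ * the number of entries currently in use.
+ */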
+static inline u32 cnic_kwq_avail(struct cnic_local *cp)
+{
+	return cp->max_kwq_idx -
+		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
+}
+
+static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
+				  u32 num_wqes)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct kwqe *prod_qe;
+	u16 prod, sw_prod, i;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;		/* bnx2 is down */
+
+	spin_lock_bh(&cp->cnic_ulp_lock);
+	if (num_wqes > cnic_kwq_avail(cp) &&
+	    !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) {
+		spin_unlock_bh(&cp->cnic_ulp_lock);
+		return -EAGAIN;
+	}
+
+	cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT;
+
+	prod = cp->kwq_prod_idx;
+	sw_prod = prod & MAX_KWQ_IDX;
+	for (i = 0; i < num_wqes; i++) {
+		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
+		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
+		prod++;
+		sw_prod = prod & MAX_KWQ_IDX;
+	}
+	cp->kwq_prod_idx = prod;
+
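+	/* Ring the doorbell: writing the new producer index tells the
+	 * chip to process the kwqes just copied into the ring.
+	 */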
+	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
+
+	spin_unlock_bh(&cp->cnic_ulp_lock);
+	return 0;
+}
+
+static void service_kcqes(struct cnic_dev *dev, int num_cqes)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i, j;
+
+	i = 0;
+	j = 1;
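+	/* Group runs of consecutive KCQEs that belong to the same
+	 * protocol layer and deliver each run to its ULP in one upcall.
+	 */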
+	while (num_cqes) {
+		struct cnic_ulp_ops *ulp_ops;
+		int ulp_type;
+		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
+		u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
+
+		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
+			cnic_kwq_completion(dev, 1);
+
+		while (j < num_cqes) {
+			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
+
+			if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
+				break;
+
+			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
+				cnic_kwq_completion(dev, 1);
+			j++;
+		}
+
+		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
+			ulp_type = CNIC_ULP_RDMA;
+		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
+			ulp_type = CNIC_ULP_ISCSI;
+		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
+			ulp_type = CNIC_ULP_L4;
+		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
+			goto end;
+		else {
+			printk(KERN_ERR PFX "%s: Unknown type of KCQE(0x%x)\n",
+			       dev->netdev->name, kcqe_op_flag);
+			goto end;
+		}
+
+		rcu_read_lock();
+		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
+		if (likely(ulp_ops)) {
+			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
+						  cp->completed_kcq + i, j);
+		}
+		rcu_read_unlock();
+end:
+		num_cqes -= j;
+		i += j;
+		j = 1;
+	}
+}
+
+static u16 cnic_bnx2_next_idx(u16 idx)
+{
+	return idx + 1;
+}
+
+static u16 cnic_bnx2_hw_idx(u16 idx)
+{
+	return idx;
+}
+
+static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u16 i, ri, last;
+	struct kcqe *kcqe;
+	int kcqe_cnt = 0, last_cnt = 0;
+
+	i = ri = last = *sw_prod;
+	ri &= MAX_KCQ_IDX;
+
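+	/* Gather completed KCQEs up to the hardware producer index.  An
+	 * entry with KCQE_FLAGS_NEXT set is continued by the next entry,
+	 * so only KCQEs that end a chain advance the consumer index.
+	 */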
+	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
+		kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
+		cp->completed_kcq[kcqe_cnt++] = kcqe;
+		i = cp->next_idx(i);
+		ri = i & MAX_KCQ_IDX;
+		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
+			last_cnt = kcqe_cnt;
+			last = i;
+		}
+	}
+
+	*sw_prod = last;
+	return last_cnt;
+}
+
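+/* Notify the userspace UIO handler whenever the L2 rx or tx consumer
+ * index has moved since the last check.
+ */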
+static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp)
+{
+	u16 rx_cons = *cp->rx_cons_ptr;
+	u16 tx_cons = *cp->tx_cons_ptr;
+
+	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
+		cp->tx_cons = tx_cons;
+		cp->rx_cons = rx_cons;
+		uio_event_notify(cp->cnic_uinfo);
+	}
+}
+
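+/* Main event handler, invoked by the bnx2 driver with the current
+ * status block; drains new KCQEs and returns the status index that
+ * processing has caught up to.
+ */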
+static int cnic_service_bnx2(void *data, void *status_blk)
+{
+	struct cnic_dev *dev = data;
+	struct status_block *sblk = status_blk;
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 status_idx = sblk->status_idx;
+	u16 hw_prod, sw_prod;
+	int kcqe_cnt;
+
+	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
+		return status_idx;
+
+	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
+
+	hw_prod = sblk->status_completion_producer_index;
+	sw_prod = cp->kcq_prod_idx;
+	while (sw_prod != hw_prod) {
+		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
+		if (kcqe_cnt == 0)
+			goto done;
+
+		service_kcqes(dev, kcqe_cnt);
+
+		/* Tell compiler that status_blk fields can change. */
+		barrier();
+		if (status_idx != sblk->status_idx) {
+			status_idx = sblk->status_idx;
+			cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
+			hw_prod = sblk->status_completion_producer_index;
+		} else
+			break;
+	}
+
+done:
+	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
+
+	cp->kcq_prod_idx = sw_prod;
+
+	cnic_chk_bnx2_pkt_rings(cp);
+	return status_idx;
+}
+
+static void cnic_service_bnx2_msix(unsigned long data)
+{
+	struct cnic_dev *dev = (struct cnic_dev *) data;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct status_block_msix *status_blk = cp->bnx2_status_blk;
+	u32 status_idx = status_blk->status_idx;
+	u16 hw_prod, sw_prod;
+	int kcqe_cnt;
+
+	cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
+
+	hw_prod = status_blk->status_completion_producer_index;
+	sw_prod = cp->kcq_prod_idx;
+	while (sw_prod != hw_prod) {
+		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
+		if (kcqe_cnt == 0)
+			goto done;
+
+		service_kcqes(dev, kcqe_cnt);
+
+		/* Tell compiler that status_blk fields can change. */
+		barrier();
+		if (status_idx != status_blk->status_idx) {
+			status_idx = status_blk->status_idx;
+			cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
+			hw_prod = status_blk->status_completion_producer_index;
+		} else
+			break;
+	}
+
+done:
+	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
+	cp->kcq_prod_idx = sw_prod;
+
+	cnic_chk_bnx2_pkt_rings(cp);
+
+	cp->last_status_idx = status_idx;
+	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
+}
+
+static irqreturn_t cnic_irq(int irq, void *dev_instance)
+{
+	struct cnic_dev *dev = dev_instance;
+	struct cnic_local *cp = dev->cnic_priv;
+	u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
+
+	if (cp->ack_int)
+		cp->ack_int(dev);
+
+	prefetch(cp->status_blk);
+	prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
+
+	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
+		tasklet_schedule(&cp->cnic_irq_task);
+
+	return IRQ_HANDLED;
+}
+
+static void cnic_ulp_stop(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int if_type;
+
+	rcu_read_lock();
+	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+		struct cnic_ulp_ops *ulp_ops;
+
+		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
+		if (!ulp_ops)
+			continue;
+
+		if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
+			ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
+	}
+	rcu_read_unlock();
+}
+
+static void cnic_ulp_start(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int if_type;
+
+	rcu_read_lock();
+	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+		struct cnic_ulp_ops *ulp_ops;
+
+		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
+		if (!ulp_ops || !ulp_ops->cnic_start)
+			continue;
+
+		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
+			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
+	}
+	rcu_read_unlock();
+}
+
+static int cnic_ctl(void *data, struct cnic_ctl_info *info)
+{
+	struct cnic_dev *dev = data;
+
+	switch (info->cmd) {
+	case CNIC_CTL_STOP_CMD:
+		cnic_hold(dev);
+		mutex_lock(&cnic_lock);
+
+		cnic_ulp_stop(dev);
+		cnic_stop_hw(dev);
+
+		mutex_unlock(&cnic_lock);
+		cnic_put(dev);
+		break;
+	case CNIC_CTL_START_CMD:
+		cnic_hold(dev);
+		mutex_lock(&cnic_lock);
+
+		if (!cnic_start_hw(dev))
+			cnic_ulp_start(dev);
+
+		mutex_unlock(&cnic_lock);
+		cnic_put(dev);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void cnic_ulp_init(struct cnic_dev *dev)
+{
+	int i;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	rcu_read_lock();
+	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
+		struct cnic_ulp_ops *ulp_ops;
+
+		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
+		if (!ulp_ops || !ulp_ops->cnic_init)
+			continue;
+
+		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
+			ulp_ops->cnic_init(dev);
+
+	}
+	rcu_read_unlock();
+}
+
+static void cnic_ulp_exit(struct cnic_dev *dev)
+{
+	int i;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	rcu_read_lock();
+	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
+		struct cnic_ulp_ops *ulp_ops;
+
+		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
+		if (!ulp_ops || !ulp_ops->cnic_exit)
+			continue;
+
+		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
+			ulp_ops->cnic_exit(dev);
+
+	}
+	rcu_read_unlock();
+}
+
+static int cnic_cm_offload_pg(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_offload_pg *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
+	l4kwqe->flags =
+		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
+	l4kwqe->l2hdr_nbytes = ETH_HLEN;
+
+	l4kwqe->da0 = csk->ha[0];
+	l4kwqe->da1 = csk->ha[1];
+	l4kwqe->da2 = csk->ha[2];
+	l4kwqe->da3 = csk->ha[3];
+	l4kwqe->da4 = csk->ha[4];
+	l4kwqe->da5 = csk->ha[5];
+
+	l4kwqe->sa0 = dev->mac_addr[0];
+	l4kwqe->sa1 = dev->mac_addr[1];
+	l4kwqe->sa2 = dev->mac_addr[2];
+	l4kwqe->sa3 = dev->mac_addr[3];
+	l4kwqe->sa4 = dev->mac_addr[4];
+	l4kwqe->sa5 = dev->mac_addr[5];
+
+	l4kwqe->etype = ETH_P_IP;
+	l4kwqe->ipid_count = DEF_IPID_COUNT;
+	l4kwqe->host_opaque = csk->l5_cid;
+
+	if (csk->vlan_id) {
+		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
+		l4kwqe->vlan_tag = csk->vlan_id;
+		l4kwqe->l2hdr_nbytes += 4;
+	}
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_update_pg(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_update_pg *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
+	l4kwqe->flags =
+		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
+	l4kwqe->pg_cid = csk->pg_cid;
+
+	l4kwqe->da0 = csk->ha[0];
+	l4kwqe->da1 = csk->ha[1];
+	l4kwqe->da2 = csk->ha[2];
+	l4kwqe->da3 = csk->ha[3];
+	l4kwqe->da4 = csk->ha[4];
+	l4kwqe->da5 = csk->ha[5];
+
+	l4kwqe->pg_host_opaque = csk->l5_cid;
+	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_upload_pg(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_upload *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
+	l4kwqe->flags =
+		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
+	l4kwqe->cid = csk->pg_cid;
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_conn_req(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_connect_req1 *l4kwqe1;
+	struct l4_kwq_connect_req2 *l4kwqe2;
+	struct l4_kwq_connect_req3 *l4kwqe3;
+	struct kwqe *wqes[3];
+	u8 tcp_flags = 0;
+	int num_wqes = 2;
+
+	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
+	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
+	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
+	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
+	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
+	memset(l4kwqe3, 0, sizeof(*l4kwqe3));
+
+	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
+	l4kwqe3->flags =
+		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
+	l4kwqe3->ka_timeout = csk->ka_timeout;
+	l4kwqe3->ka_interval = csk->ka_interval;
+	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
+	l4kwqe3->tos = csk->tos;
+	l4kwqe3->ttl = csk->ttl;
+	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
+	l4kwqe3->pmtu = csk->mtu;
+	l4kwqe3->rcv_buf = csk->rcv_buf;
+	l4kwqe3->snd_buf = csk->snd_buf;
+	l4kwqe3->seed = csk->seed;
+
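+	/* An IPv4 connect takes two kwqes (req1 + req3); IPv6 adds a
+	 * third (req2) carrying the upper 96 bits of the two addresses.
+	 */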
+	wqes[0] = (struct kwqe *) l4kwqe1;
+	if (test_bit(SK_F_IPV6, &csk->flags)) {
+		wqes[1] = (struct kwqe *) l4kwqe2;
+		wqes[2] = (struct kwqe *) l4kwqe3;
+		num_wqes = 3;
+
+		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
+		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
+		l4kwqe2->flags =
+			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
+			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
+		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
+		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
+		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
+		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
+		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
+		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
+		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
+			       sizeof(struct tcphdr);
+	} else {
+		wqes[1] = (struct kwqe *) l4kwqe3;
+		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
+			       sizeof(struct tcphdr);
+	}
+
+	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
+	l4kwqe1->flags =
+		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
+		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
+	l4kwqe1->cid = csk->cid;
+	l4kwqe1->pg_cid = csk->pg_cid;
+	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
+	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
+	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
+	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
+	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
+	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
+	if (csk->tcp_flags & SK_TCP_NAGLE)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
+	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
+	if (csk->tcp_flags & SK_TCP_SACK)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
+	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
+
+	l4kwqe1->tcp_flags = tcp_flags;
+
+	return dev->submit_kwqes(dev, wqes, num_wqes);
+}
+
+static int cnic_cm_close_req(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_close_req *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
+	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
+	l4kwqe->cid = csk->cid;
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_abort_req(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_reset_req *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
+	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
+	l4kwqe->cid = csk->cid;
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
+			  u32 l5_cid, struct cnic_sock **csk, void *context)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_sock *csk1;
+
+	if (l5_cid >= MAX_CM_SK_TBL_SZ)
+		return -EINVAL;
+
+	csk1 = &cp->csk_tbl[l5_cid];
+	if (atomic_read(&csk1->ref_count))
+		return -EAGAIN;
+
+	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
+		return -EBUSY;
+
+	csk1->dev = dev;
+	csk1->cid = cid;
+	csk1->l5_cid = l5_cid;
+	csk1->ulp_type = ulp_type;
+	csk1->context = context;
+
+	csk1->ka_timeout = DEF_KA_TIMEOUT;
+	csk1->ka_interval = DEF_KA_INTERVAL;
+	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
+	csk1->tos = DEF_TOS;
+	csk1->ttl = DEF_TTL;
+	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
+	csk1->rcv_buf = DEF_RCV_BUF;
+	csk1->snd_buf = DEF_SND_BUF;
+	csk1->seed = DEF_SEED;
+
+	*csk = csk1;
+	return 0;
+}
+
+static void cnic_cm_cleanup(struct cnic_sock *csk)
+{
+	if (csk->src_port) {
+		struct cnic_dev *dev = csk->dev;
+		struct cnic_local *cp = dev->cnic_priv;
+
+		cnic_free_id(&cp->csk_port_tbl, csk->src_port);
+		csk->src_port = 0;
+	}
+}
+
+static void cnic_close_conn(struct cnic_sock *csk)
+{
+	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
+		cnic_cm_upload_pg(csk);
+		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
+	}
+	cnic_cm_cleanup(csk);
+}
+
+static int cnic_cm_destroy(struct cnic_sock *csk)
+{
+	if (!cnic_in_use(csk))
+		return -EINVAL;
+
+	csk_hold(csk);
+	clear_bit(SK_F_INUSE, &csk->flags);
+	smp_mb__after_clear_bit();
+	while (atomic_read(&csk->ref_count) != 1)
+		msleep(1);
+	cnic_cm_cleanup(csk);
+
+	csk->flags = 0;
+	csk_put(csk);
+	return 0;
+}
+
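+/* If the net_device is a VLAN device, return its VLAN id and the
+ * underlying real device; otherwise return 0 and the device itself.
+ */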
+static inline u16 cnic_get_vlan(struct net_device *dev,
+				struct net_device **vlan_dev)
+{
+	if (dev->priv_flags & IFF_802_1Q_VLAN) {
+		*vlan_dev = vlan_dev_real_dev(dev);
+		return vlan_dev_vlan_id(dev);
+	}
+	*vlan_dev = dev;
+	return 0;
+}
+
+static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
+			     struct dst_entry **dst)
+{
+	struct flowi fl;
+	int err;
+	struct rtable *rt;
+
+	memset(&fl, 0, sizeof(fl));
+	fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
+
+	err = ip_route_output_key(&init_net, &rt, &fl);
+	if (!err)
+		*dst = &rt->u.dst;
+	return err;
+}
+
+static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
+			     struct dst_entry **dst)
+{
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	struct flowi fl;
+
+	memset(&fl, 0, sizeof(fl));
+	ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
+	if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
+		fl.oif = dst_addr->sin6_scope_id;
+
+	*dst = ip6_route_output(&init_net, NULL, &fl);
+	if (*dst)
+		return 0;
+#endif
+
+	return -ENETUNREACH;
+}
+
+static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
+					   int ulp_type)
+{
+	struct cnic_dev *dev = NULL;
+	struct dst_entry *dst;
+	struct net_device *netdev = NULL;
+	int err = -ENETUNREACH;
+
+	if (dst_addr->sin_family == AF_INET)
+		err = cnic_get_v4_route(dst_addr, &dst);
+	else if (dst_addr->sin_family == AF_INET6) {
+		struct sockaddr_in6 *dst_addr6 =
+			(struct sockaddr_in6 *) dst_addr;
+
+		err = cnic_get_v6_route(dst_addr6, &dst);
+	} else
+		return NULL;
+
+	if (err)
+		return NULL;
+
+	if (!dst->dev)
+		goto done;
+
+	cnic_get_vlan(dst->dev, &netdev);
+
+	dev = cnic_from_netdev(netdev);
+
+done:
+	dst_release(dst);
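+	/* Drop the reference taken by cnic_from_netdev(); the caller
+	 * only needs to identify the device, not to hold it.
+	 */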
+	if (dev)
+		cnic_put(dev);
+	return dev;
+}
+
+static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
+}
+
+static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct cnic_local *cp = dev->cnic_priv;
+	int is_v6, err, rc = -ENETUNREACH;
+	struct dst_entry *dst;
+	struct net_device *realdev;
+	u32 local_port;
+
+	if (saddr->local.v6.sin6_family == AF_INET6 &&
+	    saddr->remote.v6.sin6_family == AF_INET6)
+		is_v6 = 1;
+	else if (saddr->local.v4.sin_family == AF_INET &&
+		 saddr->remote.v4.sin_family == AF_INET)
+		is_v6 = 0;
+	else
+		return -EINVAL;
+
+	clear_bit(SK_F_IPV6, &csk->flags);
+
+	if (is_v6) {
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+		set_bit(SK_F_IPV6, &csk->flags);
+		err = cnic_get_v6_route(&saddr->remote.v6, &dst);
+		if (err)
+			return err;
+
+		if (!dst || dst->error || !dst->dev)
+			goto err_out;
+
+		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
+		       sizeof(struct in6_addr));
+		csk->dst_port = saddr->remote.v6.sin6_port;
+		local_port = saddr->local.v6.sin6_port;
+#else
+		return rc;
+#endif
+
+	} else {
+		err = cnic_get_v4_route(&saddr->remote.v4, &dst);
+		if (err)
+			return err;
+
+		if (!dst || dst->error || !dst->dev)
+			goto err_out;
+
+		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
+		csk->dst_port = saddr->remote.v4.sin_port;
+		local_port = saddr->local.v4.sin_port;
+	}
+
+	csk->vlan_id = cnic_get_vlan(dst->dev, &realdev);
+	if (realdev != dev->netdev)
+		goto err_out;
+
+	if (local_port >= CNIC_LOCAL_PORT_MIN &&
+	    local_port < CNIC_LOCAL_PORT_MAX) {
+		if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
+			local_port = 0;
+	} else
+		local_port = 0;
+
+	if (!local_port) {
+		local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
+		if (local_port == -1) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+	}
+	csk->src_port = local_port;
+
+	csk->mtu = dst_mtu(dst);
+	rc = 0;
+
+err_out:
+	dst_release(dst);
+	return rc;
+}
+
+static void cnic_init_csk_state(struct cnic_sock *csk)
+{
+	csk->state = 0;
+	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+	clear_bit(SK_F_CLOSING, &csk->flags);
+}
+
+static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+	int err = 0;
+
+	if (!cnic_in_use(csk))
+		return -EINVAL;
+
+	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
+		return -EINVAL;
+
+	cnic_init_csk_state(csk);
+
+	err = cnic_get_route(csk, saddr);
+	if (err)
+		goto err_out;
+
+	err = cnic_resolve_addr(csk, saddr);
+	if (!err)
+		return 0;
+
+err_out:
+	clear_bit(SK_F_CONNECT_START, &csk->flags);
+	return err;
+}
+
+static int cnic_cm_abort(struct cnic_sock *csk)
+{
+	struct cnic_local *cp = csk->dev->cnic_priv;
+	u32 opcode;
+
+	if (!cnic_in_use(csk))
+		return -EINVAL;
+
+	if (cnic_abort_prep(csk))
+		return cnic_cm_abort_req(csk);
+
+	/* Getting here means that we haven't started connect, or
+	 * connect was not successful.
+	 */
+
+	csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+		opcode = csk->state;
+	else
+		opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
+	cp->close_conn(csk, opcode);
+
+	return 0;
+}
+
+static int cnic_cm_close(struct cnic_sock *csk)
+{
+	if (!cnic_in_use(csk))
+		return -EINVAL;
+
+	if (cnic_close_prep(csk)) {
+		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
+		return cnic_cm_close_req(csk);
+	}
+	return 0;
+}
+
+static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
+			   u8 opcode)
+{
+	struct cnic_ulp_ops *ulp_ops;
+	int ulp_type = csk->ulp_type;
+
+	rcu_read_lock();
+	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
+	if (ulp_ops) {
+		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
+			ulp_ops->cm_connect_complete(csk);
+		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
+			ulp_ops->cm_close_complete(csk);
+		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
+			ulp_ops->cm_remote_abort(csk);
+		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
+			ulp_ops->cm_abort_complete(csk);
+		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
+			ulp_ops->cm_remote_close(csk);
+	}
+	rcu_read_unlock();
+}
+
+static int cnic_cm_set_pg(struct cnic_sock *csk)
+{
+	if (cnic_offld_prep(csk)) {
+		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+			cnic_cm_update_pg(csk);
+		else
+			cnic_cm_offload_pg(csk);
+	}
+	return 0;
+}
+
+static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 l5_cid = kcqe->pg_host_opaque;
+	u8 opcode = kcqe->op_code;
+	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
+
+	csk_hold(csk);
+	if (!cnic_in_use(csk))
+		goto done;
+
+	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
+		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+		goto done;
+	}
+	csk->pg_cid = kcqe->pg_cid;
+	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
+	cnic_cm_conn_req(csk);
+
+done:
+	csk_put(csk);
+}
+
+static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
+	u8 opcode = l4kcqe->op_code;
+	u32 l5_cid;
+	struct cnic_sock *csk;
+
+	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
+	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
+		cnic_cm_process_offld_pg(dev, l4kcqe);
+		return;
+	}
+
+	l5_cid = l4kcqe->conn_id;
+	if (opcode & 0x80)
+		l5_cid = l4kcqe->cid;
+	if (l5_cid >= MAX_CM_SK_TBL_SZ)
+		return;
+
+	csk = &cp->csk_tbl[l5_cid];
+	csk_hold(csk);
+
+	if (!cnic_in_use(csk)) {
+		csk_put(csk);
+		return;
+	}
+
+	switch (opcode) {
+	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
+		if (l4kcqe->status == 0)
+			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
+
+		smp_mb__before_clear_bit();
+		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+		cnic_cm_upcall(cp, csk, opcode);
+		break;
+
+	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
+		if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
+			csk->state = opcode;
+		/* fall through */
+	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
+	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
+		cp->close_conn(csk, opcode);
+		break;
+
+	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
+		cnic_cm_upcall(cp, csk, opcode);
+		break;
+	}
+	csk_put(csk);
+}
+
+static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
+{
+	struct cnic_dev *dev = data;
+	int i;
+
+	for (i = 0; i < num; i++)
+		cnic_cm_process_kcqe(dev, kcqe[i]);
+}
+
+static struct cnic_ulp_ops cm_ulp_ops = {
+	.indicate_kcqes		= cnic_cm_indicate_kcqe,
+};
+
+static void cnic_cm_free_mem(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	kfree(cp->csk_tbl);
+	cp->csk_tbl = NULL;
+	cnic_free_id_tbl(&cp->csk_port_tbl);
+}
+
+static int cnic_cm_alloc_mem(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
+			      GFP_KERNEL);
+	if (!cp->csk_tbl)
+		return -ENOMEM;
+
+	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
+			     CNIC_LOCAL_PORT_MIN)) {
+		cnic_cm_free_mem(dev);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
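+/* A close/reset completion tears the connection down only when it
+ * matches the state being waited for; the SK_F_CLOSING bit ensures the
+ * teardown runs once.
+ */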
+static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
+{
+	if ((opcode == csk->state) ||
+	    (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED &&
+	     csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) {
+		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
+			return 1;
+	}
+	return 0;
+}
+
+static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	clear_bit(SK_F_CONNECT_START, &csk->flags);
+	if (cnic_ready_to_close(csk, opcode)) {
+		cnic_close_conn(csk);
+		cnic_cm_upcall(cp, csk, opcode);
+	}
+}
+
+static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
+{
+}
+
+static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
+{
+	u32 seed;
+
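+	/* Initialize the connection manager with a random seed, written
+	 * to the fixed context 45 at offset 0.
+	 */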
+	get_random_bytes(&seed, 4);
+	cnic_ctx_wr(dev, 45, 0, seed);
+	return 0;
+}
+
+static int cnic_cm_open(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int err;
+
+	err = cnic_cm_alloc_mem(dev);
+	if (err)
+		return err;
+
+	err = cp->start_cm(dev);
+
+	if (err)
+		goto err_out;
+
+	dev->cm_create = cnic_cm_create;
+	dev->cm_destroy = cnic_cm_destroy;
+	dev->cm_connect = cnic_cm_connect;
+	dev->cm_abort = cnic_cm_abort;
+	dev->cm_close = cnic_cm_close;
+	dev->cm_select_dev = cnic_cm_select_dev;
+
+	cp->ulp_handle[CNIC_ULP_L4] = dev;
+	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
+	return 0;
+
+err_out:
+	cnic_cm_free_mem(dev);
+	return err;
+}
+
+static int cnic_cm_shutdown(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i;
+
+	cp->stop_cm(dev);
+
+	if (!cp->csk_tbl)
+		return 0;
+
+	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
+		struct cnic_sock *csk = &cp->csk_tbl[i];
+
+		clear_bit(SK_F_INUSE, &csk->flags);
+		cnic_cm_cleanup(csk);
+	}
+	cnic_cm_free_mem(dev);
+
+	return 0;
+}
+
+static void cnic_init_context(struct cnic_dev *dev, u32 cid)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 cid_addr;
+	int i;
+
+	if (CHIP_NUM(cp) == CHIP_NUM_5709)
+		return;
+
+	cid_addr = GET_CID_ADDR(cid);
+
+	for (i = 0; i < CTX_SIZE; i += 4)
+		cnic_ctx_wr(dev, cid_addr, i, 0);
+}
+
+static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int ret = 0, i;
+	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
+
+	if (CHIP_NUM(cp) != CHIP_NUM_5709)
+		return 0;
+
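+	/* Write each context page address into the chip's host page
+	 * table and poll (up to 10 x 5us) for the write to be latched.
+	 */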
+	for (i = 0; i < cp->ctx_blks; i++) {
+		int j;
+		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
+		u32 val;
+
+		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
+
+		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
+			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
+		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
+			(u64) cp->ctx_arr[i].mapping >> 32);
+		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
+			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
+		for (j = 0; j < 10; j++) {
+
+			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
+			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
+				break;
+			udelay(5);
+		}
+		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
+			ret = -EBUSY;
+			break;
+		}
+	}
+	return ret;
+}
+
+static void cnic_free_irq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		cp->disable_int_sync(dev);
+		tasklet_disable(&cp->cnic_irq_task);
+		free_irq(ethdev->irq_arr[0].vector, dev);
+	}
+}
+
+static int cnic_init_bnx2_irq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		int err, i = 0;
+		int sblk_num = cp->status_blk_num;
+		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
+			   BNX2_HC_SB_CONFIG_1;
+
+		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
+
+		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
+		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
+		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
+
+		cp->bnx2_status_blk = cp->status_blk;
+		cp->last_status_idx = cp->bnx2_status_blk->status_idx;
+		tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2_msix,
+			     (unsigned long) dev);
+		err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
+				  "cnic", dev);
+		if (err) {
+			tasklet_disable(&cp->cnic_irq_task);
+			return err;
+		}
+		while (cp->bnx2_status_blk->status_completion_producer_index &&
+		       i < 10) {
+			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
+				1 << (11 + sblk_num));
+			udelay(10);
+			i++;
+			barrier();
+		}
+		if (cp->bnx2_status_blk->status_completion_producer_index) {
+			cnic_free_irq(dev);
+			goto failed;
+		}
+
+	} else {
+		struct status_block *sblk = cp->status_blk;
+		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
+		int i = 0;
+
+		while (sblk->status_completion_producer_index && i < 10) {
+			CNIC_WR(dev, BNX2_HC_COMMAND,
+				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+			udelay(10);
+			i++;
+			barrier();
+		}
+		if (sblk->status_completion_producer_index)
+			goto failed;
+
+	}
+	return 0;
+
+failed:
+	printk(KERN_ERR PFX "%s: " "KCQ index not resetting to 0.\n",
+	       dev->netdev->name);
+	return -EBUSY;
+}
+
+static void cnic_enable_bnx2_int(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+		return;
+
+	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
+}
+
+static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+		return;
+
+	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
+	synchronize_irq(ethdev->irq_arr[0].vector);
+}
+
+static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	u32 cid_addr, tx_cid, sb_id;
+	u32 val, offset0, offset1, offset2, offset3;
+	int i;
+	struct tx_bd *txbd;
+	dma_addr_t buf_map;
+	struct status_block *s_blk = cp->status_blk;
+
+	sb_id = cp->status_blk_num;
+	tx_cid = 20;
+	cnic_init_context(dev, tx_cid);
+	cnic_init_context(dev, tx_cid + 1);
+	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		struct status_block_msix *sblk = cp->status_blk;
+
+		tx_cid = TX_TSS_CID + sb_id - 1;
+		cnic_init_context(dev, tx_cid);
+		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
+			(TX_TSS_CID << 7));
+		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
+	}
+	cp->tx_cons = *cp->tx_cons_ptr;
+
+	cid_addr = GET_CID_ADDR(tx_cid);
+	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
+		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
+
+		for (i = 0; i < PHY_CTX_SIZE; i += 4)
+			cnic_ctx_wr(dev, cid_addr2, i, 0);
+
+		offset0 = BNX2_L2CTX_TYPE_XI;
+		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
+		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
+		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
+	} else {
+		offset0 = BNX2_L2CTX_TYPE;
+		offset1 = BNX2_L2CTX_CMD_TYPE;
+		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
+		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
+	}
+	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
+	cnic_ctx_wr(dev, cid_addr, offset0, val);
+
+	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
+	cnic_ctx_wr(dev, cid_addr, offset1, val);
+
+	txbd = (struct tx_bd *) cp->l2_ring;
+
+	buf_map = cp->l2_buf_map;
+	for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
+		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
+		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
+	}
+	val = (u64) cp->l2_ring_map >> 32;
+	cnic_ctx_wr(dev, cid_addr, offset2, val);
+	txbd->tx_bd_haddr_hi = val;
+
+	val = (u64) cp->l2_ring_map & 0xffffffff;
+	cnic_ctx_wr(dev, cid_addr, offset3, val);
+	txbd->tx_bd_haddr_lo = val;
+}
+
+static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	u32 cid_addr, sb_id, val, coal_reg, coal_val;
+	int i;
+	struct rx_bd *rxbd;
+	struct status_block *s_blk = cp->status_blk;
+
+	sb_id = cp->status_blk_num;
+	cnic_init_context(dev, 2);
+	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
+	coal_reg = BNX2_HC_COMMAND;
+	coal_val = CNIC_RD(dev, coal_reg);
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		struct status_block_msix *sblk = cp->status_blk;
+
+		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
+		coal_reg = BNX2_HC_COALESCE_NOW;
+		coal_val = 1 << (11 + sb_id);
+	}
+	i = 0;
+	while (*cp->rx_cons_ptr == 0 && i < 10) {
+		CNIC_WR(dev, coal_reg, coal_val);
+		udelay(10);
+		i++;
+		barrier();
+	}
+	cp->rx_cons = *cp->rx_cons_ptr;
+
+	cid_addr = GET_CID_ADDR(2);
+	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
+	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
+	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
+
+	if (sb_id == 0)
+		val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT;
+	else
+		val = BNX2_L2CTX_STATUSB_NUM(sb_id);
+	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
+
+	rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
+	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
+		dma_addr_t buf_map;
+		int n = (i % cp->l2_rx_ring_size) + 1;
+
+		buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
+		rxbd->rx_bd_len = cp->l2_single_buf_size;
+		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
+		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
+		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
+	}
+	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
+	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
+	rxbd->rx_bd_haddr_hi = val;
+
+	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
+	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
+	rxbd->rx_bd_haddr_lo = val;
+
+	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
+	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
+}
+
+static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
+{
+	struct kwqe *wqes[1], l2kwqe;
+
+	memset(&l2kwqe, 0, sizeof(l2kwqe));
+	wqes[0] = &l2kwqe;
+	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
+			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
+			       KWQE_OPCODE_SHIFT) | 2;
+	dev->submit_kwqes(dev, wqes, 1);
+}
+
+static void cnic_set_bnx2_mac(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 val;
+
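+	/* Read the iSCSI MAC address for this PCI function from bootcode
+	 * shared memory and program it into the EMAC match registers.
+	 */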
+	val = cp->func << 2;
+
+	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
+
+	val = cnic_reg_rd_ind(dev, cp->shmem_base +
+			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
+	dev->mac_addr[0] = (u8) (val >> 8);
+	dev->mac_addr[1] = (u8) val;
+
+	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
+
+	val = cnic_reg_rd_ind(dev, cp->shmem_base +
+			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
+	dev->mac_addr[2] = (u8) (val >> 24);
+	dev->mac_addr[3] = (u8) (val >> 16);
+	dev->mac_addr[4] = (u8) (val >> 8);
+	dev->mac_addr[5] = (u8) val;
+
+	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
+
+	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
+	if (CHIP_NUM(cp) != CHIP_NUM_5709)
+		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
+
+	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
+	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
+	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
+}
+
+static int cnic_start_bnx2_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct status_block *sblk = cp->status_blk;
+	u32 val;
+	int err;
+
+	cnic_set_bnx2_mac(dev);
+
+	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
+	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
+	if (BCM_PAGE_BITS > 12)
+		val |= (12 - 8)  << 4;
+	else
+		val |= (BCM_PAGE_BITS - 8)  << 4;
+
+	CNIC_WR(dev, BNX2_MQ_CONFIG, val);
+
+	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
+	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
+	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
+
+	err = cnic_setup_5709_context(dev, 1);
+	if (err)
+		return err;
+
+	cnic_init_context(dev, KWQ_CID);
+	cnic_init_context(dev, KCQ_CID);
+
+	cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
+	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
+
+	cp->max_kwq_idx = MAX_KWQ_IDX;
+	cp->kwq_prod_idx = 0;
+	cp->kwq_con_idx = 0;
+	cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;
+
+	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
+		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
+	else
+		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
+
+	/* Initialize the kernel work queue context. */
+	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
+	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);
+
+	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
+	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+
+	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
+	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+
+	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
+	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+
+	val = (u32) cp->kwq_info.pgtbl_map;
+	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+	cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
+	cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
+
+	cp->kcq_prod_idx = 0;
+
+	/* Initialize the kernel complete queue context. */
+	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
+	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);
+
+	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
+	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+
+	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
+	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+
+	val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
+	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+
+	val = (u32) cp->kcq_info.pgtbl_map;
+	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+	cp->int_num = 0;
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		u32 sb_id = cp->status_blk_num;
+		u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id);
+
+		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
+		cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+		cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+	}
+
+	/* Enable Command Scheduler notification when we write to the
+	 * host producer index of the kernel contexts. */
+	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
+
+	/* Enable Command Scheduler notification when we write to either
+	 * the Send Queue or Receive Queue producer indexes of the kernel
+	 * bypass contexts. */
+	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
+	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
+
+	/* Notify COM when the driver posts an application buffer. */
+	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
+
+	/* Set the CP and COM doorbells.  These two processors poll the
+	 * doorbell for a non-zero value before running.  This must be done
+	 * after setting up the kernel queue contexts. */
+	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
+	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
+
+	cnic_init_bnx2_tx_ring(dev);
+	cnic_init_bnx2_rx_ring(dev);
+
+	err = cnic_init_bnx2_irq(dev);
+	if (err) {
+		printk(KERN_ERR PFX "%s: cnic_init_irq failed\n",
+		       dev->netdev->name);
+		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
+		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
+		return err;
+	}
+
+	return 0;
+}
+
+static int cnic_start_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int err;
+
+	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EALREADY;
+
+	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
+	if (err) {
+		printk(KERN_ERR PFX "%s: register_cnic failed\n",
+		       dev->netdev->name);
+		goto err2;
+	}
+
+	dev->regview = ethdev->io_base;
+	cp->chip_id = ethdev->chip_id;
+	pci_dev_get(dev->pcidev);
+	cp->func = PCI_FUNC(dev->pcidev->devfn);
+	cp->status_blk = ethdev->irq_arr[0].status_blk;
+	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
+
+	err = cp->alloc_resc(dev);
+	if (err) {
+		printk(KERN_ERR PFX "%s: allocate resource failure\n",
+		       dev->netdev->name);
+		goto err1;
+	}
+
+	err = cp->start_hw(dev);
+	if (err)
+		goto err1;
+
+	err = cnic_cm_open(dev);
+	if (err)
+		goto err1;
+
+	set_bit(CNIC_F_CNIC_UP, &dev->flags);
+
+	cp->enable_int(dev);
+
+	return 0;
+
+err1:
+	ethdev->drv_unregister_cnic(dev->netdev);
+	cp->free_resc(dev);
+	pci_dev_put(dev->pcidev);
+err2:
+	return err;
+}
+
+static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	cnic_disable_bnx2_int_sync(dev);
+
+	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
+	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
+
+	cnic_init_context(dev, KWQ_CID);
+	cnic_init_context(dev, KCQ_CID);
+
+	cnic_setup_5709_context(dev, 0);
+	cnic_free_irq(dev);
+
+	ethdev->drv_unregister_cnic(dev->netdev);
+
+	cnic_free_resc(dev);
+}
+
+static void cnic_stop_hw(struct cnic_dev *dev)
+{
+	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
+		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
+		synchronize_rcu();
+		cnic_cm_shutdown(dev);
+		cp->stop_hw(dev);
+		pci_dev_put(dev->pcidev);
+	}
+}
+
+static void cnic_free_dev(struct cnic_dev *dev)
+{
+	int i = 0;
+
+	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
+		msleep(100);
+		i++;
+	}
+	if (atomic_read(&dev->ref_count) != 0)
+		printk(KERN_ERR PFX "%s: Failed waiting for ref count to go"
+				    " to zero.\n", dev->netdev->name);
+
+	printk(KERN_INFO PFX "Removed CNIC device: %s\n", dev->netdev->name);
+	dev_put(dev->netdev);
+	kfree(dev);
+}
+
+static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
+				       struct pci_dev *pdev)
+{
+	struct cnic_dev *cdev;
+	struct cnic_local *cp;
+	int alloc_size;
+
+	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
+
+	cdev = kzalloc(alloc_size, GFP_KERNEL);
+	if (cdev == NULL) {
+		printk(KERN_ERR PFX "%s: allocate dev struct failure\n",
+		       dev->name);
+		return NULL;
+	}
+
+	cdev->netdev = dev;
+	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
+	cdev->register_device = cnic_register_device;
+	cdev->unregister_device = cnic_unregister_device;
+	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
+
+	cp = cdev->cnic_priv;
+	cp->dev = cdev;
+	cp->uio_dev = -1;
+	cp->l2_single_buf_size = 0x400;
+	cp->l2_rx_ring_size = 3;
+
+	spin_lock_init(&cp->cnic_ulp_lock);
+
+	printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name);
+
+	return cdev;
+}
+
+static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
+{
+	struct pci_dev *pdev;
+	struct cnic_dev *cdev;
+	struct cnic_local *cp;
+	struct cnic_eth_dev *ethdev = NULL;
+	struct cnic_eth_dev *(*probe)(void *) = NULL;
+
+	probe = __symbol_get("bnx2_cnic_probe");
+	if (probe) {
+		ethdev = (*probe)(dev);
+		symbol_put_addr(probe);
+	}
+	if (!ethdev)
+		return NULL;
+
+	pdev = ethdev->pdev;
+	if (!pdev)
+		return NULL;
+
+	dev_hold(dev);
+	pci_dev_get(pdev);
+	if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
+	    pdev->device == PCI_DEVICE_ID_NX2_5709S) {
+		u8 rev;
+
+		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
+		if (rev < 0x10) {
+			pci_dev_put(pdev);
+			goto cnic_err;
+		}
+	}
+	pci_dev_put(pdev);
+
+	cdev = cnic_alloc_dev(dev, pdev);
+	if (cdev == NULL)
+		goto cnic_err;
+
+	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
+	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
+
+	cp = cdev->cnic_priv;
+	cp->ethdev = ethdev;
+	cdev->pcidev = pdev;
+
+	cp->cnic_ops = &cnic_bnx2_ops;
+	cp->start_hw = cnic_start_bnx2_hw;
+	cp->stop_hw = cnic_stop_bnx2_hw;
+	cp->setup_pgtbl = cnic_setup_page_tbl;
+	cp->alloc_resc = cnic_alloc_bnx2_resc;
+	cp->free_resc = cnic_free_resc;
+	cp->start_cm = cnic_cm_init_bnx2_hw;
+	cp->stop_cm = cnic_cm_stop_bnx2_hw;
+	cp->enable_int = cnic_enable_bnx2_int;
+	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
+	cp->close_conn = cnic_close_bnx2_conn;
+	cp->next_idx = cnic_bnx2_next_idx;
+	cp->hw_idx = cnic_bnx2_hw_idx;
+	return cdev;
+
+cnic_err:
+	dev_put(dev);
+	return NULL;
+}
+
+static struct cnic_dev *is_cnic_dev(struct net_device *dev)
+{
+	struct ethtool_drvinfo drvinfo;
+	struct cnic_dev *cdev = NULL;
+
+	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
+		memset(&drvinfo, 0, sizeof(drvinfo));
+		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
+
+		if (!strcmp(drvinfo.driver, "bnx2"))
+			cdev = init_bnx2_cnic(dev);
+		if (cdev) {
+			write_lock(&cnic_dev_lock);
+			list_add(&cdev->list, &cnic_dev_list);
+			write_unlock(&cnic_dev_lock);
+		}
+	}
+	return cdev;
+}
+
+/*
+ * netdev event handler
+ */
+static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
+							 void *ptr)
+{
+	struct net_device *netdev = ptr;
+	struct cnic_dev *dev;
+	int if_type;
+	int new_dev = 0;
+
+	dev = cnic_from_netdev(netdev);
+
+	if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
+		/* Check for the hot-plug device */
+		dev = is_cnic_dev(netdev);
+		if (dev) {
+			new_dev = 1;
+			cnic_hold(dev);
+		}
+	}
+	if (dev) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		if (new_dev)
+			cnic_ulp_init(dev);
+		else if (event == NETDEV_UNREGISTER)
+			cnic_ulp_exit(dev);
+		else if (event == NETDEV_UP) {
+			mutex_lock(&cnic_lock);
+			if (!cnic_start_hw(dev))
+				cnic_ulp_start(dev);
+			mutex_unlock(&cnic_lock);
+		}
+
+		rcu_read_lock();
+		for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+			struct cnic_ulp_ops *ulp_ops;
+			void *ctx;
+
+			ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
+			if (!ulp_ops || !ulp_ops->indicate_netevent)
+				continue;
+
+			ctx = cp->ulp_handle[if_type];
+
+			ulp_ops->indicate_netevent(ctx, event);
+		}
+		rcu_read_unlock();
+
+		if (event == NETDEV_GOING_DOWN) {
+			mutex_lock(&cnic_lock);
+			cnic_ulp_stop(dev);
+			cnic_stop_hw(dev);
+			mutex_unlock(&cnic_lock);
+		} else if (event == NETDEV_UNREGISTER) {
+			write_lock(&cnic_dev_lock);
+			list_del_init(&dev->list);
+			write_unlock(&cnic_dev_lock);
+
+			cnic_put(dev);
+			cnic_free_dev(dev);
+			goto done;
+		}
+		cnic_put(dev);
+	}
+done:
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block cnic_netdev_notifier = {
+	.notifier_call = cnic_netdev_event
+};
+
+static void cnic_release(void)
+{
+	struct cnic_dev *dev;
+
+	while (!list_empty(&cnic_dev_list)) {
+		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
+		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+			cnic_ulp_stop(dev);
+			cnic_stop_hw(dev);
+		}
+
+		cnic_ulp_exit(dev);
+		list_del_init(&dev->list);
+		cnic_free_dev(dev);
+	}
+}
+
+static int __init cnic_init(void)
+{
+	int rc = 0;
+
+	printk(KERN_INFO "%s", version);
+
+	rc = register_netdevice_notifier(&cnic_netdev_notifier);
+	if (rc) {
+		cnic_release();
+		return rc;
+	}
+
+	return 0;
+}
+
+static void __exit cnic_exit(void)
+{
+	unregister_netdevice_notifier(&cnic_netdev_notifier);
+	cnic_release();
+	return;
+}
+
+module_init(cnic_init);
+module_exit(cnic_exit);

+ 299 - 0
drivers/net/cnic.h

@@ -0,0 +1,299 @@
+/* cnic.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+
+#ifndef CNIC_H
+#define CNIC_H
+
+#define KWQ_PAGE_CNT	4
+#define KCQ_PAGE_CNT	16
+
+#define KWQ_CID 		24
+#define KCQ_CID 		25
+
+/*
+ *	krnlq_context definition
+ */
+#define L5_KRNLQ_FLAGS	0x00000000
+#define L5_KRNLQ_SIZE	0x00000000
+#define L5_KRNLQ_TYPE	0x00000000
+#define KRNLQ_FLAGS_PG_SZ					(0xf<<0)
+#define KRNLQ_FLAGS_PG_SZ_256					(0<<0)
+#define KRNLQ_FLAGS_PG_SZ_512					(1<<0)
+#define KRNLQ_FLAGS_PG_SZ_1K					(2<<0)
+#define KRNLQ_FLAGS_PG_SZ_2K					(3<<0)
+#define KRNLQ_FLAGS_PG_SZ_4K					(4<<0)
+#define KRNLQ_FLAGS_PG_SZ_8K					(5<<0)
+#define KRNLQ_FLAGS_PG_SZ_16K					(6<<0)
+#define KRNLQ_FLAGS_PG_SZ_32K					(7<<0)
+#define KRNLQ_FLAGS_PG_SZ_64K					(8<<0)
+#define KRNLQ_FLAGS_PG_SZ_128K					(9<<0)
+#define KRNLQ_FLAGS_PG_SZ_256K					(10<<0)
+#define KRNLQ_FLAGS_PG_SZ_512K					(11<<0)
+#define KRNLQ_FLAGS_PG_SZ_1M					(12<<0)
+#define KRNLQ_FLAGS_PG_SZ_2M					(13<<0)
+#define KRNLQ_FLAGS_QE_SELF_SEQ					(1<<15)
+#define KRNLQ_SIZE_TYPE_SIZE	((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16)
+#define KRNLQ_TYPE_TYPE						(0xf<<28)
+#define KRNLQ_TYPE_TYPE_EMPTY					(0<<28)
+#define KRNLQ_TYPE_TYPE_KRNLQ					(6<<28)
+
+#define L5_KRNLQ_HOST_QIDX		0x00000004
+#define L5_KRNLQ_HOST_FW_QIDX		0x00000008
+#define L5_KRNLQ_NX_QE_SELF_SEQ 	0x0000000c
+#define L5_KRNLQ_QE_SELF_SEQ_MAX	0x0000000c
+#define L5_KRNLQ_NX_QE_HADDR_HI 	0x00000010
+#define L5_KRNLQ_NX_QE_HADDR_LO 	0x00000014
+#define L5_KRNLQ_PGTBL_PGIDX		0x00000018
+#define L5_KRNLQ_NX_PG_QIDX 		0x00000018
+#define L5_KRNLQ_PGTBL_NPAGES		0x0000001c
+#define L5_KRNLQ_QIDX_INCR		0x0000001c
+#define L5_KRNLQ_PGTBL_HADDR_HI 	0x00000020
+#define L5_KRNLQ_PGTBL_HADDR_LO 	0x00000024
+
+#define BNX2_PG_CTX_MAP			0x1a0034
+#define BNX2_ISCSI_CTX_MAP		0x1a0074
+
+struct cnic_redirect_entry {
+	struct dst_entry *old_dst;
+	struct dst_entry *new_dst;
+};
+
+#define MAX_COMPLETED_KCQE	64
+
+#define MAX_CNIC_L5_CONTEXT	256
+
+#define MAX_CM_SK_TBL_SZ	MAX_CNIC_L5_CONTEXT
+
+#define MAX_ISCSI_TBL_SZ	256
+
+#define CNIC_LOCAL_PORT_MIN	60000
+#define CNIC_LOCAL_PORT_MAX	61000
+#define CNIC_LOCAL_PORT_RANGE	(CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
+
+#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe))
+#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe))
+#define MAX_KWQE_CNT (KWQE_CNT - 1)
+#define MAX_KCQE_CNT (KCQE_CNT - 1)
+
+#define MAX_KWQ_IDX	((KWQ_PAGE_CNT * KWQE_CNT) - 1)
+#define MAX_KCQ_IDX	((KCQ_PAGE_CNT * KCQE_CNT) - 1)
+
+#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5))
+#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
+
+#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5))
+#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
+
+#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) ==		\
+		(MAX_KCQE_CNT - 1)) ?					\
+		(x) + 2 : (x) + 1
+
+#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp)
+#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp)
+#define BNX2X_KWQ_DATA(cp, x)						\
+	&(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
+
+#define DEF_IPID_COUNT		0xc001
+
+#define DEF_KA_TIMEOUT		10000
+#define DEF_KA_INTERVAL		300000
+#define DEF_KA_MAX_PROBE_COUNT	3
+#define DEF_TOS			0
+#define DEF_TTL			0xfe
+#define DEF_SND_SEQ_SCALE	0
+#define DEF_RCV_BUF		0xffff
+#define DEF_SND_BUF		0xffff
+#define DEF_SEED		0
+#define DEF_MAX_RT_TIME		500
+#define DEF_MAX_DA_COUNT	2
+#define DEF_SWS_TIMER		1000
+#define DEF_MAX_CWND		0xffff
+
+struct cnic_ctx {
+	u32		cid;
+	void		*ctx;
+	dma_addr_t	mapping;
+};
+
+#define BNX2_MAX_CID		0x2000
+
+struct cnic_dma {
+	int		num_pages;
+	void		**pg_arr;
+	dma_addr_t	*pg_map_arr;
+	int		pgtbl_size;
+	u32		*pgtbl;
+	dma_addr_t	pgtbl_map;
+};
+
+struct cnic_id_tbl {
+	spinlock_t	lock;
+	u32		start;
+	u32		max;
+	u32		next;
+	unsigned long	*table;
+};
+
+#define CNIC_KWQ16_DATA_SIZE	128
+
+struct kwqe_16_data {
+	u8	data[CNIC_KWQ16_DATA_SIZE];
+};
+
+struct cnic_iscsi {
+	struct cnic_dma		task_array_info;
+	struct cnic_dma		r2tq_info;
+	struct cnic_dma		hq_info;
+};
+
+struct cnic_context {
+	u32			cid;
+	struct kwqe_16_data	*kwqe_data;
+	dma_addr_t		kwqe_data_mapping;
+	wait_queue_head_t	waitq;
+	int			wait_cond;
+	unsigned long		timestamp;
+	u32			ctx_flags;
+#define	CTX_FL_OFFLD_START	0x00000001
+	u8			ulp_proto_id;
+	union {
+		struct cnic_iscsi	*iscsi;
+	} proto;
+};
+
+struct cnic_local {
+
+	spinlock_t cnic_ulp_lock;
+	void *ulp_handle[MAX_CNIC_ULP_TYPE];
+	unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
+#define ULP_F_INIT	0
+#define ULP_F_START	1
+	struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
+
+	/* protected by ulp_lock */
+	u32 cnic_local_flags;
+#define	CNIC_LCL_FL_KWQ_INIT	0x00000001
+
+	struct cnic_dev *dev;
+
+	struct cnic_eth_dev *ethdev;
+
+	void		*l2_ring;
+	dma_addr_t	l2_ring_map;
+	int		l2_ring_size;
+	int		l2_rx_ring_size;
+
+	void		*l2_buf;
+	dma_addr_t	l2_buf_map;
+	int		l2_buf_size;
+	int		l2_single_buf_size;
+
+	u16		*rx_cons_ptr;
+	u16		*tx_cons_ptr;
+	u16		rx_cons;
+	u16		tx_cons;
+
+	u32 kwq_cid_addr;
+	u32 kcq_cid_addr;
+
+	struct cnic_dma		kwq_info;
+	struct kwqe		**kwq;
+
+	struct cnic_dma		kwq_16_data_info;
+
+	u16		max_kwq_idx;
+
+	u16		kwq_prod_idx;
+	u32		kwq_io_addr;
+
+	u16		*kwq_con_idx_ptr;
+	u16		kwq_con_idx;
+
+	struct cnic_dma	kcq_info;
+	struct kcqe	**kcq;
+
+	u16		kcq_prod_idx;
+	u32		kcq_io_addr;
+
+	void				*status_blk;
+	struct status_block_msix	*bnx2_status_blk;
+	struct host_status_block	*bnx2x_status_blk;
+
+	u32				status_blk_num;
+	u32				int_num;
+	u32				last_status_idx;
+	struct tasklet_struct		cnic_irq_task;
+
+	struct kcqe		*completed_kcq[MAX_COMPLETED_KCQE];
+
+	struct cnic_sock	*csk_tbl;
+	struct cnic_id_tbl	csk_port_tbl;
+
+	struct cnic_dma		conn_buf_info;
+	struct cnic_dma		gbl_buf_info;
+
+	struct cnic_iscsi	*iscsi_tbl;
+	struct cnic_context	*ctx_tbl;
+	struct cnic_id_tbl	cid_tbl;
+	int			max_iscsi_conn;
+	atomic_t		iscsi_conn;
+
+	/* per connection parameters */
+	int			num_iscsi_tasks;
+	int			num_ccells;
+	int			task_array_size;
+	int			r2tq_size;
+	int			hq_size;
+	int			num_cqs;
+
+	struct cnic_ctx		*ctx_arr;
+	int			ctx_blks;
+	int			ctx_blk_size;
+	int			cids_per_blk;
+
+	u32			chip_id;
+	int			func;
+	u32			shmem_base;
+
+	u32			uio_dev;
+	struct uio_info		*cnic_uinfo;
+
+	struct cnic_ops		*cnic_ops;
+	int			(*start_hw)(struct cnic_dev *);
+	void			(*stop_hw)(struct cnic_dev *);
+	void			(*setup_pgtbl)(struct cnic_dev *,
+					       struct cnic_dma *);
+	int			(*alloc_resc)(struct cnic_dev *);
+	void			(*free_resc)(struct cnic_dev *);
+	int			(*start_cm)(struct cnic_dev *);
+	void			(*stop_cm)(struct cnic_dev *);
+	void			(*enable_int)(struct cnic_dev *);
+	void			(*disable_int_sync)(struct cnic_dev *);
+	void			(*ack_int)(struct cnic_dev *);
+	void			(*close_conn)(struct cnic_sock *, u32 opcode);
+	u16			(*next_idx)(u16);
+	u16			(*hw_idx)(u16);
+};
+
+struct bnx2x_bd_chain_next {
+	u32	addr_lo;
+	u32	addr_hi;
+	u8	reserved[8];
+};
+
+#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN		(ISCSI_KCQE_OPCODE_UPDATE_CONN)
+#define ISCSI_RAMROD_CMD_ID_INIT		(ISCSI_KCQE_OPCODE_INIT)
+
+#define CDU_REGION_NUMBER_XCM_AG 2
+#define CDU_REGION_NUMBER_UCM_AG 4
+
+#endif
+
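The KWQ and KCQ above are allocated as arrays of BCM_PAGE_SIZE pages, so a 16-bit ring index has to be split into a page number and an offset within that page; that is what the KWQ_PG()/KWQ_IDX() pair does. A minimal sketch, assuming the kwq page array from struct cnic_local (kwq_entry() is a hypothetical helper name, but the indexing expression matches the one cnic.c uses when posting KWQEs):

static struct kwqe *kwq_entry(struct cnic_local *cp, u16 idx)
{
	/* high bits select the page, low bits the entry in that page */
	return &cp->kwq[KWQ_PG(idx)][KWQ_IDX(idx)];
}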

+ 580 - 0
drivers/net/cnic_defs.h

@@ -0,0 +1,580 @@
+
+/* cnic_defs.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#ifndef CNIC_DEFS_H
+#define CNIC_DEFS_H
+
+/* KWQ (kernel work queue) request op codes */
+#define L2_KWQE_OPCODE_VALUE_FLUSH                  (4)
+
+#define L4_KWQE_OPCODE_VALUE_CONNECT1               (50)
+#define L4_KWQE_OPCODE_VALUE_CONNECT2               (51)
+#define L4_KWQE_OPCODE_VALUE_CONNECT3               (52)
+#define L4_KWQE_OPCODE_VALUE_RESET                  (53)
+#define L4_KWQE_OPCODE_VALUE_CLOSE                  (54)
+#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET          (60)
+#define L4_KWQE_OPCODE_VALUE_INIT_ULP               (61)
+
+#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG             (1)
+#define L4_KWQE_OPCODE_VALUE_UPDATE_PG              (9)
+#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG              (14)
+
+#define L5CM_RAMROD_CMD_ID_BASE			(0x80)
+#define L5CM_RAMROD_CMD_ID_TCP_CONNECT		(L5CM_RAMROD_CMD_ID_BASE + 3)
+#define L5CM_RAMROD_CMD_ID_CLOSE		(L5CM_RAMROD_CMD_ID_BASE + 12)
+#define L5CM_RAMROD_CMD_ID_ABORT		(L5CM_RAMROD_CMD_ID_BASE + 13)
+#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE	(L5CM_RAMROD_CMD_ID_BASE + 14)
+#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD	(L5CM_RAMROD_CMD_ID_BASE + 15)
+
+/* KCQ (kernel completion queue) response op codes */
+#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP             (53)
+#define L4_KCQE_OPCODE_VALUE_RESET_COMP             (54)
+#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE          (55)
+#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE       (56)
+#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED         (57)
+#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED         (58)
+#define L4_KCQE_OPCODE_VALUE_INIT_ULP               (61)
+
+#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG             (1)
+#define L4_KCQE_OPCODE_VALUE_UPDATE_PG              (9)
+#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG              (14)
+
+/* KCQ (kernel completion queue) completion status */
+#define L4_KCQE_COMPLETION_STATUS_SUCCESS		    (0)
+#define L4_KCQE_COMPLETION_STATUS_TIMEOUT        (0x93)
+
+#define L4_LAYER_CODE (4)
+#define L2_LAYER_CODE (2)
+
+/*
+ * L4 KCQ CQE
+ */
+struct l4_kcq {
+	u32 cid;
+	u32 pg_cid;
+	u32 conn_id;
+	u32 pg_host_opaque;
+#if defined(__BIG_ENDIAN)
+	u16 status;
+	u16 reserved1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved1;
+	u16 status;
+#endif
+	u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KCQ_RESERVED3 (0x7<<0)
+#define L4_KCQ_RESERVED3_SHIFT 0
+#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
+#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
+#define L4_KCQ_LAYER_CODE (0x7<<4)
+#define L4_KCQ_LAYER_CODE_SHIFT 4
+#define L4_KCQ_RESERVED4 (0x1<<7)
+#define L4_KCQ_RESERVED4_SHIFT 7
+	u8 op_code;
+	u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+	u16 qe_self_seq;
+	u8 op_code;
+	u8 flags;
+#define L4_KCQ_RESERVED3 (0xF<<0)
+#define L4_KCQ_RESERVED3_SHIFT 0
+#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
+#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
+#define L4_KCQ_LAYER_CODE (0x7<<4)
+#define L4_KCQ_LAYER_CODE_SHIFT 4
+#define L4_KCQ_RESERVED4 (0x1<<7)
+#define L4_KCQ_RESERVED4_SHIFT 7
+#endif
+};
+
+
+/*
+ * L4 KCQ CQE PG upload
+ */
+struct l4_kcq_upload_pg {
+	u32 pg_cid;
+#if defined(__BIG_ENDIAN)
+	u16 pg_status;
+	u16 pg_ipid_count;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pg_ipid_count;
+	u16 pg_status;
+#endif
+	u32 reserved1[5];
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
+#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
+#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
+	u8 op_code;
+	u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+	u16 qe_self_seq;
+	u8 op_code;
+	u8 flags;
+#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
+#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
+#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
+#endif
+};
+
+
+/*
+ * Gracefully close the connection request
+ */
+struct l4_kwq_close_req {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 reserved2[6];
+};
+
+
+/*
+ * The first request to be passed in order to establish connection in option2
+ */
+struct l4_kwq_connect_req1 {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u8 reserved0;
+	u8 conn_flags;
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
+#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+	u8 conn_flags;
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
+#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
+	u8 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 pg_cid;
+	u32 src_ip;
+	u32 dst_ip;
+#if defined(__BIG_ENDIAN)
+	u16 dst_port;
+	u16 src_port;
+#elif defined(__LITTLE_ENDIAN)
+	u16 src_port;
+	u16 dst_port;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 rsrv1[3];
+	u8 tcp_flags;
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
+#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
+#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
+#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
+#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+	u8 tcp_flags;
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
+#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
+#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
+#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
+#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
+	u8 rsrv1[3];
+#endif
+	u32 rsrv2;
+};
+
+
+/*
+ * The second (optional) request to be passed in order to establish
+ * connection in option2 - for IPv6 only
+ */
+struct l4_kwq_connect_req2 {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u8 reserved0;
+	u8 rsrv;
+#elif defined(__LITTLE_ENDIAN)
+	u8 rsrv;
+	u8 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 reserved2;
+	u32 src_ip_v6_2;
+	u32 src_ip_v6_3;
+	u32 src_ip_v6_4;
+	u32 dst_ip_v6_2;
+	u32 dst_ip_v6_3;
+	u32 dst_ip_v6_4;
+};
+
+
+/*
+ * The third (and last) request to be passed in order to establish
+ * connection in option2
+ */
+struct l4_kwq_connect_req3 {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 ka_timeout;
+	u32 ka_interval;
+#if defined(__BIG_ENDIAN)
+	u8 snd_seq_scale;
+	u8 ttl;
+	u8 tos;
+	u8 ka_max_probe_count;
+#elif defined(__LITTLE_ENDIAN)
+	u8 ka_max_probe_count;
+	u8 tos;
+	u8 ttl;
+	u8 snd_seq_scale;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 pmtu;
+	u16 mss;
+#elif defined(__LITTLE_ENDIAN)
+	u16 mss;
+	u16 pmtu;
+#endif
+	u32 rcv_buf;
+	u32 snd_buf;
+	u32 seed;
+};
+
+
+/*
+ * a KWQE request to offload a PG connection
+ */
+struct l4_kwq_offload_pg {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 l2hdr_nbytes;
+	u8 pg_flags;
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
+#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
+#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
+	u8 da0;
+	u8 da1;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da1;
+	u8 da0;
+	u8 pg_flags;
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
+#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
+#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
+	u8 l2hdr_nbytes;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 da2;
+	u8 da3;
+	u8 da4;
+	u8 da5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da5;
+	u8 da4;
+	u8 da3;
+	u8 da2;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 sa0;
+	u8 sa1;
+	u8 sa2;
+	u8 sa3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 sa3;
+	u8 sa2;
+	u8 sa1;
+	u8 sa0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 sa4;
+	u8 sa5;
+	u16 etype;
+#elif defined(__LITTLE_ENDIAN)
+	u16 etype;
+	u8 sa5;
+	u8 sa4;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 vlan_tag;
+	u16 ipid_start;
+#elif defined(__LITTLE_ENDIAN)
+	u16 ipid_start;
+	u16 vlan_tag;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 ipid_count;
+	u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved3;
+	u16 ipid_count;
+#endif
+	u32 host_opaque;
+};
+
+
+/*
+ * Abortively close the connection request
+ */
+struct l4_kwq_reset_req {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 reserved2[6];
+};
+
+
+/*
+ * a KWQE request to update a PG connection
+ */
+struct l4_kwq_update_pg {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
+	u8 opcode;
+	u16 oper16;
+#elif defined(__LITTLE_ENDIAN)
+	u16 oper16;
+	u8 opcode;
+	u8 flags;
+#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 pg_cid;
+	u32 pg_host_opaque;
+#if defined(__BIG_ENDIAN)
+	u8 pg_valids;
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
+#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
+#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
+#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
+#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
+	u8 pg_unused_a;
+	u16 pg_ipid_count;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pg_ipid_count;
+	u8 pg_unused_a;
+	u8 pg_valids;
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
+#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
+#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
+#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
+#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserverd3;
+	u8 da0;
+	u8 da1;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da1;
+	u8 da0;
+	u16 reserverd3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 da2;
+	u8 da3;
+	u8 da4;
+	u8 da5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da5;
+	u8 da4;
+	u8 da3;
+	u8 da2;
+#endif
+	u32 reserved4;
+	u32 reserved5;
+};
+
+
+/*
+ * a KWQE request to upload a PG or L4 context
+ */
+struct l4_kwq_upload {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
+#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
+	u8 opcode;
+	u16 oper16;
+#elif defined(__LITTLE_ENDIAN)
+	u16 oper16;
+	u8 opcode;
+	u8 flags;
+#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
+#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 reserved2[6];
+};
+
+#endif /* CNIC_DEFS_H */
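Every request structure in this header is laid out twice, once per byte order, with each flags byte accompanied by mask/shift constants, so writing through the named fields stays endian-safe. A minimal sketch of building a close request from those constants (build_close_req() is a hypothetical helper; the field values mirror what cnic.c's connection-management path sets):

static void build_close_req(struct l4_kwq_close_req *req, u32 cid)
{
	memset(req, 0, sizeof(*req));
	req->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
	/* layer code lives in bits 6:4 of the flags byte */
	req->flags = (L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT) &
		     L4_KWQ_CLOSE_REQ_LAYER_CODE;
	req->cid = cid;
}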

+ 299 - 0
drivers/net/cnic_if.h

@@ -0,0 +1,299 @@
+/* cnic_if.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+
+#ifndef CNIC_IF_H
+#define CNIC_IF_H
+
+#define CNIC_MODULE_VERSION	"2.0.0"
+#define CNIC_MODULE_RELDATE	"May 21, 2009"
+
+#define CNIC_ULP_RDMA		0
+#define CNIC_ULP_ISCSI		1
+#define CNIC_ULP_L4		2
+#define MAX_CNIC_ULP_TYPE_EXT	2
+#define MAX_CNIC_ULP_TYPE	3
+
+struct kwqe {
+	u32 kwqe_op_flag;
+
+#define KWQE_OPCODE_MASK	0x00ff0000
+#define KWQE_OPCODE_SHIFT	16
+#define KWQE_FLAGS_LAYER_SHIFT	28
+#define KWQE_OPCODE(x)		((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
+
+	u32 kwqe_info0;
+	u32 kwqe_info1;
+	u32 kwqe_info2;
+	u32 kwqe_info3;
+	u32 kwqe_info4;
+	u32 kwqe_info5;
+	u32 kwqe_info6;
+};
+
+struct kwqe_16 {
+	u32 kwqe_info0;
+	u32 kwqe_info1;
+	u32 kwqe_info2;
+	u32 kwqe_info3;
+};
+
+struct kcqe {
+	u32 kcqe_info0;
+	u32 kcqe_info1;
+	u32 kcqe_info2;
+	u32 kcqe_info3;
+	u32 kcqe_info4;
+	u32 kcqe_info5;
+	u32 kcqe_info6;
+	u32 kcqe_op_flag;
+		#define KCQE_RAMROD_COMPLETION		(0x1<<27) /* Everest */
+		#define KCQE_FLAGS_LAYER_MASK		(0x7<<28)
+		#define KCQE_FLAGS_LAYER_MASK_MISC	(0<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L2	(2<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L3	(3<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L4	(4<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L5_RDMA	(5<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L5_ISCSI	(6<<28)
+		#define KCQE_FLAGS_NEXT 		(1<<31)
+		#define KCQE_FLAGS_OPCODE_MASK		(0xff<<16)
+		#define KCQE_FLAGS_OPCODE_SHIFT		(16)
+		#define KCQE_OPCODE(op)			\
+		(((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
+};
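Completions carry both a layer code and an opcode in kcqe_op_flag: the layer bits decide which upper-layer protocol a KCQE belongs to, and KCQE_OPCODE() extracts the event within that layer. A sketch of the demultiplexing step, assuming the CNIC_ULP_* constants above (kcqe_ulp_type() is a hypothetical helper):

static int kcqe_ulp_type(struct kcqe *kcqe)
{
	u32 layer = kcqe->kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;

	if (layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
		return CNIC_ULP_ISCSI;
	if (layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
		return CNIC_ULP_RDMA;
	if (layer == KCQE_FLAGS_LAYER_MASK_L4)
		return CNIC_ULP_L4;
	return -1;		/* not a ULP-bound completion */
}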
+
+#define MAX_CNIC_CTL_DATA	64
+#define MAX_DRV_CTL_DATA	64
+
+#define CNIC_CTL_STOP_CMD		1
+#define CNIC_CTL_START_CMD		2
+#define CNIC_CTL_COMPLETION_CMD		3
+
+#define DRV_CTL_IO_WR_CMD		0x101
+#define DRV_CTL_IO_RD_CMD		0x102
+#define DRV_CTL_CTX_WR_CMD		0x103
+#define DRV_CTL_CTXTBL_WR_CMD		0x104
+#define DRV_CTL_COMPLETION_CMD		0x105
+
+struct cnic_ctl_completion {
+	u32	cid;
+};
+
+struct drv_ctl_completion {
+	u32	comp_count;
+};
+
+struct cnic_ctl_info {
+	int	cmd;
+	union {
+		struct cnic_ctl_completion comp;
+		char bytes[MAX_CNIC_CTL_DATA];
+	} data;
+};
+
+struct drv_ctl_io {
+	u32		cid_addr;
+	u32		offset;
+	u32		data;
+	dma_addr_t	dma_addr;
+};
+
+struct drv_ctl_info {
+	int	cmd;
+	union {
+		struct drv_ctl_completion comp;
+		struct drv_ctl_io io;
+		char bytes[MAX_DRV_CTL_DATA];
+	} data;
+};
+
+struct cnic_ops {
+	struct module	*cnic_owner;
+	/* Calls to these functions are protected by RCU.  When
+	 * unregistering, we wait for any calls to complete before
+	 * continuing.
+	 */
+	int		(*cnic_handler)(void *, void *);
+	int		(*cnic_ctl)(void *, struct cnic_ctl_info *);
+};
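The RCU comment above is the same contract cnic.c itself follows: callers dereference the ops pointer inside an RCU read-side critical section, and the teardown path (see cnic_stop_hw() earlier in this diff) clears the pointer and calls synchronize_rcu() before anything is freed. A minimal sketch of both sides, mirroring cnic_netdev_event() above, with hypothetical helper names:

static void ulp_indicate_event(struct cnic_local *cp, int ulp_type,
			       unsigned long event)
{
	struct cnic_ulp_ops *ops;

	rcu_read_lock();
	ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (ops && ops->indicate_netevent)
		ops->indicate_netevent(cp->ulp_handle[ulp_type], event);
	rcu_read_unlock();
}

static void ulp_detach(struct cnic_local *cp, int ulp_type)
{
	rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
	synchronize_rcu();	/* wait out any reader still in a callback */
}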
+
+#define MAX_CNIC_VEC	8
+
+struct cnic_irq {
+	unsigned int	vector;
+	void		*status_blk;
+	u32		status_blk_num;
+	u32		irq_flags;
+#define CNIC_IRQ_FL_MSIX		0x00000001
+};
+
+struct cnic_eth_dev {
+	struct module	*drv_owner;
+	u32		drv_state;
+#define CNIC_DRV_STATE_REGD		0x00000001
+#define CNIC_DRV_STATE_USING_MSIX	0x00000002
+	u32		chip_id;
+	u32		max_kwqe_pending;
+	struct pci_dev	*pdev;
+	void __iomem	*io_base;
+
+	u32		ctx_tbl_offset;
+	u32		ctx_tbl_len;
+	int		ctx_blk_size;
+	u32		starting_cid;
+	u32		max_iscsi_conn;
+	u32		max_fcoe_conn;
+	u32		max_rdma_conn;
+	u32		reserved0[2];
+
+	int		num_irq;
+	struct cnic_irq	irq_arr[MAX_CNIC_VEC];
+	int		(*drv_register_cnic)(struct net_device *,
+					     struct cnic_ops *, void *);
+	int		(*drv_unregister_cnic)(struct net_device *);
+	int		(*drv_submit_kwqes_32)(struct net_device *,
+					       struct kwqe *[], u32);
+	int		(*drv_submit_kwqes_16)(struct net_device *,
+					       struct kwqe_16 *[], u32);
+	int		(*drv_ctl)(struct net_device *, struct drv_ctl_info *);
+	unsigned long	reserved1[2];
+};
+
+struct cnic_sockaddr {
+	union {
+		struct sockaddr_in	v4;
+		struct sockaddr_in6	v6;
+	} local;
+	union {
+		struct sockaddr_in	v4;
+		struct sockaddr_in6	v6;
+	} remote;
+};
+
+struct cnic_sock {
+	struct cnic_dev *dev;
+	void	*context;
+	u32	src_ip[4];
+	u32	dst_ip[4];
+	u16	src_port;
+	u16	dst_port;
+	u16	vlan_id;
+	unsigned char old_ha[6];
+	unsigned char ha[6];
+	u32	mtu;
+	u32	cid;
+	u32	l5_cid;
+	u32	pg_cid;
+	int	ulp_type;
+
+	u32	ka_timeout;
+	u32	ka_interval;
+	u8	ka_max_probe_count;
+	u8	tos;
+	u8	ttl;
+	u8	snd_seq_scale;
+	u32	rcv_buf;
+	u32	snd_buf;
+	u32	seed;
+
+	unsigned long	tcp_flags;
+#define SK_TCP_NO_DELAY_ACK	0x1
+#define SK_TCP_KEEP_ALIVE	0x2
+#define SK_TCP_NAGLE		0x4
+#define SK_TCP_TIMESTAMP	0x8
+#define SK_TCP_SACK		0x10
+#define SK_TCP_SEG_SCALING	0x20
+	unsigned long	flags;
+#define SK_F_INUSE		0
+#define SK_F_OFFLD_COMPLETE	1
+#define SK_F_OFFLD_SCHED	2
+#define SK_F_PG_OFFLD_COMPLETE	3
+#define SK_F_CONNECT_START	4
+#define SK_F_IPV6		5
+#define SK_F_CLOSING		7
+
+	atomic_t ref_count;
+	u32 state;
+	struct kwqe kwqe1;
+	struct kwqe kwqe2;
+	struct kwqe kwqe3;
+};
+
+struct cnic_dev {
+	struct net_device	*netdev;
+	struct pci_dev		*pcidev;
+	void __iomem		*regview;
+	struct list_head	list;
+
+	int (*register_device)(struct cnic_dev *dev, int ulp_type,
+			       void *ulp_ctx);
+	int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
+	int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
+				u32 num_wqes);
+	int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
+				u32 num_wqes);
+
+	int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
+			 void *);
+	int (*cm_destroy)(struct cnic_sock *);
+	int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
+	int (*cm_abort)(struct cnic_sock *);
+	int (*cm_close)(struct cnic_sock *);
+	struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
+	int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
+				 char *data, u16 data_size);
+	unsigned long	flags;
+#define CNIC_F_CNIC_UP		1
+#define CNIC_F_BNX2_CLASS	3
+#define CNIC_F_BNX2X_CLASS	4
+	atomic_t	ref_count;
+	u8		mac_addr[6];
+
+	int		max_iscsi_conn;
+	int		max_fcoe_conn;
+	int		max_rdma_conn;
+
+	void		*cnic_priv;
+};
+
+#define CNIC_WR(dev, off, val)		writel(val, dev->regview + off)
+#define CNIC_WR16(dev, off, val)	writew(val, dev->regview + off)
+#define CNIC_WR8(dev, off, val)		writeb(val, dev->regview + off)
+#define CNIC_RD(dev, off)		readl(dev->regview + off)
+#define CNIC_RD16(dev, off)		readw(dev->regview + off)
+
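The accessors above are thin wrappers over the BAR mapping saved in dev->regview. Posting work to the hardware, for example, ends with a 16-bit write of the new producer index to the queue's doorbell offset, which is what cnic.c does after queuing KWQEs (the wrapper function here is a hypothetical name):

static void ring_kwq_doorbell(struct cnic_dev *dev, struct cnic_local *cp)
{
	/* hardware picks up everything up to the new producer index */
	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
}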
+struct cnic_ulp_ops {
+	/* Calls to these functions are protected by RCU.  When
+	 * unregistering, we wait for any calls to complete before
+	 * continuing.
+	 */
+
+	void (*cnic_init)(struct cnic_dev *dev);
+	void (*cnic_exit)(struct cnic_dev *dev);
+	void (*cnic_start)(void *ulp_ctx);
+	void (*cnic_stop)(void *ulp_ctx);
+	void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
+				u32 num_cqes);
+	void (*indicate_netevent)(void *ulp_ctx, unsigned long event);
+	void (*cm_connect_complete)(struct cnic_sock *);
+	void (*cm_close_complete)(struct cnic_sock *);
+	void (*cm_abort_complete)(struct cnic_sock *);
+	void (*cm_remote_close)(struct cnic_sock *);
+	void (*cm_remote_abort)(struct cnic_sock *);
+	void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
+				  char *data, u16 data_size);
+	struct module *owner;
+};
+
+extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
+
+extern int cnic_unregister_driver(int ulp_type);
+
+#endif

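Taken together, a ULP driver attaches in two steps: a global cnic_register_driver() call with its cnic_ulp_ops table, after which the cnic core invokes cnic_init for each device it manages (cnic_ulp_init() in the diff above walks registered ULPs for exactly this). A minimal module skeleton under those assumptions — all my_ulp_* names are hypothetical, but the registration calls are the ones exported by this header:

static void my_ulp_init(struct cnic_dev *dev)
{
	/* typically: dev->register_device(dev, CNIC_ULP_ISCSI, ctx); */
}

static void my_ulp_exit(struct cnic_dev *dev)
{
	/* typically: dev->unregister_device(dev, CNIC_ULP_ISCSI); */
}

static struct cnic_ulp_ops my_ulp_ops = {
	.cnic_init	= my_ulp_init,
	.cnic_exit	= my_ulp_exit,
	.owner		= THIS_MODULE,
};

static int __init my_ulp_module_init(void)
{
	return cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
}

static void __exit my_ulp_module_exit(void)
{
	cnic_unregister_driver(CNIC_ULP_ISCSI);
}

module_init(my_ulp_module_init);
module_exit(my_ulp_module_exit);
MODULE_LICENSE("GPL");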
+ 20 - 10
drivers/s390/scsi/zfcp_ccw.c

@@ -11,6 +11,24 @@
 
 #include "zfcp_ext.h"
 
+#define ZFCP_MODEL_PRIV 0x4
+
+static struct ccw_device_id zfcp_ccw_device_id[] = {
+	{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
+	{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) },
+	{},
+};
+MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
+
+/**
+ * zfcp_ccw_priv_sch - check if subchannel is privileged
+ * @adapter: Adapter/Subchannel to check
+ */
+int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter)
+{
+	return adapter->ccw_device->id.dev_model == ZFCP_MODEL_PRIV;
+}
+
 /**
  * zfcp_ccw_probe - probe function of zfcp driver
  * @ccw_device: pointer to belonging ccw device
@@ -176,8 +194,8 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
 					"ccnoti4", NULL);
 					"ccnoti4", NULL);
 		break;
 		break;
 	case CIO_BOXED:
 	case CIO_BOXED:
-		dev_warn(&adapter->ccw_device->dev,
-			 "The ccw device did not respond in time.\n");
+		dev_warn(&adapter->ccw_device->dev, "The FCP device "
+			 "did not respond within the specified time\n");
 		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL);
 		break;
 	}
@@ -199,14 +217,6 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
 	up(&zfcp_data.config_sema);
 	up(&zfcp_data.config_sema);
 }
 }
 
 
-static struct ccw_device_id zfcp_ccw_device_id[] = {
-	{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
-	{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x4) }, /* priv. */
-	{},
-};
-
-MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
-
 static struct ccw_driver zfcp_ccw_driver = {
 	.owner       = THIS_MODULE,
 	.name        = "zfcp",

+ 5 - 5
drivers/s390/scsi/zfcp_dbf.c

@@ -163,7 +163,7 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
 	}
 
 	response->fsf_command = fsf_req->fsf_command;
-	response->fsf_reqid = (unsigned long)fsf_req;
+	response->fsf_reqid = fsf_req->req_id;
 	response->fsf_seqno = fsf_req->seq_no;
 	response->fsf_issued = fsf_req->issued;
 	response->fsf_prot_status = qtcb->prefix.prot_status;
@@ -737,7 +737,7 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
 	spin_lock_irqsave(&adapter->san_dbf_lock, flags);
 	memset(r, 0, sizeof(*r));
 	strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE);
-	r->fsf_reqid = (unsigned long)fsf_req;
+	r->fsf_reqid = fsf_req->req_id;
 	r->fsf_seqno = fsf_req->seq_no;
 	r->s_id = fc_host_port_id(adapter->scsi_host);
 	r->d_id = wka_port->d_id;
@@ -773,7 +773,7 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
 	spin_lock_irqsave(&adapter->san_dbf_lock, flags);
 	memset(r, 0, sizeof(*r));
 	strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
-	r->fsf_reqid = (unsigned long)fsf_req;
+	r->fsf_reqid = fsf_req->req_id;
 	r->fsf_seqno = fsf_req->seq_no;
 	r->s_id = wka_port->d_id;
 	r->d_id = fc_host_port_id(adapter->scsi_host);
@@ -803,7 +803,7 @@ static void zfcp_san_dbf_event_els(const char *tag, int level,
 	spin_lock_irqsave(&adapter->san_dbf_lock, flags);
 	memset(rec, 0, sizeof(*rec));
 	strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
-	rec->fsf_reqid = (unsigned long)fsf_req;
+	rec->fsf_reqid = fsf_req->req_id;
 	rec->fsf_seqno = fsf_req->seq_no;
 	rec->s_id = s_id;
 	rec->d_id = d_id;
@@ -965,7 +965,7 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
 						      ZFCP_DBF_SCSI_FCP_SNS_INFO);
 				}
 
-				rec->fsf_reqid = (unsigned long)fsf_req;
+				rec->fsf_reqid = fsf_req->req_id;
 				rec->fsf_seqno = fsf_req->seq_no;
 				rec->fsf_issued = fsf_req->issued;
 			}

+ 0 - 7
drivers/s390/scsi/zfcp_def.h

@@ -47,13 +47,6 @@
 
 /********************* CIO/QDIO SPECIFIC DEFINES *****************************/
 
-/* Adapter Identification Parameters */
-#define ZFCP_CONTROL_UNIT_TYPE  0x1731
-#define ZFCP_CONTROL_UNIT_MODEL 0x03
-#define ZFCP_DEVICE_TYPE        0x1732
-#define ZFCP_DEVICE_MODEL       0x03
-#define ZFCP_DEVICE_MODEL_PRIV	0x04
-
 /* DMQ bug workaround: don't use last SBALE */
 #define ZFCP_MAX_SBALES_PER_SBAL	(QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
 

+ 7 - 1
drivers/s390/scsi/zfcp_erp.c

@@ -880,6 +880,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
 				zfcp_port_put(port);
 			return ZFCP_ERP_CONTINUES;
 		}
+		/* fall through */
 	case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
 		if (!port->d_id)
 			return ZFCP_ERP_FAILED;
@@ -894,8 +895,13 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
 				act->step = ZFCP_ERP_STEP_PORT_CLOSING;
 				return ZFCP_ERP_CONTINUES;
 			}
-		/* fall through otherwise */
 		}
+		if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) {
+			port->d_id = 0;
+			_zfcp_erp_port_reopen(port, 0, "erpsoc1", NULL);
+			return ZFCP_ERP_EXIT;
+		}
+		/* fall through otherwise */
 	}
 	return ZFCP_ERP_FAILED;
 }

+ 1 - 0
drivers/s390/scsi/zfcp_ext.h

@@ -27,6 +27,7 @@ extern int zfcp_sg_setup_table(struct scatterlist *, int);
 
 /* zfcp_ccw.c */
 extern int zfcp_ccw_register(void);
+extern int zfcp_ccw_priv_sch(struct zfcp_adapter *);
 extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
 
 /* zfcp_cfdc.c */

+ 6 - 1
drivers/s390/scsi/zfcp_fc.c

@@ -150,9 +150,14 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
 	struct zfcp_port *port;
 
 	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	list_for_each_entry(port, &fsf_req->adapter->port_list_head, list)
+	list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
 		if ((port->d_id & range) == (elem->nport_did & range))
 			zfcp_test_link(port);
+		if (!port->d_id)
+			zfcp_erp_port_reopen(port,
+					     ZFCP_STATUS_COMMON_ERP_FAILED,
+					     "fcrscn1", NULL);
+	}
 
 	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 }

+ 17 - 12
drivers/s390/scsi/zfcp_fsf.c

@@ -526,6 +526,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 		break;
 	case FSF_TOPO_AL:
 		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+		/* fall through */
 	default:
 		dev_err(&adapter->ccw_device->dev,
 			"Unknown or unsupported arbitrated loop "
@@ -897,6 +898,7 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
 		switch (fsq->word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
 			zfcp_test_link(unit->port);
+			/* fall through */
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
@@ -993,6 +995,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
 		break;
 	case FSF_PORT_HANDLE_NOT_VALID:
 		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
+		/* fall through */
 	case FSF_GENERIC_COMMAND_REJECTED:
 	case FSF_PAYLOAD_SIZE_MISMATCH:
 	case FSF_REQUEST_SIZE_TOO_LARGE:
@@ -1399,7 +1402,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
 	struct fsf_plogi *plogi;
 
 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
-		return;
+		goto out;
 
 	switch (header->fsf_status) {
 	case FSF_PORT_ALREADY_OPEN:
@@ -1461,6 +1464,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	}
+
+out:
+	zfcp_port_put(port);
 }
 
 /**
@@ -1473,6 +1479,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
 	struct qdio_buffer_element *sbale;
 	struct zfcp_adapter *adapter = erp_action->adapter;
 	struct zfcp_fsf_req *req;
+	struct zfcp_port *port = erp_action->port;
 	int retval = -EIO;
 
 	spin_lock_bh(&adapter->req_q_lock);
@@ -1493,16 +1500,18 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
         sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
 	req->handler = zfcp_fsf_open_port_handler;
-	req->qtcb->bottom.support.d_id = erp_action->port->d_id;
-	req->data = erp_action->port;
+	req->qtcb->bottom.support.d_id = port->d_id;
+	req->data = port;
 	req->erp_action = erp_action;
 	erp_action->fsf_req = req;
+	zfcp_port_get(port);
 
 	zfcp_fsf_start_erp_timer(req);
 	retval = zfcp_fsf_req_send(req);
 	if (retval) {
 		zfcp_fsf_req_free(req);
 		erp_action->fsf_req = NULL;
+		zfcp_port_put(port);
 	}
 out:
 	spin_unlock_bh(&adapter->req_q_lock);
@@ -1590,8 +1599,10 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
 	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
 		dev_warn(&req->adapter->ccw_device->dev,
 			 "Opening WKA port 0x%x failed\n", wka_port->d_id);
+		/* fall through */
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		/* fall through */
 	case FSF_ACCESS_DENIED:
 		wka_port->status = ZFCP_WKA_PORT_OFFLINE;
 		break;
@@ -1876,7 +1887,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
 
 		if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
 		    (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
-		    (adapter->ccw_device->id.dev_model != ZFCP_DEVICE_MODEL_PRIV)) {
+		    !zfcp_ccw_priv_sch(adapter)) {
 			exclusive = (bottom->lun_access_info &
 					FSF_UNIT_ACCESS_EXCLUSIVE);
 			readwrite = (bottom->lun_access_info &
@@ -2314,7 +2325,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
 {
 	struct zfcp_fsf_req *req;
 	struct fcp_cmnd_iu *fcp_cmnd_iu;
-	unsigned int sbtype;
+	unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
 	int real_bytes, retval = -EIO;
 	struct zfcp_adapter *adapter = unit->port->adapter;
 
@@ -2356,11 +2367,9 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
 	switch (scsi_cmnd->sc_data_direction) {
 	case DMA_NONE:
 		req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
-		sbtype = SBAL_FLAGS0_TYPE_READ;
 		break;
 	case DMA_FROM_DEVICE:
 		req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
-		sbtype = SBAL_FLAGS0_TYPE_READ;
 		fcp_cmnd_iu->rddata = 1;
 		break;
 	case DMA_TO_DEVICE:
@@ -2369,8 +2378,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
 		fcp_cmnd_iu->wddata = 1;
 		break;
 	case DMA_BIDIRECTIONAL:
-	default:
-		retval = -EIO;
 		goto failed_scsi_cmnd;
 	}
 
@@ -2394,9 +2401,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
 					     scsi_sglist(scsi_cmnd),
 					     FSF_MAX_SBALS_PER_REQ);
 	if (unlikely(real_bytes < 0)) {
-		if (req->sbal_number < FSF_MAX_SBALS_PER_REQ)
-			retval = -EIO;
-		else {
+		if (req->sbal_number >= FSF_MAX_SBALS_PER_REQ) {
 			dev_err(&adapter->ccw_device->dev,
 				"Oversize data package, unit 0x%016Lx "
 				"on port 0x%016Lx closed\n",

+ 12 - 1
drivers/s390/scsi/zfcp_scsi.c

@@ -12,6 +12,10 @@
 #include "zfcp_ext.h"
 #include "zfcp_ext.h"
 #include <asm/atomic.h>
 #include <asm/atomic.h>
 
 
+static unsigned int default_depth = 32;
+module_param_named(queue_depth, default_depth, uint, 0600);
+MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
+
 /* Find start of Sense Information in FCP response unit*/
 /* Find start of Sense Information in FCP response unit*/
 char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
 char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
 {
 {
@@ -24,6 +28,12 @@ char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
 	return fcp_sns_info_ptr;
 }
 
+static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth)
+{
+	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+	return sdev->queue_depth;
+}
+
 static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
 {
 	struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
@@ -34,7 +44,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
 static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
 {
 	if (sdp->tagged_supported)
-		scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, 32);
+		scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, default_depth);
 	else
 		scsi_adjust_queue_depth(sdp, 0, 1);
 	return 0;
@@ -647,6 +657,7 @@ struct zfcp_data zfcp_data = {
 		.name			 = "zfcp",
 		.module			 = THIS_MODULE,
 		.proc_name		 = "zfcp",
+		.change_queue_depth	 = zfcp_scsi_change_queue_depth,
 		.slave_alloc		 = zfcp_scsi_slave_alloc,
 		.slave_configure	 = zfcp_scsi_slave_configure,
 		.slave_destroy		 = zfcp_scsi_slave_destroy,

+ 11 - 20
drivers/scsi/Kconfig

@@ -191,20 +191,19 @@ config SCSI_ENCLOSURE
 	  it has an enclosure device.  Selecting this option will just allow
 	  certain enclosure conditions to be reported and is not required.
 
-comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs"
-	depends on SCSI
-
 config SCSI_MULTI_LUN
 config SCSI_MULTI_LUN
 	bool "Probe all LUNs on each SCSI device"
 	bool "Probe all LUNs on each SCSI device"
 	depends on SCSI
 	depends on SCSI
 	help
 	help
-	  If you have a SCSI device that supports more than one LUN (Logical
-	  Unit Number), e.g. a CD jukebox, and only one LUN is detected, you
-	  can say Y here to force the SCSI driver to probe for multiple LUNs.
-	  A SCSI device with multiple LUNs acts logically like multiple SCSI
-	  devices. The vast majority of SCSI devices have only one LUN, and
-	  so most people can say N here. The max_luns boot/module parameter 
-	  allows to override this setting.
+	  Some devices support more than one LUN (Logical Unit Number) in order
+	  to allow access to several media, e.g. CD jukebox, USB card reader,
+	  mobile phone in mass storage mode. This option forces the kernel to
+	  probe for all LUNs by default. This setting can be overridden by
+	  max_luns boot/module parameter. Note that this option does not affect
+	  devices conforming to SCSI-3 or higher as they can explicitly report
+	  their number of LUNs. It is safe to say Y here unless you have one of
+	  those rare devices which reacts in an unexpected way when probed for
+	  multiple LUNs.
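For example, per the help text above, loading the SCSI core with max_luns=1 (or booting with scsi_mod.max_luns=1 when it is built in) restores single-LUN probing even with this option enabled.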
 
 config SCSI_CONSTANTS
 	bool "Verbose SCSI error reporting (kernel size +=12K)"
@@ -355,6 +354,7 @@ config ISCSI_TCP
 	 http://open-iscsi.org
 
 source "drivers/scsi/cxgb3i/Kconfig"
+source "drivers/scsi/bnx2i/Kconfig"
 
 config SGIWD93_SCSI
 	tristate "SGI WD93C93 SCSI Driver"
@@ -508,6 +508,7 @@ config SCSI_AIC7XXX_OLD
 
 source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
 source "drivers/scsi/aic94xx/Kconfig"
+source "drivers/scsi/mvsas/Kconfig"
 
 config SCSI_DPT_I2O
 	tristate "Adaptec I2O RAID support "
@@ -1050,16 +1051,6 @@ config SCSI_IZIP_SLOW_CTR
 
 	  Generally, saying N is fine.
 
-config SCSI_MVSAS
-	tristate "Marvell 88SE6440 SAS/SATA support"
-	depends on PCI && SCSI
-	select SCSI_SAS_LIBSAS
-	help
-	  This driver supports Marvell SAS/SATA PCI devices.
-
-	  To compiler this driver as a module, choose M here: the module
-	  will be called mvsas.
-
 config SCSI_NCR53C406A
 	tristate "NCR53c406a SCSI support"
 	depends on ISA && SCSI

+ 2 - 1
drivers/scsi/Makefile

@@ -126,9 +126,10 @@ obj-$(CONFIG_SCSI_IBMVSCSIS)	+= ibmvscsi/
 obj-$(CONFIG_SCSI_IBMVFC)	+= ibmvscsi/
 obj-$(CONFIG_SCSI_HPTIOP)	+= hptiop.o
 obj-$(CONFIG_SCSI_STEX)		+= stex.o
-obj-$(CONFIG_SCSI_MVSAS)	+= mvsas.o
+obj-$(CONFIG_SCSI_MVSAS)	+= mvsas/
 obj-$(CONFIG_PS3_ROM)		+= ps3rom.o
 obj-$(CONFIG_SCSI_CXGB3_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgb3i/
+obj-$(CONFIG_SCSI_BNX2_ISCSI)	+= libiscsi.o bnx2i/
 
 obj-$(CONFIG_ARM)		+= arm/
 

+ 1 - 1
drivers/scsi/NCR_D700.c

@@ -224,7 +224,7 @@ NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
 	return ret;
 }
 
-static int
+static irqreturn_t
 NCR_D700_intr(int irq, void *data)
 {
 	struct NCR_D700_private *p = (struct NCR_D700_private *)data;

+ 155 - 0
drivers/scsi/bnx2i/57xx_iscsi_constants.h

@@ -0,0 +1,155 @@
+/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+#ifndef __57XX_ISCSI_CONSTANTS_H_
+#define __57XX_ISCSI_CONSTANTS_H_
+
+/**
+* This file defines HSI constants for the iSCSI flows
+*/
+
+/* iSCSI request op codes */
+#define ISCSI_OPCODE_CLEANUP_REQUEST    (7)
+
+/* iSCSI response/messages op codes */
+#define ISCSI_OPCODE_CLEANUP_RESPONSE 		(0x27)
+#define ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION    (0)
+
+/* iSCSI task types */
+#define ISCSI_TASK_TYPE_READ    (0)
+#define ISCSI_TASK_TYPE_WRITE   (1)
+#define ISCSI_TASK_TYPE_MPATH   (2)
+
+/* initial CQ sequence numbers */
+#define ISCSI_INITIAL_SN    (1)
+
+/* KWQ (kernel work queue) layer codes */
+#define ISCSI_KWQE_LAYER_CODE   (6)
+
+/* KWQ (kernel work queue) request op codes */
+#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN1 (0)
+#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN2 (1)
+#define ISCSI_KWQE_OPCODE_UPDATE_CONN   (2)
+#define ISCSI_KWQE_OPCODE_DESTROY_CONN  (3)
+#define ISCSI_KWQE_OPCODE_INIT1         (4)
+#define ISCSI_KWQE_OPCODE_INIT2         (5)
+
+/* KCQ (kernel completion queue) response op codes */
+#define ISCSI_KCQE_OPCODE_OFFLOAD_CONN  (0x10)
+#define ISCSI_KCQE_OPCODE_UPDATE_CONN   (0x12)
+#define ISCSI_KCQE_OPCODE_DESTROY_CONN  (0x13)
+#define ISCSI_KCQE_OPCODE_INIT          (0x14)
+#define ISCSI_KCQE_OPCODE_FW_CLEAN_TASK	(0x15)
+#define ISCSI_KCQE_OPCODE_TCP_RESET     (0x16)
+#define ISCSI_KCQE_OPCODE_TCP_SYN       (0x17)
+#define ISCSI_KCQE_OPCODE_TCP_FIN       (0X18)
+#define ISCSI_KCQE_OPCODE_TCP_ERROR     (0x19)
+#define ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
+#define ISCSI_KCQE_OPCODE_ISCSI_ERROR   (0x21)
+
+/* KCQ (kernel completion queue) completion status */
+#define ISCSI_KCQE_COMPLETION_STATUS_SUCCESS                            (0x0)
+#define ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE                     (0x1)
+#define ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE                  (0x2)
+#define ISCSI_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE                   (0x3)
+#define ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR                          (0x4)
+
+#define ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR                        (0x5)
+#define ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR                       (0x6)
+
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_UNEXPECTED_OPCODE     (0xa)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE                (0xb)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN               (0xc)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT                   (0xd)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN                (0xe)
+
+/* Response */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN            (0xf)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T              (0x10)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_IS_ZERO  (0x2c)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG  (0x2d)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0                 (0x11)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1                 (0x12)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2                 (0x13)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3                 (0x14)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4                 (0x15)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5                 (0x16)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6                 (0x17)
+
+/* Data-In */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN        (0x18)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN       (0x19)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO            (0x1a)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV          (0x1b)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN                (0x1c)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN      (0x1d)
+
+/* R2T */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF            (0x1f)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN                   (0x20)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN                 (0x21)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 (0x22)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 (0x23)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED       (0x24)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV           (0x25)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN         (0x26)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO (0x27)
+
+/* TMF */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN        (0x28)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN         (0x29)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN         (0x2a)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP   (0x2b)
+
+/* IP/TCP processing errors: */
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT               (0x40)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS                (0x41)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG               (0x42)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS                (0x43)
+
+/* iSCSI licensing errors */
+/* general iSCSI license not installed */
+#define ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED                (0x50)
+/* additional LOM specific iSCSI license not installed */
+#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED              (0x51)
+
+/* SQ/RQ/CQ DB structure sizes */
+#define ISCSI_SQ_DB_SIZE    (16)
+#define ISCSI_RQ_DB_SIZE    (16)
+#define ISCSI_CQ_DB_SIZE    (80)
+
+#define ISCSI_SQN_TO_NOTIFY_NOT_VALID                                   0xFFFF
+
+/* Page size codes (for flags field in connection offload request) */
+#define ISCSI_PAGE_SIZE_256     (0)
+#define ISCSI_PAGE_SIZE_512     (1)
+#define ISCSI_PAGE_SIZE_1K      (2)
+#define ISCSI_PAGE_SIZE_2K      (3)
+#define ISCSI_PAGE_SIZE_4K      (4)
+#define ISCSI_PAGE_SIZE_8K      (5)
+#define ISCSI_PAGE_SIZE_16K     (6)
+#define ISCSI_PAGE_SIZE_32K     (7)
+#define ISCSI_PAGE_SIZE_64K     (8)
+#define ISCSI_PAGE_SIZE_128K    (9)
+#define ISCSI_PAGE_SIZE_256K    (10)
+#define ISCSI_PAGE_SIZE_512K    (11)
+#define ISCSI_PAGE_SIZE_1M      (12)
+#define ISCSI_PAGE_SIZE_2M      (13)
+#define ISCSI_PAGE_SIZE_4M      (14)
+#define ISCSI_PAGE_SIZE_8M      (15)
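These codes appear to encode log2(page_size / 256); a minimal sketch of the mapping, assuming the kernel's ilog2() helper from <linux/log2.h>:

/* Illustrative only: code N selects a page size of 256 << N bytes,
 * e.g. ISCSI_PAGE_SIZE_4K == 4 because 256 << 4 == 4096.
 */
static inline u32 iscsi_page_size_code(u32 page_size)
{
	return ilog2(page_size) - ilog2(256);
}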
+
+/* Iscsi PDU related defines */
+#define ISCSI_HEADER_SIZE   (48)
+#define ISCSI_DIGEST_SHIFT  (2)
+#define ISCSI_DIGEST_SIZE   (4)
+
+#define B577XX_ISCSI_CONNECTION_TYPE    3
+
+#endif /*__57XX_ISCSI_CONSTANTS_H_ */

+ 1509 - 0
drivers/scsi/bnx2i/57xx_iscsi_hsi.h

@@ -0,0 +1,1509 @@
+/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+#ifndef __57XX_ISCSI_HSI_LINUX_LE__
+#define __57XX_ISCSI_HSI_LINUX_LE__
+
+/*
+ * iSCSI Async CQE
+ */
+struct bnx2i_async_msg {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 reserved1;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 reserved1;
+	u8 op_code;
+#endif
+	u32 reserved2;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved5;
+	u8 err_code;
+	u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved4;
+	u8 err_code;
+	u16 reserved5;
+#endif
+	u32 reserved6;
+	u32 lun[2];
+#if defined(__BIG_ENDIAN)
+	u8 async_event;
+	u8 async_vcode;
+	u16 param1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 param1;
+	u8 async_vcode;
+	u8 async_event;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 param2;
+	u16 param3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 param3;
+	u16 param2;
+#endif
+	u32 reserved7[3];
+	u32 cq_req_sn;
+};
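The mirrored __BIG_ENDIAN/__LITTLE_ENDIAN layouts used throughout these structures keep every field in the same bit positions of each 32-bit word regardless of host byte order; a hedged illustration using the first word of the CQE above:

/* msg is assumed to be a struct bnx2i_async_msg *. Both layout variants
 * put op_code in bits 31:24 of the first 32-bit word: it is declared
 * first on big-endian hosts (lowest address = most significant byte)
 * and last on little-endian hosts (highest address = most significant
 * byte), so the shift below yields the same result either way.
 */
u32 first_word = *(u32 *)msg;
u8 op_code = (u8)(first_word >> 24);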
+
+
+/*
+ * iSCSI Buffer Descriptor (BD)
+ */
+struct iscsi_bd {
+	u32 buffer_addr_hi;
+	u32 buffer_addr_lo;
+#if defined(__BIG_ENDIAN)
+	u16 reserved0;
+	u16 buffer_length;
+#elif defined(__LITTLE_ENDIAN)
+	u16 buffer_length;
+	u16 reserved0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u16 flags;
+#define ISCSI_BD_RESERVED1 (0x3F<<0)
+#define ISCSI_BD_RESERVED1_SHIFT 0
+#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
+#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
+#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
+#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
+#define ISCSI_BD_RESERVED2 (0xFF<<8)
+#define ISCSI_BD_RESERVED2_SHIFT 8
+#elif defined(__LITTLE_ENDIAN)
+	u16 flags;
+#define ISCSI_BD_RESERVED1 (0x3F<<0)
+#define ISCSI_BD_RESERVED1_SHIFT 0
+#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
+#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
+#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
+#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
+#define ISCSI_BD_RESERVED2 (0xFF<<8)
+#define ISCSI_BD_RESERVED2_SHIFT 8
+	u16 reserved3;
+#endif
+};
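The paired MASK/SHIFT defines follow the usual packed-field convention (and-mask, then shift right); a minimal sketch with an illustrative helper name, not part of the driver:

static inline u16 iscsi_get_field(u16 word, u16 mask, u16 shift)
{
	return (word & mask) >> shift;
}

/* e.g. testing whether a BD closes its chain:
 *	last = iscsi_get_field(bd->flags, ISCSI_BD_LAST_IN_BD_CHAIN,
 *			       ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT);
 */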
+
+
+/*
+ * iSCSI Cleanup SQ WQE
+ */
+struct bnx2i_cleanup_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 reserved1;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 reserved1;
+	u8 op_code;
+#endif
+	u32 reserved2[3];
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u16 itt;
+#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
+	u16 reserved3;
+#endif
+	u32 reserved4[10];
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved6;
+	u16 reserved5;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved5;
+	u8 reserved6;
+	u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Cleanup CQE
+ */
+struct bnx2i_cleanup_response {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 status;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 status;
+	u8 op_code;
+#endif
+	u32 reserved1[3];
+	u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved4;
+	u8 err_code;
+	u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved3;
+	u8 err_code;
+	u16 reserved4;
+#endif
+	u32 reserved5[7];
+#if defined(__BIG_ENDIAN)
+	u16 reserved6;
+	u16 itt;
+#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
+	u16 reserved6;
+#endif
+	u32 cq_req_sn;
+};
+
+
+/*
+ * SCSI read/write SQ WQE
+ */
+struct bnx2i_cmd_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
+#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
+#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
+#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
+#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
+#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
+#define ISCSI_CMD_REQUEST_READ (0x1<<6)
+#define ISCSI_CMD_REQUEST_READ_SHIFT 6
+#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
+#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_attr;
+#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
+#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
+#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
+#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
+#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
+#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
+#define ISCSI_CMD_REQUEST_READ (0x1<<6)
+#define ISCSI_CMD_REQUEST_READ_SHIFT 6
+#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
+#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
+	u8 op_code;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 ud_buffer_offset;
+	u16 sd_buffer_offset;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sd_buffer_offset;
+	u16 ud_buffer_offset;
+#endif
+	u32 lun[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved2;
+	u16 itt;
+#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
+	u16 reserved2;
+#endif
+	u32 total_data_transfer_length;
+	u32 cmd_sn;
+	u32 reserved3;
+	u32 cdb[4];
+	u32 zero_fill;
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 sd_start_bd_index;
+	u8 ud_start_bd_index;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 ud_start_bd_index;
+	u8 sd_start_bd_index;
+	u8 cq_index;
+#endif
+};
+
+
+/*
+ * task statistics for write response
+ */
+struct bnx2i_write_resp_task_stat {
+	u32 num_data_ins;
+};
+
+/*
+ * task statistics for read response
+ */
+struct bnx2i_read_resp_task_stat {
+#if defined(__BIG_ENDIAN)
+	u16 num_data_outs;
+	u16 num_r2ts;
+#elif defined(__LITTLE_ENDIAN)
+	u16 num_r2ts;
+	u16 num_data_outs;
+#endif
+};
+
+/*
+ * task statistics for iSCSI cmd response
+ */
+union bnx2i_cmd_resp_task_stat {
+	struct bnx2i_write_resp_task_stat write_stat;
+	struct bnx2i_read_resp_task_stat read_stat;
+};
+
+/*
+ * SCSI Command CQE
+ */
+struct bnx2i_cmd_response {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 response_flags;
+#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
+#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
+#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
+#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
+	u8 response;
+	u8 status;
+#elif defined(__LITTLE_ENDIAN)
+	u8 status;
+	u8 response;
+	u8 response_flags;
+#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
+#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
+#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
+#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 reserved2;
+	u32 residual_count;
+#if defined(__BIG_ENDIAN)
+	u16 reserved4;
+	u8 err_code;
+	u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved3;
+	u8 err_code;
+	u16 reserved4;
+#endif
+	u32 reserved5[5];
+	union bnx2i_cmd_resp_task_stat task_stat;
+	u32 reserved6;
+#if defined(__BIG_ENDIAN)
+	u16 reserved7;
+	u16 itt;
+#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
+	u16 reserved7;
+#endif
+	u32 cq_req_sn;
+};
+
+
+
+/*
+ * firmware middle-path request SQ WQE
+ */
+struct bnx2i_fw_mp_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+	u16 hdr_opaque1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 hdr_opaque1;
+	u8 op_attr;
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 hdr_opaque2[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved0;
+	u16 itt;
+#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
+	u16 reserved0;
+#endif
+	u32 hdr_opaque3[4];
+	u32 resp_bd_list_addr_lo;
+	u32 resp_bd_list_addr_hi;
+	u32 resp_buffer;
+#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+	u16 reserved4;
+	u8 reserved3;
+	u8 flags;
+#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
+#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
+#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+	u8 flags;
+#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
+#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
+#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
+	u8 reserved3;
+	u16 reserved4;
+#endif
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved6;
+	u8 reserved5;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 reserved5;
+	u8 reserved6;
+	u8 cq_index;
+#endif
+};
+
+
+/*
+ * firmware response - CQE: used only by firmware
+ */
+struct bnx2i_fw_response {
+	u32 hdr_dword1[2];
+	u32 hdr_exp_cmd_sn;
+	u32 hdr_max_cmd_sn;
+	u32 hdr_ttt;
+	u32 hdr_res_cnt;
+	u32 cqe_flags;
+#define ISCSI_FW_RESPONSE_RESERVED2 (0xFF<<0)
+#define ISCSI_FW_RESPONSE_RESERVED2_SHIFT 0
+#define ISCSI_FW_RESPONSE_ERR_CODE (0xFF<<8)
+#define ISCSI_FW_RESPONSE_ERR_CODE_SHIFT 8
+#define ISCSI_FW_RESPONSE_RESERVED3 (0xFFFF<<16)
+#define ISCSI_FW_RESPONSE_RESERVED3_SHIFT 16
+	u32 stat_sn;
+	u32 hdr_dword2[2];
+	u32 hdr_dword3[2];
+	u32 task_stat;
+	u32 reserved0;
+	u32 hdr_itt;
+	u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI KCQ CQE parameters
+ */
+union iscsi_kcqe_params {
+	u32 reserved0[4];
+};
+
+/*
+ * iSCSI KCQ CQE
+ */
+struct iscsi_kcqe {
+	u32 iscsi_conn_id;
+	u32 completion_status;
+	u32 iscsi_conn_context_id;
+	union iscsi_kcqe_params params;
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define ISCSI_KCQE_RESERVED0 (0xF<<0)
+#define ISCSI_KCQE_RESERVED0_SHIFT 0
+#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
+#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
+#define ISCSI_KCQE_RESERVED1 (0x1<<7)
+#define ISCSI_KCQE_RESERVED1_SHIFT 7
+	u8 op_code;
+	u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+	u16 qe_self_seq;
+	u8 op_code;
+	u8 flags;
+#define ISCSI_KCQE_RESERVED0 (0xF<<0)
+#define ISCSI_KCQE_RESERVED0_SHIFT 0
+#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
+#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
+#define ISCSI_KCQE_RESERVED1 (0x1<<7)
+#define ISCSI_KCQE_RESERVED1_SHIFT 7
+#endif
+};
+
+
+
+/*
+ * iSCSI KWQE header
+ */
+struct iscsi_kwqe_header {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
+#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
+	u8 op_code;
+#elif defined(__LITTLE_ENDIAN)
+	u8 op_code;
+	u8 flags;
+#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
+#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
+#endif
+};
+
+/*
+ * iSCSI firmware init request 1
+ */
+struct iscsi_kwqe_init1 {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u8 reserved0;
+	u8 num_cqs;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_cqs;
+	u8 reserved0;
+	struct iscsi_kwqe_header hdr;
+#endif
+	u32 dummy_buffer_addr_lo;
+	u32 dummy_buffer_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u16 num_ccells_per_conn;
+	u16 num_tasks_per_conn;
+#elif defined(__LITTLE_ENDIAN)
+	u16 num_tasks_per_conn;
+	u16 num_ccells_per_conn;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 sq_wqes_per_page;
+	u16 sq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sq_num_wqes;
+	u16 sq_wqes_per_page;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 cq_log_wqes_per_page;
+	u8 flags;
+#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
+#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
+	u16 cq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+	u16 cq_num_wqes;
+	u8 flags;
+#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
+#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
+	u8 cq_log_wqes_per_page;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 cq_num_pages;
+	u16 sq_num_pages;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sq_num_pages;
+	u16 cq_num_pages;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 rq_buffer_size;
+	u16 rq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rq_num_wqes;
+	u16 rq_buffer_size;
+#endif
+};
+
+/*
+ * iSCSI firmware init request 2
+ */
+struct iscsi_kwqe_init2 {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u16 max_cq_sqn;
+#elif defined(__LITTLE_ENDIAN)
+	u16 max_cq_sqn;
+	struct iscsi_kwqe_header hdr;
+#endif
+	u32 error_bit_map[2];
+	u32 reserved1[5];
+};
+
+/*
+ * Initial iSCSI connection offload request 1
+ */
+struct iscsi_kwqe_conn_offload1 {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 iscsi_conn_id;
+	struct iscsi_kwqe_header hdr;
+#endif
+	u32 sq_page_table_addr_lo;
+	u32 sq_page_table_addr_hi;
+	u32 cq_page_table_addr_lo;
+	u32 cq_page_table_addr_hi;
+	u32 reserved0[3];
+};
+
+/*
+ * iSCSI Page Table Entry (PTE)
+ */
+struct iscsi_pte {
+	u32 hi;
+	u32 lo;
+};
+
+/*
+ * Initial iSCSI connection offload request 2
+ */
+struct iscsi_kwqe_conn_offload2 {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	struct iscsi_kwqe_header hdr;
+#endif
+	u32 rq_page_table_addr_lo;
+	u32 rq_page_table_addr_hi;
+	struct iscsi_pte sq_first_pte;
+	struct iscsi_pte cq_first_pte;
+	u32 num_additional_wqes;
+};
+
+
+/*
+ * Initial iSCSI connection offload request 3
+ */
+struct iscsi_kwqe_conn_offload3 {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	struct iscsi_kwqe_header hdr;
+#endif
+	u32 reserved1;
+	struct iscsi_pte qp_first_pte[3];
+};
+
+
+/*
+ * iSCSI connection update request
+ */
+struct iscsi_kwqe_conn_update {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	struct iscsi_kwqe_header hdr;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 session_error_recovery_level;
+	u8 max_outstanding_r2ts;
+	u8 reserved2;
+	u8 conn_flags;
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
+#elif defined(__LITTLE_ENDIAN)
+	u8 conn_flags;
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
+	u8 reserved2;
+	u8 max_outstanding_r2ts;
+	u8 session_error_recovery_level;
+#endif
+	u32 context_id;
+	u32 max_send_pdu_length;
+	u32 max_recv_pdu_length;
+	u32 first_burst_length;
+	u32 max_burst_length;
+	u32 exp_stat_sn;
+};
+
+/*
+ * iSCSI destroy connection request
+ */
+struct iscsi_kwqe_conn_destroy {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	struct iscsi_kwqe_header hdr;
+#endif
+	u32 context_id;
+	u32 reserved1[6];
+};
+
+/*
+ * iSCSI KWQ WQE
+ */
+union iscsi_kwqe {
+	struct iscsi_kwqe_init1 init1;
+	struct iscsi_kwqe_init2 init2;
+	struct iscsi_kwqe_conn_offload1 conn_offload1;
+	struct iscsi_kwqe_conn_offload2 conn_offload2;
+	struct iscsi_kwqe_conn_update conn_update;
+	struct iscsi_kwqe_conn_destroy conn_destroy;
+};
+
+/*
+ * iSCSI Login SQ WQE
+ */
+struct bnx2i_login_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
+#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
+#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
+	u8 version_max;
+	u8 version_min;
+#elif defined(__LITTLE_ENDIAN)
+	u8 version_min;
+	u8 version_max;
+	u8 op_attr;
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
+#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
+#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 isid_lo;
+#if defined(__BIG_ENDIAN)
+	u16 isid_hi;
+	u16 tsih;
+#elif defined(__LITTLE_ENDIAN)
+	u16 tsih;
+	u16 isid_hi;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved2;
+	u16 itt;
+#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
+	u16 reserved2;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 cid;
+	u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved3;
+	u16 cid;
+#endif
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 reserved4;
+	u32 resp_bd_list_addr_lo;
+	u32 resp_bd_list_addr_hi;
+	u32 resp_buffer;
+#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+	u16 reserved8;
+	u8 reserved7;
+	u8 flags;
+#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
+#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+	u8 flags;
+#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
+#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
+	u8 reserved7;
+	u16 reserved8;
+#endif
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved10;
+	u8 reserved9;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 reserved9;
+	u8 reserved10;
+	u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Login CQE
+ */
+struct bnx2i_login_response {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 response_flags;
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
+#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
+#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
+	u8 version_max;
+	u8 version_active;
+#elif defined(__LITTLE_ENDIAN)
+	u8 version_active;
+	u8 version_max;
+	u8 response_flags;
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
+#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
+#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 reserved1[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u8 err_code;
+	u8 reserved2;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved2;
+	u8 err_code;
+	u16 reserved3;
+#endif
+	u32 stat_sn;
+	u32 isid_lo;
+#if defined(__BIG_ENDIAN)
+	u16 isid_hi;
+	u16 tsih;
+#elif defined(__LITTLE_ENDIAN)
+	u16 tsih;
+	u16 isid_hi;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 status_class;
+	u8 status_detail;
+	u16 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved4;
+	u8 status_detail;
+	u8 status_class;
+#endif
+	u32 reserved5[3];
+#if defined(__BIG_ENDIAN)
+	u16 reserved6;
+	u16 itt;
+#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
+	u16 reserved6;
+#endif
+	u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI Logout SQ WQE
+ */
+struct bnx2i_logout_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
+#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_attr;
+#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
+#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 reserved1[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved2;
+	u16 itt;
+#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
+	u16 reserved2;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 cid;
+	u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved3;
+	u16 cid;
+#endif
+	u32 cmd_sn;
+	u32 reserved4[5];
+	u32 zero_fill;
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved6;
+	u8 reserved5;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 reserved5;
+	u8 reserved6;
+	u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Logout CQE
+ */
+struct bnx2i_logout_response {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 reserved1;
+	u8 response;
+	u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved0;
+	u8 response;
+	u8 reserved1;
+	u8 op_code;
+#endif
+	u32 reserved2;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved5;
+	u8 err_code;
+	u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved4;
+	u8 err_code;
+	u16 reserved5;
+#endif
+	u32 reserved6[3];
+#if defined(__BIG_ENDIAN)
+	u16 time_to_wait;
+	u16 time_to_retain;
+#elif defined(__LITTLE_ENDIAN)
+	u16 time_to_retain;
+	u16 time_to_wait;
+#endif
+	u32 reserved7[3];
+#if defined(__BIG_ENDIAN)
+	u16 reserved8;
+	u16 itt;
+#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
+	u16 reserved8;
+#endif
+	u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI Nop-In CQE
+ */
+struct bnx2i_nop_in_msg {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 reserved1;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 reserved1;
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 ttt;
+	u32 reserved2;
+#if defined(__BIG_ENDIAN)
+	u16 reserved4;
+	u8 err_code;
+	u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved3;
+	u8 err_code;
+	u16 reserved4;
+#endif
+	u32 reserved5;
+	u32 lun[2];
+	u32 reserved6[4];
+#if defined(__BIG_ENDIAN)
+	u16 reserved7;
+	u16 itt;
+#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
+#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
+#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
+#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
+#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
+	u16 reserved7;
+#endif
+	u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI NOP-OUT SQ WQE
+ */
+struct bnx2i_nop_out_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_attr;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 lun[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved2;
+	u16 itt;
+#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
+	u16 reserved2;
+#endif
+	u32 ttt;
+	u32 cmd_sn;
+	u32 reserved3[2];
+	u32 resp_bd_list_addr_lo;
+	u32 resp_bd_list_addr_hi;
+	u32 resp_buffer;
+#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+	u16 reserved7;
+	u8 reserved6;
+	u8 flags;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
+#elif defined(__LITTLE_ENDIAN)
+	u8 flags;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
+	u8 reserved6;
+	u16 reserved7;
+#endif
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved9;
+	u8 reserved8;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 reserved8;
+	u8 reserved9;
+	u8 cq_index;
+#endif
+};
+
+/*
+ * iSCSI Reject CQE
+ */
+struct bnx2i_reject_msg {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 reserved1;
+	u8 reason;
+	u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved0;
+	u8 reason;
+	u8 reserved1;
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved4;
+	u8 err_code;
+	u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved3;
+	u8 err_code;
+	u16 reserved4;
+#endif
+	u32 reserved5[8];
+	u32 cq_req_sn;
+};
+
+/*
+ * bnx2i iSCSI TMF SQ WQE
+ */
+struct bnx2i_tmf_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_attr;
+#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 lun[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved1;
+	u16 itt;
+#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
+	u16 reserved1;
+#endif
+	u32 ref_itt;
+	u32 cmd_sn;
+	u32 reserved2;
+	u32 ref_cmd_sn;
+	u32 reserved3[3];
+	u32 zero_fill;
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved5;
+	u8 reserved4;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 reserved4;
+	u8 reserved5;
+	u8 cq_index;
+#endif
+};
+
+/*
+ * iSCSI Text SQ WQE
+ */
+struct bnx2i_text_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
+#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
+#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_attr;
+#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
+#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
+#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 lun[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u16 itt;
+#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
+	u16 reserved3;
+#endif
+	u32 ttt;
+	u32 cmd_sn;
+	u32 reserved4[2];
+	u32 resp_bd_list_addr_lo;
+	u32 resp_bd_list_addr_hi;
+	u32 resp_buffer;
+#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT 24
+	u32 zero_fill;
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved7;
+	u8 reserved6;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 reserved6;
+	u8 reserved7;
+	u8 cq_index;
+#endif
+};
+
+/*
+ * iSCSI SQ WQE
+ */
+union iscsi_request {
+	struct bnx2i_cmd_request cmd;
+	struct bnx2i_tmf_request tmf;
+	struct bnx2i_nop_out_request nop_out;
+	struct bnx2i_login_request login_req;
+	struct bnx2i_text_request text;
+	struct bnx2i_logout_request logout_req;
+	struct bnx2i_cleanup_request cleanup;
+};
+
+
+/*
+ * iSCSI TMF CQE
+ */
+struct bnx2i_tmf_response {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 reserved1;
+	u8 response;
+	u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved0;
+	u8 response;
+	u8 reserved1;
+	u8 op_code;
+#endif
+	u32 reserved2;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved5;
+	u8 err_code;
+	u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved4;
+	u8 err_code;
+	u16 reserved5;
+#endif
+	u32 reserved6[7];
+#if defined(__BIG_ENDIAN)
+	u16 reserved7;
+	u16 itt;
+#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
+	u16 reserved7;
+#endif
+	u32 cq_req_sn;
+};
+
+/*
+ * iSCSI Text CQE
+ */
+struct bnx2i_text_response {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 response_flags;
+#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
+#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
+#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
+#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 response_flags;
+#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
+#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
+#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
+#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 ttt;
+	u32 reserved2;
+#if defined(__BIG_ENDIAN)
+	u16 reserved4;
+	u8 err_code;
+	u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved3;
+	u8 err_code;
+	u16 reserved4;
+#endif
+	u32 reserved5;
+	u32 lun[2];
+	u32 reserved6[4];
+#if defined(__BIG_ENDIAN)
+	u16 reserved7;
+	u16 itt;
+#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
+	u16 reserved7;
+#endif
+	u32 cq_req_sn;
+};
+
+/*
+ * iSCSI CQE
+ */
+union iscsi_response {
+	struct bnx2i_cmd_response cmd;
+	struct bnx2i_tmf_response tmf;
+	struct bnx2i_login_response login_resp;
+	struct bnx2i_text_response text;
+	struct bnx2i_logout_response logout_resp;
+	struct bnx2i_cleanup_response cleanup;
+	struct bnx2i_reject_msg reject;
+	struct bnx2i_async_msg async;
+	struct bnx2i_nop_in_msg nop_in;
+};
+
+#endif /* __57XX_ISCSI_HSI_LINUX_LE__ */

+ 7 - 0
drivers/scsi/bnx2i/Kconfig

@@ -0,0 +1,7 @@
+config SCSI_BNX2_ISCSI
+	tristate "Broadcom NetXtreme II iSCSI support"
+	select SCSI_ISCSI_ATTRS
+	select CNIC
+	---help---
+	This driver supports iSCSI offload for the Broadcom NetXtreme II
+	devices.

+ 3 - 0
drivers/scsi/bnx2i/Makefile

@@ -0,0 +1,3 @@
+bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o
+
+obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o

+ 771 - 0
drivers/scsi/bnx2i/bnx2i.h

@@ -0,0 +1,771 @@
+/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#ifndef _BNX2I_H_
+#define _BNX2I_H_
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/kfifo.h>
+#include <linux/netdevice.h>
+#include <linux/completion.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+#include <scsi/iscsi_proto.h>
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+#include "../../net/cnic_if.h"
+#include "57xx_iscsi_hsi.h"
+#include "57xx_iscsi_constants.h"
+
+#define BNX2_ISCSI_DRIVER_NAME		"bnx2i"
+
+#define BNX2I_MAX_ADAPTERS		8
+
+#define ISCSI_MAX_CONNS_PER_HBA		128
+#define ISCSI_MAX_SESS_PER_HBA		ISCSI_MAX_CONNS_PER_HBA
+#define ISCSI_MAX_CMDS_PER_SESS		128
+
+/* Total active commands across all connections supported by devices */
+#define ISCSI_MAX_CMDS_PER_HBA_5708	(28 * (ISCSI_MAX_CMDS_PER_SESS - 1))
+#define ISCSI_MAX_CMDS_PER_HBA_5709	(128 * (ISCSI_MAX_CMDS_PER_SESS - 1))
+#define ISCSI_MAX_CMDS_PER_HBA_57710	(256 * (ISCSI_MAX_CMDS_PER_SESS - 1))
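(Worked out with ISCSI_MAX_CMDS_PER_SESS == 128: 28 * 127 = 3,556 active tasks on a 5708 and 256 * 127 = 32,512 on a 57710.)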
+
+#define ISCSI_MAX_BDS_PER_CMD		32
+
+#define MAX_PAGES_PER_CTRL_STRUCT_POOL	8
+#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS	4
+
+/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */
+#define MAX_BD_LENGTH			65535
+#define BD_SPLIT_SIZE			32768
+
+/* min, max & default values for SQ/RQ/CQ size, configurable via' modparam */
+#define BNX2I_SQ_WQES_MIN 		16
+#define BNX2I_570X_SQ_WQES_MAX 		128
+#define BNX2I_5770X_SQ_WQES_MAX 	512
+#define BNX2I_570X_SQ_WQES_DEFAULT 	128
+#define BNX2I_5770X_SQ_WQES_DEFAULT 	256
+
+#define BNX2I_570X_CQ_WQES_MAX 		128
+#define BNX2I_5770X_CQ_WQES_MAX 	512
+
+#define BNX2I_RQ_WQES_MIN 		16
+#define BNX2I_RQ_WQES_MAX 		32
+#define BNX2I_RQ_WQES_DEFAULT 		16
+
+/* CCELLs per conn */
+#define BNX2I_CCELLS_MIN		16
+#define BNX2I_CCELLS_MAX		96
+#define BNX2I_CCELLS_DEFAULT		64
+
+#define ITT_INVALID_SIGNATURE		0xFFFF
+
+#define ISCSI_CMD_CLEANUP_TIMEOUT	100
+
+#define BNX2I_CONN_CTX_BUF_SIZE		16384
+
+#define BNX2I_SQ_WQE_SIZE		64
+#define BNX2I_RQ_WQE_SIZE		256
+#define BNX2I_CQE_SIZE			64
+
+#define MB_KERNEL_CTX_SHIFT		8
+#define MB_KERNEL_CTX_SIZE		(1 << MB_KERNEL_CTX_SHIFT)
+
+#define CTX_SHIFT			7
+#define GET_CID_NUM(cid_addr)		((cid_addr) >> CTX_SHIFT)
+
+#define CTX_OFFSET 			0x10000
+#define MAX_CID_CNT			0x4000
+
+/* 5709 context registers */
+#define BNX2_MQ_CONFIG2			0x00003d00
+#define BNX2_MQ_CONFIG2_CONT_SZ		(0x7L<<4)
+#define BNX2_MQ_CONFIG2_FIRST_L4L5	(0x1fL<<8)
+
+/* 57710's BAR2 is mapped to doorbell registers */
+#define BNX2X_DOORBELL_PCI_BAR		2
+#define BNX2X_MAX_CQS			8
+
+#define CNIC_ARM_CQE			1
+#define CNIC_DISARM_CQE			0
+
+#define REG_RD(__hba, offset)				\
+		readl(__hba->regview + offset)
+#define REG_WR(__hba, offset, val)			\
+		writel(val, __hba->regview + offset)
+
+
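A hedged usage sketch of the accessors above, reusing the 5709 register define from earlier in this header (purely illustrative; hba->regview must already be mapped):

u32 cfg;

cfg = REG_RD(hba, BNX2_MQ_CONFIG2);	/* 32-bit read at regview + offset */
REG_WR(hba, BNX2_MQ_CONFIG2, cfg);	/* write the same value back */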
+/**
+ * struct generic_pdu_resc - login pdu resource structure
+ *
+ * @req_buf:            driver buffer used to stage payload associated with
+ *                      the login request
+ * @req_dma_addr:       dma address for iscsi login request payload buffer
+ * @req_buf_size:       actual login request payload length
+ * @req_wr_ptr:         pointer into login request buffer when next data is
+ *                      to be written
+ * @resp_hdr:           iscsi header where iscsi login response header is to
+ *                      be recreated
+ * @resp_buf:           buffer to stage login response payload
+ * @resp_dma_addr:      login response payload buffer dma address
+ * @resp_buf_size:      login response payload length
+ * @resp_wr_ptr:        pointer into login response buffer when next data is
+ *                      to be written
+ * @req_bd_tbl:         iscsi login request payload BD table
+ * @req_bd_dma:         login request BD table dma address
+ * @resp_bd_tbl:        iscsi login response payload BD table
+ * @resp_bd_dma:        login response BD table dma address
+ *
+ * following structure defines buffer info for generic pdus such as iSCSI Login,
+ *	Logout and NOP
+ */
+struct generic_pdu_resc {
+	char *req_buf;
+	dma_addr_t req_dma_addr;
+	u32 req_buf_size;
+	char *req_wr_ptr;
+	struct iscsi_hdr resp_hdr;
+	char *resp_buf;
+	dma_addr_t resp_dma_addr;
+	u32 resp_buf_size;
+	char *resp_wr_ptr;
+	char *req_bd_tbl;
+	dma_addr_t req_bd_dma;
+	char *resp_bd_tbl;
+	dma_addr_t resp_bd_dma;
+};
+
+
+/**
+ * struct bd_resc_page - tracks DMA'able memory allocated for BD tables
+ *
+ * @link:               list head to link elements
+ * @max_ptrs:           maximum pointers that can be stored in this page
+ * @num_valid:          number of pointers valid in this page
+ * @page:               base address for page pointer array
+ *
+ * structure to track DMA'able memory allocated for command BD tables
+ */
+struct bd_resc_page {
+	struct list_head link;
+	u32 max_ptrs;
+	u32 num_valid;
+	void *page[1];
+};
+
+
+/**
+ * struct io_bdt - I/O buffer descriptor table
+ *
+ * @bd_tbl:             BD table's virtual address
+ * @bd_tbl_dma:         BD table's dma address
+ * @bd_valid:           num valid BD entries
+ *
+ * IO BD table
+ */
+struct io_bdt {
+	struct iscsi_bd *bd_tbl;
+	dma_addr_t bd_tbl_dma;
+	u16 bd_valid;
+};
+
+
+/**
+ * bnx2i_cmd - iscsi command structure
+ *
+ * @scsi_cmd:           SCSI-ML task pointer corresponding to this iscsi cmd
+ * @sg:                 SG list
+ * @io_tbl:             buffer descriptor (BD) table
+ * @bd_tbl_dma:         buffer descriptor (BD) table's dma address
+ */
+struct bnx2i_cmd {
+	struct iscsi_hdr hdr;
+	struct bnx2i_conn *conn;
+	struct scsi_cmnd *scsi_cmd;
+	struct scatterlist *sg;
+	struct io_bdt io_tbl;
+	dma_addr_t bd_tbl_dma;
+	struct bnx2i_cmd_request req;
+};
+
+
+/**
+ * struct bnx2i_conn - iscsi connection structure
+ *
+ * @cls_conn:              pointer to iscsi cls conn
+ * @hba:                   adapter structure pointer
+ * @iscsi_conn_cid:        iscsi conn id
+ * @fw_cid:                firmware iscsi context id
+ * @ep:                    endpoint structure pointer
+ * @gen_pdu:               login/nopout/logout pdu resources
+ * @violation_notified:    bit mask used to track iscsi error/warning messages
+ *                         already printed out
+ *
+ * iSCSI connection structure
+ */
+struct bnx2i_conn {
+	struct iscsi_cls_conn *cls_conn;
+	struct bnx2i_hba *hba;
+	struct completion cmd_cleanup_cmpl;
+	int is_bound;
+
+	u32 iscsi_conn_cid;
+#define BNX2I_CID_RESERVED	0x5AFF
+	u32 fw_cid;
+
+	struct timer_list poll_timer;
+	/*
+	 * Queue Pair (QP) related structure elements.
+	 */
+	struct bnx2i_endpoint *ep;
+
+	/*
+	 * Buffer for login negotiation process
+	 */
+	struct generic_pdu_resc gen_pdu;
+	u64 violation_notified;
+};
+
+
+
+/**
+ * struct iscsi_cid_queue - Per adapter iscsi cid queue
+ *
+ * @cid_que_base:           queue base memory
+ * @cid_que:                queue memory pointer
+ * @cid_q_prod_idx:         producer index
+ * @cid_q_cons_idx:         consumer index
+ * @cid_q_max_idx:          max index, used to detect wrap-around condition
+ * @cid_free_cnt:           queue size
+ * @conn_cid_tbl:           iscsi cid to conn structure mapping table
+ *
+ * Per adapter iSCSI CID Queue
+ */
+struct iscsi_cid_queue {
+	void *cid_que_base;
+	u32 *cid_que;
+	u32 cid_q_prod_idx;
+	u32 cid_q_cons_idx;
+	u32 cid_q_max_idx;
+	u32 cid_free_cnt;
+	struct bnx2i_conn **conn_cid_tbl;
+};
+
+/**
+ * struct bnx2i_hba - bnx2i adapter structure
+ *
+ * @link:                  list head to link elements
+ * @cnic:                  pointer to cnic device
+ * @pcidev:                pointer to pci dev
+ * @netdev:                pointer to netdev structure
+ * @regview:               mapped PCI register space
+ * @age:                   age, incremented on every recovery
+ * @cnic_dev_type:         cnic device type, 5706/5708/5709/57710
+ * @mail_queue_access:     mailbox queue access mode, applicable to 5709 only
+ * @reg_with_cnic:         indicates whether the device is registered with CNIC
+ * @adapter_state:         adapter state, UP, GOING_DOWN, LINK_DOWN
+ * @mtu_supported:         Ethernet MTU supported
+ * @shost:                 scsi host pointer
+ * @max_sqes:              SQ size
+ * @max_rqes:              RQ size
+ * @max_cqes:              CQ size
+ * @num_ccell:             number of command cells per connection
+ * @ofld_conns_active:     count of currently active offload connections
+ * @max_active_conns:      max offload connections supported by this device
+ * @cid_que:               iscsi cid queue
+ * @ep_rdwr_lock:          read / write lock to synchronize various ep lists
+ * @ep_ofld_list:          connection list for pending offload completion
+ * @ep_destroy_list:       connection list for pending destroy completion
+ * @mp_bd_tbl:             BD table to be used with middle path requests
+ * @mp_bd_dma:             DMA address of 'mp_bd_tbl' memory buffer
+ * @dummy_buffer:          Dummy buffer to be used with zero length scsicmd reqs
+ * @dummy_buf_dma:         DMA address of 'dummy_buffer' memory buffer
+ * @lock:                  lock to synchronize access to hba structure
+ * @net_dev_lock:          mutex to synchronize net device access
+ * @pci_did:               PCI device ID
+ * @pci_vid:               PCI vendor ID
+ * @pci_sdid:              PCI subsystem device ID
+ * @pci_svid:              PCI subsystem vendor ID
+ * @pci_func:              PCI function number in system pci tree
+ * @pci_devno:             PCI device number in system pci tree
+ * @num_wqe_sent:          statistic counter, total wqe's sent
+ * @num_cqe_rcvd:          statistic counter, total cqe's received
+ * @num_intr_claimed:      statistic counter, total interrupts claimed
+ * @link_changed_count:    statistic counter, num of link change notifications
+ *                         received
+ * @ipaddr_changed_count:  statistic counter, num times IP address changed while
+ *                         at least one connection is offloaded
+ * @num_sess_opened:       statistic counter, total num sessions opened
+ * @num_conn_opened:       statistic counter, total num conns opened on this hba
+ * @ctx_ccell_tasks:       captures number of ccells and tasks supported by
+ *                         currently offloaded connection, used to decode
+ *                         context memory
+ *
+ * Adapter Data Structure
+ */
+struct bnx2i_hba {
+	struct list_head link;
+	struct cnic_dev *cnic;
+	struct pci_dev *pcidev;
+	struct net_device *netdev;
+	void __iomem *regview;
+
+	u32 age;
+	unsigned long cnic_dev_type;
+		#define BNX2I_NX2_DEV_5706		0x0
+		#define BNX2I_NX2_DEV_5708		0x1
+		#define BNX2I_NX2_DEV_5709		0x2
+		#define BNX2I_NX2_DEV_57710		0x3
+	u32 mail_queue_access;
+		#define BNX2I_MQ_KERNEL_MODE		0x0
+		#define BNX2I_MQ_KERNEL_BYPASS_MODE	0x1
+		#define BNX2I_MQ_BIN_MODE		0x2
+	unsigned long  reg_with_cnic;
+		#define BNX2I_CNIC_REGISTERED		1
+
+	unsigned long  adapter_state;
+		#define ADAPTER_STATE_UP		0
+		#define ADAPTER_STATE_GOING_DOWN	1
+		#define ADAPTER_STATE_LINK_DOWN		2
+		#define ADAPTER_STATE_INIT_FAILED	31
+	unsigned int mtu_supported;
+		#define BNX2I_MAX_MTU_SUPPORTED		1500
+
+	struct Scsi_Host *shost;
+
+	u32 max_sqes;
+	u32 max_rqes;
+	u32 max_cqes;
+	u32 num_ccell;
+
+	int ofld_conns_active;
+
+	int max_active_conns;
+	struct iscsi_cid_queue cid_que;
+
+	rwlock_t ep_rdwr_lock;
+	struct list_head ep_ofld_list;
+	struct list_head ep_destroy_list;
+
+	/*
+	 * BD table to be used with MP (Middle Path) requests.
+	 */
+	char *mp_bd_tbl;
+	dma_addr_t mp_bd_dma;
+	char *dummy_buffer;
+	dma_addr_t dummy_buf_dma;
+
+	spinlock_t lock;	/* protects hba structure access */
+	struct mutex net_dev_lock;/* sync net device access */
+
+	/*
+	 * PCI related info.
+	 */
+	u16 pci_did;
+	u16 pci_vid;
+	u16 pci_sdid;
+	u16 pci_svid;
+	u16 pci_func;
+	u16 pci_devno;
+
+	/*
+	 * Following are a bunch of statistics useful during development
+	 * and later stage for score boarding.
+	 */
+	u32 num_wqe_sent;
+	u32 num_cqe_rcvd;
+	u32 num_intr_claimed;
+	u32 link_changed_count;
+	u32 ipaddr_changed_count;
+	u32 num_sess_opened;
+	u32 num_conn_opened;
+	unsigned int ctx_ccell_tasks;
+};
+
+
+/*******************************************************************************
+ * 	QP [ SQ / RQ / CQ ] info.
+ ******************************************************************************/
+
+/*
+ * SQ/RQ/CQ generic structure definition
+ */
+struct sqe {
+	u8 sqe_byte[BNX2I_SQ_WQE_SIZE];
+};
+
+struct rqe {
+	u8 rqe_byte[BNX2I_RQ_WQE_SIZE];
+};
+
+struct cqe {
+	u8 cqe_byte[BNX2I_CQE_SIZE];
+};
+
+
+enum {
+#if defined(__LITTLE_ENDIAN)
+	CNIC_EVENT_COAL_INDEX	= 0x0,
+	CNIC_SEND_DOORBELL	= 0x4,
+	CNIC_EVENT_CQ_ARM	= 0x7,
+	CNIC_RECV_DOORBELL	= 0x8
+#elif defined(__BIG_ENDIAN)
+	CNIC_EVENT_COAL_INDEX	= 0x2,
+	CNIC_SEND_DOORBELL	= 0x6,
+	CNIC_EVENT_CQ_ARM	= 0x4,
+	CNIC_RECV_DOORBELL	= 0xa
+#endif
+};
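These constants are byte offsets into the per-connection doorbell/context window,
which the chip lays out as 32-bit words; the little/big endian variants appear to
adjust the byte lane of each 16-bit field (8-bit in the case of CNIC_EVENT_CQ_ARM)
within its word so that a plain writew()/writeb() from the host lands on the lane
the hardware expects. Their use can be seen in bnx2i_put_rq_buf() and
bnx2i_ring_sq_dbell() below.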
+
+
+/*
+ * CQ DB
+ */
+struct bnx2x_iscsi_cq_pend_cmpl {
+	/* CQ producer, updated by Ustorm */
+	u16 ustrom_prod;
+	/* CQ pending completion counter */
+	u16 pend_cntr;
+};
+
+
+struct bnx2i_5771x_cq_db {
+	struct bnx2x_iscsi_cq_pend_cmpl qp_pend_cmpl[BNX2X_MAX_CQS];
+	/* CQ pending completion ITT array */
+	u16 itt[BNX2X_MAX_CQS];
+	/* Cstorm CQ sequence to notify array, updated by driver */
+	u16 sqn[BNX2X_MAX_CQS];
+	u32 reserved[4];	/* 16 byte alignment */
+};
+
+
+struct bnx2i_5771x_sq_rq_db {
+	u16 prod_idx;
+	u8 reserved0[14]; /* Pad structure size to 16 bytes */
+};
+
+
+struct bnx2i_5771x_dbell_hdr {
+	u8 header;
+	/* 1 for rx doorbell, 0 for tx doorbell */
+#define B577XX_DOORBELL_HDR_RX				(0x1<<0)
+#define B577XX_DOORBELL_HDR_RX_SHIFT			0
+	/* 0 for normal doorbell, 1 for advertise wnd doorbell */
+#define B577XX_DOORBELL_HDR_DB_TYPE			(0x1<<1)
+#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT		1
+	/* rdma tx only: DPM transaction size specifier (64/128/256/512B) */
+#define B577XX_DOORBELL_HDR_DPM_SIZE			(0x3<<2)
+#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT		2
+	/* connection type */
+#define B577XX_DOORBELL_HDR_CONN_TYPE			(0xF<<4)
+#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT		4
+};
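Each pair of macros above encodes a field as a mask plus its shift within the single
header byte, so composing a header is a mask-and-shift exercise. A small sketch using
the macros just defined (the helper name and field values are illustrative, not taken
from the driver):

	/* sketch: compose a 5771x doorbell header byte from its bitfields */
	static u8 bnx2i_demo_dbell_hdr(u8 rx, u8 conn_type)
	{
		u8 hdr = 0;

		hdr |= (rx << B577XX_DOORBELL_HDR_RX_SHIFT) &
			B577XX_DOORBELL_HDR_RX;
		hdr |= (conn_type << B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT) &
			B577XX_DOORBELL_HDR_CONN_TYPE;
		return hdr;
	}

bnx2i_ring_577xx_doorbell() in bnx2i_hwi.c builds its header the same way, shifting
B577XX_ISCSI_CONNECTION_TYPE into the connection-type field.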
+
+struct bnx2i_5771x_dbell {
+	struct bnx2i_5771x_dbell_hdr dbell;
+	u8 pad[3];
+
+};
+
+/**
+ * struct qp_info - QP (shared queue region) attributes structure
+ *
+ * @ctx_base:           ioremapped pci register base to access doorbell register
+ *                      pertaining to this offloaded connection
+ * @sq_virt:            virtual address of send queue (SQ) region
+ * @sq_phys:            DMA address of SQ memory region
+ * @sq_mem_size:        SQ size
+ * @sq_prod_qe:         SQ producer entry pointer
+ * @sq_cons_qe:         SQ consumer entry pointer
+ * @sq_first_qe:        virtual address of first entry in SQ
+ * @sq_last_qe:         virtual address of last entry in SQ
+ * @sq_prod_idx:        SQ producer index
+ * @sq_cons_idx:        SQ consumer index
+ * @sqe_left:           number of SQ entries left
+ * @sq_pgtbl_virt:      page table describing buffer constituting SQ region
+ * @sq_pgtbl_phys:      dma address of 'sq_pgtbl_virt'
+ * @sq_pgtbl_size:      SQ page table size
+ * @cq_virt:            virtual address of completion queue (CQ) region
+ * @cq_phys:            DMA address of CQ memory region
+ * @cq_mem_size:        CQ size
+ * @cq_prod_qe:         CQ producer entry pointer
+ * @cq_cons_qe:         CQ consumer entry pointer
+ * @cq_first_qe:        virtual address of first entry in CQ
+ * @cq_last_qe:         virtual address of last entry in CQ
+ * @cq_prod_idx:        CQ producer index
+ * @cq_cons_idx:        CQ consumer index
+ * @cqe_left:           number of CQ entries left
+ * @cqe_size:           CQ depth in number of entries, used when arming the CQ
+ * @cqe_exp_seq_sn:     next expected CQE sequence number
+ * @cq_pgtbl_virt:      page table describing buffer constituting CQ region
+ * @cq_pgtbl_phys:      dma address of 'cq_pgtbl_virt'
+ * @cq_pgtbl_size:      CQ page table size
+ * @rq_virt:            virtual address of receive queue (RQ) region
+ * @rq_phys:            DMA address of RQ memory region
+ * @rq_mem_size:        RQ size
+ * @rq_prod_qe:         RQ producer entry pointer
+ * @rq_cons_qe:         RQ consumer entry pointer
+ * @rq_first_qe:        virtual address of first entry in RQ
+ * @rq_last_qe:         virtual address of last entry in RQ
+ * @rq_prod_idx:        RQ producer index
+ * @rq_cons_idx:        RQ consumer index
+ * @rqe_left:           number of RQ entries left
+ * @rq_pgtbl_virt:      page table describing buffer constituting RQ region
+ * @rq_pgtbl_phys:      dma address of 'rq_pgtbl_virt'
+ * @rq_pgtbl_size:      RQ page table size
+ *
+ * a queue pair (QP) is a per-connection shared data structure which is used
+ *	to send work requests (SQ), receive completion notifications (CQ)
+ *	and receive asynchronous / scsi sense info (RQ). the 'qp_info' structure
+ *	below holds queue memory, consumer/producer indexes and page table
+ *	information
+ */
+struct qp_info {
+	void __iomem *ctx_base;
+#define DPM_TRIGER_TYPE			0x40
+
+#define BNX2I_570x_QUE_DB_SIZE		0
+#define BNX2I_5771x_QUE_DB_SIZE		16
+	struct sqe *sq_virt;
+	dma_addr_t sq_phys;
+	u32 sq_mem_size;
+
+	struct sqe *sq_prod_qe;
+	struct sqe *sq_cons_qe;
+	struct sqe *sq_first_qe;
+	struct sqe *sq_last_qe;
+	u16 sq_prod_idx;
+	u16 sq_cons_idx;
+	u32 sqe_left;
+
+	void *sq_pgtbl_virt;
+	dma_addr_t sq_pgtbl_phys;
+	u32 sq_pgtbl_size;	/* set to PAGE_SIZE for 5708 & 5709 */
+
+	struct cqe *cq_virt;
+	dma_addr_t cq_phys;
+	u32 cq_mem_size;
+
+	struct cqe *cq_prod_qe;
+	struct cqe *cq_cons_qe;
+	struct cqe *cq_first_qe;
+	struct cqe *cq_last_qe;
+	u16 cq_prod_idx;
+	u16 cq_cons_idx;
+	u32 cqe_left;
+	u32 cqe_size;
+	u32 cqe_exp_seq_sn;
+
+	void *cq_pgtbl_virt;
+	dma_addr_t cq_pgtbl_phys;
+	u32 cq_pgtbl_size;	/* set to PAGE_SIZE for 5708 & 5709 */
+
+	struct rqe *rq_virt;
+	dma_addr_t rq_phys;
+	u32 rq_mem_size;
+
+	struct rqe *rq_prod_qe;
+	struct rqe *rq_cons_qe;
+	struct rqe *rq_first_qe;
+	struct rqe *rq_last_qe;
+	u16 rq_prod_idx;
+	u16 rq_cons_idx;
+	u32 rqe_left;
+
+	void *rq_pgtbl_virt;
+	dma_addr_t rq_pgtbl_phys;
+	u32 rq_pgtbl_size;	/* set to PAGE_SIZE for 5708 & 5709 */
+};
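Each of the three queues is a plain circular buffer over DMA'able memory: the
*_prod_qe/*_cons_qe cursors walk the element array and wrap from *_last_qe back to
*_first_qe, while the companion indexes count entries. A minimal standalone sketch of
that wrap logic (the types and names are illustrative, not the driver's):

	struct demo_ring {
		int *first_qe;		/* first element in the array */
		int *last_qe;		/* last element in the array */
		int *prod_qe;		/* producer cursor */
		unsigned short prod_idx;
	};

	/* advance the producer by one element, wrapping at the end */
	static void demo_ring_advance(struct demo_ring *r)
	{
		if (r->prod_qe == r->last_qe)
			r->prod_qe = r->first_qe;
		else
			r->prod_qe++;
		r->prod_idx++;
	}

bnx2i_ring_dbell_update_sq_params() in bnx2i_hwi.c performs exactly this dance on
sq_prod_qe before ringing the doorbell.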
+
+
+
+/*
+ * CID handles
+ */
+struct ep_handles {
+	u32 fw_cid;
+	u32 drv_iscsi_cid;
+	u16 pg_cid;
+	u16 rsvd;
+};
+
+
+enum {
+	EP_STATE_IDLE                   = 0x0,
+	EP_STATE_PG_OFLD_START          = 0x1,
+	EP_STATE_PG_OFLD_COMPL          = 0x2,
+	EP_STATE_OFLD_START             = 0x4,
+	EP_STATE_OFLD_COMPL             = 0x8,
+	EP_STATE_CONNECT_START          = 0x10,
+	EP_STATE_CONNECT_COMPL          = 0x20,
+	EP_STATE_ULP_UPDATE_START       = 0x40,
+	EP_STATE_ULP_UPDATE_COMPL       = 0x80,
+	EP_STATE_DISCONN_START          = 0x100,
+	EP_STATE_DISCONN_COMPL          = 0x200,
+	EP_STATE_CLEANUP_START          = 0x400,
+	EP_STATE_CLEANUP_CMPL           = 0x800,
+	EP_STATE_TCP_FIN_RCVD           = 0x1000,
+	EP_STATE_TCP_RST_RCVD           = 0x2000,
+	EP_STATE_PG_OFLD_FAILED         = 0x1000000,
+	EP_STATE_ULP_UPDATE_FAILED      = 0x2000000,
+	EP_STATE_CLEANUP_FAILED         = 0x4000000,
+	EP_STATE_OFLD_FAILED            = 0x8000000,
+	EP_STATE_CONNECT_FAILED         = 0x10000000,
+	EP_STATE_DISCONN_TIMEDOUT       = 0x20000000,
+};
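Note the split in the encoding: values up to EP_STATE_TCP_RST_RCVD track normal
state-machine progression, while 0x1000000 and above flag terminal failure or timeout
conditions, so a single mask test can separate the two. An illustrative helper (not
part of the driver) built from the enum values above:

	#define EP_STATE_FAILED_MASK	(EP_STATE_PG_OFLD_FAILED |	\
					 EP_STATE_ULP_UPDATE_FAILED |	\
					 EP_STATE_CLEANUP_FAILED |	\
					 EP_STATE_OFLD_FAILED |		\
					 EP_STATE_CONNECT_FAILED |	\
					 EP_STATE_DISCONN_TIMEDOUT)

	static inline int ep_state_is_failure(u32 state)
	{
		return (state & EP_STATE_FAILED_MASK) != 0;
	}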
+
+/**
+ * struct bnx2i_endpoint - representation of tcp connection in NX2 world
+ *
+ * @link:               list head to link elements
+ * @hba:                adapter to which this connection belongs
+ * @conn:               iscsi connection this EP is linked to
+ * @cm_sk:              cnic sock struct
+ * @hba_age:            age to detect if 'iscsid' issues ep_disconnect()
+ *                      after HBA reset is completed by bnx2i/cnic/bnx2
+ *                      modules
+ * @state:              tracks offload connection state machine
+ * @timestamp:          time stamp (jiffies) noted when the connect request
+ *                      was issued, used for timeout handling
+ * @num_active_cmds:    tracks number of outstanding commands on this ep
+ * @qp:                 QP information
+ * @ids:                contains chip allocated *context id* & driver assigned
+ *                      *iscsi cid*
+ * @ofld_timer:         offload timer to detect timeout
+ * @ofld_wait:          wait queue
+ *
+ * Endpoint Structure - equivalent of tcp socket structure
+ */
+struct bnx2i_endpoint {
+	struct list_head link;
+	struct bnx2i_hba *hba;
+	struct bnx2i_conn *conn;
+	struct cnic_sock *cm_sk;
+	u32 hba_age;
+	u32 state;
+	unsigned long timestamp;
+	int num_active_cmds;
+
+	struct qp_info qp;
+	struct ep_handles ids;
+		#define ep_iscsi_cid	ids.drv_iscsi_cid
+		#define ep_cid		ids.fw_cid
+		#define ep_pg_cid	ids.pg_cid
+	struct timer_list ofld_timer;
+	wait_queue_head_t ofld_wait;
+};
+
+
+
+/* Global variables */
+extern unsigned int error_mask1, error_mask2;
+extern u64 iscsi_error_mask;
+extern unsigned int en_tcp_dack;
+extern unsigned int event_coal_div;
+
+extern struct scsi_transport_template *bnx2i_scsi_xport_template;
+extern struct iscsi_transport bnx2i_iscsi_transport;
+extern struct cnic_ulp_ops bnx2i_cnic_cb;
+
+extern unsigned int sq_size;
+extern unsigned int rq_size;
+
+extern struct device_attribute *bnx2i_dev_attributes[];
+
+
+
+/*
+ * Function Prototypes
+ */
+extern void bnx2i_identify_device(struct bnx2i_hba *hba);
+extern void bnx2i_register_device(struct bnx2i_hba *hba);
+
+extern void bnx2i_ulp_init(struct cnic_dev *dev);
+extern void bnx2i_ulp_exit(struct cnic_dev *dev);
+extern void bnx2i_start(void *handle);
+extern void bnx2i_stop(void *handle);
+extern void bnx2i_reg_dev_all(void);
+extern void bnx2i_unreg_dev_all(void);
+extern struct bnx2i_hba *get_adapter_list_head(void);
+
+struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
+					  u16 iscsi_cid);
+
+int bnx2i_alloc_ep_pool(void);
+void bnx2i_release_ep_pool(void);
+struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba);
+struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba);
+
+struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic);
+
+struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic);
+void bnx2i_free_hba(struct bnx2i_hba *hba);
+
+void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len);
+void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count);
+
+void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd);
+
+void bnx2i_drop_session(struct iscsi_cls_session *session);
+
+extern int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba);
+extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn,
+				  struct iscsi_task *mtask);
+extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
+				  struct iscsi_task *mtask);
+extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
+				    struct bnx2i_cmd *cmnd);
+extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
+				   struct iscsi_task *mtask, u32 ttt,
+				   char *datap, int data_len, int unsol);
+extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn,
+				   struct iscsi_task *mtask);
+extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba,
+				       struct bnx2i_cmd *cmd);
+extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba,
+				     struct bnx2i_endpoint *ep);
+extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn);
+extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba,
+				    struct bnx2i_endpoint *ep);
+
+extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba,
+			       struct bnx2i_endpoint *ep);
+extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba,
+			       struct bnx2i_endpoint *ep);
+extern void bnx2i_ep_ofld_timer(unsigned long data);
+extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list(
+		struct bnx2i_hba *hba, u32 iscsi_cid);
+extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list(
+		struct bnx2i_hba *hba, u32 iscsi_cid);
+
+extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep);
+extern void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
+
+/* Debug related function prototypes */
+extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_recv_state(struct bnx2i_conn *conn);
+
+#endif

+ 2405 - 0
drivers/scsi/bnx2i/bnx2i_hwi.c

@@ -0,0 +1,2405 @@
+/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/libiscsi.h>
+#include "bnx2i.h"
+
+/**
+ * bnx2i_get_cid_num - get cid from ep
+ * @ep: 	endpoint pointer
+ *
+ * Returns the chip context id; 57710 uses ep_cid as-is, while
+ *	5706/5708/5709 extract it via GET_CID_NUM()
+ */
+static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep)
+{
+	u32 cid;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+		cid = ep->ep_cid;
+	else
+		cid = GET_CID_NUM(ep->ep_cid);
+	return cid;
+}
+
+
+/**
+ * bnx2i_adjust_qp_size - adjusts SQ/RQ/CQ sizes based on device type
+ * @hba: 		adapter for which adjustments are to be made
+ *
+ * Rounds each queue size to occupy an integral number of pages; on
+ *	5706/5708/5709 devices the SQ/RQ sizes are first forced to powers of two
+ */
+static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
+{
+	u32 num_elements_per_pg;
+
+	if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) ||
+	    test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) ||
+	    test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
+		if (!is_power_of_2(hba->max_sqes))
+			hba->max_sqes = rounddown_pow_of_two(hba->max_sqes);
+
+		if (!is_power_of_2(hba->max_rqes))
+			hba->max_rqes = rounddown_pow_of_two(hba->max_rqes);
+	}
+
+	/* Adjust each queue size if the user selection does not
+	 * yield integral num of page buffers
+	 */
+	/* adjust SQ */
+	num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+	if (hba->max_sqes < num_elements_per_pg)
+		hba->max_sqes = num_elements_per_pg;
+	else if (hba->max_sqes % num_elements_per_pg)
+		hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) &
+				 ~(num_elements_per_pg - 1);
+
+	/* adjust CQ */
+	num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
+	if (hba->max_cqes < num_elements_per_pg)
+		hba->max_cqes = num_elements_per_pg;
+	else if (hba->max_cqes % num_elements_per_pg)
+		hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) &
+				 ~(num_elements_per_pg - 1);
+
+	/* adjust RQ */
+	num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
+	if (hba->max_rqes < num_elements_per_pg)
+		hba->max_rqes = num_elements_per_pg;
+	else if (hba->max_rqes % num_elements_per_pg)
+		hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) &
+				 ~(num_elements_per_pg - 1);
+}
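The round-up expression (n + per_pg - 1) & ~(per_pg - 1) relies on
num_elements_per_pg being a power of two, which holds because PAGE_SIZE and the WQE
sizes are. For example, with 4 KiB pages and 64-byte SQ WQEs there are 64 elements per
page, so a user-selected queue depth of 100 rounds up to 128. A standalone check of the
arithmetic (the 4096 and 64 values are assumed for illustration):

	#include <assert.h>

	int main(void)
	{
		unsigned int per_pg = 4096 / 64; /* 64 SQ WQEs per 4 KiB page */
		unsigned int max_sqes = 100;

		if (max_sqes % per_pg)
			max_sqes = (max_sqes + per_pg - 1) & ~(per_pg - 1);
		assert(max_sqes == 128);	 /* next multiple of 64 */
		return 0;
	}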
+
+
+/**
+ * bnx2i_get_link_state - get network interface link state
+ * @hba:	adapter instance pointer
+ *
+ * updates adapter structure flag based on netdev state
+ */
+static void bnx2i_get_link_state(struct bnx2i_hba *hba)
+{
+	if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
+		set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+	else
+		clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+}
+
+
+/**
+ * bnx2i_iscsi_license_error - displays iscsi license related error message
+ * @hba:		adapter instance pointer
+ * @error_code:		error classification
+ *
+ * Puts out an error log when the driver is unable to offload an iscsi
+ *	connection due to license restrictions
+ */
+static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
+{
+	if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED)
+		/* iSCSI offload not supported on this device */
+		printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n",
+				hba->netdev->name);
+	if (error_code == ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED)
+		/* iSCSI offload not supported on this LOM device */
+		printk(KERN_ERR "bnx2i: LOM is not enabled to "
+				"offload iSCSI connections, dev=%s\n",
+				hba->netdev->name);
+	set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state);
+}
+
+
+/**
+ * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification
+ * @ep:		endpoint (transport identifier) structure
+ * @action:	action, ARM or DISARM. For now only ARM_CQE is used
+ *
+ * Arming the CQ will enable the chip to generate global EQ events in order to
+ *	interrupt the driver. An EQ event is generated when the CQ index is hit
+ *	or when at least one CQE is outstanding and the on-chip timer expires
+ */
+void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
+{
+	struct bnx2i_5771x_cq_db *cq_db;
+	u16 cq_index;
+
+	if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+		return;
+
+	if (action == CNIC_ARM_CQE) {
+		cq_index = ep->qp.cqe_exp_seq_sn +
+			   ep->num_active_cmds / event_coal_div;
+		cq_index %= (ep->qp.cqe_size * 2 + 1);
+		if (!cq_index) {
+			cq_index = 1;
+			cq_db = (struct bnx2i_5771x_cq_db *)
+					ep->qp.cq_pgtbl_virt;
+			cq_db->sqn[0] = cq_index;
+		}
+	}
+}
+
+
+/**
+ * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer
+ * @bnx2i_conn:		iscsi connection on which RQ event occurred
+ * @ptr:		driver buffer to which RQ buffer contents are to
+ *			be copied
+ * @len:		length of valid data inside RQ buf
+ *
+ * Copies RQ buffer contents from shared (DMA'able) memory region to
+ *	driver buffer. RQ is used to DMA unsolicited iscsi pdu's and
+ *	scsi sense info
+ */
+void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len)
+{
+	if (!bnx2i_conn->ep->qp.rqe_left)
+		return;
+
+	bnx2i_conn->ep->qp.rqe_left--;
+	memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len);
+	if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) {
+		bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe;
+		bnx2i_conn->ep->qp.rq_cons_idx = 0;
+	} else {
+		bnx2i_conn->ep->qp.rq_cons_qe++;
+		bnx2i_conn->ep->qp.rq_cons_idx++;
+	}
+}
+
+
+static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn)
+{
+	struct bnx2i_5771x_dbell dbell;
+	u32 msg;
+
+	memset(&dbell, 0, sizeof(dbell));
+	dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE <<
+			      B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT);
+	msg = *((u32 *)&dbell);
+	/* TODO : get doorbell register mapping */
+	writel(cpu_to_le32(msg), conn->ep->qp.ctx_base);
+}
+
+
+/**
+ * bnx2i_put_rq_buf - replenish RQ buffers and ring the chip doorbell if needed
+ * @bnx2i_conn:	iscsi connection on which event to post
+ * @count:	number of RQ buffers being posted to chip
+ *
+ * No need to ring the hardware doorbell for the 57710 family of devices
+ */
+void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count)
+{
+	struct bnx2i_5771x_sq_rq_db *rq_db;
+	u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000);
+	struct bnx2i_endpoint *ep = bnx2i_conn->ep;
+
+	ep->qp.rqe_left += count;
+	ep->qp.rq_prod_idx &= 0x7FFF;
+	ep->qp.rq_prod_idx += count;
+
+	if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) {
+		ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes;
+		if (!hi_bit)
+			ep->qp.rq_prod_idx |= 0x8000;
+	} else
+		ep->qp.rq_prod_idx |= hi_bit;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+		rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt;
+		rq_db->prod_idx = ep->qp.rq_prod_idx;
+		/* no need to ring hardware doorbell for 57710 */
+	} else {
+		writew(ep->qp.rq_prod_idx,
+		       ep->qp.ctx_base + CNIC_RECV_DOORBELL);
+	}
+	mmiowb();
+}
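The RQ producer index is effectively 15 bits wide: bit 15 acts as a phase bit that
toggles each time the index wraps past max_rqes, letting firmware distinguish a freshly
wrapped producer from a stale value. A standalone trace of the update above (the queue
depth and count values are assumed for illustration):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t prod = 0x8000;	/* initial value, phase bit set */
		const uint16_t max_rqes = 16;
		const int count = 20;
		uint16_t hi_bit = prod & 0x8000;

		prod &= 0x7FFF;
		prod += count;
		if (prod > max_rqes) {
			prod %= max_rqes;
			if (!hi_bit)
				prod |= 0x8000;	/* wrapped: flip phase bit */
		} else {
			prod |= hi_bit;
		}
		printf("new producer index: 0x%04x\n", prod);	/* 0x0004 */
		return 0;
	}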
+
+
+/**
+ * bnx2i_ring_sq_dbell - Ring SQ doorbell to wake-up the processing engine
+ * @bnx2i_conn: 	iscsi connection to which new SQ entries belong
+ * @count: 		number of SQ WQEs to post
+ *
+ * SQ DB is updated in host memory and TX Doorbell is rung for 57710 family
+ *	of devices. For 5706/5708/5709 new SQ WQE count is written into the
+ *	doorbell register
+ */
+static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
+{
+	struct bnx2i_5771x_sq_rq_db *sq_db;
+	struct bnx2i_endpoint *ep = bnx2i_conn->ep;
+
+	ep->num_active_cmds++;
+	wmb();	/* flush SQ WQE memory before the doorbell is rung */
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+		sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt;
+		sq_db->prod_idx = ep->qp.sq_prod_idx;
+		bnx2i_ring_577xx_doorbell(bnx2i_conn);
+	} else
+		writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);
+
+	mmiowb(); /* flush posted PCI writes */
+}
+
+
+/**
+ * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters
+ * @bnx2i_conn:	iscsi connection to which new SQ entries belong
+ * @count:	number of SQ WQEs to post
+ *
+ * this routine will update SQ driver parameters and ring the doorbell
+ */
+static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn,
+					      int count)
+{
+	int tmp_cnt;
+
+	if (count == 1) {
+		if (bnx2i_conn->ep->qp.sq_prod_qe ==
+		    bnx2i_conn->ep->qp.sq_last_qe)
+			bnx2i_conn->ep->qp.sq_prod_qe =
+						bnx2i_conn->ep->qp.sq_first_qe;
+		else
+			bnx2i_conn->ep->qp.sq_prod_qe++;
+	} else {
+		if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <=
+		    bnx2i_conn->ep->qp.sq_last_qe)
+			bnx2i_conn->ep->qp.sq_prod_qe += count;
+		else {
+			tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe -
+				bnx2i_conn->ep->qp.sq_prod_qe;
+			bnx2i_conn->ep->qp.sq_prod_qe =
+				&bnx2i_conn->ep->qp.sq_first_qe[count -
+								(tmp_cnt + 1)];
+		}
+	}
+	bnx2i_conn->ep->qp.sq_prod_idx += count;
+	/* Ring the doorbell */
+	bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx);
+}
+
+
+/**
+ * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware
+ * @bnx2i_conn:	iscsi connection
+ * @task:	transport layer task which is requesting
+ *		a WQE to be sent to the chip for further processing
+ *
+ * prepare and post an iSCSI Login request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
+			   struct iscsi_task *task)
+{
+	struct bnx2i_cmd *bnx2i_cmd;
+	struct bnx2i_login_request *login_wqe;
+	struct iscsi_login *login_hdr;
+	u32 dword;
+
+	bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
+	login_hdr = (struct iscsi_login *)task->hdr;
+	login_wqe = (struct bnx2i_login_request *)
+						bnx2i_conn->ep->qp.sq_prod_qe;
+
+	login_wqe->op_code = login_hdr->opcode;
+	login_wqe->op_attr = login_hdr->flags;
+	login_wqe->version_max = login_hdr->max_version;
+	login_wqe->version_min = login_hdr->min_version;
+	login_wqe->data_length = ntoh24(login_hdr->dlength);
+	login_wqe->isid_lo = *((u32 *) login_hdr->isid);
+	login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2);
+	login_wqe->tsih = login_hdr->tsih;
+	login_wqe->itt = task->itt |
+		(ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT);
+	login_wqe->cid = login_hdr->cid;
+
+	login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
+	login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
+
+	login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
+	login_wqe->resp_bd_list_addr_hi =
+		(u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);
+
+	dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) |
+		 (bnx2i_conn->gen_pdu.resp_buf_size <<
+		  ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
+	login_wqe->resp_buffer = dword;
+	login_wqe->flags = 0;
+	login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
+	login_wqe->bd_list_addr_hi =
+		(u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
+	login_wqe->num_bds = 1;
+	login_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+	return 0;
+}
+
+/**
+ * bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware
+ * @bnx2i_conn:	iscsi connection
+ * @mtask:	driver command structure which is requesting
+ *		a WQE to be sent to the chip for further processing
+ *
+ * prepare and post an iSCSI task management request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
+			 struct iscsi_task *mtask)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_tm *tmfabort_hdr;
+	struct scsi_cmnd *ref_sc;
+	struct iscsi_task *ctask;
+	struct bnx2i_cmd *bnx2i_cmd;
+	struct bnx2i_tmf_request *tmfabort_wqe;
+	u32 dword;
+
+	bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
+	tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
+	tmfabort_wqe = (struct bnx2i_tmf_request *)
+						bnx2i_conn->ep->qp.sq_prod_qe;
+
+	tmfabort_wqe->op_code = tmfabort_hdr->opcode;
+	tmfabort_wqe->op_attr = 0;
+	tmfabort_wqe->op_attr =
+		ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
+	tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]);
+	tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]);
+
+	tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
+	tmfabort_wqe->reserved2 = 0;
+	tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
+
+	ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
+	if (!ctask || !ctask->sc)
+		/*
+		 * the iscsi layer must have completed the cmd while this
+		 * was starting up.
+		 */
+		return 0;
+	ref_sc = ctask->sc;
+
+	if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
+		dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+	else
+		dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+	tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt);
+	tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
+
+	tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
+	tmfabort_wqe->bd_list_addr_hi = (u32)
+				((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
+	tmfabort_wqe->num_bds = 1;
+	tmfabort_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+	return 0;
+}
+
+/**
+ * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
+ * @bnx2i_conn:	iscsi connection
+ * @cmd:	driver command structure which is requesting
+ *		a WQE to be sent to the chip for further processing
+ *
+ * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn,
+			     struct bnx2i_cmd *cmd)
+{
+	struct bnx2i_cmd_request *scsi_cmd_wqe;
+
+	scsi_cmd_wqe = (struct bnx2i_cmd_request *)
+						bnx2i_conn->ep->qp.sq_prod_qe;
+	memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request));
+	scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+	return 0;
+}
+
+/**
+ * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware
+ * @bnx2i_conn:		iscsi connection
+ * @task:		transport layer task which is requesting
+ *			a WQE to be sent to the chip for further processing
+ * @ttt:		TTT to be used when building pdu header
+ * @datap:		payload buffer pointer
+ * @data_len:		payload data length
+ * @unsol:		indicates whether nopout pdu is unsolicited pdu or
+ *			in response to target's NOPIN w/ TTT != FFFFFFFF
+ *
+ * prepare and post a nopout request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
+			    struct iscsi_task *task, u32 ttt,
+			    char *datap, int data_len, int unsol)
+{
+	struct bnx2i_endpoint *ep = bnx2i_conn->ep;
+	struct bnx2i_cmd *bnx2i_cmd;
+	struct bnx2i_nop_out_request *nopout_wqe;
+	struct iscsi_nopout *nopout_hdr;
+
+	bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
+	nopout_hdr = (struct iscsi_nopout *)task->hdr;
+	nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
+	nopout_wqe->op_code = nopout_hdr->opcode;
+	nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
+	memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+		u32 tmp = nopout_wqe->lun[0];
+		/* 57710 requires LUN field to be swapped */
+		nopout_wqe->lun[0] = nopout_wqe->lun[1];
+		nopout_wqe->lun[1] = tmp;
+	}
+
+	nopout_wqe->itt = ((u16)task->itt |
+			   (ISCSI_TASK_TYPE_MPATH <<
+			    ISCSI_TMF_REQUEST_TYPE_SHIFT));
+	nopout_wqe->ttt = ttt;
+	nopout_wqe->flags = 0;
+	if (!unsol)
+		nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
+	else if (nopout_hdr->itt == RESERVED_ITT)
+		nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
+
+	nopout_wqe->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
+	nopout_wqe->data_length = data_len;
+	if (data_len) {
+		/* handle payload data, not required in first release */
+		printk(KERN_ALERT "NOPOUT: WARNING!! payload len != 0\n");
+	} else {
+		nopout_wqe->bd_list_addr_lo = (u32)
+					bnx2i_conn->hba->mp_bd_dma;
+		nopout_wqe->bd_list_addr_hi =
+			(u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
+		nopout_wqe->num_bds = 1;
+	}
+	nopout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+	return 0;
+}
+
+
+/**
+ * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware
+ * @bnx2i_conn:	iscsi connection
+ * @task:	transport layer task which is requesting
+ *		a WQE to be sent to the chip for further processing
+ *
+ * prepare and post logout request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn,
+			    struct iscsi_task *task)
+{
+	struct bnx2i_cmd *bnx2i_cmd;
+	struct bnx2i_logout_request *logout_wqe;
+	struct iscsi_logout *logout_hdr;
+
+	bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
+	logout_hdr = (struct iscsi_logout *)task->hdr;
+
+	logout_wqe = (struct bnx2i_logout_request *)
+						bnx2i_conn->ep->qp.sq_prod_qe;
+	memset(logout_wqe, 0x00, sizeof(struct bnx2i_logout_request));
+
+	logout_wqe->op_code = logout_hdr->opcode;
+	logout_wqe->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+	logout_wqe->op_attr =
+			logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE;
+	logout_wqe->itt = ((u16)task->itt |
+			   (ISCSI_TASK_TYPE_MPATH <<
+			    ISCSI_LOGOUT_REQUEST_TYPE_SHIFT));
+	logout_wqe->data_length = 0;
+	logout_wqe->cid = 0;
+
+	logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
+	logout_wqe->bd_list_addr_hi = (u32)
+				((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
+	logout_wqe->num_bds = 1;
+	logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+	return 0;
+}
+
+
+/**
+ * bnx2i_update_iscsi_conn - send iscsi conn update request to the hardware
+ * @conn:	iscsi connection which requires iscsi parameter update
+ *
+ * sends down iSCSI Conn Update request to move iSCSI conn to FFP
+ */
+void bnx2i_update_iscsi_conn(struct iscsi_conn *conn)
+{
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct bnx2i_hba *hba = bnx2i_conn->hba;
+	struct kwqe *kwqe_arr[2];
+	struct iscsi_kwqe_conn_update *update_wqe;
+	struct iscsi_kwqe_conn_update conn_update_kwqe;
+
+	update_wqe = &conn_update_kwqe;
+
+	update_wqe->hdr.op_code = ISCSI_KWQE_OPCODE_UPDATE_CONN;
+	update_wqe->hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	/* 5771x requires conn context id to be passed as is */
+	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type))
+		update_wqe->context_id = bnx2i_conn->ep->ep_cid;
+	else
+		update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7);
+	update_wqe->conn_flags = 0;
+	if (conn->hdrdgst_en)
+		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST;
+	if (conn->datadgst_en)
+		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST;
+	if (conn->session->initial_r2t_en)
+		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T;
+	if (conn->session->imm_data_en)
+		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA;
+
+	update_wqe->max_send_pdu_length = conn->max_xmit_dlength;
+	update_wqe->max_recv_pdu_length = conn->max_recv_dlength;
+	update_wqe->first_burst_length = conn->session->first_burst;
+	update_wqe->max_burst_length = conn->session->max_burst;
+	update_wqe->exp_stat_sn = conn->exp_statsn;
+	update_wqe->max_outstanding_r2ts = conn->session->max_r2t;
+	update_wqe->session_error_recovery_level = conn->session->erl;
+	iscsi_conn_printk(KERN_ALERT, conn,
+			  "bnx2i: conn update - MBL 0x%x FBL 0x%x "
+			  "MRDSL_I 0x%x MRDSL_T 0x%x\n",
+			  update_wqe->max_burst_length,
+			  update_wqe->first_burst_length,
+			  update_wqe->max_recv_pdu_length,
+			  update_wqe->max_send_pdu_length);
+
+	kwqe_arr[0] = (struct kwqe *) update_wqe;
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
+}
+
+
+/**
+ * bnx2i_ep_ofld_timer - connection offload/destroy request timeout handler
+ * @data:	endpoint (transport handle) structure pointer
+ *
+ * routine to handle connection offload/destroy request timeout
+ */
+void bnx2i_ep_ofld_timer(unsigned long data)
+{
+	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data;
+
+	if (ep->state == EP_STATE_OFLD_START) {
+		printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n");
+		ep->state = EP_STATE_OFLD_FAILED;
+	} else if (ep->state == EP_STATE_DISCONN_START) {
+		printk(KERN_ALERT "ofld_timer: CONN_DISCON timeout\n");
+		ep->state = EP_STATE_DISCONN_TIMEDOUT;
+	} else if (ep->state == EP_STATE_CLEANUP_START) {
+		printk(KERN_ALERT "ofld_timer: CONN_CLEANUP timeout\n");
+		ep->state = EP_STATE_CLEANUP_FAILED;
+	}
+
+	wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+static int bnx2i_power_of2(u32 val)
+{
+	u32 power = 0;
+	if (val & (val - 1))
+		return power;
+	val--;
+	while (val) {
+		val = val >> 1;
+		power++;
+	}
+	return power;
+}
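In other words, for a power-of-two input this returns log2(val)
(bnx2i_power_of2(64) yields 6) and 0 for anything else.
bnx2i_send_fw_iscsi_init_msg() below uses it to express CQEs-per-page as a log2
value; since PAGE_SIZE and BNX2I_CQE_SIZE are both powers of two, their quotient is
too, so the zero case never bites there.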
+
+
+/**
+ * bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request
+ * @hba:	adapter structure pointer
+ * @cmd:	driver command structure which is requesting
+ *		a WQE to be sent to the chip for further processing
+ *
+ * prepares and posts an ISCSI_OPCODE_CLEANUP_REQUEST SQ WQE
+ */
+void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
+{
+	struct bnx2i_cleanup_request *cmd_cleanup;
+
+	cmd_cleanup =
+		(struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe;
+	memset(cmd_cleanup, 0x00, sizeof(struct bnx2i_cleanup_request));
+
+	cmd_cleanup->op_code = ISCSI_OPCODE_CLEANUP_REQUEST;
+	cmd_cleanup->itt = cmd->req.itt;
+	cmd_cleanup->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+	bnx2i_ring_dbell_update_sq_params(cmd->conn, 1);
+}
+
+
+/**
+ * bnx2i_send_conn_destroy - initiates iscsi connection teardown process
+ * @hba:	adapter structure pointer
+ * @ep:		endpoint (transport identifier) structure
+ *
+ * this routine prepares and posts an ISCSI_KWQE_OPCODE_DESTROY_CONN KWQE
+ * 	to initiate the iscsi connection context clean-up process
+ */
+void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+	struct kwqe *kwqe_arr[2];
+	struct iscsi_kwqe_conn_destroy conn_cleanup;
+
+	memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy));
+
+	conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN;
+	conn_cleanup.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+	/* 5771x requires conn context id to be passed as is */
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+		conn_cleanup.context_id = ep->ep_cid;
+	else
+		conn_cleanup.context_id = (ep->ep_cid >> 7);
+
+	conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid;
+
+	kwqe_arr[0] = (struct kwqe *) &conn_cleanup;
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
+}
+
+
+/**
+ * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process
+ * @hba: 		adapter structure pointer
+ * @ep: 		endpoint (transport identifier) structure
+ *
+ * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
+					  struct bnx2i_endpoint *ep)
+{
+	struct kwqe *kwqe_arr[2];
+	struct iscsi_kwqe_conn_offload1 ofld_req1;
+	struct iscsi_kwqe_conn_offload2 ofld_req2;
+	dma_addr_t dma_addr;
+	int num_kwqes = 2;
+	u32 *ptbl;
+
+	ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
+	ofld_req1.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
+
+	dma_addr = ep->qp.sq_pgtbl_phys;
+	ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
+	ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+	dma_addr = ep->qp.cq_pgtbl_phys;
+	ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
+	ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+	ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
+	ofld_req2.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	dma_addr = ep->qp.rq_pgtbl_phys;
+	ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
+	ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+	ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
+
+	ofld_req2.sq_first_pte.hi = *ptbl++;
+	ofld_req2.sq_first_pte.lo = *ptbl;
+
+	ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
+	ofld_req2.cq_first_pte.hi = *ptbl++;
+	ofld_req2.cq_first_pte.lo = *ptbl;
+
+	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
+	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
+	ofld_req2.num_additional_wqes = 0;
+
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+}
+
+
+/**
+ * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation
+ * @hba: 		adapter structure pointer
+ * @ep: 		endpoint (transport identifier) structure
+ *
+ * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
+					   struct bnx2i_endpoint *ep)
+{
+	struct kwqe *kwqe_arr[5];
+	struct iscsi_kwqe_conn_offload1 ofld_req1;
+	struct iscsi_kwqe_conn_offload2 ofld_req2;
+	struct iscsi_kwqe_conn_offload3 ofld_req3[1];
+	dma_addr_t dma_addr;
+	int num_kwqes = 2;
+	u32 *ptbl;
+
+	ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
+	ofld_req1.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
+
+	dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE;
+	ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
+	ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+	dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE;
+	ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
+	ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+	ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
+	ofld_req2.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE;
+	ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
+	ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+	ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
+	ofld_req2.sq_first_pte.hi = *ptbl++;
+	ofld_req2.sq_first_pte.lo = *ptbl;
+
+	ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
+	ofld_req2.cq_first_pte.hi = *ptbl++;
+	ofld_req2.cq_first_pte.lo = *ptbl;
+
+	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
+	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
+
+	ofld_req2.num_additional_wqes = 1;
+	memset(ofld_req3, 0x00, sizeof(ofld_req3[0]));
+	ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
+	ofld_req3[0].qp_first_pte[0].hi = *ptbl++;
+	ofld_req3[0].qp_first_pte[0].lo = *ptbl;
+
+	kwqe_arr[2] = (struct kwqe *) ofld_req3;
+	/* needed if we decide to go with multiple KCQEs per conn */
+	num_kwqes += 1;
+
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+}
+
+/**
+ * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process
+ * @hba: 		adapter structure pointer
+ * @ep: 		endpoint (transport identifier) structure
+ *
+ * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+		bnx2i_5771x_send_conn_ofld_req(hba, ep);
+	else
+		bnx2i_570x_send_conn_ofld_req(hba, ep);
+}
+
+
+/**
+ * setup_qp_page_tables - iscsi QP page table setup function
+ * @ep:		endpoint (transport identifier) structure
+ *
+ * Sets up page tables for SQ/RQ/CQ. 1G devices (5706/5708/5709) require
+ * 	64-bit addresses in big endian format, whereas 10G devices (57710)
+ * 	require page table entries in little endian format
+ */
+static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
+{
+	int num_pages;
+	u32 *ptbl;
+	dma_addr_t page;
+	int cnic_dev_10g;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+		cnic_dev_10g = 1;
+	else
+		cnic_dev_10g = 0;
+
+	/* SQ page table */
+	memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
+	num_pages = ep->qp.sq_mem_size / PAGE_SIZE;
+	page = ep->qp.sq_phys;
+
+	if (cnic_dev_10g)
+		ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
+	else
+		ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
+	while (num_pages--) {
+		if (cnic_dev_10g) {
+			/* PTE is written in little endian format for 57710 */
+			*ptbl = (u32) page;
+			ptbl++;
+			*ptbl = (u32) ((u64) page >> 32);
+			ptbl++;
+			page += PAGE_SIZE;
+		} else {
+			/* PTE is written in big endian format for
+			 * 5706/5708/5709 devices */
+			*ptbl = (u32) ((u64) page >> 32);
+			ptbl++;
+			*ptbl = (u32) page;
+			ptbl++;
+			page += PAGE_SIZE;
+		}
+	}
+
+	/* RQ page table */
+	memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
+	num_pages = ep->qp.rq_mem_size / PAGE_SIZE;
+	page = ep->qp.rq_phys;
+
+	if (cnic_dev_10g)
+		ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
+	else
+		ptbl = (u32 *) ep->qp.rq_pgtbl_virt;
+	while (num_pages--) {
+		if (cnic_dev_10g) {
+			/* PTE is written in little endian format for 57710 */
+			*ptbl = (u32) page;
+			ptbl++;
+			*ptbl = (u32) ((u64) page >> 32);
+			ptbl++;
+			page += PAGE_SIZE;
+		} else {
+			/* PTE is written in big endian format for
+			 * 5706/5708/5709 devices */
+			*ptbl = (u32) ((u64) page >> 32);
+			ptbl++;
+			*ptbl = (u32) page;
+			ptbl++;
+			page += PAGE_SIZE;
+		}
+	}
+
+	/* CQ page table */
+	memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
+	num_pages = ep->qp.cq_mem_size / PAGE_SIZE;
+	page = ep->qp.cq_phys;
+
+	if (cnic_dev_10g)
+		ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
+	else
+		ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
+	while (num_pages--) {
+		if (cnic_dev_10g) {
+			/* PTE is written in little endian format for 57710 */
+			*ptbl = (u32) page;
+			ptbl++;
+			*ptbl = (u32) ((u64) page >> 32);
+			ptbl++;
+			page += PAGE_SIZE;
+		} else {
+			/* PTE is written in big endian format for
+			 * 5706/5708/5709 devices */
+			*ptbl = (u32) ((u64) page >> 32);
+			ptbl++;
+			*ptbl = (u32) page;
+			ptbl++;
+			page += PAGE_SIZE;
+		}
+	}
+}
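The three loops above differ only in which 32-bit half of each 64-bit DMA address is
written first: 57710 wants the low word first (little endian), the 1G parts want the
high word first (big endian). A compact sketch of the two PTE layouts, outside the
driver and with an illustrative name:

	#include <stdint.h>

	/* write one 64-bit DMA address as two 32-bit page table words */
	static void demo_write_pte(uint32_t *ptbl, uint64_t page, int dev_10g)
	{
		if (dev_10g) {		/* 57710: low word first */
			ptbl[0] = (uint32_t)page;
			ptbl[1] = (uint32_t)(page >> 32);
		} else {		/* 5706/5708/5709: high word first */
			ptbl[0] = (uint32_t)(page >> 32);
			ptbl[1] = (uint32_t)page;
		}
	}

The repetition across the SQ/RQ/CQ loops could be factored through a helper like this,
at the cost of one extra branch per PTE.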
+
+
+/**
+ * bnx2i_alloc_qp_resc - allocates required resources for QP.
+ * @hba:	adapter structure pointer
+ * @ep:		endpoint (transport identifier) structure
+ *
+ * Allocate QP (transport layer for iSCSI connection) resources, DMA'able
+ *	memory for SQ/RQ/CQ and page tables. EP structure elements such
+ *	as producer/consumer indexes/pointers, queue sizes and page table
+ *	contents are set up
+ */
+int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+	struct bnx2i_5771x_cq_db *cq_db;
+
+	ep->hba = hba;
+	ep->conn = NULL;
+	ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0;
+
+	/* Allocate page table memory for SQ which is page aligned */
+	ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
+	ep->qp.sq_mem_size =
+		(ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	ep->qp.sq_pgtbl_size =
+		(ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
+	ep->qp.sq_pgtbl_size =
+		(ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+	ep->qp.sq_pgtbl_virt =
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
+				   &ep->qp.sq_pgtbl_phys, GFP_KERNEL);
+	if (!ep->qp.sq_pgtbl_virt) {
+		printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n",
+				  ep->qp.sq_pgtbl_size);
+		goto mem_alloc_err;
+	}
+
+	/* Allocate memory area for actual SQ element */
+	ep->qp.sq_virt =
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+				   &ep->qp.sq_phys, GFP_KERNEL);
+	if (!ep->qp.sq_virt) {
+		printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
+				  ep->qp.sq_mem_size);
+		goto mem_alloc_err;
+	}
+
+	memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
+	ep->qp.sq_first_qe = ep->qp.sq_virt;
+	ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
+	ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
+	ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1];
+	ep->qp.sq_prod_idx = 0;
+	ep->qp.sq_cons_idx = 0;
+	ep->qp.sqe_left = hba->max_sqes;
+
+	/* Allocate page table memory for CQ which is page aligned */
+	ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
+	ep->qp.cq_mem_size =
+		(ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	ep->qp.cq_pgtbl_size =
+		(ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
+	ep->qp.cq_pgtbl_size =
+		(ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+	ep->qp.cq_pgtbl_virt =
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
+				   &ep->qp.cq_pgtbl_phys, GFP_KERNEL);
+	if (!ep->qp.cq_pgtbl_virt) {
+		printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n",
+				  ep->qp.cq_pgtbl_size);
+		goto mem_alloc_err;
+	}
+
+	/* Allocate memory area for actual CQ element */
+	ep->qp.cq_virt =
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+				   &ep->qp.cq_phys, GFP_KERNEL);
+	if (!ep->qp.cq_virt) {
+		printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
+				  ep->qp.cq_mem_size);
+		goto mem_alloc_err;
+	}
+	memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);
+
+	ep->qp.cq_first_qe = ep->qp.cq_virt;
+	ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
+	ep->qp.cq_cons_qe = ep->qp.cq_first_qe;
+	ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1];
+	ep->qp.cq_prod_idx = 0;
+	ep->qp.cq_cons_idx = 0;
+	ep->qp.cqe_left = hba->max_cqes;
+	ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN;
+	ep->qp.cqe_size = hba->max_cqes;
+
+	/* Invalidate all EQ CQE index, required only for 57710 */
+	cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
+	memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS);
+
+	/* Allocate page table memory for RQ which is page aligned */
+	ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
+	ep->qp.rq_mem_size =
+		(ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	ep->qp.rq_pgtbl_size =
+		(ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
+	ep->qp.rq_pgtbl_size =
+		(ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+	ep->qp.rq_pgtbl_virt =
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
+				   &ep->qp.rq_pgtbl_phys, GFP_KERNEL);
+	if (!ep->qp.rq_pgtbl_virt) {
+		printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n",
+				  ep->qp.rq_pgtbl_size);
+		goto mem_alloc_err;
+	}
+
+	/* Allocate memory area for actual RQ element */
+	ep->qp.rq_virt =
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
+				   &ep->qp.rq_phys, GFP_KERNEL);
+	if (!ep->qp.rq_virt) {
+		printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n",
+				  ep->qp.rq_mem_size);
+		goto mem_alloc_err;
+	}
+
+	ep->qp.rq_first_qe = ep->qp.rq_virt;
+	ep->qp.rq_prod_qe = ep->qp.rq_first_qe;
+	ep->qp.rq_cons_qe = ep->qp.rq_first_qe;
+	ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1];
+	ep->qp.rq_prod_idx = 0x8000;
+	ep->qp.rq_cons_idx = 0;
+	ep->qp.rqe_left = hba->max_rqes;
+
+	setup_qp_page_tables(ep);
+
+	return 0;
+
+mem_alloc_err:
+	bnx2i_free_qp_resc(hba, ep);
+	return -ENOMEM;
+}
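As a worked example of the sizing above: with hba->max_sqes == 512 and 64-byte WQEs
(values assumed for illustration) the SQ needs 32 KiB, i.e. eight 4 KiB pages; its page
table then needs eight 8-byte entries, which the (x + (PAGE_SIZE - 1)) & PAGE_MASK
round-up pads to a single full page.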
+
+
+
+/**
+ * bnx2i_free_qp_resc - free memory resources held by QP
+ * @hba:	adapter structure pointer
+ * @ep:	endpoint (transport identifier) structure
+ *
+ * Free QP resources - SQ/RQ/CQ memory and page tables.
+ */
+void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+	if (ep->qp.ctx_base) {
+		iounmap(ep->qp.ctx_base);
+		ep->qp.ctx_base = NULL;
+	}
+	/* Free SQ mem */
+	if (ep->qp.sq_pgtbl_virt) {
+		dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
+				  ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys);
+		ep->qp.sq_pgtbl_virt = NULL;
+		ep->qp.sq_pgtbl_phys = 0;
+	}
+	if (ep->qp.sq_virt) {
+		dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+				  ep->qp.sq_virt, ep->qp.sq_phys);
+		ep->qp.sq_virt = NULL;
+		ep->qp.sq_phys = 0;
+	}
+
+	/* Free RQ mem */
+	if (ep->qp.rq_pgtbl_virt) {
+		dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
+				  ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys);
+		ep->qp.rq_pgtbl_virt = NULL;
+		ep->qp.rq_pgtbl_phys = 0;
+	}
+	if (ep->qp.rq_virt) {
+		dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
+				  ep->qp.rq_virt, ep->qp.rq_phys);
+		ep->qp.rq_virt = NULL;
+		ep->qp.rq_phys = 0;
+	}
+
+	/* Free CQ mem */
+	if (ep->qp.cq_pgtbl_virt) {
+		dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
+				  ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys);
+		ep->qp.cq_pgtbl_virt = NULL;
+		ep->qp.cq_pgtbl_phys = 0;
+	}
+	if (ep->qp.cq_virt) {
+		dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+				  ep->qp.cq_virt, ep->qp.cq_phys);
+		ep->qp.cq_virt = NULL;
+		ep->qp.cq_phys = 0;
+	}
+}
+
+
+/**
+ * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w
+ * @hba:	adapter structure pointer
+ *
+ * Send down iscsi_init KWQEs which initiate the initial handshake with the
+ * 	firmware. This results in iSCSI support validation and on-chip context
+ * 	manager initialization. Firmware completes the handshake with a CQE
+ * 	carrying the result of iscsi support validation. Parameters carried by
+ * 	the iscsi init request determine the number of offloaded connections and
+ * 	the tolerance level for iscsi protocol violations this hba/chip can support
+ */
+int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
+{
+	struct kwqe *kwqe_arr[3];
+	struct iscsi_kwqe_init1 iscsi_init;
+	struct iscsi_kwqe_init2 iscsi_init2;
+	int rc = 0;
+	u64 mask64;
+
+	bnx2i_adjust_qp_size(hba);
+
+	iscsi_init.flags =
+		ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
+	if (en_tcp_dack)
+		iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
+	iscsi_init.reserved0 = 0;
+	iscsi_init.num_cqs = 1;
+	iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1;
+	iscsi_init.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
+	iscsi_init.dummy_buffer_addr_hi =
+		(u32) ((u64) hba->dummy_buf_dma >> 32);
+
+	hba->ctx_ccell_tasks =
+			((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
+	iscsi_init.num_ccells_per_conn = hba->num_ccell;
+	iscsi_init.num_tasks_per_conn = hba->max_sqes;
+	iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+	iscsi_init.sq_num_wqes = hba->max_sqes;
+	iscsi_init.cq_log_wqes_per_page =
+		(u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
+	iscsi_init.cq_num_wqes = hba->max_cqes;
+	iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
+				   (PAGE_SIZE - 1)) / PAGE_SIZE;
+	iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
+				   (PAGE_SIZE - 1)) / PAGE_SIZE;
+	iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
+	iscsi_init.rq_num_wqes = hba->max_rqes;
+
+
+	iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2;
+	iscsi_init2.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+	iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1;
+	mask64 = 0x0ULL;
+	mask64 |= (
+		/* CISCO MDS */
+		(1UL <<
+		  ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) |
+		/* HP MSA1510i */
+		(1UL <<
+		  ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) |
+		/* EMC */
+		(1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
+	if (error_mask1)
+		iscsi_init2.error_bit_map[0] = error_mask1;
+	else
+		iscsi_init2.error_bit_map[0] = (u32) mask64;
+
+	if (error_mask2)
+		iscsi_init2.error_bit_map[1] = error_mask2;
+	else
+		iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32);
+
+	iscsi_error_mask = mask64;
+
+	kwqe_arr[0] = (struct kwqe *) &iscsi_init;
+	kwqe_arr[1] = (struct kwqe *) &iscsi_init2;
+
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2);
+	return rc;
+}
+
+
+/**
+ * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion
+ * @session:	iscsi session pointer
+ * @bnx2i_conn:	bnx2i connection pointer
+ * @cqe:	pointer to newly DMA'ed CQE entry for processing
+ *
+ * process SCSI CMD Response CQE & complete the request to SCSI-ML
+ */
+static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
+				       struct bnx2i_conn *bnx2i_conn,
+				       struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct bnx2i_cmd_response *resp_cqe;
+	struct bnx2i_cmd *bnx2i_cmd;
+	struct iscsi_task *task;
+	struct iscsi_cmd_rsp *hdr;
+	u32 datalen = 0;
+
+	resp_cqe = (struct bnx2i_cmd_response *)cqe;
+	spin_lock(&session->lock);
+	task = iscsi_itt_to_task(conn,
+				 resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
+	if (!task)
+		goto fail;
+
+	bnx2i_cmd = task->dd_data;
+
+	if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
+		conn->datain_pdus_cnt +=
+			resp_cqe->task_stat.read_stat.num_data_outs;
+		conn->rxdata_octets +=
+			bnx2i_cmd->req.total_data_transfer_length;
+	} else {
+		conn->dataout_pdus_cnt +=
+			resp_cqe->task_stat.read_stat.num_data_outs;
+		conn->r2t_pdus_cnt +=
+			resp_cqe->task_stat.read_stat.num_r2ts;
+		conn->txdata_octets +=
+			bnx2i_cmd->req.total_data_transfer_length;
+	}
+	bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
+
+	hdr = (struct iscsi_cmd_rsp *)task->hdr;
+	hdr->opcode = resp_cqe->op_code;
+	hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn);
+	hdr->exp_cmdsn = cpu_to_be32(resp_cqe->exp_cmd_sn);
+	hdr->response = resp_cqe->response;
+	hdr->cmd_status = resp_cqe->status;
+	hdr->flags = resp_cqe->response_flags;
+	hdr->residual_count = cpu_to_be32(resp_cqe->residual_count);
+
+	if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN)
+		goto done;
+
+	if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) {
+		datalen = resp_cqe->data_length;
+		if (datalen < 2)
+			goto done;
+
+		if (datalen > BNX2I_RQ_WQE_SIZE) {
+			iscsi_conn_printk(KERN_ERR, conn,
+					  "sense data len %d > RQ sz\n",
+					  datalen);
+			datalen = BNX2I_RQ_WQE_SIZE;
+		} else if (datalen > ISCSI_DEF_MAX_RECV_SEG_LEN) {
+			iscsi_conn_printk(KERN_ERR, conn,
+					  "sense data len %d > conn data\n",
+					  datalen);
+			datalen = ISCSI_DEF_MAX_RECV_SEG_LEN;
+		}
+
+		bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen);
+		bnx2i_put_rq_buf(bnx2i_cmd->conn, 1);
+	}
+
+done:
+	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
+			     conn->data, datalen);
+fail:
+	spin_unlock(&session->lock);
+	return 0;
+}
+
+
+/**
+ * bnx2i_process_login_resp - this function handles iscsi login response
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process Login Response CQE & complete it to open-iscsi user daemon
+ */
+static int bnx2i_process_login_resp(struct iscsi_session *session,
+				    struct bnx2i_conn *bnx2i_conn,
+				    struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_task *task;
+	struct bnx2i_login_response *login;
+	struct iscsi_login_rsp *resp_hdr;
+	int pld_len;
+	int pad_len;
+
+	login = (struct bnx2i_login_response *) cqe;
+	spin_lock(&session->lock);
+	task = iscsi_itt_to_task(conn,
+				 login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
+	if (!task)
+		goto done;
+
+	resp_hdr = (struct iscsi_login_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
+	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+	resp_hdr->opcode = login->op_code;
+	resp_hdr->flags = login->response_flags;
+	resp_hdr->max_version = login->version_max;
+	resp_hdr->active_version = login->version_active;
+	resp_hdr->hlength = 0;
+
+	hton24(resp_hdr->dlength, login->data_length);
+	memcpy(resp_hdr->isid, &login->isid_lo, 6);
+	resp_hdr->tsih = cpu_to_be16(login->tsih);
+	resp_hdr->itt = task->hdr->itt;
+	resp_hdr->statsn = cpu_to_be32(login->stat_sn);
+	resp_hdr->exp_cmdsn = cpu_to_be32(login->exp_cmd_sn);
+	resp_hdr->max_cmdsn = cpu_to_be32(login->max_cmd_sn);
+	resp_hdr->status_class = login->status_class;
+	resp_hdr->status_detail = login->status_detail;
+	pld_len = login->data_length;
+	bnx2i_conn->gen_pdu.resp_wr_ptr =
+					bnx2i_conn->gen_pdu.resp_buf + pld_len;
+
+	pad_len = 0;
+	if (pld_len & 0x3)
+		pad_len = 4 - (pld_len % 4);
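+	/* iSCSI PDU payloads are padded out to a 4-byte boundary; e.g. a
+	 * 5-byte login payload gets pad_len = 3
+	 */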
+
+	if (pad_len) {
+		int i = 0;
+		for (i = 0; i < pad_len; i++) {
+			bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
+			bnx2i_conn->gen_pdu.resp_wr_ptr++;
+		}
+	}
+
+	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
+		bnx2i_conn->gen_pdu.resp_buf,
+		bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);
+done:
+	spin_unlock(&session->lock);
+	return 0;
+}
+
+/**
+ * bnx2i_process_tmf_resp - this function handles iscsi TMF response
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI TMF Response CQE and wake up the driver eh thread.
+ */
+static int bnx2i_process_tmf_resp(struct iscsi_session *session,
+				  struct bnx2i_conn *bnx2i_conn,
+				  struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_task *task;
+	struct bnx2i_tmf_response *tmf_cqe;
+	struct iscsi_tm_rsp *resp_hdr;
+
+	tmf_cqe = (struct bnx2i_tmf_response *)cqe;
+	spin_lock(&session->lock);
+	task = iscsi_itt_to_task(conn,
+				 tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
+	if (!task)
+		goto done;
+
+	resp_hdr = (struct iscsi_tm_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
+	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+	resp_hdr->opcode = tmf_cqe->op_code;
+	resp_hdr->max_cmdsn = cpu_to_be32(tmf_cqe->max_cmd_sn);
+	resp_hdr->exp_cmdsn = cpu_to_be32(tmf_cqe->exp_cmd_sn);
+	resp_hdr->itt = task->hdr->itt;
+	resp_hdr->response = tmf_cqe->response;
+
+	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
+done:
+	spin_unlock(&session->lock);
+	return 0;
+}
+
+/**
+ * bnx2i_process_logout_resp - this function handles iscsi logout response
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI Logout Response CQE & make function call to
+ * notify the user daemon.
+ */
+static int bnx2i_process_logout_resp(struct iscsi_session *session,
+				     struct bnx2i_conn *bnx2i_conn,
+				     struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_task *task;
+	struct bnx2i_logout_response *logout;
+	struct iscsi_logout_rsp *resp_hdr;
+
+	logout = (struct bnx2i_logout_response *) cqe;
+	spin_lock(&session->lock);
+	task = iscsi_itt_to_task(conn,
+				 logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
+	if (!task)
+		goto done;
+
+	resp_hdr = (struct iscsi_logout_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
+	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+	resp_hdr->opcode = logout->op_code;
+	resp_hdr->flags = logout->response;
+	resp_hdr->hlength = 0;
+
+	resp_hdr->itt = task->hdr->itt;
+	resp_hdr->statsn = task->hdr->exp_statsn;
+	resp_hdr->exp_cmdsn = cpu_to_be32(logout->exp_cmd_sn);
+	resp_hdr->max_cmdsn = cpu_to_be32(logout->max_cmd_sn);
+
+	resp_hdr->t2wait = cpu_to_be32(logout->time_to_wait);
+	resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain);
+
+	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
+done:
+	spin_unlock(&session->lock);
+	return 0;
+}
+
+/**
+ * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI NOPIN local completion CQE, frees the ITT and command structures
+ */
+static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
+					   struct bnx2i_conn *bnx2i_conn,
+					   struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct bnx2i_nop_in_msg *nop_in;
+	struct iscsi_task *task;
+
+	nop_in = (struct bnx2i_nop_in_msg *)cqe;
+	spin_lock(&session->lock);
+	task = iscsi_itt_to_task(conn,
+				 nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
+	if (task)
+		iscsi_put_task(task);
+	spin_unlock(&session->lock);
+}
+
+/**
+ * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd
+ * @bnx2i_conn:	iscsi connection
+ *
+ * Firmware advances RQ producer index for every unsolicited PDU even if
+ *	payload data length is '0'. This function makes corresponding
+ *	adjustments on the driver side to match this f/w behavior
+ */
+static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn)
+{
+	char dummy_rq_data[2];
+	bnx2i_get_rq_buf(bnx2i_conn, dummy_rq_data, 1);
+	bnx2i_put_rq_buf(bnx2i_conn, 1);
+}
+
+
+/**
+ * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI target's proactive iSCSI NOPIN request
+ */
+static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
+				     struct bnx2i_conn *bnx2i_conn,
+				     struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_task *task;
+	struct bnx2i_nop_in_msg *nop_in;
+	struct iscsi_nopin *hdr;
+	u32 itt;
+	int tgt_async_nop = 0;
+
+	nop_in = (struct bnx2i_nop_in_msg *)cqe;
+	itt = nop_in->itt & ISCSI_NOP_IN_MSG_INDEX;
+
+	spin_lock(&session->lock);
+	hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
+	memset(hdr, 0, sizeof(struct iscsi_hdr));
+	hdr->opcode = nop_in->op_code;
+	hdr->max_cmdsn = cpu_to_be32(nop_in->max_cmd_sn);
+	hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn);
+	hdr->ttt = cpu_to_be32(nop_in->ttt);
+
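+	/* an ITT of all ones (RESERVED_ITT) marks a target-initiated,
+	 * unsolicited NOP-IN rather than a reply to one of our NOP-OUTs
+	 */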
+	if (itt == (u16) RESERVED_ITT) {
+		bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+		hdr->itt = RESERVED_ITT;
+		tgt_async_nop = 1;
+		goto done;
+	}
+
+	/* this is a response to one of our nop-outs */
+	task = iscsi_itt_to_task(conn, itt);
+	if (task) {
+		hdr->flags = ISCSI_FLAG_CMD_FINAL;
+		hdr->itt = task->hdr->itt;
+		hdr->ttt = cpu_to_be32(nop_in->ttt);
+		memcpy(hdr->lun, nop_in->lun, 8);
+	}
+done:
+	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
+	spin_unlock(&session->lock);
+
+	return tgt_async_nop;
+}
+
+
+/**
+ * bnx2i_process_async_mesg - this function handles iscsi async message
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI ASYNC Message
+ */
+static void bnx2i_process_async_mesg(struct iscsi_session *session,
+				     struct bnx2i_conn *bnx2i_conn,
+				     struct cqe *cqe)
+{
+	struct bnx2i_async_msg *async_cqe;
+	struct iscsi_async *resp_hdr;
+	u8 async_event;
+
+	bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+
+	async_cqe = (struct bnx2i_async_msg *)cqe;
+	async_event = async_cqe->async_event;
+
+	if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) {
+		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
+				  "async: scsi events not supported\n");
+		return;
+	}
+
+	spin_lock(&session->lock);
+	resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
+	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+	resp_hdr->opcode = async_cqe->op_code;
+	resp_hdr->flags = 0x80;
+
+	memcpy(resp_hdr->lun, async_cqe->lun, 8);
+	resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn);
+	resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn);
+
+	resp_hdr->async_event = async_cqe->async_event;
+	resp_hdr->async_vcode = async_cqe->async_vcode;
+
+	resp_hdr->param1 = cpu_to_be16(async_cqe->param1);
+	resp_hdr->param2 = cpu_to_be16(async_cqe->param2);
+	resp_hdr->param3 = cpu_to_be16(async_cqe->param3);
+
+	__iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
+			     (struct iscsi_hdr *)resp_hdr, NULL, 0);
+	spin_unlock(&session->lock);
+}
+
+
+/**
+ * bnx2i_process_reject_mesg - process iscsi reject pdu
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI REJECT message
+ */
+static void bnx2i_process_reject_mesg(struct iscsi_session *session,
+				      struct bnx2i_conn *bnx2i_conn,
+				      struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct bnx2i_reject_msg *reject;
+	struct iscsi_reject *hdr;
+
+	reject = (struct bnx2i_reject_msg *) cqe;
+	if (reject->data_length) {
+		bnx2i_get_rq_buf(bnx2i_conn, conn->data, reject->data_length);
+		bnx2i_put_rq_buf(bnx2i_conn, 1);
+	} else
+		bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+
+	spin_lock(&session->lock);
+	hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
+	memset(hdr, 0, sizeof(struct iscsi_hdr));
+	hdr->opcode = reject->op_code;
+	hdr->reason = reject->reason;
+	hton24(hdr->dlength, reject->data_length);
+	hdr->max_cmdsn = cpu_to_be32(reject->max_cmd_sn);
+	hdr->exp_cmdsn = cpu_to_be32(reject->exp_cmd_sn);
+	hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
+	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
+			     reject->data_length);
+	spin_unlock(&session->lock);
+}
+
+/**
+ * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process command cleanup response CQE during conn shutdown or error recovery
+ */
+static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
+					   struct bnx2i_conn *bnx2i_conn,
+					   struct cqe *cqe)
+{
+	struct bnx2i_cleanup_response *cmd_clean_rsp;
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_task *task;
+
+	cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
+	spin_lock(&session->lock);
+	task = iscsi_itt_to_task(conn,
+			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
+	if (!task)
+		printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
+			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
+	spin_unlock(&session->lock);
+	complete(&bnx2i_conn->cmd_cleanup_cmpl);
+}
+
+
+
+/**
+ * bnx2i_process_new_cqes - process newly DMA'ed CQEs
+ * @bnx2i_conn:		iscsi connection
+ *
+ * this function is called by the generic KCQ handler to process all pending CQEs
+ */
+static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_session *session = conn->session;
+	struct qp_info *qp = &bnx2i_conn->ep->qp;
+	struct bnx2i_nop_in_msg *nopin;
+	int tgt_async_msg;
+
+	while (1) {
+		nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe;
+		if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
+			break;
+
+		if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx)))
+			break;
+
+		tgt_async_msg = 0;
+
+		switch (nopin->op_code) {
+		case ISCSI_OP_SCSI_CMD_RSP:
+		case ISCSI_OP_SCSI_DATA_IN:
+			bnx2i_process_scsi_cmd_resp(session, bnx2i_conn,
+						    qp->cq_cons_qe);
+			break;
+		case ISCSI_OP_LOGIN_RSP:
+			bnx2i_process_login_resp(session, bnx2i_conn,
+						 qp->cq_cons_qe);
+			break;
+		case ISCSI_OP_SCSI_TMFUNC_RSP:
+			bnx2i_process_tmf_resp(session, bnx2i_conn,
+					       qp->cq_cons_qe);
+			break;
+		case ISCSI_OP_LOGOUT_RSP:
+			bnx2i_process_logout_resp(session, bnx2i_conn,
+						  qp->cq_cons_qe);
+			break;
+		case ISCSI_OP_NOOP_IN:
+			if (bnx2i_process_nopin_mesg(session, bnx2i_conn,
+						     qp->cq_cons_qe))
+				tgt_async_msg = 1;
+			break;
+		case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION:
+			bnx2i_process_nopin_local_cmpl(session, bnx2i_conn,
+						       qp->cq_cons_qe);
+			break;
+		case ISCSI_OP_ASYNC_EVENT:
+			bnx2i_process_async_mesg(session, bnx2i_conn,
+						 qp->cq_cons_qe);
+			tgt_async_msg = 1;
+			break;
+		case ISCSI_OP_REJECT:
+			bnx2i_process_reject_mesg(session, bnx2i_conn,
+						  qp->cq_cons_qe);
+			break;
+		case ISCSI_OPCODE_CLEANUP_RESPONSE:
+			bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn,
+						       qp->cq_cons_qe);
+			break;
+		default:
+			printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
+					  nopin->op_code);
+		}
+
+		if (!tgt_async_msg)
+			bnx2i_conn->ep->num_active_cmds--;
+
+		/* clear the opcode field out in the production version only;
+		 * until beta, keep it intact as it helps debugging (context dump)
+		 * nopin->op_code = 0;
+		 */
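+		/* cq_req_sn runs from ISCSI_INITIAL_SN up through twice the
+		 * CQ size (checked against cqe_exp_seq_sn at the top of this
+		 * loop); wrap the expected sequence number once exhausted
+		 */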
+		qp->cqe_exp_seq_sn++;
+		if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
+			qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN;
+
+		if (qp->cq_cons_qe == qp->cq_last_qe) {
+			qp->cq_cons_qe = qp->cq_first_qe;
+			qp->cq_cons_idx = 0;
+		} else {
+			qp->cq_cons_qe++;
+			qp->cq_cons_idx++;
+		}
+	}
+	bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
+}
+
+/**
+ * bnx2i_fastpath_notification - process global event queue (KCQ)
+ * @hba:		adapter structure pointer
+ * @new_cqe_kcqe:	pointer to newly DMA'ed KCQE entry
+ *
+ * Fast path event notification handler, KCQ entry carries context id
+ *	of the connection that has 1 or more pending CQ entries
+ */
+static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
+					struct iscsi_kcqe *new_cqe_kcqe)
+{
+	struct bnx2i_conn *conn;
+	u32 iscsi_cid;
+
+	iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
+	conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+	if (!conn) {
+		printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
+		return;
+	}
+	if (!conn->ep) {
+		printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
+		return;
+	}
+
+	bnx2i_process_new_cqes(conn);
+}
+
+
+/**
+ * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE
+ * @hba:		adapter structure pointer
+ * @update_kcqe:	kcqe pointer
+ *
+ * CONN_UPDATE completion handler, this completes iSCSI connection FFP migration
+ */
+static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba,
+					   struct iscsi_kcqe *update_kcqe)
+{
+	struct bnx2i_conn *conn;
+	u32 iscsi_cid;
+
+	iscsi_cid = update_kcqe->iscsi_conn_id;
+	conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+	if (!conn) {
+		printk(KERN_ALERT "conn_update: cid %x not valid\n", iscsi_cid);
+		return;
+	}
+	if (!conn->ep) {
+		printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid);
+		return;
+	}
+
+	if (update_kcqe->completion_status) {
+		printk(KERN_ALERT "request failed cid %x\n", iscsi_cid);
+		conn->ep->state = EP_STATE_ULP_UPDATE_FAILED;
+	} else
+		conn->ep->state = EP_STATE_ULP_UPDATE_COMPL;
+
+	wake_up_interruptible(&conn->ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_recovery_que_add_conn - add connection to recovery queue
+ * @hba:		adapter structure pointer
+ * @bnx2i_conn:		iscsi connection
+ *
+ * Add connection to recovery queue and schedule adapter eh worker
+ */
+static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba,
+					struct bnx2i_conn *bnx2i_conn)
+{
+	iscsi_conn_failure(bnx2i_conn->cls_conn->dd_data,
+			   ISCSI_ERR_CONN_FAILED);
+}
+
+
+/**
+ * bnx2i_process_tcp_error - process error notification on a given connection
+ * @hba:		adapter structure pointer
+ * @tcp_err:		tcp error kcqe pointer
+ *
+ * handles tcp level error notifications from FW.
+ */
+static void bnx2i_process_tcp_error(struct bnx2i_hba *hba,
+				    struct iscsi_kcqe *tcp_err)
+{
+	struct bnx2i_conn *bnx2i_conn;
+	u32 iscsi_cid;
+
+	iscsi_cid = tcp_err->iscsi_conn_id;
+	bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+	if (!bnx2i_conn) {
+		printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
+		return;
+	}
+
+	printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n",
+			  iscsi_cid, tcp_err->completion_status);
+	bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
+}
+
+
+/**
+ * bnx2i_process_iscsi_error - process error notification on a given connection
+ * @hba:		adapter structure pointer
+ * @iscsi_err:		iscsi error kcqe pointer
+ *
+ * handles iscsi error notifications from the FW. Based on the initial
+ *	handshake, firmware classifies an iSCSI protocol / TCP RFC violation
+ *	as either a warning or an error indication. For an "error" indication
+ *	the driver initiates session recovery for that connection/session. For
+ *	a "warning" indication the driver puts out a system log message
+ *	(only one message per violation type for the life of the session,
+ *	to avoid unnecessarily overloading the system)
+ */
+static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
+				      struct iscsi_kcqe *iscsi_err)
+{
+	struct bnx2i_conn *bnx2i_conn;
+	u32 iscsi_cid;
+	char warn_notice[] = "iscsi_warning";
+	char error_notice[] = "iscsi_error";
+	char additional_notice[64];
+	char *message;
+	int need_recovery;
+	u64 err_mask64;
+
+	iscsi_cid = iscsi_err->iscsi_conn_id;
+	bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+	if (!bnx2i_conn) {
+		printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
+		return;
+	}
+
+	err_mask64 = (0x1ULL << iscsi_err->completion_status);
+
+	if (err_mask64 & iscsi_error_mask) {
+		need_recovery = 0;
+		message = warn_notice;
+	} else {
+		need_recovery = 1;
+		message = error_notice;
+	}
+
+	switch (iscsi_err->completion_status) {
+	case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR:
+		strcpy(additional_notice, "hdr digest err");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR:
+		strcpy(additional_notice, "data digest err");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE:
+		strcpy(additional_notice, "wrong opcode rcvd");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN:
+		strcpy(additional_notice, "AHS len > 0 rcvd");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT:
+		strcpy(additional_notice, "invalid ITT rcvd");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN:
+		strcpy(additional_notice, "wrong StatSN rcvd");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN:
+		strcpy(additional_notice, "wrong DataSN rcvd");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T:
+		strcpy(additional_notice, "pend R2T violation");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0:
+		strcpy(additional_notice, "ERL0, UO");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1:
+		strcpy(additional_notice, "ERL0, U1");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2:
+		strcpy(additional_notice, "ERL0, U2");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3:
+		strcpy(additional_notice, "ERL0, U3");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4:
+		strcpy(additional_notice, "ERL0, U4");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5:
+		strcpy(additional_notice, "ERL0, U5");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6:
+		strcpy(additional_notice, "ERL0, U6");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN:
+		strcpy(additional_notice, "invalid resi len");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN:
+		strcpy(additional_notice, "MRDSL violation");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO:
+		strcpy(additional_notice, "F-bit not set");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV:
+		strcpy(additional_notice, "invalid TTT");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN:
+		strcpy(additional_notice, "invalid DataSN");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN:
+		strcpy(additional_notice, "burst len violation");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF:
+		strcpy(additional_notice, "buf offset violation");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN:
+		strcpy(additional_notice, "invalid LUN field");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN:
+		strcpy(additional_notice, "invalid R2TSN field");
+		break;
+#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 	\
+	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0
+	case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0:
+		strcpy(additional_notice, "invalid cmd len1");
+		break;
+#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 	\
+	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1
+	case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1:
+		strcpy(additional_notice, "invalid cmd len2");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED:
+		strcpy(additional_notice,
+		       "pend r2t exceeds MaxOutstandingR2T value");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV:
+		strcpy(additional_notice, "TTT is rsvd");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN:
+		strcpy(additional_notice, "MBL violation");
+		break;
+#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO 	\
+	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO
+	case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO:
+		strcpy(additional_notice, "data seg len != 0");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN:
+		strcpy(additional_notice, "reject pdu len error");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN:
+		strcpy(additional_notice, "async pdu len error");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN:
+		strcpy(additional_notice, "nopin pdu len error");
+		break;
+#define BNX2_ERR_PEND_R2T_IN_CLEANUP			\
+	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP
+	case BNX2_ERR_PEND_R2T_IN_CLEANUP:
+		strcpy(additional_notice, "pend r2t in cleanup");
+		break;
+
+	case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT:
+		strcpy(additional_notice, "IP fragments rcvd");
+		break;
+	case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS:
+		strcpy(additional_notice, "IP options error");
+		break;
+	case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG:
+		strcpy(additional_notice, "urgent flag error");
+		break;
+	default:
+		printk(KERN_ALERT "iscsi_err - unknown err %x\n",
+				  iscsi_err->completion_status);
+	}
+
+	if (need_recovery) {
+		iscsi_conn_printk(KERN_ALERT,
+				  bnx2i_conn->cls_conn->dd_data,
+				  "bnx2i: %s - %s\n",
+				  message, additional_notice);
+
+		iscsi_conn_printk(KERN_ALERT,
+				  bnx2i_conn->cls_conn->dd_data,
+				  "conn_err - hostno %d conn %p, "
+				  "iscsi_cid %x cid %x\n",
+				  bnx2i_conn->hba->shost->host_no,
+				  bnx2i_conn, bnx2i_conn->ep->ep_iscsi_cid,
+				  bnx2i_conn->ep->ep_cid);
+		bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
+	} else if (!test_and_set_bit(iscsi_err->completion_status,
+				     (void *) &bnx2i_conn->violation_notified))
+			iscsi_conn_printk(KERN_ALERT,
+					  bnx2i_conn->cls_conn->dd_data,
+					  "bnx2i: %s - %s\n",
+					  message, additional_notice);
+}
+
+
+/**
+ * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion
+ * @hba:		adapter structure pointer
+ * @conn_destroy:	conn destroy kcqe pointer
+ *
+ * handles connection destroy completion request.
+ */
+static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
+					    struct iscsi_kcqe *conn_destroy)
+{
+	struct bnx2i_endpoint *ep;
+
+	ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id);
+	if (!ep) {
+		printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending "
+				  "offload request, unexpected complection\n");
+		return;
+	}
+
+	if (hba != ep->hba) {
+		printk(KERN_ALERT "conn destroy- error hba mis-match\n");
+		return;
+	}
+
+	if (conn_destroy->completion_status) {
+		printk(KERN_ALERT "conn_destroy_cmpl: op failed\n");
+		ep->state = EP_STATE_CLEANUP_FAILED;
+	} else
+		ep->state = EP_STATE_CLEANUP_CMPL;
+	wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion
+ * @hba:		adapter structure pointer
+ * @ofld_kcqe:		conn offload kcqe pointer
+ *
+ * handles initial connection offload completion, ep_connect() thread is
+ *	woken-up to continue with LLP connect process
+ */
+static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
+				    struct iscsi_kcqe *ofld_kcqe)
+{
+	u32 cid_addr;
+	struct bnx2i_endpoint *ep;
+	u32 cid_num;
+
+	ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
+	if (!ep) {
+		printk(KERN_ALERT "ofld_cmpl: no pend offload request\n");
+		return;
+	}
+
+	if (hba != ep->hba) {
+		printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
+		return;
+	}
+
+	if (ofld_kcqe->completion_status) {
+		if (ofld_kcqe->completion_status ==
+		    ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
+			printk(KERN_ALERT "bnx2i: unable to allocate"
+					  " iSCSI context resources\n");
+		ep->state = EP_STATE_OFLD_FAILED;
+	} else {
+		ep->state = EP_STATE_OFLD_COMPL;
+		cid_addr = ofld_kcqe->iscsi_conn_context_id;
+		cid_num = bnx2i_get_cid_num(ep);
+		ep->ep_cid = cid_addr;
+		ep->qp.ctx_base = NULL;
+	}
+	wake_up_interruptible(&ep->ofld_wait);
+}
+
+/**
+ * bnx2i_indicate_kcqe - dispatches pending KCQEs to their handlers
+ * @context:	adapter structure pointer
+ * @kcqe:	array of pending KCQE pointers
+ * @num_cqe:	number of KCQEs in @kcqe
+ *
+ * Generic KCQ event handler/dispatcher
+ */
+static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[],
+				u32 num_cqe)
+{
+	struct bnx2i_hba *hba = context;
+	int i = 0;
+	struct iscsi_kcqe *ikcqe = NULL;
+
+	while (i < num_cqe) {
+		ikcqe = (struct iscsi_kcqe *) kcqe[i++];
+
+		if (ikcqe->op_code ==
+		    ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION)
+			bnx2i_fastpath_notification(hba, ikcqe);
+		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_OFFLOAD_CONN)
+			bnx2i_process_ofld_cmpl(hba, ikcqe);
+		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_UPDATE_CONN)
+			bnx2i_process_update_conn_cmpl(hba, ikcqe);
+		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_INIT) {
+			if (ikcqe->completion_status !=
+			    ISCSI_KCQE_COMPLETION_STATUS_SUCCESS)
+				bnx2i_iscsi_license_error(hba,
+						ikcqe->completion_status);
+			else {
+				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+				bnx2i_get_link_state(hba);
+				printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: "
+						 "ISCSI_INIT passed\n",
+						 (u8)hba->pcidev->bus->number,
+						 hba->pci_devno,
+						 (u8)hba->pci_func);
+			}
+		} else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_DESTROY_CONN)
+			bnx2i_process_conn_destroy_cmpl(hba, ikcqe);
+		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_ISCSI_ERROR)
+			bnx2i_process_iscsi_error(hba, ikcqe);
+		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_TCP_ERROR)
+			bnx2i_process_tcp_error(hba, ikcqe);
+		else
+			printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
+					  ikcqe->op_code);
+	}
+}
+
+
+/**
+ * bnx2i_indicate_netevent - Generic netdev event handler
+ * @context:	adapter structure pointer
+ * @event:	event type
+ *
+ * Handles four netdev events, NETDEV_UP, NETDEV_DOWN,
+ *	NETDEV_GOING_DOWN and NETDEV_CHANGE
+ */
+static void bnx2i_indicate_netevent(void *context, unsigned long event)
+{
+	struct bnx2i_hba *hba = context;
+
+	switch (event) {
+	case NETDEV_UP:
+		if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+			bnx2i_send_fw_iscsi_init_msg(hba);
+		break;
+	case NETDEV_DOWN:
+		clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+		clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+		break;
+	case NETDEV_GOING_DOWN:
+		set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+		iscsi_host_for_each_session(hba->shost,
+					    bnx2i_drop_session);
+		break;
+	case NETDEV_CHANGE:
+		bnx2i_get_link_state(hba);
+		break;
+	default:
+		;
+	}
+}
+
+
+/**
+ * bnx2i_cm_connect_cmpl - process iscsi conn establishment completion
+ * @cm_sk: 		cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ *	indicate completion of option-2 TCP connect request.
+ */
+static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk)
+{
+	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+	if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
+		ep->state = EP_STATE_CONNECT_FAILED;
+	else if (test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags))
+		ep->state = EP_STATE_CONNECT_COMPL;
+	else
+		ep->state = EP_STATE_CONNECT_FAILED;
+
+	wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_close_cmpl - process tcp conn close completion
+ * @cm_sk:	cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ *	indicate completion of option-2 graceful TCP connect shutdown
+ */
+static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk)
+{
+	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+	ep->state = EP_STATE_DISCONN_COMPL;
+	wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion
+ * @cm_sk:	cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ *	indicate completion of option-2 abortive TCP connect termination
+ */
+static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk)
+{
+	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+	ep->state = EP_STATE_DISCONN_COMPL;
+	wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_remote_close - process received TCP FIN
+ * @cm_sk:	cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to indicate
+ *	async TCP events such as FIN
+ */
+static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk)
+{
+	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+	ep->state = EP_STATE_TCP_FIN_RCVD;
+	if (ep->conn)
+		bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
+}
+
+/**
+ * bnx2i_cm_remote_abort - process TCP RST and start conn cleanup
+ * @cm_sk:	cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ *	indicate async TCP events (RST) sent by the peer.
+ */
+static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
+{
+	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+	ep->state = EP_STATE_TCP_RST_RCVD;
+	if (ep->conn)
+		bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
+}
+
+
+static void bnx2i_send_nl_mesg(struct cnic_dev *dev, u32 msg_type,
+			       char *buf, u16 buflen)
+{
+	struct bnx2i_hba *hba;
+
+	hba = bnx2i_find_hba_for_cnic(dev);
+	if (!hba)
+		return;
+
+	if (iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport,
+				   msg_type, buf, buflen))
+		printk(KERN_ALERT "bnx2i: private nl message send error\n");
+
+}
+
+
+/**
+ * bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure
+ *			carrying callback function pointers
+ */
+struct cnic_ulp_ops bnx2i_cnic_cb = {
+	.cnic_init = bnx2i_ulp_init,
+	.cnic_exit = bnx2i_ulp_exit,
+	.cnic_start = bnx2i_start,
+	.cnic_stop = bnx2i_stop,
+	.indicate_kcqes = bnx2i_indicate_kcqe,
+	.indicate_netevent = bnx2i_indicate_netevent,
+	.cm_connect_complete = bnx2i_cm_connect_cmpl,
+	.cm_close_complete = bnx2i_cm_close_cmpl,
+	.cm_abort_complete = bnx2i_cm_abort_cmpl,
+	.cm_remote_close = bnx2i_cm_remote_close,
+	.cm_remote_abort = bnx2i_cm_remote_abort,
+	.iscsi_nl_send_msg = bnx2i_send_nl_mesg,
+	.owner = THIS_MODULE
+};
+
+
+/**
+ * bnx2i_map_ep_dbell_regs - map connection doorbell registers
+ * @ep: bnx2i endpoint
+ *
+ * maps the connection's SQ and RQ doorbell registers. 5706/5708/5709 host
+ *	these registers in BAR #0, whereas on 57710 they are accessed by
+ *	mapping BAR #1
+ */
+int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
+{
+	u32 cid_num;
+	u32 reg_off;
+	u32 first_l4l5;
+	u32 ctx_sz;
+	u32 config2;
+	resource_size_t reg_base;
+
+	cid_num = bnx2i_get_cid_num(ep);
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+		reg_base = pci_resource_start(ep->hba->pcidev,
+					      BNX2X_DOORBELL_PCI_BAR);
+		reg_off = PAGE_SIZE * (cid_num & 0x1FFFF) + DPM_TRIGER_TYPE;
+		ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+		goto arm_cq;
+	}
+
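+	/* 5706/5708/5709: SQ/RQ doorbells live in the context memory region
+	 * of BAR #0, at an offset derived from the connection's cid
+	 */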
+	reg_base = ep->hba->netdev->base_addr;
+	if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
+	    (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
+		config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
+		first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5;
+		ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3;
+		if (ctx_sz)
+			reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE
+				  + PAGE_SIZE *
+				  (((cid_num - first_l4l5) / ctx_sz) + 256);
+		else
+			reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
+	} else
+		/* 5709 device in normal mode and 5706/5708 devices */
+		reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
+
+	ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
+					  MB_KERNEL_CTX_SIZE);
+	if (!ep->qp.ctx_base)
+		return -ENOMEM;
+
+arm_cq:
+	bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE);
+	return 0;
+}

+ 438 - 0
drivers/scsi/bnx2i/bnx2i_init.c

@@ -0,0 +1,438 @@
+/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include "bnx2i.h"
+
+static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
+static u32 adapter_count;
+static int bnx2i_reg_device;
+
+#define DRV_MODULE_NAME		"bnx2i"
+#define DRV_MODULE_VERSION	"2.0.1d"
+#define DRV_MODULE_RELDATE	"Mar 25, 2009"
+
+static char version[] __devinitdata =
+		"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
+		" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+
+MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static DEFINE_RWLOCK(bnx2i_dev_lock);
+
+unsigned int event_coal_div = 1;
+module_param(event_coal_div, int, 0664);
+MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
+
+unsigned int en_tcp_dack = 1;
+module_param(en_tcp_dack, int, 0664);
+MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK");
+
+unsigned int error_mask1 = 0x00;
+module_param(error_mask1, int, 0664);
+MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1");
+
+unsigned int error_mask2 = 0x00;
+module_param(error_mask2, int, 0664);
+MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2");
+
+unsigned int sq_size;
+module_param(sq_size, int, 0664);
+MODULE_PARM_DESC(sq_size, "Configure SQ size");
+
+unsigned int rq_size = BNX2I_RQ_WQES_DEFAULT;
+module_param(rq_size, int, 0664);
+MODULE_PARM_DESC(rq_size, "Configure RQ size");
+
+u64 iscsi_error_mask = 0x00;
+
+static void bnx2i_unreg_one_device(struct bnx2i_hba *hba);
+
+
+/**
+ * bnx2i_identify_device - identifies NetXtreme II device type
+ * @hba: 		Adapter structure pointer
+ *
+ * This function identifies the NX2 device type and sets the appropriate
+ *	queue mailbox register access method; 5709 requires the driver to
+ *	access MBOX regs using *bin* mode
+ */
+void bnx2i_identify_device(struct bnx2i_hba *hba)
+{
+	hba->cnic_dev_type = 0;
+	if ((hba->pci_did == PCI_DEVICE_ID_NX2_5706) ||
+	    (hba->pci_did == PCI_DEVICE_ID_NX2_5706S))
+		set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
+	else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5708) ||
+	    (hba->pci_did == PCI_DEVICE_ID_NX2_5708S))
+		set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
+	else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5709) ||
+	    (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
+		set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
+		hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
+	} else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
+		   hba->pci_did == PCI_DEVICE_ID_NX2_57711)
+		set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
+}
+
+
+/**
+ * get_adapter_list_head - returns the first usable adapter from the
+ *	adapter list (one whose cnic provides cm_select_dev)
+ */
+struct bnx2i_hba *get_adapter_list_head(void)
+{
+	struct bnx2i_hba *hba = NULL;
+	struct bnx2i_hba *tmp_hba;
+
+	if (!adapter_count)
+		goto hba_not_found;
+
+	read_lock(&bnx2i_dev_lock);
+	list_for_each_entry(tmp_hba, &adapter_list, link) {
+		if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) {
+			hba = tmp_hba;
+			break;
+		}
+	}
+	read_unlock(&bnx2i_dev_lock);
+hba_not_found:
+	return hba;
+}
+
+
+/**
+ * bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance
+ * @cnic:	pointer to cnic device instance
+ *
+ */
+struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic)
+{
+	struct bnx2i_hba *hba, *temp;
+
+	read_lock(&bnx2i_dev_lock);
+	list_for_each_entry_safe(hba, temp, &adapter_list, link) {
+		if (hba->cnic == cnic) {
+			read_unlock(&bnx2i_dev_lock);
+			return hba;
+		}
+	}
+	read_unlock(&bnx2i_dev_lock);
+	return NULL;
+}
+
+
+/**
+ * bnx2i_start - cnic callback to initialize & start adapter instance
+ * @handle:	transparent handle pointing to adapter structure
+ *
+ * This function maps the adapter structure to the pcidev structure and
+ *	initiates the firmware handshake to enable/initialize the on-chip
+ *	iscsi components. This bnx2i - cnic interface api callback is issued
+ *	after the following 2 conditions are met -
+ *	  a) the underlying network interface is up (marked by the 'NETDEV_UP'
+ *		event from netdev)
+ *	  b) the bnx2i adapter instance is registered
+ */
+void bnx2i_start(void *handle)
+{
+#define BNX2I_INIT_POLL_TIME	(1000 / HZ)
+	struct bnx2i_hba *hba = handle;
+	int i = HZ;
+
+	bnx2i_send_fw_iscsi_init_msg(hba);
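+	/* poll roughly once per jiffy, for up to HZ iterations (~1 second),
+	 * waiting for the ISCSI_INIT completion to mark the adapter up
+	 */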
+	while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
+		msleep(BNX2I_INIT_POLL_TIME);
+}
+
+
+/**
+ * bnx2i_stop - cnic callback to shutdown adapter instance
+ * @handle:	transparent handle pointing to adapter structure
+ *
+ * driver checks whether the adapter is already in shutdown mode and, if
+ *	not, starts the shutdown process
+ */
+void bnx2i_stop(void *handle)
+{
+	struct bnx2i_hba *hba = handle;
+
+	/* check if cleanup happened in GOING_DOWN context */
+	clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+	if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
+				&hba->adapter_state))
+		iscsi_host_for_each_session(hba->shost,
+					    bnx2i_drop_session);
+}
+
+/**
+ * bnx2i_register_device - register bnx2i adapter instance with the cnic driver
+ * @hba:	Adapter instance to register
+ *
+ * registers bnx2i adapter instance with the cnic driver while holding the
+ *	adapter structure lock
+ */
+void bnx2i_register_device(struct bnx2i_hba *hba)
+{
+	if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
+	    test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+		return;
+	}
+
+	hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);
+
+	spin_lock(&hba->lock);
+	bnx2i_reg_device++;
+	spin_unlock(&hba->lock);
+
+	set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+}
+
+
+/**
+ * bnx2i_reg_dev_all - registers all adapter instances with the cnic driver
+ *
+ * registers all bnx2i adapter instances with the cnic driver while holding
+ *	the global resource lock
+ */
+void bnx2i_reg_dev_all(void)
+{
+	struct bnx2i_hba *hba, *temp;
+
+	read_lock(&bnx2i_dev_lock);
+	list_for_each_entry_safe(hba, temp, &adapter_list, link)
+		bnx2i_register_device(hba);
+	read_unlock(&bnx2i_dev_lock);
+}
+
+
+/**
+ * bnx2i_unreg_one_device - unregister adapter instance with the cnic driver
+ * @hba:	Adapter instance to unregister
+ *
+ * unregisters the bnx2i adapter instance from the cnic driver while holding
+ *	the adapter structure lock
+ */
+static void bnx2i_unreg_one_device(struct bnx2i_hba *hba)
+{
+	if (hba->ofld_conns_active ||
+	    !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) ||
+	    test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state))
+		return;
+
+	hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+
+	spin_lock(&hba->lock);
+	bnx2i_reg_device--;
+	spin_unlock(&hba->lock);
+
+	/* ep_disconnect could come before NETDEV_DOWN, driver won't
+	 * see NETDEV_DOWN as it already unregistered itself.
+	 */
+	hba->adapter_state = 0;
+	clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+}
+
+/**
+ * bnx2i_unreg_dev_all - unregisters all bnx2i instances with the cnic driver
+ *
+ * unregisters all bnx2i adapter instances with the cnic driver while holding
+ *	the global resource lock
+ */
+void bnx2i_unreg_dev_all(void)
+{
+	struct bnx2i_hba *hba, *temp;
+
+	read_lock(&bnx2i_dev_lock);
+	list_for_each_entry_safe(hba, temp, &adapter_list, link)
+		bnx2i_unreg_one_device(hba);
+	read_unlock(&bnx2i_dev_lock);
+}
+
+
+/**
+ * bnx2i_init_one - initialize an adapter instance and allocate memory resources
+ * @hba:	bnx2i adapter instance
+ * @cnic:	cnic device handle
+ *
+ * Global resource lock and host adapter lock are held during critical sections
+ *	below. This routine is called from cnic_register_driver() context and the
+ *	workhorse thread which does the majority of device specific initialization
+ */
+static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
+{
+	int rc;
+
+	read_lock(&bnx2i_dev_lock);
+	if (bnx2i_reg_device &&
+	    !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+		rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
+		if (rc)		/* duplicate registration */
+			printk(KERN_ERR "bnx2i- dev reg failed\n");
+
+		spin_lock(&hba->lock);
+		bnx2i_reg_device++;
+		hba->age++;
+		spin_unlock(&hba->lock);
+
+		set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+	}
+	read_unlock(&bnx2i_dev_lock);
+
+	write_lock(&bnx2i_dev_lock);
+	list_add_tail(&hba->link, &adapter_list);
+	adapter_count++;
+	write_unlock(&bnx2i_dev_lock);
+	return 0;
+}
+
+
+/**
+ * bnx2i_ulp_init - initialize an adapter instance
+ * @dev:	cnic device handle
+ *
+ * Called from cnic_register_driver() context to initialize all enumerated
+ *	cnic devices. This routine allocates the adapter structure and other
+ *	device specific resources.
+ */
+void bnx2i_ulp_init(struct cnic_dev *dev)
+{
+	struct bnx2i_hba *hba;
+
+	/* Allocate a HBA structure for this device */
+	hba = bnx2i_alloc_hba(dev);
+	if (!hba) {
+		printk(KERN_ERR "bnx2i init: hba initialization failed\n");
+		return;
+	}
+
+	/* Get PCI related information and update hba struct members */
+	clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+	if (bnx2i_init_one(hba, dev)) {
+		printk(KERN_ERR "bnx2i - hba %p init failed\n", hba);
+		bnx2i_free_hba(hba);
+	} else
+		hba->cnic = dev;
+}
+
+
+/**
+ * bnx2i_ulp_exit - shuts down adapter instance and frees all resources
+ * @dev:	cnic device handle
+ *
+ */
+void bnx2i_ulp_exit(struct cnic_dev *dev)
+{
+	struct bnx2i_hba *hba;
+
+	hba = bnx2i_find_hba_for_cnic(dev);
+	if (!hba) {
+		printk(KERN_INFO "bnx2i_ulp_exit: hba not "
+				 "found, dev 0x%p\n", dev);
+		return;
+	}
+	write_lock(&bnx2i_dev_lock);
+	list_del_init(&hba->link);
+	adapter_count--;
+
+	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+		hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+		clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+
+		spin_lock(&hba->lock);
+		bnx2i_reg_device--;
+		spin_unlock(&hba->lock);
+	}
+	write_unlock(&bnx2i_dev_lock);
+
+	bnx2i_free_hba(hba);
+}
+
+
+/**
+ * bnx2i_mod_init - module init entry point
+ *
+ * initializes driver wide global data structures such as the endpoint pool,
+ *	tcp port manager/queue and sysfs. Finally the driver registers itself
+ *	with the cnic module
+ */
+static int __init bnx2i_mod_init(void)
+{
+	int err;
+
+	printk(KERN_INFO "%s", version);
+
+	if (sq_size && !is_power_of_2(sq_size))
+		sq_size = roundup_pow_of_two(sq_size);
+
+	bnx2i_scsi_xport_template =
+			iscsi_register_transport(&bnx2i_iscsi_transport);
+	if (!bnx2i_scsi_xport_template) {
+		printk(KERN_ERR "Could not register bnx2i transport.\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb);
+	if (err) {
+		printk(KERN_ERR "Could not register bnx2i cnic driver.\n");
+		goto unreg_xport;
+	}
+
+	return 0;
+
+unreg_xport:
+	iscsi_unregister_transport(&bnx2i_iscsi_transport);
+out:
+	return err;
+}
+
+
+/**
+ * bnx2i_mod_exit - module cleanup/exit entry point
+ *
+ * Global resource lock and host adapter lock are held during critical sections
+ *	in this function. The driver browses through the adapter list, cleans up
+ *	each instance, unregisters the iscsi transport name and finally
+ *	unregisters itself with the cnic module
+ */
+static void __exit bnx2i_mod_exit(void)
+{
+	struct bnx2i_hba *hba;
+
+	write_lock(&bnx2i_dev_lock);
+	while (!list_empty(&adapter_list)) {
+		hba = list_entry(adapter_list.next, struct bnx2i_hba, link);
+		list_del(&hba->link);
+		adapter_count--;
+
+		if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+			hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+			clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+			bnx2i_reg_device--;
+		}
+
+		write_unlock(&bnx2i_dev_lock);
+		bnx2i_free_hba(hba);
+		write_lock(&bnx2i_dev_lock);
+	}
+	write_unlock(&bnx2i_dev_lock);
+
+	iscsi_unregister_transport(&bnx2i_iscsi_transport);
+	cnic_unregister_driver(CNIC_ULP_ISCSI);
+}
+
+module_init(bnx2i_mod_init);
+module_exit(bnx2i_mod_exit);

+ 2064 - 0
drivers/scsi/bnx2i/bnx2i_iscsi.c

@@ -0,0 +1,2064 @@
+/*
+ * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/libiscsi.h>
+#include "bnx2i.h"
+
+struct scsi_transport_template *bnx2i_scsi_xport_template;
+struct iscsi_transport bnx2i_iscsi_transport;
+static struct scsi_host_template bnx2i_host_template;
+
+/*
+ * Global endpoint resource info
+ */
+static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */
+
+
+static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
+{
+	int retval = 0;
+
+	if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
+	    test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
+	    test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
+		retval = -EPERM;
+	return retval;
+}
+
+/**
+ * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks
+ * @cmd:		iscsi cmd struct pointer
+ * @buf_off:		absolute buffer offset
+ * @start_bd_off:	u32 pointer to return the offset within the BD
+ *			indicated by 'start_bd_idx' on which 'buf_off' falls
+ * @start_bd_idx:	index of the BD on which 'buf_off' falls
+ *
+ * identifies & marks various bd info for scsi command's imm data,
+ * unsolicited data and the first solicited data seq.
+ */
+static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
+				       u32 *start_bd_off, u32 *start_bd_idx)
+{
+	struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
+	u32 cur_offset = 0;
+	u32 cur_bd_idx = 0;
+
+	if (buf_off) {
+		while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
+			cur_offset += bd_tbl->buffer_length;
+			cur_bd_idx++;
+			bd_tbl++;
+		}
+	}
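+	/* e.g. with 4 KB BDs and buf_off == 5000, the walk stops at
+	 * cur_bd_idx == 1 with cur_offset == 4096, leaving a 904-byte
+	 * offset into BD #1 (illustrative sizes)
+	 */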
+
+	*start_bd_off = buf_off - cur_offset;
+	*start_bd_idx = cur_bd_idx;
+}
+
+/**
+ * bnx2i_setup_write_cmd_bd_info - sets up BD various information
+ * @task:	transport layer's cmd struct pointer
+ *
+ * identifies & marks various bd info for scsi command's immediate data,
+ * unsolicited data and first solicited data seq which includes BD start
+ * index & BD buf off. his function takes into account iscsi parameter such
+ * as immediate data and unsolicited data is support on this connection.
+ */
+static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
+{
+	struct bnx2i_cmd *cmd = task->dd_data;
+	u32 start_bd_offset;
+	u32 start_bd_idx;
+	u32 buffer_offset = 0;
+	u32 cmd_len = cmd->req.total_data_transfer_length;
+
+	/* if ImmediateData is turned off & InitialR2T is turned on,
+	 * there will be no immediate or unsolicited data, just return.
+	 */
+	if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
+		return;
+
+	/* Immediate data */
+	buffer_offset += task->imm_count;
+	if (task->imm_count == cmd_len)
+		return;
+
+	if (iscsi_task_has_unsol_data(task)) {
+		bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
+					   &start_bd_offset, &start_bd_idx);
+		cmd->req.ud_buffer_offset = start_bd_offset;
+		cmd->req.ud_start_bd_index = start_bd_idx;
+		buffer_offset += task->unsol_r2t.data_length;
+	}
+
+	if (buffer_offset != cmd_len) {
+		bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
+					   &start_bd_offset, &start_bd_idx);
+		if ((start_bd_offset > task->conn->session->first_burst) ||
+		    (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
+			int i = 0;
+
+			iscsi_conn_printk(KERN_ALERT, task->conn,
+					  "bnx2i- error, buf offset 0x%x "
+					  "bd_valid %d use_sg %d\n",
+					  buffer_offset, cmd->io_tbl.bd_valid,
+					  scsi_sg_count(cmd->scsi_cmd));
+			for (i = 0; i < cmd->io_tbl.bd_valid; i++)
+				iscsi_conn_printk(KERN_ALERT, task->conn,
+						  "bnx2i err, bd[%d]: len %x\n",
+						  i, cmd->io_tbl.bd_tbl[i].\
+						  buffer_length);
+		}
+		cmd->req.sd_buffer_offset = start_bd_offset;
+		cmd->req.sd_start_bd_index = start_bd_idx;
+	}
+}
+
+
+
+/**
+ * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table
+ * @hba:	adapter instance
+ * @cmd:	iscsi cmd struct pointer
+ *
+ * map SG list
+ */
+static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
+{
+	struct scsi_cmnd *sc = cmd->scsi_cmd;
+	struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
+	struct scatterlist *sg;
+	int byte_count = 0;
+	int bd_count = 0;
+	int sg_count;
+	int sg_len;
+	u64 addr;
+	int i;
+
+	BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);
+
+	sg_count = scsi_dma_map(sc);
+
+	scsi_for_each_sg(sc, sg, sg_count, i) {
+		sg_len = sg_dma_len(sg);
+		addr = (u64) sg_dma_address(sg);
+		bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
+		bd[bd_count].buffer_addr_hi = addr >> 32;
+		bd[bd_count].buffer_length = sg_len;
+		bd[bd_count].flags = 0;
+		if (bd_count == 0)
+			bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;
+
+		byte_count += sg_len;
+		bd_count++;
+	}
+
+	if (bd_count)
+		bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;
+
+	BUG_ON(byte_count != scsi_bufflen(sc));
+	return bd_count;
+}
+
+/**
+ * bnx2i_iscsi_map_sg_list - maps SG list
+ * @cmd:	iscsi cmd struct pointer
+ *
+ * creates BD list table for the command
+ */
+static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
+{
+	int bd_count;
+
+	bd_count  = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
+	if (!bd_count) {
+		struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
+
+		bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
+		bd[0].buffer_length = bd[0].flags = 0;
+	}
+	cmd->io_tbl.bd_valid = bd_count;
+}
+
+
+/**
+ * bnx2i_iscsi_unmap_sg_list - unmaps SG list
+ * @cmd:	iscsi cmd struct pointer
+ *
+ * unmap IO buffers and invalidate the BD table
+ */
+void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
+{
+	struct scsi_cmnd *sc = cmd->scsi_cmd;
+
+	if (cmd->io_tbl.bd_valid && sc) {
+		scsi_dma_unmap(sc);
+		cmd->io_tbl.bd_valid = 0;
+	}
+}
+
+static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
+{
+	memset(&cmd->req, 0x00, sizeof(cmd->req));
+	cmd->req.op_code = 0xFF;
+	cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
+	cmd->req.bd_list_addr_hi =
+		(u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);
+
+}
+
+
+/**
+ * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
+ * @hba:	pointer to adapter instance
+ * @bnx2i_conn:	pointer to iscsi connection
+ * @iscsi_cid:	iscsi context ID, range 0 - (MAX_CONN - 1)
+ *
+ * update iscsi cid table entry with connection pointer. This enables
+ *	driver to quickly get hold of connection structure pointer in
+ *	completion/interrupt thread using iscsi context ID
+ */
+static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba,
+					struct bnx2i_conn *bnx2i_conn,
+					u32 iscsi_cid)
+{
+	if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) {
+		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
+				 "conn bind - entry #%d not free\n", iscsi_cid);
+		return -EBUSY;
+	}
+
+	hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn;
+	return 0;
+}
+
+
+/**
+ * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr
+ * @hba:	pointer to adapter instance
+ * @iscsi_cid:	iscsi context ID, range 0 - (MAX_CONN - 1)
+ */
+struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
+					  u16 iscsi_cid)
+{
+	if (!hba->cid_que.conn_cid_tbl) {
+		printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
+		return NULL;
+
+	} else if (iscsi_cid >= hba->max_active_conns) {
+		printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
+		return NULL;
+	}
+	return hba->cid_que.conn_cid_tbl[iscsi_cid];
+}
+
+
+/**
+ * bnx2i_alloc_iscsi_cid - allocates an iscsi_cid from the free pool
+ * @hba:	pointer to adapter instance
+ */
+static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
+{
+	int idx;
+
+	if (!hba->cid_que.cid_free_cnt)
+		return -1;
+
+	idx = hba->cid_que.cid_q_cons_idx;
+	hba->cid_que.cid_q_cons_idx++;
+	if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
+		hba->cid_que.cid_q_cons_idx = 0;
+
+	hba->cid_que.cid_free_cnt--;
+	return hba->cid_que.cid_que[idx];
+}
+
+
+/**
+ * bnx2i_free_iscsi_cid - returns an iscsi_cid back to the free pool
+ * @hba: 		pointer to adapter instance
+ * @iscsi_cid:		iscsi context ID to free
+ */
+static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
+{
+	int idx;
+
+	if (iscsi_cid == (u16) -1)
+		return;
+
+	hba->cid_que.cid_free_cnt++;
+
+	idx = hba->cid_que.cid_q_prod_idx;
+	hba->cid_que.cid_que[idx] = iscsi_cid;
+	hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
+	hba->cid_que.cid_q_prod_idx++;
+	if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
+		hba->cid_que.cid_q_prod_idx = 0;
+}
+
+
+/**
+ * bnx2i_setup_free_cid_que - sets up free iscsi cid queue
+ * @hba:	pointer to adapter instance
+ *
+ * allocates memory for the iscsi cid queue & 'cid - conn ptr' mapping table,
+ *	and initializes table attributes
+ */
+static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
+{
+	int mem_size;
+	int i;
+
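+	/* round each allocation up to a whole number of pages:
+	 * (size + PAGE_SIZE - 1) & PAGE_MASK, so e.g. a 512-byte cid
+	 * queue still consumes one full 4KB page
+	 */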
+	mem_size = hba->max_active_conns * sizeof(u32);
+	mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+	hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL);
+	if (!hba->cid_que.cid_que_base)
+		return -ENOMEM;
+
+	mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *);
+	mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL);
+	if (!hba->cid_que.conn_cid_tbl) {
+		kfree(hba->cid_que.cid_que_base);
+		hba->cid_que.cid_que_base = NULL;
+		return -ENOMEM;
+	}
+
+	hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
+	hba->cid_que.cid_q_prod_idx = 0;
+	hba->cid_que.cid_q_cons_idx = 0;
+	hba->cid_que.cid_q_max_idx = hba->max_active_conns;
+	hba->cid_que.cid_free_cnt = hba->max_active_conns;
+
+	for (i = 0; i < hba->max_active_conns; i++) {
+		hba->cid_que.cid_que[i] = i;
+		hba->cid_que.conn_cid_tbl[i] = NULL;
+	}
+	return 0;
+}
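
The cid queue set up above is a fixed-size circular free list: allocation
consumes entries at cid_q_cons_idx, release produces them at cid_q_prod_idx,
and cid_free_cnt tracks occupancy. A stand-alone sketch of the same
invariant, with illustrative names:

    #include <assert.h>
    #include <stdint.h>

    #define MAX_CONNS 4

    static uint32_t q[MAX_CONNS];
    static int cons, prod, free_cnt;

    static void ring_init(void)
    {
        for (int i = 0; i < MAX_CONNS; i++)
            q[i] = i;                   /* every cid starts out free */
        cons = prod = 0;
        free_cnt = MAX_CONNS;
    }

    static int ring_alloc(void)
    {
        if (!free_cnt)
            return -1;                  /* pool exhausted */
        int cid = q[cons];
        cons = (cons + 1) % MAX_CONNS;  /* the driver wraps with an if */
        free_cnt--;
        return cid;
    }

    static void ring_free(int cid)
    {
        q[prod] = cid;
        prod = (prod + 1) % MAX_CONNS;
        free_cnt++;
    }

    int main(void)
    {
        ring_init();
        int a = ring_alloc(), b = ring_alloc();
        ring_free(a);
        ring_free(b);
        assert(free_cnt == MAX_CONNS);  /* everything returned to the pool */
        return 0;
    }

Because frees never exceed prior allocations, prod can never lap cons, which
is why neither index needs a full/empty check beyond free_cnt.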
+
+
+/**
+ * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources
+ * @hba:	pointer to adapter instance
+ */
+static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
+{
+	kfree(hba->cid_que.cid_que_base);
+	hba->cid_que.cid_que_base = NULL;
+
+	kfree(hba->cid_que.conn_cid_tbl);
+	hba->cid_que.conn_cid_tbl = NULL;
+}
+
+
+/**
+ * bnx2i_alloc_ep - allocates ep structure from global pool
+ * @hba:	pointer to adapter instance
+ *
+ * routine allocates a free endpoint structure from global pool and
+ *	a tcp port to be used for this connection.  Global resource lock,
+ *	'bnx2i_resc_lock' is held while accessing shared global data structures
+ */
+static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
+{
+	struct iscsi_endpoint *ep;
+	struct bnx2i_endpoint *bnx2i_ep;
+
+	ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
+	if (!ep) {
+		printk(KERN_ERR "bnx2i: Could not allocate ep\n");
+		return NULL;
+	}
+
+	bnx2i_ep = ep->dd_data;
+	INIT_LIST_HEAD(&bnx2i_ep->link);
+	bnx2i_ep->state = EP_STATE_IDLE;
+	bnx2i_ep->hba = hba;
+	bnx2i_ep->hba_age = hba->age;
+	hba->ofld_conns_active++;
+	init_waitqueue_head(&bnx2i_ep->ofld_wait);
+	return ep;
+}
+
+
+/**
+ * bnx2i_free_ep - free endpoint
+ * @ep:		pointer to iscsi endpoint structure
+ */
+static void bnx2i_free_ep(struct iscsi_endpoint *ep)
+{
+	struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bnx2i_resc_lock, flags);
+	bnx2i_ep->state = EP_STATE_IDLE;
+	bnx2i_ep->hba->ofld_conns_active--;
+
+	bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
+	if (bnx2i_ep->conn) {
+		bnx2i_ep->conn->ep = NULL;
+		bnx2i_ep->conn = NULL;
+	}
+
+	bnx2i_ep->hba = NULL;
+	spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
+	iscsi_destroy_endpoint(ep);
+}
+
+
+/**
+ * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command
+ * @hba:	adapter instance pointer
+ * @session:	iscsi session pointer
+ * @cmd:	iscsi command structure
+ */
+static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
+			   struct bnx2i_cmd *cmd)
+{
+	struct io_bdt *io = &cmd->io_tbl;
+	struct iscsi_bd *bd;
+
+	io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+					ISCSI_MAX_BDS_PER_CMD * sizeof(*bd),
+					&io->bd_tbl_dma, GFP_KERNEL);
+	if (!io->bd_tbl) {
+		iscsi_session_printk(KERN_ERR, session, "Could not "
+				     "allocate bdt.\n");
+		return -ENOMEM;
+	}
+	io->bd_valid = 0;
+	return 0;
+}
+
+/**
+ * bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table
+ * @hba:	adapter instance pointer
+ * @session:	iscsi session pointer
+ */
+static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
+				   struct iscsi_session *session)
+{
+	int i;
+
+	for (i = 0; i < session->cmds_max; i++) {
+		struct iscsi_task *task = session->cmds[i];
+		struct bnx2i_cmd *cmd = task->dd_data;
+
+		if (cmd->io_tbl.bd_tbl)
+			dma_free_coherent(&hba->pcidev->dev,
+					  ISCSI_MAX_BDS_PER_CMD *
+					  sizeof(struct iscsi_bd),
+					  cmd->io_tbl.bd_tbl,
+					  cmd->io_tbl.bd_tbl_dma);
+	}
+}
+
+
+/**
+ * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session
+ * @hba:	adapter instance pointer
+ * @session:	iscsi session pointer
+ */
+static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
+				struct iscsi_session *session)
+{
+	int i;
+
+	for (i = 0; i < session->cmds_max; i++) {
+		struct iscsi_task *task = session->cmds[i];
+		struct bnx2i_cmd *cmd = task->dd_data;
+
+		task->hdr = &cmd->hdr;
+		task->hdr_max = sizeof(struct iscsi_hdr);
+
+		if (bnx2i_alloc_bdt(hba, session, cmd))
+			goto free_bdts;
+	}
+
+	return 0;
+
+free_bdts:
+	bnx2i_destroy_cmd_pool(hba, session);
+	return -ENOMEM;
+}
+
+
+/**
+ * bnx2i_setup_mp_bdt - allocate BD table resources
+ * @hba:	pointer to adapter structure
+ *
+ * Allocate memory for dummy buffer and associated BD
+ * table to be used by middle path (MP) requests
+ */
+static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
+{
+	int rc = 0;
+	struct iscsi_bd *mp_bdt;
+	u64 addr;
+
+	hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+					    &hba->mp_bd_dma, GFP_KERNEL);
+	if (!hba->mp_bd_tbl) {
+		printk(KERN_ERR "unable to allocate Middle Path BDT\n");
+		rc = -1;
+		goto out;
+	}
+
+	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+					       &hba->dummy_buf_dma, GFP_KERNEL);
+	if (!hba->dummy_buffer) {
+		printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  hba->mp_bd_tbl, hba->mp_bd_dma);
+		hba->mp_bd_tbl = NULL;
+		rc = -1;
+		goto out;
+	}
+
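+	/* a single BD flagged both FIRST and LAST below forms a complete
+	 * one-entry chain covering the dummy page
+	 */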
+	mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
+	addr = (unsigned long) hba->dummy_buf_dma;
+	mp_bdt->buffer_addr_lo = addr & 0xffffffff;
+	mp_bdt->buffer_addr_hi = addr >> 32;
+	mp_bdt->buffer_length = PAGE_SIZE;
+	mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+			ISCSI_BD_FIRST_IN_BD_CHAIN;
+out:
+	return rc;
+}
+
+
+/**
+ * bnx2i_free_mp_bdt - releases the middle path (MP) BD table resources
+ * @hba:	pointer to adapter instance
+ *
+ * free MP dummy buffer and associated BD table
+ */
+static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
+{
+	if (hba->mp_bd_tbl) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  hba->mp_bd_tbl, hba->mp_bd_dma);
+		hba->mp_bd_tbl = NULL;
+	}
+	if (hba->dummy_buffer) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  hba->dummy_buffer, hba->dummy_buf_dma);
+		hba->dummy_buffer = NULL;
+	}
+}
+
+/**
+ * bnx2i_drop_session - notifies iscsid of connection error.
+ * @cls_session:	iscsi cls session pointer
+ *
+ * This notifies iscsid that there is an error, so it can initiate
+ * recovery.
+ *
+ * This relies on the caller using the iscsi class iterator so the object
+ * is refcounted and does not disappear from under us.
+ */
+void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
+{
+	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
+/**
+ * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
+ * @hba:	pointer to adapter instance
+ * @ep:		pointer to endpoint (transport identifier) structure
+ *
+ * EP destroy queue manager
+ */
+static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
+				     struct bnx2i_endpoint *ep)
+{
+	write_lock_bh(&hba->ep_rdwr_lock);
+	list_add_tail(&ep->link, &hba->ep_destroy_list);
+	write_unlock_bh(&hba->ep_rdwr_lock);
+	return 0;
+}
+
+/**
+ * bnx2i_ep_destroy_list_del - removes an entry from the EP destroy list
+ * @hba: 		pointer to adapter instance
+ * @ep: 		pointer to endpoint (transport identifier) structure
+ *
+ * EP destroy queue manager
+ */
+static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
+				     struct bnx2i_endpoint *ep)
+{
+	write_lock_bh(&hba->ep_rdwr_lock);
+	list_del_init(&ep->link);
+	write_unlock_bh(&hba->ep_rdwr_lock);
+
+	return 0;
+}
+
+/**
+ * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
+ * @hba:	pointer to adapter instance
+ * @ep:		pointer to endpoint (transport identifier) structure
+ *
+ * pending conn offload completion queue manager
+ */
+static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
+				  struct bnx2i_endpoint *ep)
+{
+	write_lock_bh(&hba->ep_rdwr_lock);
+	list_add_tail(&ep->link, &hba->ep_ofld_list);
+	write_unlock_bh(&hba->ep_rdwr_lock);
+	return 0;
+}
+
+/**
+ * bnx2i_ep_ofld_list_del - removes an entry from the ep offload pending list
+ * @hba: 		pointer to adapter instance
+ * @ep: 		pointer to endpoint (transport identifier) structure
+ *
+ * pending conn offload completion queue manager
+ */
+static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
+				  struct bnx2i_endpoint *ep)
+{
+	write_lock_bh(&hba->ep_rdwr_lock);
+	list_del_init(&ep->link);
+	write_unlock_bh(&hba->ep_rdwr_lock);
+	return 0;
+}
+
+
+/**
+ * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints
+ *
+ * @hba: 		pointer to adapter instance
+ * @iscsi_cid:		iscsi context ID to find
+ *
+ */
+struct bnx2i_endpoint *
+bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
+{
+	struct list_head *list;
+	struct list_head *tmp;
+	struct bnx2i_endpoint *ep = NULL;
+
+	read_lock_bh(&hba->ep_rdwr_lock);
+	list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
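+		/* the cast assumes 'link' is the first member of
+		 * struct bnx2i_endpoint; list_entry() would make that
+		 * layout dependency explicit
+		 */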
+		ep = (struct bnx2i_endpoint *)list;
+
+		if (ep->ep_iscsi_cid == iscsi_cid)
+			break;
+		ep = NULL;
+	}
+	read_unlock_bh(&hba->ep_rdwr_lock);
+
+	if (!ep)
+		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
+	return ep;
+}
+
+
+/**
+ * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
+ * @hba: 		pointer to adapter instance
+ * @iscsi_cid:		iscsi context ID to find
+ *
+ */
+struct bnx2i_endpoint *
+bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
+{
+	struct list_head *list;
+	struct list_head *tmp;
+	struct bnx2i_endpoint *ep = NULL;
+
+	read_lock_bh(&hba->ep_rdwr_lock);
+	list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
+		ep = (struct bnx2i_endpoint *)list;
+
+		if (ep->ep_iscsi_cid == iscsi_cid)
+			break;
+		ep = NULL;
+	}
+	read_unlock_bh(&hba->ep_rdwr_lock);
+
+	if (!ep)
+		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
+
+	return ep;
+}
+
+/**
+ * bnx2i_setup_host_queue_size - assigns shost->can_queue param
+ * @hba:	pointer to adapter instance
+ * @shost:	scsi host pointer
+ *
+ * Initializes 'can_queue' parameter based on how many outstanding commands
+ * 	the device can handle. Each device type (5708/5709/57710) has
+ *	different capabilities
+ */
+static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
+					struct Scsi_Host *shost)
+{
+	if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
+		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
+	else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
+		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
+	else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
+	else
+		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
+}
+
+
+/**
+ * bnx2i_alloc_hba - allocate and init adapter instance
+ * @cnic:	cnic device pointer
+ *
+ * allocate & initialize adapter structure and call other
+ *	support routines to do per adapter initialization
+ */
+struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
+{
+	struct Scsi_Host *shost;
+	struct bnx2i_hba *hba;
+
+	shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
+	if (!shost)
+		return NULL;
+	shost->dma_boundary = cnic->pcidev->dma_mask;
+	shost->transportt = bnx2i_scsi_xport_template;
+	shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
+	shost->max_channel = 0;
+	shost->max_lun = 512;
+	shost->max_cmd_len = 16;
+
+	hba = iscsi_host_priv(shost);
+	hba->shost = shost;
+	hba->netdev = cnic->netdev;
+	/* Get PCI related information and update hba struct members */
+	hba->pcidev = cnic->pcidev;
+	pci_dev_get(hba->pcidev);
+	hba->pci_did = hba->pcidev->device;
+	hba->pci_vid = hba->pcidev->vendor;
+	hba->pci_sdid = hba->pcidev->subsystem_device;
+	hba->pci_svid = hba->pcidev->subsystem_vendor;
+	hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
+	hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
+	bnx2i_identify_device(hba);
+	bnx2i_setup_host_queue_size(hba, shost);
+
+	if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
+		hba->regview = ioremap_nocache(hba->netdev->base_addr,
+					       BNX2_MQ_CONFIG2);
+		if (!hba->regview)
+			goto ioreg_map_err;
+	} else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+		hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
+		if (!hba->regview)
+			goto ioreg_map_err;
+	}
+
+	if (bnx2i_setup_mp_bdt(hba))
+		goto mp_bdt_mem_err;
+
+	INIT_LIST_HEAD(&hba->ep_ofld_list);
+	INIT_LIST_HEAD(&hba->ep_destroy_list);
+	rwlock_init(&hba->ep_rdwr_lock);
+
+	hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;
+
+	/* different values for 5708/5709/57710 */
+	hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;
+
+	if (bnx2i_setup_free_cid_que(hba))
+		goto cid_que_err;
+
+	/* SQ/RQ/CQ size can be changed via sysfs interface */
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+		if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
+			hba->max_sqes = sq_size;
+		else
+			hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
+	} else {	/* 5706/5708/5709 */
+		if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
+			hba->max_sqes = sq_size;
+		else
+			hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
+	}
+
+	hba->max_rqes = rq_size;
+	hba->max_cqes = hba->max_sqes + rq_size;
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+		if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
+			hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
+	} else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
+		hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;
+
+	hba->num_ccell = hba->max_sqes / 2;
+
+	spin_lock_init(&hba->lock);
+	mutex_init(&hba->net_dev_lock);
+
+	if (iscsi_host_add(shost, &hba->pcidev->dev))
+		goto free_dump_mem;
+	return hba;
+
+free_dump_mem:
+	bnx2i_release_free_cid_que(hba);
+cid_que_err:
+	bnx2i_free_mp_bdt(hba);
+mp_bdt_mem_err:
+	if (hba->regview) {
+		iounmap(hba->regview);
+		hba->regview = NULL;
+	}
+ioreg_map_err:
+	pci_dev_put(hba->pcidev);
+	scsi_host_put(shost);
+	return NULL;
+}
+
+/**
+ * bnx2i_free_hba - releases hba structure and resources held by the adapter
+ * @hba:	pointer to adapter instance
+ *
+ * free adapter structure and call various cleanup routines.
+ */
+void bnx2i_free_hba(struct bnx2i_hba *hba)
+{
+	struct Scsi_Host *shost = hba->shost;
+
+	iscsi_host_remove(shost);
+	INIT_LIST_HEAD(&hba->ep_ofld_list);
+	INIT_LIST_HEAD(&hba->ep_destroy_list);
+	pci_dev_put(hba->pcidev);
+
+	if (hba->regview) {
+		iounmap(hba->regview);
+		hba->regview = NULL;
+	}
+	bnx2i_free_mp_bdt(hba);
+	bnx2i_release_free_cid_que(hba);
+	iscsi_host_free(shost);
+}
+
+/**
+ * bnx2i_conn_free_login_resources - free DMA resources used for login process
+ * @hba:		pointer to adapter instance
+ * @bnx2i_conn:		iscsi connection pointer
+ *
+ * Login related resources, mostly BDT & payload DMA memory is freed
+ */
+static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
+					    struct bnx2i_conn *bnx2i_conn)
+{
+	if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  bnx2i_conn->gen_pdu.resp_bd_tbl,
+				  bnx2i_conn->gen_pdu.resp_bd_dma);
+		bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
+	}
+
+	if (bnx2i_conn->gen_pdu.req_bd_tbl) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  bnx2i_conn->gen_pdu.req_bd_tbl,
+				  bnx2i_conn->gen_pdu.req_bd_dma);
+		bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
+	}
+
+	if (bnx2i_conn->gen_pdu.resp_buf) {
+		dma_free_coherent(&hba->pcidev->dev,
+				  ISCSI_DEF_MAX_RECV_SEG_LEN,
+				  bnx2i_conn->gen_pdu.resp_buf,
+				  bnx2i_conn->gen_pdu.resp_dma_addr);
+		bnx2i_conn->gen_pdu.resp_buf = NULL;
+	}
+
+	if (bnx2i_conn->gen_pdu.req_buf) {
+		dma_free_coherent(&hba->pcidev->dev,
+				  ISCSI_DEF_MAX_RECV_SEG_LEN,
+				  bnx2i_conn->gen_pdu.req_buf,
+				  bnx2i_conn->gen_pdu.req_dma_addr);
+		bnx2i_conn->gen_pdu.req_buf = NULL;
+	}
+}
+
+/**
+ * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop.
+ * @hba:		pointer to adapter instance
+ * @bnx2i_conn:		iscsi connection pointer
+ *
+ * Mgmt task DMA resources are allocated in this routine.
+ */
+static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
+					    struct bnx2i_conn *bnx2i_conn)
+{
+	/* Allocate memory for login request/response buffers */
+	bnx2i_conn->gen_pdu.req_buf =
+		dma_alloc_coherent(&hba->pcidev->dev,
+				   ISCSI_DEF_MAX_RECV_SEG_LEN,
+				   &bnx2i_conn->gen_pdu.req_dma_addr,
+				   GFP_KERNEL);
+	if (bnx2i_conn->gen_pdu.req_buf == NULL)
+		goto login_req_buf_failure;
+
+	bnx2i_conn->gen_pdu.req_buf_size = 0;
+	bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;
+
+	bnx2i_conn->gen_pdu.resp_buf =
+		dma_alloc_coherent(&hba->pcidev->dev,
+				   ISCSI_DEF_MAX_RECV_SEG_LEN,
+				   &bnx2i_conn->gen_pdu.resp_dma_addr,
+				   GFP_KERNEL);
+	if (bnx2i_conn->gen_pdu.resp_buf == NULL)
+		goto login_resp_buf_failure;
+
+	bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
+	bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
+
+	bnx2i_conn->gen_pdu.req_bd_tbl =
+		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				   &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
+	if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
+		goto login_req_bd_tbl_failure;
+
+	bnx2i_conn->gen_pdu.resp_bd_tbl =
+		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				   &bnx2i_conn->gen_pdu.resp_bd_dma,
+				   GFP_KERNEL);
+	if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
+		goto login_resp_bd_tbl_failure;
+
+	return 0;
+
+login_resp_bd_tbl_failure:
+	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+			  bnx2i_conn->gen_pdu.req_bd_tbl,
+			  bnx2i_conn->gen_pdu.req_bd_dma);
+	bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
+
+login_req_bd_tbl_failure:
+	dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+			  bnx2i_conn->gen_pdu.resp_buf,
+			  bnx2i_conn->gen_pdu.resp_dma_addr);
+	bnx2i_conn->gen_pdu.resp_buf = NULL;
+login_resp_buf_failure:
+	dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+			  bnx2i_conn->gen_pdu.req_buf,
+			  bnx2i_conn->gen_pdu.req_dma_addr);
+	bnx2i_conn->gen_pdu.req_buf = NULL;
+login_req_buf_failure:
+	iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
+			  "login resource alloc failed!!\n");
+	return -ENOMEM;
+
+}
+
+
+/**
+ * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table.
+ * @bnx2i_conn:		iscsi connection pointer
+ *
+ * Prepares the buffer descriptor (BD) tables before shipping requests to
+ *	cnic for PDUs prepared by the 'iscsid' daemon
+ */
+static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
+{
+	struct iscsi_bd *bd_tbl;
+
+	bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;
+
+	bd_tbl->buffer_addr_hi =
+		(u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
+	bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
+	bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
+				bnx2i_conn->gen_pdu.req_buf;
+	bd_tbl->reserved0 = 0;
+	bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+			ISCSI_BD_FIRST_IN_BD_CHAIN;
+
+	bd_tbl = (struct iscsi_bd  *) bnx2i_conn->gen_pdu.resp_bd_tbl;
+	bd_tbl->buffer_addr_hi =
+		(u32) ((u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32);
+	bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
+	bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
+	bd_tbl->reserved0 = 0;
+	bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+			ISCSI_BD_FIRST_IN_BD_CHAIN;
+}
+
+
+/**
+ * bnx2i_iscsi_send_generic_request - called to send mgmt tasks.
+ * @task:	transport layer task pointer
+ *
+ * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login,
+ *	Nop-out and Logout requests flow through this path.
+ */
+static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
+{
+	struct bnx2i_cmd *cmd = task->dd_data;
+	struct bnx2i_conn *bnx2i_conn = cmd->conn;
+	int rc = 0;
+	char *buf;
+	int data_len;
+
+	bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
+	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+	case ISCSI_OP_LOGIN:
+		bnx2i_send_iscsi_login(bnx2i_conn, task);
+		break;
+	case ISCSI_OP_NOOP_OUT:
+		data_len = bnx2i_conn->gen_pdu.req_buf_size;
+		buf = bnx2i_conn->gen_pdu.req_buf;
+		if (data_len)
+			rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
+						     RESERVED_ITT,
+						     buf, data_len, 1);
+		else
+			rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
+						     RESERVED_ITT,
+						     NULL, 0, 1);
+		break;
+	case ISCSI_OP_LOGOUT:
+		rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
+		break;
+	case ISCSI_OP_SCSI_TMFUNC:
+		rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
+		break;
+	default:
+		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
+				  "send_gen: unsupported op 0x%x\n",
+				  task->hdr->opcode);
+	}
+	return rc;
+}
+
+
+/**********************************************************************
+ *		SCSI-ML Interface
+ **********************************************************************/
+
+/**
+ * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
+ * @sc:		SCSI-ML command pointer
+ * @cmd:	iscsi cmd pointer
+ */
+static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
+{
+	u32 dword;
+	int lpcnt;
+	u8 *srcp;
+	u32 *dstp;
+	u32 scsi_lun[2];
+
+	int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
+	cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
+	cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);
+
+	lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
+	srcp = (u8 *) sc->cmnd;
+	dstp = (u32 *) cmd->req.cdb;
+	while (lpcnt--) {
+		memcpy(&dword, (const void *) srcp, 4);
+		*dstp = cpu_to_be32(dword);
+		srcp += 4;
+		dstp++;
+	}
+	if (sc->cmd_len & 0x3) {
+		dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
+		*dstp = cpu_to_be32(dword);
+	}
+}
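
Each 4-byte group of the CDB is loaded in CPU order and stored big-endian,
which byte-swaps the group on little-endian hosts; the common CDB lengths
(6, 10, 12, 16 bytes) leave at most a 2-byte remainder, which is why the
tail branch only folds in two bytes. A stand-alone illustration, using
htonl() as a user-space stand-in for cpu_to_be32():

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    int main(void)
    {
        /* 6-byte INQUIRY CDB in a padded buffer */
        uint8_t cdb[8] = { 0x12, 0x00, 0x00, 0x00, 0x24, 0x00 };
        uint32_t out[2];
        uint32_t dword;

        memcpy(&dword, cdb, 4);
        out[0] = htonl(dword);          /* full dword, swapped on LE hosts */

        /* 2-byte tail of the 6-byte CDB, as in the driver's tail branch */
        dword = (uint32_t)cdb[4] | ((uint32_t)cdb[5] << 8);
        out[1] = htonl(dword);

        for (int i = 0; i < 2; i++)
            printf("wqe cdb dword %d: %08x\n", i, out[i]);
        return 0;
    }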
+
+static void bnx2i_cleanup_task(struct iscsi_task *task)
+{
+	struct iscsi_conn *conn = task->conn;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct bnx2i_hba *hba = bnx2i_conn->hba;
+
+	/*
+	 * mgmt task or cmd was never sent to us to transmit.
+	 */
+	if (!task->sc || task->state == ISCSI_TASK_PENDING)
+		return;
+	/*
+	 * need to clean-up task context to claim dma buffers
+	 */
+	if (task->state == ISCSI_TASK_ABRT_TMF) {
+		bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
+
+		spin_unlock_bh(&conn->session->lock);
+		wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
+				msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
+		spin_lock_bh(&conn->session->lock);
+	}
+	bnx2i_iscsi_unmap_sg_list(task->dd_data);
+}
+
+/**
+ * bnx2i_mtask_xmit - transmit mtask to chip for further processing
+ * @conn:	transport layer conn structure pointer
+ * @task:	transport layer command structure pointer
+ */
+static int
+bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct bnx2i_cmd *cmd = task->dd_data;
+
+	memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
+
+	bnx2i_setup_cmd_wqe_template(cmd);
+	bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
+	if (task->data_count) {
+		memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
+		       task->data_count);
+		bnx2i_conn->gen_pdu.req_wr_ptr =
+			bnx2i_conn->gen_pdu.req_buf + task->data_count;
+	}
+	cmd->conn = conn->dd_data;
+	cmd->scsi_cmd = NULL;
+	return bnx2i_iscsi_send_generic_request(task);
+}
+
+/**
+ * bnx2i_task_xmit - transmit iscsi command to chip for further processing
+ * @task:	transport layer command structure pointer
+ *
+ * maps SG buffers and send request to chip/firmware in the form of SQ WQE
+ */
+static int bnx2i_task_xmit(struct iscsi_task *task)
+{
+	struct iscsi_conn *conn = task->conn;
+	struct iscsi_session *session = conn->session;
+	struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct scsi_cmnd *sc = task->sc;
+	struct bnx2i_cmd *cmd = task->dd_data;
+	struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
+
+	if (test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
+		return -ENOTCONN;
+
+	if (!bnx2i_conn->is_bound)
+		return -ENOTCONN;
+
+	/*
+	 * If there is no scsi_cmnd this must be a mgmt task
+	 */
+	if (!sc)
+		return bnx2i_mtask_xmit(conn, task);
+
+	bnx2i_setup_cmd_wqe_template(cmd);
+	cmd->req.op_code = ISCSI_OP_SCSI_CMD;
+	cmd->conn = bnx2i_conn;
+	cmd->scsi_cmd = sc;
+	cmd->req.total_data_transfer_length = scsi_bufflen(sc);
+	cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);
+
+	bnx2i_iscsi_map_sg_list(cmd);
+	bnx2i_cpy_scsi_cdb(sc, cmd);
+
+	cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
+	if (sc->sc_data_direction == DMA_TO_DEVICE) {
+		cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
+		cmd->req.itt = task->itt |
+			(ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+		bnx2i_setup_write_cmd_bd_info(task);
+	} else {
+		if (scsi_bufflen(sc))
+			cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
+		cmd->req.itt = task->itt |
+			(ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+	}
+
+	cmd->req.num_bds = cmd->io_tbl.bd_valid;
+	if (!cmd->io_tbl.bd_valid) {
+		cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
+		cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
+		cmd->req.num_bds = 1;
+	}
+
+	bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
+	return 0;
+}
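
The itt programmed above multiplexes libiscsi's task tag with a task-type
code in the upper bits, and the firmware echoes it back so the completion
path can recover both. A sketch of the pack/unpack; the shift and type
values are illustrative assumptions (the real ones come from
57xx_iscsi_constants.h / 57xx_iscsi_hsi.h):

    #include <stdint.h>
    #include <assert.h>

    #define TYPE_SHIFT      14   /* assumed ISCSI_CMD_REQUEST_TYPE_SHIFT */
    #define TASK_TYPE_READ  0U   /* assumed ISCSI_TASK_TYPE_READ */
    #define TASK_TYPE_WRITE 1U   /* assumed ISCSI_TASK_TYPE_WRITE */
    #define ITT_MASK        ((1U << TYPE_SHIFT) - 1)

    int main(void)
    {
        uint32_t itt = 0x2a;    /* tag handed out by libiscsi */
        uint32_t wire = itt | (TASK_TYPE_WRITE << TYPE_SHIFT);

        /* completion side: strip the type bits to get the tag back */
        assert((wire & ITT_MASK) == itt);
        assert((wire >> TYPE_SHIFT) == TASK_TYPE_WRITE);
        return 0;
    }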
+
+/**
+ * bnx2i_session_create - create a new iscsi session
+ * @cmds_max:		max commands supported
+ * @qdepth:		scsi queue depth to support
+ * @initial_cmdsn:	initial iscsi CMDSN to be used for this session
+ *
+ * Creates a new iSCSI session instance on given device.
+ */
+static struct iscsi_cls_session *
+bnx2i_session_create(struct iscsi_endpoint *ep,
+		     uint16_t cmds_max, uint16_t qdepth,
+		     uint32_t initial_cmdsn)
+{
+	struct Scsi_Host *shost;
+	struct iscsi_cls_session *cls_session;
+	struct bnx2i_hba *hba;
+	struct bnx2i_endpoint *bnx2i_ep;
+
+	if (!ep) {
+		printk(KERN_ERR "bnx2i: missing ep.\n");
+		return NULL;
+	}
+
+	bnx2i_ep = ep->dd_data;
+	shost = bnx2i_ep->hba->shost;
+	hba = iscsi_host_priv(shost);
+	if (bnx2i_adapter_ready(hba))
+		return NULL;
+
+	/*
+	 * user can override hw limit as long as it is within
+	 * the min/max.
+	 */
+	if (cmds_max > hba->max_sqes)
+		cmds_max = hba->max_sqes;
+	else if (cmds_max < BNX2I_SQ_WQES_MIN)
+		cmds_max = BNX2I_SQ_WQES_MIN;
+
+	cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
+					  cmds_max, sizeof(struct bnx2i_cmd),
+					  initial_cmdsn, ISCSI_MAX_TARGET);
+	if (!cls_session)
+		return NULL;
+
+	if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
+		goto session_teardown;
+	return cls_session;
+
+session_teardown:
+	iscsi_session_teardown(cls_session);
+	return NULL;
+}
+
+
+/**
+ * bnx2i_session_destroy - destroys iscsi session
+ * @cls_session:	pointer to iscsi cls session
+ *
+ * Destroys previously created iSCSI session instance and releases
+ *	all resources held by it
+ */
+static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
+{
+	struct iscsi_session *session = cls_session->dd_data;
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+
+	bnx2i_destroy_cmd_pool(hba, session);
+	iscsi_session_teardown(cls_session);
+}
+
+
+/**
+ * bnx2i_conn_create - create iscsi connection instance
+ * @cls_session:	pointer to iscsi cls session
+ * @cid:		iscsi cid as per rfc (not NX2's CID terminology)
+ *
+ * Creates a new iSCSI connection instance for a given session
+ */
+static struct iscsi_cls_conn *
+bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
+{
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	struct bnx2i_conn *bnx2i_conn;
+	struct iscsi_cls_conn *cls_conn;
+	struct iscsi_conn *conn;
+
+	cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn),
+				    cid);
+	if (!cls_conn)
+		return NULL;
+	conn = cls_conn->dd_data;
+
+	bnx2i_conn = conn->dd_data;
+	bnx2i_conn->cls_conn = cls_conn;
+	bnx2i_conn->hba = hba;
+	/* 'ep' ptr will be assigned in bind() call */
+	bnx2i_conn->ep = NULL;
+	init_completion(&bnx2i_conn->cmd_cleanup_cmpl);
+
+	if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
+		iscsi_conn_printk(KERN_ALERT, conn,
+				  "conn_new: login resc alloc failed!!\n");
+		goto free_conn;
+	}
+
+	return cls_conn;
+
+free_conn:
+	iscsi_conn_teardown(cls_conn);
+	return NULL;
+}
+
+/**
+ * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
+ * @cls_session:	pointer to iscsi cls session
+ * @cls_conn:		pointer to iscsi cls conn
+ * @transport_fd:	64-bit EP handle
+ * @is_leading:		leading connection on this session?
+ *
+ * Binds together iSCSI session instance, iSCSI connection instance
+ *	and the TCP connection. This routine returns error code if
+ *	TCP connection does not belong on the device iSCSI sess/conn
+ *	is bound
+ */
+static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
+			   struct iscsi_cls_conn *cls_conn,
+			   uint64_t transport_fd, int is_leading)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	struct bnx2i_endpoint *bnx2i_ep;
+	struct iscsi_endpoint *ep;
+	int ret_code;
+
+	ep = iscsi_lookup_endpoint(transport_fd);
+	if (!ep)
+		return -EINVAL;
+
+	bnx2i_ep = ep->dd_data;
+	if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
+	    (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
+		/* Peer disconnected via FIN or RST */
+		return -EINVAL;
+
+	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+		return -EINVAL;
+
+	if (bnx2i_ep->hba != hba) {
+		/* Error - TCP connection does not belong to this device */
+		iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
+				  "conn bind, ep=0x%p (%s) does not belong "
+				  "to hba (%s)\n",
+				  bnx2i_ep, bnx2i_ep->hba->netdev->name,
+				  hba->netdev->name);
+		return -EEXIST;
+	}
+
+	bnx2i_ep->conn = bnx2i_conn;
+	bnx2i_conn->ep = bnx2i_ep;
+	bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
+	bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
+	bnx2i_conn->is_bound = 1;
+
+	ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
+						bnx2i_ep->ep_iscsi_cid);
+
+	/* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710
+	 * driver needs to explicitly replenish RQ index during setup.
+	 */
+	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
+		bnx2i_put_rq_buf(bnx2i_conn, 0);
+
+	bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
+	return ret_code;
+}
+
+
+/**
+ * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
+ * @cls_conn:	pointer to iscsi cls conn
+ *
+ * Destroy an iSCSI connection instance and release memory resources held by
+ *	this connection
+ */
+static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct Scsi_Host *shost;
+	struct bnx2i_hba *hba;
+
+	shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
+	hba = iscsi_host_priv(shost);
+
+	bnx2i_conn_free_login_resources(hba, bnx2i_conn);
+	iscsi_conn_teardown(cls_conn);
+}
+
+
+/**
+ * bnx2i_conn_get_param - return iscsi connection parameter to caller
+ * @cls_conn:	pointer to iscsi cls conn
+ * @param:	parameter type identifier
+ * @buf: 	buffer pointer
+ *
+ * returns iSCSI connection parameters
+ */
+static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
+				enum iscsi_param param, char *buf)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	int len = 0;
+
+	switch (param) {
+	case ISCSI_PARAM_CONN_PORT:
+		if (bnx2i_conn->ep)
+			len = sprintf(buf, "%hu\n",
+				      bnx2i_conn->ep->cm_sk->dst_port);
+		break;
+	case ISCSI_PARAM_CONN_ADDRESS:
+		if (bnx2i_conn->ep)
+			len = sprintf(buf, NIPQUAD_FMT "\n",
+				      NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip));
+		break;
+	default:
+		return iscsi_conn_get_param(cls_conn, param, buf);
+	}
+
+	return len;
+}
+
+/**
+ * bnx2i_host_get_param - returns host (adapter) related parameters
+ * @shost:	scsi host pointer
+ * @param:	parameter type identifier
+ * @buf:	buffer pointer
+ */
+static int bnx2i_host_get_param(struct Scsi_Host *shost,
+				enum iscsi_host_param param, char *buf)
+{
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	int len = 0;
+
+	switch (param) {
+	case ISCSI_HOST_PARAM_HWADDRESS:
+		len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
+		break;
+	case ISCSI_HOST_PARAM_NETDEV_NAME:
+		len = sprintf(buf, "%s\n", hba->netdev->name);
+		break;
+	default:
+		return iscsi_host_get_param(shost, param, buf);
+	}
+	return len;
+}
+
+/**
+ * bnx2i_conn_start - completes iscsi connection migration to FFP
+ * @cls_conn:	pointer to iscsi cls conn
+ *
+ * last call in FFP migration to handover iscsi conn to the driver
+ */
+static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+
+	bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
+	bnx2i_update_iscsi_conn(conn);
+
+	/*
+	 * this should normally not sleep for a long time so it should
+	 * not disrupt the caller.
+	 */
+	bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
+	bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+	bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep;
+	add_timer(&bnx2i_conn->ep->ofld_timer);
+	/* update iSCSI context for this conn, wait for CNIC to complete */
+	wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
+			bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);
+
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&bnx2i_conn->ep->ofld_timer);
+
+	iscsi_conn_start(cls_conn);
+	return 0;
+}
+
+
+/**
+ * bnx2i_conn_get_stats - returns iSCSI stats
+ * @cls_conn:	pointer to iscsi cls conn
+ * @stats:	pointer to iscsi statistic struct
+ */
+static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+				 struct iscsi_stats *stats)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+
+	stats->txdata_octets = conn->txdata_octets;
+	stats->rxdata_octets = conn->rxdata_octets;
+	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+	stats->dataout_pdus = conn->dataout_pdus_cnt;
+	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+	stats->datain_pdus = conn->datain_pdus_cnt;
+	stats->r2t_pdus = conn->r2t_pdus_cnt;
+	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+	stats->digest_err = 0;
+	stats->timeout_err = 0;
+	strcpy(stats->custom[0].desc, "eh_abort_cnt");
+	stats->custom[0].value = conn->eh_abort_cnt;
+	stats->custom_length = 1;
+}
+
+
+/**
+ * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices
+ * @dst_addr:	target IP address
+ *
+ * check if route resolves to BNX2 device
+ */
+static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
+{
+	struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
+	struct bnx2i_hba *hba;
+	struct cnic_dev *cnic = NULL;
+
+	bnx2i_reg_dev_all();
+
+	hba = get_adapter_list_head();
+	if (hba && hba->cnic)
+		cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
+	if (!cnic) {
+		printk(KERN_ALERT "bnx2i: no route,"
+		       "can't connect using cnic\n");
+		goto no_nx2_route;
+	}
+	hba = bnx2i_find_hba_for_cnic(cnic);
+	if (!hba)
+		goto no_nx2_route;
+
+	if (bnx2i_adapter_ready(hba)) {
+		printk(KERN_ALERT "bnx2i: check route, hba not found\n");
+		goto no_nx2_route;
+	}
+	if (hba->netdev->mtu > hba->mtu_supported) {
+		printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
+				  hba->netdev->name, hba->netdev->mtu);
+		printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
+				  hba->mtu_supported);
+		goto no_nx2_route;
+	}
+	return hba;
+no_nx2_route:
+	return NULL;
+}
+
+
+/**
+ * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
+ * @hba:	pointer to adapter instance
+ * @ep:		endpoint (transport identifier) structure
+ *
+ * destroys cm_sock structure and on chip iscsi context
+ */
+static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
+				 struct bnx2i_endpoint *ep)
+{
+	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
+		hba->cnic->cm_destroy(ep->cm_sk);
+
+	if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
+		ep->state = EP_STATE_DISCONN_COMPL;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
+	    ep->state == EP_STATE_DISCONN_TIMEDOUT) {
+		printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump,"
+				  " NW/PCIe trace, driver msgs to developers"
+				  " for analysis\n");
+		return 1;
+	}
+
+	ep->state = EP_STATE_CLEANUP_START;
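+	/* arm a 10 sec guard timer; bnx2i_ep_ofld_timer() advances the
+	 * state machine and wakes ofld_wait if the CNIC completion never
+	 * arrives, so the wait below cannot block forever
+	 */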
+	init_timer(&ep->ofld_timer);
+	ep->ofld_timer.expires = 10*HZ + jiffies;
+	ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+	ep->ofld_timer.data = (unsigned long) ep;
+	add_timer(&ep->ofld_timer);
+
+	bnx2i_ep_destroy_list_add(hba, ep);
+
+	/* destroy iSCSI context, wait for it to complete */
+	bnx2i_send_conn_destroy(hba, ep);
+	wait_event_interruptible(ep->ofld_wait,
+				 (ep->state != EP_STATE_CLEANUP_START));
+
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&ep->ofld_timer);
+
+	bnx2i_ep_destroy_list_del(hba, ep);
+
+	if (ep->state != EP_STATE_CLEANUP_CMPL)
+		/* should never happen */
+		printk(KERN_ALERT "bnx2i - conn destroy failed\n");
+
+	return 0;
+}
+
+
+/**
+ * bnx2i_ep_connect - establish TCP connection to target portal
+ * @shost:		scsi host
+ * @dst_addr:		target IP address
+ * @non_blocking:	blocking or non-blocking call
+ *
+ * this routine initiates the TCP/IP connection by invoking Option-2 i/f
+ *	with l5_core and the CNIC. This is a multi-step process of resolving
+ *	route to target, create a iscsi connection context, handshaking with
+ *	CNIC module to create/initialize the socket struct and finally
+ *	sending down option-2 request to complete TCP 3-way handshake
+ */
+static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
+					       struct sockaddr *dst_addr,
+					       int non_blocking)
+{
+	u32 iscsi_cid = BNX2I_CID_RESERVED;
+	struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
+	struct sockaddr_in6 *desti6;
+	struct bnx2i_endpoint *bnx2i_ep;
+	struct bnx2i_hba *hba;
+	struct cnic_dev *cnic;
+	struct cnic_sockaddr saddr;
+	struct iscsi_endpoint *ep;
+	int rc = 0;
+
+	if (shost)
+		/* driver is given scsi host to work with */
+		hba = iscsi_host_priv(shost);
+	else
+		/*
+		 * check if the given destination can be reached through
+		 * a iscsi capable NetXtreme2 device
+		 */
+		hba = bnx2i_check_route(dst_addr);
+	if (!hba) {
+		rc = -ENOMEM;
+		goto check_busy;
+	}
+
+	cnic = hba->cnic;
+	ep = bnx2i_alloc_ep(hba);
+	if (!ep) {
+		rc = -ENOMEM;
+		goto check_busy;
+	}
+	bnx2i_ep = ep->dd_data;
+
+	mutex_lock(&hba->net_dev_lock);
+	if (bnx2i_adapter_ready(hba)) {
+		rc = -EPERM;
+		goto net_if_down;
+	}
+
+	bnx2i_ep->state = EP_STATE_IDLE;
+	bnx2i_ep->ep_iscsi_cid = (u16) -1;
+	bnx2i_ep->num_active_cmds = 0;
+	iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
+	if (iscsi_cid == -1) {
+		printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n");
+		rc = -ENOMEM;
+		goto iscsi_cid_err;
+	}
+	bnx2i_ep->hba_age = hba->age;
+
+	rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
+	if (rc != 0) {
+		printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n");
+		rc = -ENOMEM;
+		goto qp_resc_err;
+	}
+
+	bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
+	bnx2i_ep->state = EP_STATE_OFLD_START;
+	bnx2i_ep_ofld_list_add(hba, bnx2i_ep);
+
+	init_timer(&bnx2i_ep->ofld_timer);
+	bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
+	bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+	bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
+	add_timer(&bnx2i_ep->ofld_timer);
+
+	bnx2i_send_conn_ofld_req(hba, bnx2i_ep);
+
+	/* Wait for CNIC hardware to setup conn context and return 'cid' */
+	wait_event_interruptible(bnx2i_ep->ofld_wait,
+				 bnx2i_ep->state != EP_STATE_OFLD_START);
+
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&bnx2i_ep->ofld_timer);
+
+	bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
+
+	if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
+		rc = -ENOSPC;
+		goto conn_failed;
+	}
+
+	rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
+			     iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
+	if (rc) {
+		rc = -EINVAL;
+		goto conn_failed;
+	}
+
+	bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
+	bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
+	clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);
+
+	memset(&saddr, 0, sizeof(saddr));
+	if (dst_addr->sa_family == AF_INET) {
+		desti = (struct sockaddr_in *) dst_addr;
+		saddr.remote.v4 = *desti;
+		saddr.local.v4.sin_family = desti->sin_family;
+	} else if (dst_addr->sa_family == AF_INET6) {
+		desti6 = (struct sockaddr_in6 *) dst_addr;
+		saddr.remote.v6 = *desti6;
+		saddr.local.v6.sin6_family = desti6->sin6_family;
+	}
+
+	bnx2i_ep->timestamp = jiffies;
+	bnx2i_ep->state = EP_STATE_CONNECT_START;
+	if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+		rc = -EINVAL;
+		goto conn_failed;
+	} else
+		rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);
+
+	if (rc)
+		goto release_ep;
+
+	if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
+		goto release_ep;
+	mutex_unlock(&hba->net_dev_lock);
+	return ep;
+
+release_ep:
+	if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
+		mutex_unlock(&hba->net_dev_lock);
+		return ERR_PTR(rc);
+	}
+conn_failed:
+net_if_down:
+iscsi_cid_err:
+	bnx2i_free_qp_resc(hba, bnx2i_ep);
+qp_resc_err:
+	bnx2i_free_ep(ep);
+	mutex_unlock(&hba->net_dev_lock);
+check_busy:
+	bnx2i_unreg_dev_all();
+	return ERR_PTR(rc);
+}
+
+
+/**
+ * bnx2i_ep_poll - polls for TCP connection establishment
+ * @ep:			TCP connection (endpoint) handle
+ * @timeout_ms:		timeout value in milliseconds
+ *
+ * polls for TCP connect request to complete
+ */
+static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+	struct bnx2i_endpoint *bnx2i_ep;
+	int rc = 0;
+
+	bnx2i_ep = ep->dd_data;
+	if ((bnx2i_ep->state == EP_STATE_IDLE) ||
+	    (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
+	    (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
+		return -1;
+	if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
+		return 1;
+
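+	/* wait_event_interruptible_timeout() returns 0 on timeout,
+	 * the remaining jiffies (> 0) once the condition is true, or
+	 * -ERESTARTSYS if interrupted by a signal
+	 */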
+	rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
+					      ((bnx2i_ep->state ==
+						EP_STATE_OFLD_FAILED) ||
+					       (bnx2i_ep->state ==
+						EP_STATE_CONNECT_FAILED) ||
+					       (bnx2i_ep->state ==
+						EP_STATE_CONNECT_COMPL)),
+					      msecs_to_jiffies(timeout_ms));
+	if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
+		rc = -1;
+
+	if (rc > 0)
+		return 1;
+	else if (!rc)
+		return 0;	/* timeout */
+	else
+		return rc;
+}
+
+
+/**
+ * bnx2i_ep_tcp_conn_active - check EP state transition
+ * @bnx2i_ep:	endpoint pointer
+ *
+ * check if underlying TCP connection is active
+ */
+static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
+{
+	int ret;
+	int cnic_dev_10g = 0;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
+		cnic_dev_10g = 1;
+
+	switch (bnx2i_ep->state) {
+	case EP_STATE_CONNECT_START:
+	case EP_STATE_CLEANUP_FAILED:
+	case EP_STATE_OFLD_FAILED:
+	case EP_STATE_DISCONN_TIMEDOUT:
+		ret = 0;
+		break;
+	case EP_STATE_CONNECT_COMPL:
+	case EP_STATE_ULP_UPDATE_START:
+	case EP_STATE_ULP_UPDATE_COMPL:
+	case EP_STATE_TCP_FIN_RCVD:
+	case EP_STATE_ULP_UPDATE_FAILED:
+		ret = 1;
+		break;
+	case EP_STATE_TCP_RST_RCVD:
+		ret = 0;
+		break;
+	case EP_STATE_CONNECT_FAILED:
+		if (cnic_dev_10g)
+			ret = 1;
+		else
+			ret = 0;
+		break;
+	default:
+		ret = 0;
+	}
+
+	return ret;
+}
+
+
+/**
+ * bnx2i_ep_disconnect - executes TCP connection teardown process
+ * @ep:		TCP connection (endpoint) handle
+ *
+ * executes  TCP connection teardown process
+ */
+static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
+{
+	struct bnx2i_endpoint *bnx2i_ep;
+	struct bnx2i_conn *bnx2i_conn = NULL;
+	struct iscsi_session *session = NULL;
+	struct iscsi_conn *conn;
+	struct cnic_dev *cnic;
+	struct bnx2i_hba *hba;
+
+	bnx2i_ep = ep->dd_data;
+
+	/* driver should not attempt connection cleanup until TCP_CONNECT
+	 * completes either successfully or fails. The connect timeout is
+	 * 9 secs, so wait up to 12 secs for it to complete
+	 */
+	while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
+		!time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
+		msleep(250);
+
+	if (bnx2i_ep->conn) {
+		bnx2i_conn = bnx2i_ep->conn;
+		conn = bnx2i_conn->cls_conn->dd_data;
+		session = conn->session;
+
+		spin_lock_bh(&session->lock);
+		bnx2i_conn->is_bound = 0;
+		spin_unlock_bh(&session->lock);
+	}
+
+	hba = bnx2i_ep->hba;
+	if (bnx2i_ep->state == EP_STATE_IDLE)
+		goto return_bnx2i_ep;
+	cnic = hba->cnic;
+
+	mutex_lock(&hba->net_dev_lock);
+
+	if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+		goto free_resc;
+	if (bnx2i_ep->hba_age != hba->age)
+		goto free_resc;
+
+	if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
+		goto destroy_conn;
+
+	bnx2i_ep->state = EP_STATE_DISCONN_START;
+
+	init_timer(&bnx2i_ep->ofld_timer);
+	bnx2i_ep->ofld_timer.expires = 10*HZ + jiffies;
+	bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+	bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
+	add_timer(&bnx2i_ep->ofld_timer);
+
+	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+		int close = 0;
+
+		if (session) {
+			spin_lock_bh(&session->lock);
+			if (session->state == ISCSI_STATE_LOGGING_OUT)
+				close = 1;
+			spin_unlock_bh(&session->lock);
+		}
+		if (close)
+			cnic->cm_close(bnx2i_ep->cm_sk);
+		else
+			cnic->cm_abort(bnx2i_ep->cm_sk);
+	} else
+		goto free_resc;
+
+	/* wait for option-2 conn teardown */
+	wait_event_interruptible(bnx2i_ep->ofld_wait,
+				 bnx2i_ep->state != EP_STATE_DISCONN_START);
+
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&bnx2i_ep->ofld_timer);
+
+destroy_conn:
+	if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
+		mutex_unlock(&hba->net_dev_lock);
+		return;
+	}
+free_resc:
+	mutex_unlock(&hba->net_dev_lock);
+	bnx2i_free_qp_resc(hba, bnx2i_ep);
+return_bnx2i_ep:
+	if (bnx2i_conn)
+		bnx2i_conn->ep = NULL;
+
+	bnx2i_free_ep(ep);
+
+	if (!hba->ofld_conns_active)
+		bnx2i_unreg_dev_all();
+}
+
+
+/**
+ * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler
+ * @shost:	scsi host pointer
+ * @params:	iscsi path message
+ */
+static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
+{
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	char *buf = (char *) params;
+	u16 len = sizeof(*params);
+
+	/* handled by cnic driver */
+	hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
+				     len);
+
+	return 0;
+}
+
+
+/*
+ * 'scsi_host_template' structure and 'iscsi_transport' structure template
+ * used while registering with the scsi host and iSCSI transport module.
+ */
+static struct scsi_host_template bnx2i_host_template = {
+	.module			= THIS_MODULE,
+	.name			= "Broadcom Offload iSCSI Initiator",
+	.proc_name		= "bnx2i",
+	.queuecommand		= iscsi_queuecommand,
+	.eh_abort_handler	= iscsi_eh_abort,
+	.eh_device_reset_handler = iscsi_eh_device_reset,
+	.eh_target_reset_handler = iscsi_eh_target_reset,
+	.can_queue		= 1024,
+	.max_sectors		= 127,
+	.cmd_per_lun		= 32,
+	.this_id		= -1,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.sg_tablesize		= ISCSI_MAX_BDS_PER_CMD,
+	.shost_attrs		= bnx2i_dev_attributes,
+};
+
+struct iscsi_transport bnx2i_iscsi_transport = {
+	.owner			= THIS_MODULE,
+	.name			= "bnx2i",
+	.caps			= CAP_RECOVERY_L0 | CAP_HDRDGST |
+				  CAP_MULTI_R2T | CAP_DATADGST |
+				  CAP_DATA_PATH_OFFLOAD,
+	.param_mask		= ISCSI_MAX_RECV_DLENGTH |
+				  ISCSI_MAX_XMIT_DLENGTH |
+				  ISCSI_HDRDGST_EN |
+				  ISCSI_DATADGST_EN |
+				  ISCSI_INITIAL_R2T_EN |
+				  ISCSI_MAX_R2T |
+				  ISCSI_IMM_DATA_EN |
+				  ISCSI_FIRST_BURST |
+				  ISCSI_MAX_BURST |
+				  ISCSI_PDU_INORDER_EN |
+				  ISCSI_DATASEQ_INORDER_EN |
+				  ISCSI_ERL |
+				  ISCSI_CONN_PORT |
+				  ISCSI_CONN_ADDRESS |
+				  ISCSI_EXP_STATSN |
+				  ISCSI_PERSISTENT_PORT |
+				  ISCSI_PERSISTENT_ADDRESS |
+				  ISCSI_TARGET_NAME | ISCSI_TPGT |
+				  ISCSI_USERNAME | ISCSI_PASSWORD |
+				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+				  ISCSI_LU_RESET_TMO |
+				  ISCSI_PING_TMO | ISCSI_RECV_TMO |
+				  ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
+	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME,
+	.create_session		= bnx2i_session_create,
+	.destroy_session	= bnx2i_session_destroy,
+	.create_conn		= bnx2i_conn_create,
+	.bind_conn		= bnx2i_conn_bind,
+	.destroy_conn		= bnx2i_conn_destroy,
+	.set_param		= iscsi_set_param,
+	.get_conn_param		= bnx2i_conn_get_param,
+	.get_session_param	= iscsi_session_get_param,
+	.get_host_param		= bnx2i_host_get_param,
+	.start_conn		= bnx2i_conn_start,
+	.stop_conn		= iscsi_conn_stop,
+	.send_pdu		= iscsi_conn_send_pdu,
+	.xmit_task		= bnx2i_task_xmit,
+	.get_stats		= bnx2i_conn_get_stats,
+	/* TCP connect - disconnect - option-2 interface calls */
+	.ep_connect		= bnx2i_ep_connect,
+	.ep_poll		= bnx2i_ep_poll,
+	.ep_disconnect		= bnx2i_ep_disconnect,
+	.set_path		= bnx2i_nl_set_path,
+	/* Error recovery timeout call */
+	.session_recovery_timedout = iscsi_session_recovery_timedout,
+	.cleanup_task		= bnx2i_cleanup_task,
+};

+ 142 - 0
drivers/scsi/bnx2i/bnx2i_sysfs.c

@@ -0,0 +1,142 @@
+/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2004 - 2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include "bnx2i.h"
+
+/**
+ * bnx2i_dev_to_hba - maps dev pointer to adapter struct
+ * @dev:	device pointer
+ *
+ * Map device to hba structure
+ */
+static inline struct bnx2i_hba *bnx2i_dev_to_hba(struct device *dev)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	return iscsi_host_priv(shost);
+}
+
+
+/**
+ * bnx2i_show_sq_info - returns the currently configured send queue (SQ) size
+ * @dev:	device pointer
+ * @buf:	buffer to return current SQ size parameter
+ *
+ * Returns the current SQ size parameter; this parameter determines the
+ * number of outstanding iSCSI commands supported on a connection
+ */
+static ssize_t bnx2i_show_sq_info(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+	return sprintf(buf, "0x%x\n", hba->max_sqes);
+}
+
+
+/**
+ * bnx2i_set_sq_info - update send queue (SQ) size parameter
+ * @dev:	device pointer
+ * @buf:	buffer containing the new SQ size parameter
+ * @count:	parameter buffer size
+ *
+ * Interface for user to change shared queue size allocated for each conn
+ * Must be within SQ limits and a power of 2. For the latter this is needed
+ * because of how libiscsi preallocates tasks.
+ */
+static ssize_t bnx2i_set_sq_info(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+	u32 val;
+	int max_sq_size;
+
+	if (hba->ofld_conns_active)
+		goto skip_config;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+		max_sq_size = BNX2I_5770X_SQ_WQES_MAX;
+	else
+		max_sq_size = BNX2I_570X_SQ_WQES_MAX;
+
+	if (sscanf(buf, " 0x%x ", &val) > 0) {
+		if ((val >= BNX2I_SQ_WQES_MIN) && (val <= max_sq_size) &&
+		    (is_power_of_2(val)))
+			hba->max_sqes = val;
+	}
+
+	return count;
+
+skip_config:
+	printk(KERN_ERR "bnx2i: device busy, cannot change SQ size\n");
+	return 0;
+}
+
+
+/**
+ * bnx2i_show_ccell_info - returns command cell (HQ) size
+ * @dev:	device pointer
+ * @buf:	buffer to return current CCELL size parameter
+ *
+ * returns per-connection TCP history queue size parameter
+ */
+static ssize_t bnx2i_show_ccell_info(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+	return sprintf(buf, "0x%x\n", hba->num_ccell);
+}
+
+
+/**
+ * bnx2i_set_ccell_info - sets command cell (HQ) size
+ * @dev:	device pointer
+ * @buf:	buffer containing the new CCELL size parameter
+ * @count:	parameter buffer size
+ *
+ * updates per-connection TCP history queue size parameter
+ */
+static ssize_t bnx2i_set_ccell_info(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	u32 val;
+	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+	if (hba->ofld_conns_active)
+		goto skip_config;
+
+	if (sscanf(buf, " 0x%x ", &val) > 0) {
+		if ((val >= BNX2I_CCELLS_MIN) &&
+		    (val <= BNX2I_CCELLS_MAX)) {
+			hba->num_ccell = val;
+		}
+	}
+
+	return count;
+
+skip_config:
+	printk(KERN_ERR "bnx2i: device busy, cannot change CCELL size\n");
+	return 0;
+}
+
+
+static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR,
+		   bnx2i_show_sq_info, bnx2i_set_sq_info);
+static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR,
+		   bnx2i_show_ccell_info, bnx2i_set_ccell_info);
+
+struct device_attribute *bnx2i_dev_attributes[] = {
+	&dev_attr_sq_size,
+	&dev_attr_num_ccell,
+	NULL
+};

+ 0 - 1
drivers/scsi/cxgb3i/cxgb3i.h

@@ -144,7 +144,6 @@ struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *);
 void cxgb3i_adapter_open(struct t3cdev *);
 void cxgb3i_adapter_close(struct t3cdev *);
 
-struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *);
 struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *,
 				       struct net_device *);
 void cxgb3i_hba_host_remove(struct cxgb3i_hba *);

+ 22 - 4
drivers/scsi/cxgb3i/cxgb3i_iscsi.c

@@ -13,6 +13,7 @@
 
 #include <linux/inet.h>
 #include <linux/crypto.h>
+#include <net/dst.h>
 #include <net/tcp.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -178,7 +179,7 @@ void cxgb3i_adapter_close(struct t3cdev *t3dev)
  * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device
  * @t3dev: t3cdev adapter
  */
-struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
+static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
 {
 	struct cxgb3i_adapter *snic;
 	int i;
@@ -261,20 +262,27 @@ void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba)
 
 /**
  * cxgb3i_ep_connect - establish TCP connection to target portal
+ * @shost:		scsi host to use
  * @dst_addr:		target IP address
  * @non_blocking:	blocking or non-blocking call
  *
  * Initiates a TCP/IP connection to the dst_addr
  */
-static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr,
+static struct iscsi_endpoint *cxgb3i_ep_connect(struct Scsi_Host *shost,
+						struct sockaddr *dst_addr,
 						int non_blocking)
 {
 	struct iscsi_endpoint *ep;
 	struct cxgb3i_endpoint *cep;
-	struct cxgb3i_hba *hba;
+	struct cxgb3i_hba *hba = NULL;
 	struct s3_conn *c3cn = NULL;
 	int err = 0;
 
+	if (shost)
+		hba = iscsi_host_priv(shost);
+
+	cxgb3i_api_debug("shost 0x%p, hba 0x%p.\n", shost, hba);
+
 	c3cn = cxgb3i_c3cn_create();
 	if (!c3cn) {
 		cxgb3i_log_info("ep connect OOM.\n");
@@ -282,17 +290,27 @@ static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr,
 		goto release_conn;
 		goto release_conn;
 	}
 	}
 
 
-	err = cxgb3i_c3cn_connect(c3cn, (struct sockaddr_in *)dst_addr);
+	err = cxgb3i_c3cn_connect(hba ? hba->ndev : NULL, c3cn,
+				 (struct sockaddr_in *)dst_addr);
 	if (err < 0) {
 	if (err < 0) {
 		cxgb3i_log_info("ep connect failed.\n");
 		cxgb3i_log_info("ep connect failed.\n");
 		goto release_conn;
 		goto release_conn;
 	}
 	}
+
 	hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev);
 	hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev);
 	if (!hba) {
 	if (!hba) {
 		err = -ENOSPC;
 		err = -ENOSPC;
 		cxgb3i_log_info("NOT going through cxgbi device.\n");
 		cxgb3i_log_info("NOT going through cxgbi device.\n");
 		goto release_conn;
 		goto release_conn;
 	}
 	}
+
+	if (shost && hba != iscsi_host_priv(shost)) {
+		err = -ENOSPC;
+		cxgb3i_log_info("Could not connect through request host%u\n",
+				shost->host_no);
+		goto release_conn;
+	}
+
 	if (c3cn_is_closing(c3cn)) {
 	if (c3cn_is_closing(c3cn)) {
 		err = -ENOSPC;
 		err = -ENOSPC;
 		cxgb3i_log_info("ep connect unable to connect.\n");
 		cxgb3i_log_info("ep connect unable to connect.\n");

+ 14 - 9
drivers/scsi/cxgb3i/cxgb3i_offload.c

@@ -1479,12 +1479,13 @@ static struct net_device *cxgb3_egress_dev(struct net_device *root_dev,
 	return NULL;
 }
 
-static struct rtable *find_route(__be32 saddr, __be32 daddr,
+static struct rtable *find_route(struct net_device *dev,
+				 __be32 saddr, __be32 daddr,
 				 __be16 sport, __be16 dport)
 {
 	struct rtable *rt;
 	struct flowi fl = {
-		.oif = 0,
+		.oif = dev ? dev->ifindex : 0,
 		.nl_u = {
 			 .ip4_u = {
 				   .daddr = daddr,
@@ -1573,36 +1574,40 @@ out_err:
  *
  * return 0 if active open request is sent, < 0 otherwise.
  */
-int cxgb3i_c3cn_connect(struct s3_conn *c3cn, struct sockaddr_in *usin)
+int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
+			struct sockaddr_in *usin)
 {
 	struct rtable *rt;
-	struct net_device *dev;
 	struct cxgb3i_sdev_data *cdata;
 	struct t3cdev *cdev;
 	__be32 sipv4;
 	int err;
 
+	c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev);
+
 	if (usin->sin_family != AF_INET)
 		return -EAFNOSUPPORT;
 
 	c3cn->daddr.sin_port = usin->sin_port;
 	c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
 
-	rt = find_route(c3cn->saddr.sin_addr.s_addr,
+	rt = find_route(dev, c3cn->saddr.sin_addr.s_addr,
 			c3cn->daddr.sin_addr.s_addr,
 			c3cn->saddr.sin_port,
 			c3cn->daddr.sin_port);
 	if (rt == NULL) {
-		c3cn_conn_debug("NO route to 0x%x, port %u.\n",
+		c3cn_conn_debug("NO route to 0x%x, port %u, dev %s.\n",
 				c3cn->daddr.sin_addr.s_addr,
-				ntohs(c3cn->daddr.sin_port));
+				ntohs(c3cn->daddr.sin_port),
+				dev ? dev->name : "any");
 		return -ENETUNREACH;
 	}
 
 	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
-		c3cn_conn_debug("multi-cast route to 0x%x, port %u.\n",
+		c3cn_conn_debug("multi-cast route to 0x%x, port %u, dev %s.\n",
 				c3cn->daddr.sin_addr.s_addr,
-				ntohs(c3cn->daddr.sin_port));
+				ntohs(c3cn->daddr.sin_port),
+				dev ? dev->name : "any");
 		ip_rt_put(rt);
 		return -ENETUNREACH;
 	}
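Binding the route lookup to a specific interface only requires setting the output-interface field of the flow key, as the hunk above does. A sketch under the 2.6.30-era routing structures this driver uses (the lookup call itself is unchanged from the driver's find_route()):

/* oif == 0 lets the kernel pick any egress interface; a nonzero ifindex
 * pins the lookup to that device, which is what the cxgb3i fix relies on. */
struct flowi fl = {
	.oif = dev ? dev->ifindex : 0,
	.nl_u = {
		.ip4_u = {
			.daddr = daddr,
			.saddr = saddr,
		},
	},
};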

+ 2 - 1
drivers/scsi/cxgb3i/cxgb3i_offload.h

@@ -169,7 +169,8 @@ void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *);
 void cxgb3i_sdev_remove(struct t3cdev *);
 
 struct s3_conn *cxgb3i_c3cn_create(void);
-int cxgb3i_c3cn_connect(struct s3_conn *, struct sockaddr_in *);
+int cxgb3i_c3cn_connect(struct net_device *, struct s3_conn *,
+			struct sockaddr_in *);
 void cxgb3i_c3cn_rx_credits(struct s3_conn *, int);
 int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *);
 void cxgb3i_c3cn_release(struct s3_conn *);

+ 6 - 0
drivers/scsi/device_handler/scsi_dh_rdac.c

@@ -561,6 +561,12 @@ static int rdac_check_sense(struct scsi_device *sdev,
 	struct rdac_dh_data *h = get_rdac_data(sdev);
 	switch (sense_hdr->sense_key) {
 	case NOT_READY:
+		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
+			/* LUN Not Ready - Logical Unit Not Ready and is in
+			* the process of becoming ready
+			* Just retry.
+			*/
+			return ADD_TO_MLQUEUE;
 		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
 			/* LUN Not Ready - Storage firmware incompatible
 			 * Manual code synchonisation required.
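The new branch keys off the SPC additional sense code pair 04h/01h ("Logical unit is in process of becoming ready"), which is transient by definition, so requeueing rather than failing the command is the right call. Sketch of the convention, for illustration only:

if (sense_hdr->sense_key == NOT_READY &&
    sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
	return ADD_TO_MLQUEUE;	/* transient condition: retry, don't error out */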

+ 36 - 59
drivers/scsi/fcoe/fcoe.c

@@ -54,7 +54,6 @@ MODULE_LICENSE("GPL v2");
 /* fcoe host list */
 LIST_HEAD(fcoe_hostlist);
 DEFINE_RWLOCK(fcoe_hostlist_lock);
-DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
 DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
 
 /* Function Prototypes */
@@ -71,7 +70,7 @@ static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
 static int fcoe_hostlist_add(const struct fc_lport *);
 static int fcoe_hostlist_remove(const struct fc_lport *);
 
-static int fcoe_check_wait_queue(struct fc_lport *);
+static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
 static int fcoe_device_notification(struct notifier_block *, ulong, void *);
 static void fcoe_dev_setup(void);
 static void fcoe_dev_cleanup(void);
@@ -146,6 +145,7 @@ static int fcoe_lport_config(struct fc_lport *lp)
 	lp->link_up = 0;
 	lp->qfull = 0;
 	lp->max_retry_count = 3;
+	lp->max_rport_retry_count = 3;
 	lp->e_d_tov = 2 * 1000;	/* FC-FS default */
 	lp->r_a_tov = 2 * 2 * 1000;
 	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
@@ -166,6 +166,18 @@ static int fcoe_lport_config(struct fc_lport *lp)
 	return 0;
 }
 
+/**
+ * fcoe_queue_timer() - fcoe queue timer
+ * @lp: the fc_lport pointer
+ *
+ * Calls fcoe_check_wait_queue on timeout
+ *
+ */
+static void fcoe_queue_timer(ulong lp)
+{
+	fcoe_check_wait_queue((struct fc_lport *)lp, NULL);
+}
+
 /**
  * fcoe_netdev_config() - Set up netdev for SW FCoE
  * @lp : ptr to the fc_lport
@@ -236,6 +248,7 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
 	}
 	skb_queue_head_init(&fc->fcoe_pending_queue);
 	fc->fcoe_pending_queue_active = 0;
+	setup_timer(&fc->timer, fcoe_queue_timer, (unsigned long)lp);
 
 	/* setup Source Mac Address */
 	memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr,
@@ -386,6 +399,9 @@ static int fcoe_if_destroy(struct net_device *netdev)
 	/* Free existing skbs */
 	fcoe_clean_pending_queue(lp);
 
+	/* Stop the timer */
+	del_timer_sync(&fc->timer);
+
 	/* Free memory used by statistical counters */
 	fc_lport_free_stats(lp);
 
@@ -988,7 +1004,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
  */
 int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
 {
-	int wlen, rc = 0;
+	int wlen;
 	u32 crc;
 	struct ethhdr *eh;
 	struct fcoe_crc_eof *cp;
@@ -1021,8 +1037,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
 	sof = fr_sof(fp);
 	eof = fr_eof(fp);
 
-	elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ?
-		sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
+	elen = sizeof(struct ethhdr);
 	hlen = sizeof(struct fcoe_hdr);
 	tlen = sizeof(struct fcoe_crc_eof);
 	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
@@ -1107,18 +1122,9 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
 	/* send down to lld */
 	fr_dev(fp) = lp;
 	if (fc->fcoe_pending_queue.qlen)
-		rc = fcoe_check_wait_queue(lp);
-
-	if (rc == 0)
-		rc = fcoe_start_io(skb);
-
-	if (rc) {
-		spin_lock_bh(&fc->fcoe_pending_queue.lock);
-		__skb_queue_tail(&fc->fcoe_pending_queue, skb);
-		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
-		if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-			lp->qfull = 1;
-	}
+		fcoe_check_wait_queue(lp, skb);
+	else if (fcoe_start_io(skb))
+		fcoe_check_wait_queue(lp, skb);
 
 	return 0;
 }
@@ -1267,32 +1273,6 @@ int fcoe_percpu_receive_thread(void *arg)
 	return 0;
 }
 
-/**
- * fcoe_watchdog() - fcoe timer callback
- * @vp:
- *
- * This checks the pending queue length for fcoe and set lport qfull
- * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
- * fcoe_hostlist.
- *
- * Returns: 0 for success
- */
-void fcoe_watchdog(ulong vp)
-{
-	struct fcoe_softc *fc;
-
-	read_lock(&fcoe_hostlist_lock);
-	list_for_each_entry(fc, &fcoe_hostlist, list) {
-		if (fc->ctlr.lp)
-			fcoe_check_wait_queue(fc->ctlr.lp);
-	}
-	read_unlock(&fcoe_hostlist_lock);
-
-	fcoe_timer.expires = jiffies + (1 * HZ);
-	add_timer(&fcoe_timer);
-}
-
-
 /**
  * fcoe_check_wait_queue() - attempt to clear the transmit backlog
  * @lp: the fc_lport
@@ -1305,16 +1285,17 @@ void fcoe_watchdog(ulong vp)
  * The wait_queue is used when the skb transmit fails. skb will go
  * in the wait_queue which will be emptied by the timer function or
  * by the next skb transmit.
- *
- * Returns: 0 for success
  */
-static int fcoe_check_wait_queue(struct fc_lport *lp)
+static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
 {
 	struct fcoe_softc *fc = lport_priv(lp);
-	struct sk_buff *skb;
-	int rc = -1;
+	int rc;
 
 	spin_lock_bh(&fc->fcoe_pending_queue.lock);
+
+	if (skb)
+		__skb_queue_tail(&fc->fcoe_pending_queue, skb);
+
 	if (fc->fcoe_pending_queue_active)
 		goto out;
 	fc->fcoe_pending_queue_active = 1;
@@ -1340,23 +1321,26 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
 
 	if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
 		lp->qfull = 0;
+	if (fc->fcoe_pending_queue.qlen && !timer_pending(&fc->timer))
+		mod_timer(&fc->timer, jiffies + 2);
 	fc->fcoe_pending_queue_active = 0;
-	rc = fc->fcoe_pending_queue.qlen;
 out:
+	if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+		lp->qfull = 1;
 	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
-	return rc;
+	return;
 }
 
 /**
  * fcoe_dev_setup() - setup link change notification interface
  */
-static void fcoe_dev_setup()
+static void fcoe_dev_setup(void)
 {
 	register_netdevice_notifier(&fcoe_notifier);
 }
 
 /**
- * fcoe_dev_setup() - cleanup link change notification interface
+ * fcoe_dev_cleanup() - cleanup link change notification interface
  */
 static void fcoe_dev_cleanup(void)
 {
@@ -1815,10 +1799,6 @@ static int __init fcoe_init(void)
 	/* Setup link change notification */
 	fcoe_dev_setup();
 
-	setup_timer(&fcoe_timer, fcoe_watchdog, 0);
-
-	mod_timer(&fcoe_timer, jiffies + (10 * HZ));
-
 	fcoe_if_init();
 
 	return 0;
@@ -1844,9 +1824,6 @@ static void __exit fcoe_exit(void)
 
 	fcoe_dev_cleanup();
 
-	/* Stop the timer */
-	del_timer_sync(&fcoe_timer);
-
 	/* releases the associated fcoe hosts */
 	list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
 		fcoe_if_destroy(fc->real_dev);
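The net effect of the fcoe.c hunks: the single global fcoe_timer that polled every lport once a second is replaced by one timer per fcoe_softc, armed only while a transmit backlog exists. A minimal sketch of that per-instance pattern with the 2.6-era timer API (my_flush is a hypothetical callback name):

static void my_flush(unsigned long data)
{
	struct fc_lport *lp = (struct fc_lport *)data;

	/* drain the pending queue for this lport only */
	fcoe_check_wait_queue(lp, NULL);
}

/* setup: */
setup_timer(&fc->timer, my_flush, (unsigned long)lp);
/* arm only while backlogged, never re-arm a pending timer: */
if (fc->fcoe_pending_queue.qlen && !timer_pending(&fc->timer))
	mod_timer(&fc->timer, jiffies + 2);
/* teardown: */
del_timer_sync(&fc->timer);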

+ 1 - 0
drivers/scsi/fcoe/fcoe.h

@@ -61,6 +61,7 @@ struct fcoe_softc {
 	struct packet_type  fip_packet_type;
 	struct sk_buff_head fcoe_pending_queue;
 	u8	fcoe_pending_queue_active;
+	struct timer_list timer;		/* queue timer */
 	struct fcoe_ctlr ctlr;
 };
 

+ 13 - 8
drivers/scsi/fcoe/libfcoe.c

@@ -213,7 +213,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
 	sol->desc.size.fd_size = htons(fcoe_size);
 
 	skb_put(skb, sizeof(*sol));
-	skb->protocol = htons(ETH_P_802_3);
+	skb->protocol = htons(ETH_P_FIP);
 	skb_reset_mac_header(skb);
 	skb_reset_network_header(skb);
 	fip->send(fip, skb);
@@ -365,7 +365,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
 	}
 
 	skb_put(skb, len);
-	skb->protocol = htons(ETH_P_802_3);
+	skb->protocol = htons(ETH_P_FIP);
 	skb_reset_mac_header(skb);
 	skb_reset_network_header(skb);
 	fip->send(fip, skb);
@@ -424,7 +424,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
 	if (dtype != ELS_FLOGI)
 		memcpy(mac->fd_mac, fip->data_src_addr, ETH_ALEN);
 
-	skb->protocol = htons(ETH_P_802_3);
+	skb->protocol = htons(ETH_P_FIP);
 	skb_reset_mac_header(skb);
 	skb_reset_network_header(skb);
 	return 0;
@@ -447,14 +447,10 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	u16 old_xid;
 	u8 op;
 
-	if (fip->state == FIP_ST_NON_FIP)
-		return 0;
-
 	fh = (struct fc_frame_header *)skb->data;
 	op = *(u8 *)(fh + 1);
 
-	switch (op) {
-	case ELS_FLOGI:
+	if (op == ELS_FLOGI) {
 		old_xid = fip->flogi_oxid;
 		fip->flogi_oxid = ntohs(fh->fh_ox_id);
 		if (fip->state == FIP_ST_AUTO) {
@@ -466,6 +462,15 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 			fip->map_dest = 1;
 			return 0;
 		}
+		if (fip->state == FIP_ST_NON_FIP)
+			fip->map_dest = 1;
+	}
+
+	if (fip->state == FIP_ST_NON_FIP)
+		return 0;
+
+	switch (op) {
+	case ELS_FLOGI:
 		op = FIP_DT_FLOGI;
 		break;
 	case ELS_FDISC:

+ 1 - 0
drivers/scsi/fnic/fnic_main.c

@@ -680,6 +680,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 	}
 
 	lp->max_retry_count = fnic->config.flogi_retries;
+	lp->max_rport_retry_count = fnic->config.plogi_retries;
 	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
 			      FCP_SPPF_CONF_COMPL);
 	if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)

+ 3 - 2
drivers/scsi/gdth_proc.c

@@ -152,6 +152,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
                          struct Scsi_Host *host, gdth_ha_str *ha)
 {
     int size = 0,len = 0;
+    int hlen;
     off_t begin = 0,pos = 0;
     int id, i, j, k, sec, flag;
     int no_mdrv = 0, drv_no, is_mirr;
@@ -192,11 +193,11 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
     if (reserve_list[0] == 0xff)
         strcpy(hrec, "--");
     else {
-        sprintf(hrec, "%d", reserve_list[0]);
+        hlen = sprintf(hrec, "%d", reserve_list[0]);
         for (i = 1;  i < MAX_RES_ARGS; i++) {
             if (reserve_list[i] == 0xff)
                 break;
-            sprintf(hrec,"%s,%d", hrec, reserve_list[i]);
+            hlen += snprintf(hrec + hlen , 161 - hlen, ",%d", reserve_list[i]);
         }
     }
     size = sprintf(buffer+len,
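The gdth hunk fixes sprintf(hrec, "%s,%d", hrec, ...), which passes the destination buffer as one of its own sources and is undefined behaviour, by accumulating a write offset instead. The general bounded-append idiom, sketched with hypothetical names (the 161-byte buffer matches the driver's hrec; the sketch assumes the joined string fits):

static void join_ids(const unsigned char *list, int n)
{
	char buf[161];
	int len, i;

	len = sprintf(buf, "%d", list[0]);
	for (i = 1; i < n && list[i] != 0xff; i++)
		/* snprintf bounds every append to the space remaining */
		len += snprintf(buf + len, sizeof(buf) - len, ",%d", list[i]);
}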

+ 304 - 130
drivers/scsi/ibmvscsi/ibmvfc.c

@@ -110,7 +110,7 @@ static const struct {
 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
-	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" },
+	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
 
 	{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
@@ -143,6 +143,7 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *);
 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
 static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
+static void ibmvfc_npiv_logout(struct ibmvfc_host *);
 
 static const char *unknown_error = "unknown error";
 
@@ -275,7 +276,7 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
 	int fc_rsp_len = rsp->fcp_rsp_len;
 
 	if ((rsp->flags & FCP_RSP_LEN_VALID) &&
-	    ((!fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
+	    ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
 	     rsp->data.info.rsp_code))
 		return DID_ERROR << 16;
 
@@ -431,6 +432,8 @@ static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
 	case IBMVFC_TGT_ACTION_DEL_RPORT:
 		break;
 	default:
+		if (action == IBMVFC_TGT_ACTION_DEL_RPORT)
+			tgt->add_rport = 0;
 		tgt->action = action;
 		break;
 	}
@@ -475,6 +478,10 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
 		if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
 			vhost->action = action;
 		break;
+	case IBMVFC_HOST_ACTION_LOGO_WAIT:
+		if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
+			vhost->action = action;
+		break;
 	case IBMVFC_HOST_ACTION_INIT_WAIT:
 		if (vhost->action == IBMVFC_HOST_ACTION_INIT)
 			vhost->action = action;
@@ -483,7 +490,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
 		switch (vhost->action) {
 		case IBMVFC_HOST_ACTION_INIT_WAIT:
 		case IBMVFC_HOST_ACTION_NONE:
-		case IBMVFC_HOST_ACTION_TGT_ADD:
+		case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
 			vhost->action = action;
 			break;
 		default:
@@ -494,11 +501,11 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
 		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
 			vhost->action = action;
 		break;
+	case IBMVFC_HOST_ACTION_LOGO:
 	case IBMVFC_HOST_ACTION_INIT:
 	case IBMVFC_HOST_ACTION_TGT_DEL:
 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
 	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
-	case IBMVFC_HOST_ACTION_TGT_ADD:
 	case IBMVFC_HOST_ACTION_NONE:
 	default:
 		vhost->action = action;
@@ -576,7 +583,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
 		}
 
 		list_for_each_entry(tgt, &vhost->targets, queue)
-			tgt->need_login = 1;
+			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 		scsi_block_requests(vhost->host);
 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
 		vhost->job_step = ibmvfc_npiv_login;
@@ -646,6 +653,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
 
 	vhost->state = IBMVFC_NO_CRQ;
+	vhost->logged_in = 0;
 	dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
 	free_page((unsigned long)crq->msgs);
 }
@@ -692,6 +700,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
 
 	vhost->state = IBMVFC_NO_CRQ;
+	vhost->logged_in = 0;
 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
 
 	/* Clean out the queue */
@@ -807,10 +816,10 @@ static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
 }
 
 /**
- * __ibmvfc_reset_host - Reset the connection to the server (no locking)
+ * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
  * @vhost:	struct ibmvfc host to reset
  **/
-static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
+static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
 {
 	int rc;
 
@@ -826,9 +835,25 @@ static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
 }
 
 /**
- * ibmvfc_reset_host - Reset the connection to the server
+ * __ibmvfc_reset_host - Reset the connection to the server (no locking)
  * @vhost:	struct ibmvfc host to reset
  **/
+static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
+{
+	if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
+	    !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
+		scsi_block_requests(vhost->host);
+		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
+		vhost->job_step = ibmvfc_npiv_logout;
+		wake_up(&vhost->work_wait_q);
+	} else
+		ibmvfc_hard_reset_host(vhost);
+}
+
+/**
+ * ibmvfc_reset_host - Reset the connection to the server
+ * @vhost:	ibmvfc host struct
+ **/
 static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
 {
 	unsigned long flags;
@@ -842,9 +867,13 @@ static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
  * ibmvfc_retry_host_init - Retry host initialization if allowed
  * @vhost:	ibmvfc host struct
  *
+ * Returns: 1 if init will be retried / 0 if not
+ *
  **/
-static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
+static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
 {
+	int retry = 0;
+
 	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
 		vhost->delay_init = 1;
 		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
@@ -853,11 +882,14 @@ static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
 			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
 		} else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
 			__ibmvfc_reset_host(vhost);
-		else
+		else {
 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
+			retry = 1;
+		}
 	}
 
 	wake_up(&vhost->work_wait_q);
+	return retry;
 }
 
 /**
@@ -1137,8 +1169,9 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
 	login_info->partition_num = vhost->partition_number;
 	login_info->vfc_frame_version = 1;
 	login_info->fcp_version = 3;
+	login_info->flags = IBMVFC_FLUSH_ON_HALT;
 	if (vhost->client_migrated)
-		login_info->flags = IBMVFC_CLIENT_MIGRATED;
+		login_info->flags |= IBMVFC_CLIENT_MIGRATED;
 
 	login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
 	login_info->capabilities = IBMVFC_CAN_MIGRATE;
@@ -1451,6 +1484,27 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
 		    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
 }
 
+/**
+ * ibmvfc_relogin - Log back into the specified device
+ * @sdev:	scsi device struct
+ *
+ **/
+static void ibmvfc_relogin(struct scsi_device *sdev)
+{
+	struct ibmvfc_host *vhost = shost_priv(sdev->host);
+	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+	struct ibmvfc_target *tgt;
+
+	list_for_each_entry(tgt, &vhost->targets, queue) {
+		if (rport == tgt->rport) {
+			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+			break;
+		}
+	}
+
+	ibmvfc_reinit_host(vhost);
+}
+
 /**
  * ibmvfc_scsi_done - Handle responses from commands
  * @evt:	ibmvfc event to be handled
@@ -1483,7 +1537,7 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
 			if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
 				memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
 			if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED))
-				ibmvfc_reinit_host(evt->vhost);
+				ibmvfc_relogin(cmnd->device);
 
 			if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
 				cmnd->result = (DID_ERROR << 16);
@@ -2148,13 +2202,31 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
 				struct ibmvfc_host *vhost)
 {
 	const char *desc = ibmvfc_get_ae_desc(crq->event);
+	struct ibmvfc_target *tgt;
 
 	ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx,"
 		   " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name);
 
 	switch (crq->event) {
-	case IBMVFC_AE_LINK_UP:
 	case IBMVFC_AE_RESUME:
+		switch (crq->link_state) {
+		case IBMVFC_AE_LS_LINK_DOWN:
+			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+			break;
+		case IBMVFC_AE_LS_LINK_DEAD:
+			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+			break;
+		case IBMVFC_AE_LS_LINK_UP:
+		case IBMVFC_AE_LS_LINK_BOUNCED:
+		default:
+			vhost->events_to_log |= IBMVFC_AE_LINKUP;
+			vhost->delay_init = 1;
+			__ibmvfc_reset_host(vhost);
+			break;
+		};
+
+		break;
+	case IBMVFC_AE_LINK_UP:
 		vhost->events_to_log |= IBMVFC_AE_LINKUP;
 		vhost->delay_init = 1;
 		__ibmvfc_reset_host(vhost);
@@ -2168,9 +2240,23 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
 	case IBMVFC_AE_SCN_NPORT:
 	case IBMVFC_AE_SCN_GROUP:
 		vhost->events_to_log |= IBMVFC_AE_RSCN;
+		ibmvfc_reinit_host(vhost);
+		break;
 	case IBMVFC_AE_ELS_LOGO:
 	case IBMVFC_AE_ELS_PRLO:
 	case IBMVFC_AE_ELS_PLOGI:
+		list_for_each_entry(tgt, &vhost->targets, queue) {
+			if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
+				break;
+			if (crq->scsi_id && tgt->scsi_id != crq->scsi_id)
+				continue;
+			if (crq->wwpn && tgt->ids.port_name != crq->wwpn)
+				continue;
+			if (crq->node_name && tgt->ids.node_name != crq->node_name)
+				continue;
+			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+		}
+
 		ibmvfc_reinit_host(vhost);
 		break;
 	case IBMVFC_AE_LINK_DOWN:
@@ -2222,6 +2308,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
 		return;
 	case IBMVFC_CRQ_XPORT_EVENT:
 		vhost->state = IBMVFC_NO_CRQ;
+		vhost->logged_in = 0;
 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
 		if (crq->format == IBMVFC_PARTITION_MIGRATED) {
 			/* We need to re-setup the interpartition connection */
@@ -2299,7 +2386,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
 		done = 1;
 	}
 
-	if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE)
+	if (vhost->scan_complete)
 		done = 1;
 	spin_unlock_irqrestore(shost->host_lock, flags);
 	return done;
@@ -2434,14 +2521,6 @@ static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
 			vhost->login_buf->resp.partition_name);
 }
 
-static struct device_attribute ibmvfc_host_partition_name = {
-	.attr = {
-		.name = "partition_name",
-		.mode = S_IRUGO,
-	},
-	.show = ibmvfc_show_host_partition_name,
-};
-
 static ssize_t ibmvfc_show_host_device_name(struct device *dev,
 					    struct device_attribute *attr, char *buf)
 {
@@ -2452,14 +2531,6 @@ static ssize_t ibmvfc_show_host_device_name(struct device *dev,
 			vhost->login_buf->resp.device_name);
 }
 
-static struct device_attribute ibmvfc_host_device_name = {
-	.attr = {
-		.name = "device_name",
-		.mode = S_IRUGO,
-	},
-	.show = ibmvfc_show_host_device_name,
-};
-
 static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
 					 struct device_attribute *attr, char *buf)
 {
@@ -2470,14 +2541,6 @@ static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
 			vhost->login_buf->resp.port_loc_code);
 }
 
-static struct device_attribute ibmvfc_host_loc_code = {
-	.attr = {
-		.name = "port_loc_code",
-		.mode = S_IRUGO,
-	},
-	.show = ibmvfc_show_host_loc_code,
-};
-
 static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
 					 struct device_attribute *attr, char *buf)
 {
@@ -2488,14 +2551,6 @@ static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
 			vhost->login_buf->resp.drc_name);
 }
 
-static struct device_attribute ibmvfc_host_drc_name = {
-	.attr = {
-		.name = "drc_name",
-		.mode = S_IRUGO,
-	},
-	.show = ibmvfc_show_host_drc_name,
-};
-
 static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
 					     struct device_attribute *attr, char *buf)
 {
@@ -2504,13 +2559,13 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
 	return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
 }
 
-static struct device_attribute ibmvfc_host_npiv_version = {
-	.attr = {
-		.name = "npiv_version",
-		.mode = S_IRUGO,
-	},
-	.show = ibmvfc_show_host_npiv_version,
-};
+static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
+					     struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ibmvfc_host *vhost = shost_priv(shost);
+	return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities);
+}
 
 /**
  * ibmvfc_show_log_level - Show the adapter's error logging level
@@ -2556,14 +2611,14 @@ static ssize_t ibmvfc_store_log_level(struct device *dev,
 	return strlen(buf);
 }
 
-static struct device_attribute ibmvfc_log_level_attr = {
-	.attr = {
-		.name =		"log_level",
-		.mode =		S_IRUGO | S_IWUSR,
-	},
-	.show = ibmvfc_show_log_level,
-	.store = ibmvfc_store_log_level
-};
+static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
+static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
+static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
+static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
+static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
+static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
+static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
+		   ibmvfc_show_log_level, ibmvfc_store_log_level);
 
 #ifdef CONFIG_SCSI_IBMVFC_TRACE
 /**
@@ -2612,12 +2667,13 @@ static struct bin_attribute ibmvfc_trace_attr = {
 #endif
 
 static struct device_attribute *ibmvfc_attrs[] = {
-	&ibmvfc_host_partition_name,
-	&ibmvfc_host_device_name,
-	&ibmvfc_host_loc_code,
-	&ibmvfc_host_drc_name,
-	&ibmvfc_host_npiv_version,
-	&ibmvfc_log_level_attr,
+	&dev_attr_partition_name,
+	&dev_attr_device_name,
+	&dev_attr_port_loc_code,
+	&dev_attr_drc_name,
+	&dev_attr_npiv_version,
+	&dev_attr_capabilities,
+	&dev_attr_log_level,
 	NULL
 };
 
@@ -2774,15 +2830,19 @@ static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
  * @tgt:		ibmvfc target struct
  * @job_step:	initialization job step
  *
+ * Returns: 1 if step will be retried / 0 if not
+ *
  **/
-static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
+static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
 				  void (*job_step) (struct ibmvfc_target *))
 {
 	if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 		wake_up(&tgt->vhost->work_wait_q);
+		return 0;
 	} else
 		ibmvfc_init_tgt(tgt, job_step);
+	return 1;
 }
 
 /* Defined in FC-LS */
@@ -2831,7 +2891,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
 	struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
 	struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
 	u32 status = rsp->common.status;
-	int index;
+	int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
 
 	vhost->discovery_threads--;
 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -2850,7 +2910,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
 						tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
 					if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC)
 						tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
-					ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT);
+					tgt->add_rport = 1;
 				} else
 					ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 			} else if (prli_rsp[index].retry)
@@ -2867,13 +2927,14 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
 		break;
 	case IBMVFC_MAD_FAILED:
 	default:
-		tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
-			ibmvfc_get_cmd_error(rsp->status, rsp->error),
-			rsp->status, rsp->error, status);
 		if (ibmvfc_retry_cmd(rsp->status, rsp->error))
-			ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
+			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
 		else
 			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+
+		tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
+			ibmvfc_get_cmd_error(rsp->status, rsp->error),
+			rsp->status, rsp->error, status);
 		break;
 	};
 
@@ -2932,6 +2993,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
 	struct ibmvfc_host *vhost = evt->vhost;
 	struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
 	u32 status = rsp->common.status;
+	int level = IBMVFC_DEFAULT_LOG_LEVEL;
 
 	vhost->discovery_threads--;
 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -2960,15 +3022,15 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
 		break;
 	case IBMVFC_MAD_FAILED:
 	default:
-		tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
-			ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
-			ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
-			ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
-
 		if (ibmvfc_retry_cmd(rsp->status, rsp->error))
-			ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
+			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
 		else
 			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+
+		tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
+			ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
+			ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
+			ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
 		break;
 	};
 
@@ -3129,13 +3191,13 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
 	case IBMVFC_MAD_SUCCESS:
 		tgt_dbg(tgt, "ADISC succeeded\n");
 		if (ibmvfc_adisc_needs_plogi(mad, tgt))
-			tgt->need_login = 1;
+			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 		break;
 	case IBMVFC_MAD_DRIVER_FAILED:
 		break;
 	case IBMVFC_MAD_FAILED:
 	default:
-		tgt->need_login = 1;
+		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 		fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16;
 		fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8;
 		tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
@@ -3322,6 +3384,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
 	struct ibmvfc_host *vhost = evt->vhost;
 	struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
 	u32 status = rsp->common.status;
+	int level = IBMVFC_DEFAULT_LOG_LEVEL;
 
 	vhost->discovery_threads--;
 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -3341,19 +3404,19 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
 		break;
 	case IBMVFC_MAD_FAILED:
 	default:
-		tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
-			ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
-			ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
-			ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
-
 		if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
 		    rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ &&
 		    rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG)
 			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 		else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
-			ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
+			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
 		else
 			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+
+		tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
+			ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
+			ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
+			ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
 		break;
 	};
 
@@ -3420,7 +3483,7 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
 	}
 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
 
-	tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL);
+	tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
 	if (!tgt) {
 		dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n",
 			scsi_id);
@@ -3472,6 +3535,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
 	struct ibmvfc_host *vhost = evt->vhost;
 	struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
 	u32 mad_status = rsp->common.status;
+	int level = IBMVFC_DEFAULT_LOG_LEVEL;
 
 	switch (mad_status) {
 	case IBMVFC_MAD_SUCCESS:
@@ -3480,9 +3544,9 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
 		break;
 	case IBMVFC_MAD_FAILED:
-		dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n",
-			ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
-		ibmvfc_retry_host_init(vhost);
+		level += ibmvfc_retry_host_init(vhost);
+		ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
+			   ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
 		break;
 	case IBMVFC_MAD_DRIVER_FAILED:
 		break;
@@ -3534,18 +3598,19 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
 	u32 mad_status = evt->xfer_iu->npiv_login.common.status;
 	struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
 	unsigned int npiv_max_sectors;
+	int level = IBMVFC_DEFAULT_LOG_LEVEL;
 
 	switch (mad_status) {
 	case IBMVFC_MAD_SUCCESS:
 		ibmvfc_free_event(evt);
 		break;
 	case IBMVFC_MAD_FAILED:
-		dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n",
-			ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
 		if (ibmvfc_retry_cmd(rsp->status, rsp->error))
-			ibmvfc_retry_host_init(vhost);
+			level += ibmvfc_retry_host_init(vhost);
 		else
 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+		ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
+			   ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
 		ibmvfc_free_event(evt);
 		return;
 	case IBMVFC_MAD_CRQ_ERROR:
@@ -3578,6 +3643,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
 		return;
 	}
 
+	vhost->logged_in = 1;
 	npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS);
 	dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
 		 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
@@ -3635,6 +3701,65 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
 };
 
+/**
+ * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
+ * @vhost:		ibmvfc host struct
+ *
+ **/
+static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
+{
+	struct ibmvfc_host *vhost = evt->vhost;
+	u32 mad_status = evt->xfer_iu->npiv_logout.common.status;
+
+	ibmvfc_free_event(evt);
+
+	switch (mad_status) {
+	case IBMVFC_MAD_SUCCESS:
+		if (list_empty(&vhost->sent) &&
+		    vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
+			ibmvfc_init_host(vhost, 0);
+			return;
+		}
+		break;
+	case IBMVFC_MAD_FAILED:
+	case IBMVFC_MAD_NOT_SUPPORTED:
+	case IBMVFC_MAD_CRQ_ERROR:
+	case IBMVFC_MAD_DRIVER_FAILED:
+	default:
+		ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
+		break;
+	}
+
+	ibmvfc_hard_reset_host(vhost);
+}
+
+/**
+ * ibmvfc_npiv_logout - Issue an NPIV Logout
+ * @vhost:		ibmvfc host struct
+ *
+ **/
+static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
+{
+	struct ibmvfc_npiv_logout_mad *mad;
+	struct ibmvfc_event *evt;
+
+	evt = ibmvfc_get_event(vhost);
+	ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
+
+	mad = &evt->iu.npiv_logout;
+	memset(mad, 0, sizeof(*mad));
+	mad->common.version = 1;
+	mad->common.opcode = IBMVFC_NPIV_LOGOUT;
+	mad->common.length = sizeof(struct ibmvfc_npiv_logout_mad);
+
+	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
+
+	if (!ibmvfc_send_event(evt, vhost, default_timeout))
+		ibmvfc_dbg(vhost, "Sent NPIV logout\n");
+	else
+		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+}
+
 /**
  * ibmvfc_dev_init_to_do - Is there target initialization work to do?
  * @vhost:		ibmvfc host struct
@@ -3671,6 +3796,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
 	switch (vhost->action) {
 	case IBMVFC_HOST_ACTION_NONE:
 	case IBMVFC_HOST_ACTION_INIT_WAIT:
+	case IBMVFC_HOST_ACTION_LOGO_WAIT:
 		return 0;
 	case IBMVFC_HOST_ACTION_TGT_INIT:
 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
@@ -3683,9 +3809,9 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
 			if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
 				return 0;
 		return 1;
+	case IBMVFC_HOST_ACTION_LOGO:
 	case IBMVFC_HOST_ACTION_INIT:
 	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
-	case IBMVFC_HOST_ACTION_TGT_ADD:
 	case IBMVFC_HOST_ACTION_TGT_DEL:
 	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
 	case IBMVFC_HOST_ACTION_QUERY:
@@ -3740,25 +3866,26 @@ static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
 static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
 {
 	struct ibmvfc_host *vhost = tgt->vhost;
-	struct fc_rport *rport = tgt->rport;
+	struct fc_rport *rport;
 	unsigned long flags;
 
-	if (rport) {
-		tgt_dbg(tgt, "Setting rport roles\n");
-		fc_remote_port_rolechg(rport, tgt->ids.roles);
-		spin_lock_irqsave(vhost->host->host_lock, flags);
-		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+	tgt_dbg(tgt, "Adding rport\n");
+	rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+
+	if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
+		tgt_dbg(tgt, "Deleting rport\n");
+		list_del(&tgt->queue);
 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
+		fc_remote_port_delete(rport);
+		del_timer_sync(&tgt->timer);
+		kref_put(&tgt->kref, ibmvfc_release_tgt);
 		return;
 	}
 
-	tgt_dbg(tgt, "Adding rport\n");
-	rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
-	spin_lock_irqsave(vhost->host->host_lock, flags);
-	tgt->rport = rport;
-	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
 	if (rport) {
 		tgt_dbg(tgt, "rport add succeeded\n");
+		tgt->rport = rport;
 		rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
 		rport->supported_classes = 0;
 		tgt->target_id = rport->scsi_target_id;
@@ -3789,8 +3916,12 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 	vhost->events_to_log = 0;
 	switch (vhost->action) {
 	case IBMVFC_HOST_ACTION_NONE:
+	case IBMVFC_HOST_ACTION_LOGO_WAIT:
 	case IBMVFC_HOST_ACTION_INIT_WAIT:
 		break;
+	case IBMVFC_HOST_ACTION_LOGO:
+		vhost->job_step(vhost);
+		break;
 	case IBMVFC_HOST_ACTION_INIT:
 		BUG_ON(vhost->state != IBMVFC_INITIALIZING);
 		if (vhost->delay_init) {
@@ -3836,11 +3967,21 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 
 		if (vhost->state == IBMVFC_INITIALIZING) {
 			if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
-				ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
-				ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
-				vhost->init_retries = 0;
-				spin_unlock_irqrestore(vhost->host->host_lock, flags);
-				scsi_unblock_requests(vhost->host);
+				if (vhost->reinit) {
+					vhost->reinit = 0;
+					scsi_block_requests(vhost->host);
+					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+					spin_unlock_irqrestore(vhost->host->host_lock, flags);
+				} else {
+					ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
+					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+					wake_up(&vhost->init_wait_q);
+					schedule_work(&vhost->rport_add_work_q);
+					vhost->init_retries = 0;
+					spin_unlock_irqrestore(vhost->host->host_lock, flags);
+					scsi_unblock_requests(vhost->host);
+				}
+
 				return;
 			} else {
 				ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
@@ -3871,24 +4012,6 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 		if (!ibmvfc_dev_init_to_do(vhost))
 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
 		break;
-	case IBMVFC_HOST_ACTION_TGT_ADD:
-		list_for_each_entry(tgt, &vhost->targets, queue) {
-			if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) {
-				spin_unlock_irqrestore(vhost->host->host_lock, flags);
-				ibmvfc_tgt_add_rport(tgt);
-				return;
-			}
-		}
-
-		if (vhost->reinit && !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
-			vhost->reinit = 0;
-			scsi_block_requests(vhost->host);
-			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
-		} else {
-			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
-			wake_up(&vhost->init_wait_q);
-		}
-		break;
 	default:
 		break;
 	};
@@ -4117,6 +4240,56 @@ nomem:
 	return -ENOMEM;
 }
 
+/**
+ * ibmvfc_rport_add_thread - Worker thread for rport adds
+ * @work:	work struct
+ *
+ **/
+static void ibmvfc_rport_add_thread(struct work_struct *work)
+{
+	struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
+						 rport_add_work_q);
+	struct ibmvfc_target *tgt;
+	struct fc_rport *rport;
+	unsigned long flags;
+	int did_work;
+
+	ENTER;
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	do {
+		did_work = 0;
+		if (vhost->state != IBMVFC_ACTIVE)
+			break;
+
+		list_for_each_entry(tgt, &vhost->targets, queue) {
+			if (tgt->add_rport) {
+				did_work = 1;
+				tgt->add_rport = 0;
+				kref_get(&tgt->kref);
+				rport = tgt->rport;
+				if (!rport) {
+					spin_unlock_irqrestore(vhost->host->host_lock, flags);
+					ibmvfc_tgt_add_rport(tgt);
+				} else if (get_device(&rport->dev)) {
+					spin_unlock_irqrestore(vhost->host->host_lock, flags);
+					tgt_dbg(tgt, "Setting rport roles\n");
+					fc_remote_port_rolechg(rport, tgt->ids.roles);
+					put_device(&rport->dev);
+				}
+
+				kref_put(&tgt->kref, ibmvfc_release_tgt);
+				spin_lock_irqsave(vhost->host->host_lock, flags);
+				break;
+			}
+		}
+	} while(did_work);
+
+	if (vhost->state == IBMVFC_ACTIVE)
+		vhost->scan_complete = 1;
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+	LEAVE;
+}
+
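
The worker above illustrates a common pattern: never call a sleeping function such as fc_remote_port_add() with the host lock held, so the lock is dropped around each slow add and the list walk restarted afterwards, since the target list may have changed in the meantime. A minimal user-space sketch of that shape, with a pthread mutex standing in for the host lock and all names purely illustrative:

/* rescan.c - drop-lock-and-rescan loop, modelled on ibmvfc_rport_add_thread */
#include <pthread.h>
#include <stdio.h>

#define NTGT 4

struct target { int add_pending; };

static struct target targets[NTGT] = { {1}, {0}, {1}, {1} };
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void slow_add(struct target *t)	/* stands in for fc_remote_port_add() */
{
	printf("adding target %ld\n", (long)(t - targets));
}

static void add_work(void)
{
	int did_work;

	pthread_mutex_lock(&lock);
	do {
		did_work = 0;
		for (int i = 0; i < NTGT; i++) {
			if (!targets[i].add_pending)
				continue;
			targets[i].add_pending = 0;
			did_work = 1;
			pthread_mutex_unlock(&lock);	/* never sleep under the lock */
			slow_add(&targets[i]);
			pthread_mutex_lock(&lock);
			break;	/* the list may have changed: restart the walk */
		}
	} while (did_work);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	add_work();
	return 0;
}
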
 /**
  * ibmvfc_probe - Adapter hot plug add entry point
  * @vdev:	vio device struct
@@ -4160,6 +4333,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	strcpy(vhost->partition_name, "UNKNOWN");
 	init_waitqueue_head(&vhost->work_wait_q);
 	init_waitqueue_head(&vhost->init_wait_q);
+	INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
 
 	if ((rc = ibmvfc_alloc_mem(vhost)))
 		goto free_scsi_host;

+ 33 - 7
drivers/scsi/ibmvscsi/ibmvfc.h

@@ -29,8 +29,8 @@
 #include "viosrp.h"
 
 #define IBMVFC_NAME	"ibmvfc"
-#define IBMVFC_DRIVER_VERSION		"1.0.5"
-#define IBMVFC_DRIVER_DATE		"(March 19, 2009)"
+#define IBMVFC_DRIVER_VERSION		"1.0.6"
+#define IBMVFC_DRIVER_DATE		"(May 28, 2009)"
 
 #define IBMVFC_DEFAULT_TIMEOUT	60
 #define IBMVFC_ADISC_CANCEL_TIMEOUT	45
@@ -57,9 +57,10 @@
  * Ensure we have resources for ERP and initialization:
  * 1 for ERP
  * 1 for initialization
+ * 1 for NPIV Logout
  * 2 for each discovery thread
  */
-#define IBMVFC_NUM_INTERNAL_REQ	(1 + 1 + (disc_threads * 2))
+#define IBMVFC_NUM_INTERNAL_REQ	(1 + 1 + 1 + (disc_threads * 2))
 
 #define IBMVFC_MAD_SUCCESS		0x00
 #define IBMVFC_MAD_NOT_SUPPORTED	0xF1
@@ -127,6 +128,7 @@ enum ibmvfc_mad_types {
 	IBMVFC_IMPLICIT_LOGOUT	= 0x0040,
 	IBMVFC_PASSTHRU		= 0x0200,
 	IBMVFC_TMF_MAD		= 0x0100,
+	IBMVFC_NPIV_LOGOUT	= 0x0800,
 };
 
 struct ibmvfc_mad_common {
@@ -143,6 +145,10 @@ struct ibmvfc_npiv_login_mad {
 	struct srp_direct_buf buffer;
 }__attribute__((packed, aligned (8)));
 
+struct ibmvfc_npiv_logout_mad {
+	struct ibmvfc_mad_common common;
+}__attribute__((packed, aligned (8)));
+
 #define IBMVFC_MAX_NAME 256
 
 struct ibmvfc_npiv_login {
@@ -201,7 +207,8 @@ struct ibmvfc_npiv_login_resp {
 #define IBMVFC_NATIVE_FC		0x01
 #define IBMVFC_CAN_FLUSH_ON_HALT	0x08
 	u32 reserved;
-	u64 capabilites;
+	u64 capabilities;
+#define IBMVFC_CAN_FLUSH_ON_HALT	0x08
 	u32 max_cmds;
 	u32 scsi_id_sz;
 	u64 max_dma_len;
@@ -541,9 +548,17 @@ struct ibmvfc_crq_queue {
 	dma_addr_t msg_token;
 };
 
+enum ibmvfc_ae_link_state {
+	IBMVFC_AE_LS_LINK_UP		= 0x01,
+	IBMVFC_AE_LS_LINK_BOUNCED	= 0x02,
+	IBMVFC_AE_LS_LINK_DOWN		= 0x04,
+	IBMVFC_AE_LS_LINK_DEAD		= 0x08,
+};
+
 struct ibmvfc_async_crq {
 	volatile u8 valid;
-	u8 pad[3];
+	u8 link_state;
+	u8 pad[2];
 	u32 pad2;
 	volatile u64 event;
 	volatile u64 scsi_id;
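
Note how the new link_state byte is carved out of the old 3-byte pad rather than appended, so the async CRQ entry keeps its exact firmware-visible layout. A compile-time sketch of that idea, with stand-in types rather than the real CRQ definition:

/* layout.c - carving a field out of padding keeps sizeof unchanged */
#include <stdint.h>

struct crq_old {
	volatile uint8_t valid;
	uint8_t pad[3];
	uint32_t pad2;
	volatile uint64_t event;
};

struct crq_new {
	volatile uint8_t valid;
	uint8_t link_state;	/* one byte taken from the old pad[3] */
	uint8_t pad[2];
	uint32_t pad2;
	volatile uint64_t event;
};

_Static_assert(sizeof(struct crq_old) == sizeof(struct crq_new),
	       "firmware-visible layout must not change");

int main(void) { return 0; }
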
@@ -561,6 +576,7 @@ struct ibmvfc_async_crq_queue {
 union ibmvfc_iu {
 	struct ibmvfc_mad_common mad_common;
 	struct ibmvfc_npiv_login_mad npiv_login;
+	struct ibmvfc_npiv_logout_mad npiv_logout;
 	struct ibmvfc_discover_targets discover_targets;
 	struct ibmvfc_port_login plogi;
 	struct ibmvfc_process_login prli;
@@ -575,7 +591,6 @@ enum ibmvfc_target_action {
 	IBMVFC_TGT_ACTION_NONE = 0,
 	IBMVFC_TGT_ACTION_INIT,
 	IBMVFC_TGT_ACTION_INIT_WAIT,
-	IBMVFC_TGT_ACTION_ADD_RPORT,
 	IBMVFC_TGT_ACTION_DEL_RPORT,
 };
 
@@ -588,6 +603,7 @@ struct ibmvfc_target {
 	int target_id;
 	enum ibmvfc_target_action action;
 	int need_login;
+	int add_rport;
 	int init_retries;
 	u32 cancel_key;
 	struct ibmvfc_service_parms service_parms;
@@ -627,6 +643,8 @@ struct ibmvfc_event_pool {
 
 enum ibmvfc_host_action {
 	IBMVFC_HOST_ACTION_NONE = 0,
+	IBMVFC_HOST_ACTION_LOGO,
+	IBMVFC_HOST_ACTION_LOGO_WAIT,
 	IBMVFC_HOST_ACTION_INIT,
 	IBMVFC_HOST_ACTION_INIT_WAIT,
 	IBMVFC_HOST_ACTION_QUERY,
@@ -635,7 +653,6 @@ enum ibmvfc_host_action {
 	IBMVFC_HOST_ACTION_ALLOC_TGTS,
 	IBMVFC_HOST_ACTION_TGT_INIT,
 	IBMVFC_HOST_ACTION_TGT_DEL_FAILED,
-	IBMVFC_HOST_ACTION_TGT_ADD,
 };
 
 enum ibmvfc_host_state {
@@ -682,6 +699,8 @@ struct ibmvfc_host {
 	int client_migrated;
 	int reinit;
 	int delay_init;
+	int scan_complete;
+	int logged_in;
 	int events_to_log;
 #define IBMVFC_AE_LINKUP	0x0001
 #define IBMVFC_AE_LINKDOWN	0x0002
@@ -692,6 +711,7 @@ struct ibmvfc_host {
 	void (*job_step) (struct ibmvfc_host *);
 	struct task_struct *work_thread;
 	struct tasklet_struct tasklet;
+	struct work_struct rport_add_work_q;
 	wait_queue_head_t init_wait_q;
 	wait_queue_head_t work_wait_q;
 };
@@ -707,6 +727,12 @@ struct ibmvfc_host {
 #define tgt_err(t, fmt, ...)		\
 	dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
 
+#define tgt_log(t, level, fmt, ...) \
+	do { \
+		if ((t)->vhost->log_level >= level) \
+			tgt_err(t, fmt, ##__VA_ARGS__); \
+	} while (0)
+
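
The tgt_log macro added here follows the usual kernel idiom for level-gated logging: the body is wrapped in do { } while (0) so the macro expands to a single statement even after an unbraced if. A user-space sketch of the same idiom (names illustrative; ##__VA_ARGS__ is the GNU C extension the kernel macro also relies on):

/* tgtlog.c - level-gated logging macro */
#include <stdio.h>

static int log_level = 2;

#define tgt_log(level, fmt, ...) \
	do { \
		if (log_level >= (level)) \
			fprintf(stderr, fmt, ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	if (log_level)
		tgt_log(1, "target %d: %s\n", 3, "login failed");	/* expands to one statement */
	tgt_log(5, "suppressed at this level\n");
	return 0;
}
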
 #define ibmvfc_dbg(vhost, ...) \
 	DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__))
 

+ 346 - 117
drivers/scsi/ibmvscsi/ibmvscsi.c

@@ -70,6 +70,7 @@
 #include <linux/moduleparam.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
+#include <linux/of.h>
 #include <asm/firmware.h>
 #include <asm/vio.h>
 #include <asm/firmware.h>
@@ -87,9 +88,15 @@
  */
 static int max_id = 64;
 static int max_channel = 3;
-static int init_timeout = 5;
+static int init_timeout = 300;
+static int login_timeout = 60;
+static int info_timeout = 30;
+static int abort_timeout = 60;
+static int reset_timeout = 60;
 static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
 static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
+static int fast_fail = 1;
+static int client_reserve = 1;
 
 static struct scsi_transport_template *ibmvscsi_transport_template;
 
@@ -110,6 +117,10 @@ module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
 module_param_named(max_requests, max_requests, int, S_IRUGO);
 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
+module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
+module_param_named(client_reserve, client_reserve, int, S_IRUGO );
+MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
 
 /* ------------------------------------------------------------
  * Routines for the event pool and event structs
@@ -781,105 +792,53 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 /* ------------------------------------------------------------
  * Routines for driver initialization
  */
+
 /**
- * adapter_info_rsp: - Handle response to MAD adapter info request
- * @evt_struct:	srp_event_struct with the response
+ * map_persist_bufs: - Pre-map persistent data for adapter logins
+ * @hostdata:   ibmvscsi_host_data of host
  *
- * Used as a "done" callback by when sending adapter_info. Gets called
- * by ibmvscsi_handle_crq()
-*/
-static void adapter_info_rsp(struct srp_event_struct *evt_struct)
+ * Map the capabilities and adapter info DMA buffers to avoid runtime failures.
+ * Return 1 on error, 0 on success.
+ */
+static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
 {
-	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
-	dma_unmap_single(hostdata->dev,
-			 evt_struct->iu.mad.adapter_info.buffer,
-			 evt_struct->iu.mad.adapter_info.common.length,
-			 DMA_BIDIRECTIONAL);
 
-		dev_err(hostdata->dev, "error %d getting adapter info\n",
-			evt_struct->xfer_iu->mad.adapter_info.common.status);
-	} else {
-		dev_info(hostdata->dev, "host srp version: %s, "
-			 "host partition %s (%d), OS %d, max io %u\n",
-			 hostdata->madapter_info.srp_version,
-			 hostdata->madapter_info.partition_name,
-			 hostdata->madapter_info.partition_number,
-			 hostdata->madapter_info.os_type,
-			 hostdata->madapter_info.port_max_txu[0]);
-		
-		if (hostdata->madapter_info.port_max_txu[0]) 
-			hostdata->host->max_sectors = 
-				hostdata->madapter_info.port_max_txu[0] >> 9;
-		
-		if (hostdata->madapter_info.os_type == 3 &&
-		    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
-			dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
-				hostdata->madapter_info.srp_version);
-			dev_err(hostdata->dev, "limiting scatterlists to %d\n",
-				MAX_INDIRECT_BUFS);
-			hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
-		}
+	hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
+					     sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
+
+	if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) {
+		dev_err(hostdata->dev, "Unable to map capabilities buffer!\n");
+		return 1;
 	}
+
+	hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
+						     &hostdata->madapter_info,
+						     sizeof(hostdata->madapter_info),
+						     DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
+		dev_err(hostdata->dev, "Unable to map adapter info buffer!\n");
+		dma_unmap_single(hostdata->dev, hostdata->caps_addr,
+				 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
+		return 1;
+	}
+
+	return 0;
 }
 
 /**
- * send_mad_adapter_info: - Sends the mad adapter info request
- *      and stores the result so it can be retrieved with
- *      sysfs.  We COULD consider causing a failure if the
- *      returned SRP version doesn't match ours.
- * @hostdata:	ibmvscsi_host_data of host
- * 
- * Returns zero if successful.
-*/
-static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
+ * unmap_persist_bufs: - Unmap persistent data needed for adapter logins
+ * @hostdata:   ibmvscsi_host_data of host
+ *
+ * Unmap the capabilities and adapter info DMA buffers
+ */
+static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata)
 {
-	struct viosrp_adapter_info *req;
-	struct srp_event_struct *evt_struct;
-	unsigned long flags;
-	dma_addr_t addr;
-
-	evt_struct = get_event_struct(&hostdata->pool);
-	if (!evt_struct) {
-		dev_err(hostdata->dev,
-			"couldn't allocate an event for ADAPTER_INFO_REQ!\n");
-		return;
-	}
-
-	init_event_struct(evt_struct,
-			  adapter_info_rsp,
-			  VIOSRP_MAD_FORMAT,
-			  init_timeout);
-	
-	req = &evt_struct->iu.mad.adapter_info;
-	memset(req, 0x00, sizeof(*req));
-	
-	req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
-	req->common.length = sizeof(hostdata->madapter_info);
-	req->buffer = addr = dma_map_single(hostdata->dev,
-					    &hostdata->madapter_info,
-					    sizeof(hostdata->madapter_info),
-					    DMA_BIDIRECTIONAL);
+	dma_unmap_single(hostdata->dev, hostdata->caps_addr,
+			 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
 
-	if (dma_mapping_error(hostdata->dev, req->buffer)) {
-		if (!firmware_has_feature(FW_FEATURE_CMO))
-			dev_err(hostdata->dev,
-			        "Unable to map request_buffer for "
-			        "adapter_info!\n");
-		free_event_struct(&hostdata->pool, evt_struct);
-		return;
-	}
-	
-	spin_lock_irqsave(hostdata->host->host_lock, flags);
-	if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) {
-		dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
-		dma_unmap_single(hostdata->dev,
-				 addr,
-				 sizeof(hostdata->madapter_info),
-				 DMA_BIDIRECTIONAL);
-	}
-	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-};
+	dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr,
+			 sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL);
+}
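
map_persist_bufs()/unmap_persist_bufs() above are a textbook acquire/release pair: the second mapping's failure path unwinds the first, so a failed probe leaves nothing mapped, and teardown releases everything in one place. A minimal sketch of that shape, with malloc()/free() standing in for dma_map_single()/dma_unmap_single():

/* persist.c - paired acquire/release with unwind on partial failure */
#include <stdio.h>
#include <stdlib.h>

struct host { void *caps; void *adapter_info; };

static int map_persist_bufs(struct host *h)
{
	h->caps = malloc(64);
	if (!h->caps) {
		fprintf(stderr, "Unable to map capabilities buffer!\n");
		return 1;
	}
	h->adapter_info = malloc(128);
	if (!h->adapter_info) {
		fprintf(stderr, "Unable to map adapter info buffer!\n");
		free(h->caps);	/* unwind the first acquisition */
		return 1;
	}
	return 0;
}

static void unmap_persist_bufs(struct host *h)
{
	free(h->adapter_info);
	free(h->caps);
}

int main(void)
{
	struct host h;
	if (!map_persist_bufs(&h))
		unmap_persist_bufs(&h);
	return 0;
}
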
 
 /**
  * login_rsp: - Handle response to SRP login request
@@ -909,9 +868,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
 	}
 
 	dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
-
-	if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
-		dev_err(hostdata->dev, "Invalid request_limit.\n");
+	hostdata->client_migrated = 0;
 
 	/* Now we know what the real request-limit is.
 	 * This value is set rather than added to request_limit because
@@ -922,15 +879,12 @@ static void login_rsp(struct srp_event_struct *evt_struct)
 
 	/* If we had any pending I/Os, kick them */
 	scsi_unblock_requests(hostdata->host);
-
-	send_mad_adapter_info(hostdata);
-	return;
 }
 
 /**
  * send_srp_login: - Sends the srp login
  * @hostdata:	ibmvscsi_host_data of host
- * 
+ *
  * Returns zero if successful.
 */
 static int send_srp_login(struct ibmvscsi_host_data *hostdata)
@@ -939,22 +893,17 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
 	unsigned long flags;
 	struct srp_login_req *login;
 	struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
-	if (!evt_struct) {
-		dev_err(hostdata->dev, "couldn't allocate an event for login req!\n");
-		return FAILED;
-	}
 
-	init_event_struct(evt_struct,
-			  login_rsp,
-			  VIOSRP_SRP_FORMAT,
-			  init_timeout);
+	BUG_ON(!evt_struct);
+	init_event_struct(evt_struct, login_rsp,
+			  VIOSRP_SRP_FORMAT, login_timeout);
 
 	login = &evt_struct->iu.srp.login_req;
-	memset(login, 0x00, sizeof(struct srp_login_req));
+	memset(login, 0, sizeof(*login));
 	login->opcode = SRP_LOGIN_REQ;
 	login->req_it_iu_len = sizeof(union srp_iu);
 	login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
-	
+
 	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	/* Start out with a request limit of 0, since this is negotiated in
 	 * the login request we are just sending and login requests always
@@ -962,12 +911,240 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
 	 */
 	atomic_set(&hostdata->request_limit, 0);
 
-	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
+	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 	dev_info(hostdata->dev, "sent SRP login\n");
 	return rc;
 };
 
+/**
+ * capabilities_rsp: - Handle response to MAD adapter capabilities request
+ * @evt_struct:	srp_event_struct with the response
+ *
+ * Used as a "done" callback when sending the capabilities request.
+ */
+static void capabilities_rsp(struct srp_event_struct *evt_struct)
+{
+	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+
+	if (evt_struct->xfer_iu->mad.capabilities.common.status) {
+		dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
+			evt_struct->xfer_iu->mad.capabilities.common.status);
+	} else {
+		if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP)
+			dev_info(hostdata->dev, "Partition migration not supported\n");
+
+		if (client_reserve) {
+			if (hostdata->caps.reserve.common.server_support ==
+			    SERVER_SUPPORTS_CAP)
+				dev_info(hostdata->dev, "Client reserve enabled\n");
+			else
+				dev_info(hostdata->dev, "Client reserve not supported\n");
+		}
+	}
+
+	send_srp_login(hostdata);
+}
+
+/**
+ * send_mad_capabilities: - Sends the mad capabilities request
+ *      and stores the result so it can be retrieved with sysfs.
+ * @hostdata:	ibmvscsi_host_data of host
+ */
+static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
+{
+	struct viosrp_capabilities *req;
+	struct srp_event_struct *evt_struct;
+	unsigned long flags;
+	struct device_node *of_node = hostdata->dev->archdata.of_node;
+	const char *location;
+
+	evt_struct = get_event_struct(&hostdata->pool);
+	BUG_ON(!evt_struct);
+
+	init_event_struct(evt_struct, capabilities_rsp,
+			  VIOSRP_MAD_FORMAT, info_timeout);
+
+	req = &evt_struct->iu.mad.capabilities;
+	memset(req, 0, sizeof(*req));
+
+	hostdata->caps.flags = CAP_LIST_SUPPORTED;
+	if (hostdata->client_migrated)
+		hostdata->caps.flags |= CLIENT_MIGRATED;
+
+	strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
+		sizeof(hostdata->caps.name));
+	hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0';
+
+	location = of_get_property(of_node, "ibm,loc-code", NULL);
+	location = location ? location : dev_name(hostdata->dev);
+	strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
+	hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
+
+	req->common.type = VIOSRP_CAPABILITIES_TYPE;
+	req->buffer = hostdata->caps_addr;
+
+	hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES;
+	hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration);
+	hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP;
+	hostdata->caps.migration.ecl = 1;
+
+	if (client_reserve) {
+		hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES;
+		hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve);
+		hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP;
+		hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2;
+		req->common.length = sizeof(hostdata->caps);
+	} else
+		req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve);
+
+	spin_lock_irqsave(hostdata->host->host_lock, flags);
+	if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
+		dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n");
+	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+};
+
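
The length computation at the end of send_mad_capabilities() is worth noting: optional trailing capabilities are excluded from the request simply by shrinking the advertised common.length, which only works because the optional entry is the last member of the struct. A stand-in sketch of the trick (illustrative types, not the viosrp wire format):

/* caplen.c - advertising a prefix of a capability struct via its length */
#include <stdio.h>

struct cap_migration { int ecl; };
struct cap_reserve   { int type; };

struct caps {
	unsigned flags;
	struct cap_migration migration;
	struct cap_reserve reserve;	/* optional: must stay last */
};

int main(void)
{
	struct caps c = { 0 };
	unsigned long with_reserve    = sizeof(c);
	unsigned long without_reserve = sizeof(c) - sizeof(c.reserve);

	printf("advertise %lu bytes with reserve, %lu without\n",
	       with_reserve, without_reserve);
	return 0;
}
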
+/**
+ * fast_fail_rsp: - Handle response to MAD enable fast fail
+ * @evt_struct:	srp_event_struct with the response
+ *
+ * Used as a "done" callback when sending enable fast fail. Gets called
+ * by ibmvscsi_handle_crq()
+ */
+static void fast_fail_rsp(struct srp_event_struct *evt_struct)
+{
+	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+	u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status;
+
+	if (status == VIOSRP_MAD_NOT_SUPPORTED)
+		dev_err(hostdata->dev, "fast_fail not supported in server\n");
+	else if (status == VIOSRP_MAD_FAILED)
+		dev_err(hostdata->dev, "fast_fail request failed\n");
+	else if (status != VIOSRP_MAD_SUCCESS)
+		dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status);
+
+	send_mad_capabilities(hostdata);
+}
+
+/**
+ * enable_fast_fail - Attempt to enable fast fail for the adapter
+ * @hostdata:	ibmvscsi_host_data of host
+ *
+ * Returns zero if successful.
+ */
+static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
+{
+	int rc;
+	unsigned long flags;
+	struct viosrp_fast_fail *fast_fail_mad;
+	struct srp_event_struct *evt_struct;
+
+	if (!fast_fail) {
+		send_mad_capabilities(hostdata);
+		return 0;
+	}
+
+	evt_struct = get_event_struct(&hostdata->pool);
+	BUG_ON(!evt_struct);
+
+	init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout);
+
+	fast_fail_mad = &evt_struct->iu.mad.fast_fail;
+	memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
+	fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL;
+	fast_fail_mad->common.length = sizeof(*fast_fail_mad);
+
+	spin_lock_irqsave(hostdata->host->host_lock, flags);
+	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
+	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+	return rc;
+}
+
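
enable_fast_fail() is one link in a callback chain: adapter_info_rsp() calls enable_fast_fail(), whose response handler calls send_mad_capabilities(), whose handler finally calls send_srp_login(), so the login sequence advances one asynchronous step per CRQ response. A synchronous sketch of the chaining (stand-ins only; the real steps complete via interrupt-driven callbacks):

/* chain.c - initialization as a chain of continuation calls */
#include <stdio.h>

static void send_srp_login(void)        { printf("3: srp login\n"); }
static void send_mad_capabilities(void) { printf("2: capabilities\n"); send_srp_login(); }

static void enable_fast_fail(int fast_fail)
{
	if (!fast_fail) {	/* a step may be skipped, but the chain continues */
		send_mad_capabilities();
		return;
	}
	printf("1: fast fail\n");
	send_mad_capabilities();
}

static void send_mad_adapter_info(void) { printf("0: adapter info\n"); enable_fast_fail(1); }

int main(void)
{
	send_mad_adapter_info();	/* what init_adapter() kicks off */
	return 0;
}
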
+/**
+ * adapter_info_rsp: - Handle response to MAD adapter info request
+ * @evt_struct:	srp_event_struct with the response
+ *
+ * Used as a "done" callback when sending adapter_info. Gets called
+ * by ibmvscsi_handle_crq()
+*/
+static void adapter_info_rsp(struct srp_event_struct *evt_struct)
+{
+	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+
+	if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
+		dev_err(hostdata->dev, "error %d getting adapter info\n",
+			evt_struct->xfer_iu->mad.adapter_info.common.status);
+	} else {
+		dev_info(hostdata->dev, "host srp version: %s, "
+			 "host partition %s (%d), OS %d, max io %u\n",
+			 hostdata->madapter_info.srp_version,
+			 hostdata->madapter_info.partition_name,
+			 hostdata->madapter_info.partition_number,
+			 hostdata->madapter_info.os_type,
+			 hostdata->madapter_info.port_max_txu[0]);
+		
+		if (hostdata->madapter_info.port_max_txu[0]) 
+			hostdata->host->max_sectors = 
+				hostdata->madapter_info.port_max_txu[0] >> 9;
+		
+		if (hostdata->madapter_info.os_type == 3 &&
+		    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
+			dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
+				hostdata->madapter_info.srp_version);
+			dev_err(hostdata->dev, "limiting scatterlists to %d\n",
+				MAX_INDIRECT_BUFS);
+			hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
+		}
+	}
+
+	enable_fast_fail(hostdata);
+}
+
+/**
+ * send_mad_adapter_info: - Sends the mad adapter info request
+ *      and stores the result so it can be retrieved with
+ *      sysfs.  We COULD consider causing a failure if the
+ *      returned SRP version doesn't match ours.
+ * @hostdata:	ibmvscsi_host_data of host
+ * 
+ * Returns zero if successful.
+*/
+static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
+{
+	struct viosrp_adapter_info *req;
+	struct srp_event_struct *evt_struct;
+	unsigned long flags;
+
+	evt_struct = get_event_struct(&hostdata->pool);
+	BUG_ON(!evt_struct);
+
+	init_event_struct(evt_struct,
+			  adapter_info_rsp,
+			  VIOSRP_MAD_FORMAT,
+			  info_timeout);
+	
+	req = &evt_struct->iu.mad.adapter_info;
+	memset(req, 0x00, sizeof(*req));
+	
+	req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
+	req->common.length = sizeof(hostdata->madapter_info);
+	req->buffer = hostdata->adapter_info_addr;
+
+	spin_lock_irqsave(hostdata->host->host_lock, flags);
+	if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
+		dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
+	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+};
+
+/**
+ * init_adapter: Start virtual adapter initialization sequence
+ *
+ */
+static void init_adapter(struct ibmvscsi_host_data *hostdata)
+{
+	send_mad_adapter_info(hostdata);
+}
+
 /**
  * sync_completion: Signal that a synchronous command has completed
  * Note that after returning from this call, the evt_struct is freed.
@@ -1029,7 +1206,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 		init_event_struct(evt,
 				  sync_completion,
 				  VIOSRP_SRP_FORMAT,
-				  init_timeout);
+				  abort_timeout);
 
 		tsk_mgmt = &evt->iu.srp.tsk_mgmt;
 	
@@ -1043,7 +1220,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 		evt->sync_srp = &srp_rsp;
 
 		init_completion(&evt->comp);
-		rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
+		rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2);
 
 		if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
 			break;
@@ -1152,7 +1329,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 		init_event_struct(evt,
 				  sync_completion,
 				  VIOSRP_SRP_FORMAT,
-				  init_timeout);
+				  reset_timeout);
 
 		tsk_mgmt = &evt->iu.srp.tsk_mgmt;
 
@@ -1165,7 +1342,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 		evt->sync_srp = &srp_rsp;
 
 		init_completion(&evt->comp);
-		rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
+		rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2);
 
 		if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
 			break;
@@ -1281,7 +1458,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 			if ((rc = ibmvscsi_ops->send_crq(hostdata,
 							 0xC002000000000000LL, 0)) == 0) {
 				/* Now login */
-				send_srp_login(hostdata);
+				init_adapter(hostdata);
 			} else {
 				dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
 			}
@@ -1291,7 +1468,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 			dev_info(hostdata->dev, "partner initialization complete\n");
 
 			/* Now login */
-			send_srp_login(hostdata);
+			init_adapter(hostdata);
 			break;
 		default:
 			dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
@@ -1303,6 +1480,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 		if (crq->format == 0x06) {
 			/* We need to re-setup the interpartition connection */
 			dev_info(hostdata->dev, "Re-enabling adapter!\n");
+			hostdata->client_migrated = 1;
 			purge_requests(hostdata, DID_REQUEUE);
 			if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue,
 							      hostdata)) ||
@@ -1397,7 +1575,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
 	init_event_struct(evt_struct,
 			  sync_completion,
 			  VIOSRP_MAD_FORMAT,
-			  init_timeout);
+			  info_timeout);
 
 	host_config = &evt_struct->iu.mad.host_config;
 
@@ -1419,7 +1597,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
 
 	init_completion(&evt_struct->comp);
 	spin_lock_irqsave(hostdata->host->host_lock, flags);
-	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
+	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 	if (rc == 0)
 		wait_for_completion(&evt_struct->comp);
@@ -1444,7 +1622,7 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
 	spin_lock_irqsave(shost->host_lock, lock_flags);
 	if (sdev->type == TYPE_DISK) {
 		sdev->allow_restart = 1;
-		blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
+		blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
 	}
 	scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
 	spin_unlock_irqrestore(shost->host_lock, lock_flags);
@@ -1471,6 +1649,46 @@ static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
 /* ------------------------------------------------------------
  * sysfs attributes
  */
+static ssize_t show_host_vhost_loc(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+	int len;
+
+	len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n",
+		       hostdata->caps.loc);
+	return len;
+}
+
+static struct device_attribute ibmvscsi_host_vhost_loc = {
+	.attr = {
+		 .name = "vhost_loc",
+		 .mode = S_IRUGO,
+		 },
+	.show = show_host_vhost_loc,
+};
+
+static ssize_t show_host_vhost_name(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+	int len;
+
+	len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n",
+		       hostdata->caps.name);
+	return len;
+}
+
+static struct device_attribute ibmvscsi_host_vhost_name = {
+	.attr = {
+		 .name = "vhost_name",
+		 .mode = S_IRUGO,
+		 },
+	.show = show_host_vhost_name,
+};
+
 static ssize_t show_host_srp_version(struct device *dev,
 				     struct device_attribute *attr, char *buf)
 {
@@ -1594,6 +1812,8 @@ static struct device_attribute ibmvscsi_host_config = {
 };
 
 static struct device_attribute *ibmvscsi_attrs[] = {
+	&ibmvscsi_host_vhost_loc,
+	&ibmvscsi_host_vhost_name,
 	&ibmvscsi_host_srp_version,
 	&ibmvscsi_host_partition_name,
 	&ibmvscsi_host_partition_number,
@@ -1674,6 +1894,11 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	atomic_set(&hostdata->request_limit, -1);
 	hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
 
+	if (map_persist_bufs(hostdata)) {
+		dev_err(&vdev->dev, "couldn't map persistent buffers\n");
+		goto persist_bufs_failed;
+	}
+
 	rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
 	if (rc != 0 && rc != H_RESOURCE) {
 		dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
@@ -1687,6 +1912,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	host->max_lun = 8;
 	host->max_id = max_id;
 	host->max_channel = max_channel;
+	host->max_cmd_len = 16;
 
 	if (scsi_add_host(hostdata->host, hostdata->dev))
 		goto add_host_failed;
@@ -1733,6 +1959,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
       init_pool_failed:
 	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
       init_crq_failed:
+	unmap_persist_bufs(hostdata);
+      persist_bufs_failed:
 	scsi_host_put(host);
       scsi_host_alloc_failed:
 	return -1;
@@ -1741,6 +1969,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 static int ibmvscsi_remove(struct vio_dev *vdev)
 {
 	struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
+	unmap_persist_bufs(hostdata);
 	release_event_pool(&hostdata->pool, hostdata);
 	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
 					max_events);

+ 4 - 0
drivers/scsi/ibmvscsi/ibmvscsi.h

@@ -90,6 +90,7 @@ struct event_pool {
 /* all driver data associated with a host adapter */
 struct ibmvscsi_host_data {
 	atomic_t request_limit;
+	int client_migrated;
 	struct device *dev;
 	struct event_pool pool;
 	struct crq_queue queue;
@@ -97,6 +98,9 @@ struct ibmvscsi_host_data {
 	struct list_head sent;
 	struct Scsi_Host *host;
 	struct mad_adapter_info_data madapter_info;
+	struct capabilities caps;
+	dma_addr_t caps_addr;
+	dma_addr_t adapter_info_addr;
 };
 
 /* routines for managing a command/response queue */

+ 67 - 1
drivers/scsi/ibmvscsi/viosrp.h

@@ -37,6 +37,7 @@
 
 #define SRP_VERSION "16.a"
 #define SRP_MAX_IU_LEN	256
+#define SRP_MAX_LOC_LEN 32
 
 union srp_iu {
 	struct srp_login_req login_req;
@@ -86,7 +87,37 @@ enum viosrp_mad_types {
 	VIOSRP_EMPTY_IU_TYPE = 0x01,
 	VIOSRP_ERROR_LOG_TYPE = 0x02,
 	VIOSRP_ADAPTER_INFO_TYPE = 0x03,
-	VIOSRP_HOST_CONFIG_TYPE = 0x04
+	VIOSRP_HOST_CONFIG_TYPE = 0x04,
+	VIOSRP_CAPABILITIES_TYPE = 0x05,
+	VIOSRP_ENABLE_FAST_FAIL = 0x08,
+};
+
+enum viosrp_mad_status {
+	VIOSRP_MAD_SUCCESS = 0x00,
+	VIOSRP_MAD_NOT_SUPPORTED = 0xF1,
+	VIOSRP_MAD_FAILED = 0xF7,
+};
+
+enum viosrp_capability_type {
+	MIGRATION_CAPABILITIES = 0x01,
+	RESERVATION_CAPABILITIES = 0x02,
+};
+
+enum viosrp_capability_support {
+	SERVER_DOES_NOT_SUPPORTS_CAP = 0x0,
+	SERVER_SUPPORTS_CAP = 0x01,
+	SERVER_CAP_DATA = 0x02,
+};
+
+enum viosrp_reserve_type {
+	CLIENT_RESERVE_SCSI_2 = 0x01,
+};
+
+enum viosrp_capability_flag {
+	CLIENT_MIGRATED = 0x01,
+	CLIENT_RECONNECT = 0x02,
+	CAP_LIST_SUPPORTED = 0x04,
+	CAP_LIST_DATA = 0x08,
 };
 
 /* 
@@ -127,11 +158,46 @@ struct viosrp_host_config {
 	u64 buffer;
 };
 
+struct viosrp_fast_fail {
+	struct mad_common common;
+};
+
+struct viosrp_capabilities {
+	struct mad_common common;
+	u64 buffer;
+};
+
+struct mad_capability_common {
+	u32 cap_type;
+	u16 length;
+	u16 server_support;
+};
+
+struct mad_reserve_cap {
+	struct mad_capability_common common;
+	u32 type;
+};
+
+struct mad_migration_cap {
+	struct mad_capability_common common;
+	u32 ecl;
+};
+
+struct capabilities{
+	u32 flags;
+	char name[SRP_MAX_LOC_LEN];
+	char loc[SRP_MAX_LOC_LEN];
+	struct mad_migration_cap migration;
+	struct mad_reserve_cap reserve;
+};
+
 union mad_iu {
 	struct viosrp_empty_iu empty_iu;
 	struct viosrp_error_log error_log;
 	struct viosrp_adapter_info adapter_info;
 	struct viosrp_host_config host_config;
+	struct viosrp_fast_fail fast_fail;
+	struct viosrp_capabilities capabilities;
 };
 
 union viosrp_iu {

+ 3 - 2
drivers/scsi/ipr.c

@@ -7003,6 +7003,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
 		ioa_cfg->sdt_state = ABORT_DUMP;
 	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
 	ioa_cfg->in_ioa_bringdown = 1;
+	ioa_cfg->allow_cmds = 0;
 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 }
@@ -7688,7 +7689,7 @@ static void __ipr_remove(struct pci_dev *pdev)
  * Return value:
  * 	none
  **/
-static void ipr_remove(struct pci_dev *pdev)
+static void __devexit ipr_remove(struct pci_dev *pdev)
 {
 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
 
@@ -7864,7 +7865,7 @@ static struct pci_driver ipr_driver = {
 	.name = IPR_NAME,
 	.id_table = ipr_pci_table,
 	.probe = ipr_probe,
-	.remove = ipr_remove,
+	.remove = __devexit_p(ipr_remove),
 	.shutdown = ipr_shutdown,
 	.err_handler = &ipr_err_handler,
 };

+ 4 - 0
drivers/scsi/libfc/fc_exch.c

@@ -1159,6 +1159,10 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 		atomic_inc(&mp->stats.xid_not_found);
 		goto out;
 	}
+	if (ep->esb_stat & ESB_ST_COMPLETE) {
+		atomic_inc(&mp->stats.xid_not_found);
+		goto out;
+	}
 	if (ep->rxid == FC_XID_UNKNOWN)
 		ep->rxid = ntohs(fh->fh_rx_id);
 	if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {

+ 1 - 1
drivers/scsi/libfc/fc_fcp.c

@@ -1896,7 +1896,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
 		sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
 		break;
 	case FC_CMD_ABORTED:
-		sc_cmd->result = (DID_ABORT << 16) | fsp->io_status;
+		sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
 		break;
 	case FC_CMD_TIME_OUT:
 		sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;

+ 3 - 3
drivers/scsi/libfc/fc_rport.c

@@ -478,7 +478,7 @@ static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp)
 	if (PTR_ERR(fp) == -FC_EX_CLOSED)
 		return fc_rport_error(rport, fp);
 
-	if (rdata->retries < rdata->local_port->max_retry_count) {
+	if (rdata->retries < rdata->local_port->max_rport_retry_count) {
 		FC_DEBUG_RPORT("Error %ld in state %s, retrying\n",
 			       PTR_ERR(fp), fc_rport_state(rport));
 		rdata->retries++;
@@ -1330,7 +1330,7 @@ int fc_rport_init(struct fc_lport *lport)
 }
 EXPORT_SYMBOL(fc_rport_init);
 
-int fc_setup_rport()
+int fc_setup_rport(void)
 {
 	rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
 	if (!rport_event_queue)
@@ -1339,7 +1339,7 @@ int fc_setup_rport()
 }
 EXPORT_SYMBOL(fc_setup_rport);
 
-void fc_destroy_rport()
+void fc_destroy_rport(void)
 {
 	destroy_workqueue(rport_event_queue);
 }
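
The fc_setup_rport()/fc_destroy_rport() fix is a pure C correctness change: unlike C++, "int f()" in C declares a function with unspecified parameters, so the compiler accepts any argument list at a call site; only "int f(void)" is a real no-argument prototype. A small illustration:

/* proto.c - () vs (void) in C */
int setup_old();	/* unspecified parameters: any call compiles */
int setup_new(void);	/* true prototype: wrong calls are rejected */

int setup_old() { return 0; }
int setup_new(void) { return 0; }

int main(void)
{
	setup_old(1, 2);	/* accepted without a diagnostic, silently wrong */
	return setup_new();	/* setup_new(1) would be a compile error */
}
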

+ 241 - 227
drivers/scsi/libiscsi.c

@@ -81,7 +81,8 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
 	struct Scsi_Host *shost = conn->session->host;
 	struct iscsi_host *ihost = shost_priv(shost);
 
-	queue_work(ihost->workq, &conn->xmitwork);
+	if (ihost->workq)
+		queue_work(ihost->workq, &conn->xmitwork);
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
 
@@ -109,11 +110,9 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
 		 * if the window closed with IO queued, then kick the
 		 * xmit thread
 		 */
-		if (!list_empty(&session->leadconn->xmitqueue) ||
-		    !list_empty(&session->leadconn->mgmtqueue)) {
-			if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
-				iscsi_conn_queue_work(session->leadconn);
-		}
+		if (!list_empty(&session->leadconn->cmdqueue) ||
+		    !list_empty(&session->leadconn->mgmtqueue))
+			iscsi_conn_queue_work(session->leadconn);
 	}
 }
 EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
@@ -257,9 +256,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 	itt_t itt;
 	int rc;
 
-	rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
-	if (rc)
-		return rc;
+	if (conn->session->tt->alloc_pdu) {
+		rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
+		if (rc)
+			return rc;
+	}
 	hdr = (struct iscsi_cmd *) task->hdr;
 	itt = hdr->itt;
 	memset(hdr, 0, sizeof(*hdr));
@@ -364,7 +365,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 		return -EIO;
 
 	task->state = ISCSI_TASK_RUNNING;
-	list_move_tail(&task->running, &conn->run_list);
 
 	conn->scsicmd_pdus_cnt++;
 	ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
@@ -380,26 +380,25 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 }
 
 /**
- * iscsi_complete_command - finish a task
+ * iscsi_free_task - free a task
  * @task: iscsi cmd task
  *
  * Must be called with session lock.
 * This function returns the scsi command to scsi-ml or cleans
 * up mgmt tasks then returns the task to the pool.
 */
-static void iscsi_complete_command(struct iscsi_task *task)
+static void iscsi_free_task(struct iscsi_task *task)
 {
 	struct iscsi_conn *conn = task->conn;
 	struct iscsi_session *session = conn->session;
 	struct scsi_cmnd *sc = task->sc;
 
+	ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n",
+			  task->itt, task->state, task->sc);
+
 	session->tt->cleanup_task(task);
-	list_del_init(&task->running);
-	task->state = ISCSI_TASK_COMPLETED;
+	task->state = ISCSI_TASK_FREE;
 	task->sc = NULL;
-
-	if (conn->task == task)
-		conn->task = NULL;
 	/*
 	 * login task is preallocated so do not free
 	 */
@@ -408,9 +407,6 @@ static void iscsi_complete_command(struct iscsi_task *task)
 
 	__kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
 
-	if (conn->ping_task == task)
-		conn->ping_task = NULL;
-
 	if (sc) {
 		task->sc = NULL;
 		/* SCSI eh reuses commands to verify us */
@@ -433,7 +429,7 @@ EXPORT_SYMBOL_GPL(__iscsi_get_task);
 static void __iscsi_put_task(struct iscsi_task *task)
 {
 	if (atomic_dec_and_test(&task->refcount))
-		iscsi_complete_command(task);
+		iscsi_free_task(task);
 }
 
 void iscsi_put_task(struct iscsi_task *task)
@@ -446,26 +442,74 @@ void iscsi_put_task(struct iscsi_task *task)
 }
 EXPORT_SYMBOL_GPL(iscsi_put_task);
 
+/**
+ * iscsi_complete_task - finish a task
+ * @task: iscsi cmd task
+ * @state: state to complete task with
+ *
+ * Must be called with session lock.
+ */
+static void iscsi_complete_task(struct iscsi_task *task, int state)
+{
+	struct iscsi_conn *conn = task->conn;
+
+	ISCSI_DBG_SESSION(conn->session,
+			  "complete task itt 0x%x state %d sc %p\n",
+			  task->itt, task->state, task->sc);
+	if (task->state == ISCSI_TASK_COMPLETED ||
+	    task->state == ISCSI_TASK_ABRT_TMF ||
+	    task->state == ISCSI_TASK_ABRT_SESS_RECOV)
+		return;
+	WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
+	task->state = state;
+
+	if (!list_empty(&task->running))
+		list_del_init(&task->running);
+
+	if (conn->task == task)
+		conn->task = NULL;
+
+	if (conn->ping_task == task)
+		conn->ping_task = NULL;
+
+	/* release get from queueing */
+	__iscsi_put_task(task);
+}
+
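
iscsi_complete_task() above records a terminal state exactly once and then drops the reference taken at queueing time; a second completion attempt (for example a TMF response racing with the command response) falls through the early return. A plain-C sketch of that idempotent-completion shape, with an integer refcount standing in for the kernel's atomic kref:

/* complete.c - complete-once task state machine */
#include <stdio.h>

enum state { TASK_PENDING, TASK_RUNNING, TASK_COMPLETED, TASK_ABRT_TMF };

struct task { enum state state; int refcount; };

static void free_task(struct task *t) { printf("task freed\n"); }

static void put_task(struct task *t)
{
	if (--t->refcount == 0)
		free_task(t);
}

static void complete_task(struct task *t, enum state state)
{
	if (t->state == TASK_COMPLETED || t->state == TASK_ABRT_TMF)
		return;		/* already completed: second completion is a no-op */
	t->state = state;
	put_task(t);		/* release the reference taken at queueing */
}

int main(void)
{
	struct task t = { TASK_RUNNING, 1 };
	complete_task(&t, TASK_COMPLETED);
	complete_task(&t, TASK_ABRT_TMF);	/* racing completion: ignored */
	return 0;
}
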
 /*
- * session lock must be held
+ * session lock must be held and if not called for a task that is
+ * still pending or from the xmit thread, then xmit thread must
+ * be suspended.
  */
-static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
-			 int err)
+static void fail_scsi_task(struct iscsi_task *task, int err)
 {
+	struct iscsi_conn *conn = task->conn;
 	struct scsi_cmnd *sc;
+	int state;
 
+	/*
+	 * if a command completes and we get a successful tmf response
+	 * we will hit this because the scsi eh abort code does not take
+	 * a ref to the task.
+	 */
 	sc = task->sc;
 	if (!sc)
 		return;
 
-	if (task->state == ISCSI_TASK_PENDING)
+	if (task->state == ISCSI_TASK_PENDING) {
 		/*
 		 * cmd never made it to the xmit thread, so we should not count
 		 * the cmd in the sequencing
 		 */
 		conn->session->queued_cmdsn--;
+		/* it was never sent so just complete like normal */
+		state = ISCSI_TASK_COMPLETED;
+	} else if (err == DID_TRANSPORT_DISRUPTED)
+		state = ISCSI_TASK_ABRT_SESS_RECOV;
+	else
+		state = ISCSI_TASK_ABRT_TMF;
 
-	sc->result = err;
+	sc->result = err << 16;
 	if (!scsi_bidi_cmnd(sc))
 		scsi_set_resid(sc, scsi_bufflen(sc));
 	else {
@@ -473,10 +517,7 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
 		scsi_in(sc)->resid = scsi_in(sc)->length;
 	}
 
-	if (conn->task == task)
-		conn->task = NULL;
-	/* release ref from queuecommand */
-	__iscsi_put_task(task);
+	iscsi_complete_task(task, state);
 }
 
 static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
@@ -516,7 +557,6 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
 		session->state = ISCSI_STATE_LOGGING_OUT;
 
 	task->state = ISCSI_TASK_RUNNING;
-	list_move_tail(&task->running, &conn->mgmt_run_list);
 	ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x "
 			  "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK,
 			  hdr->itt, task->data_count);
@@ -528,6 +568,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		      char *data, uint32_t data_size)
 {
 	struct iscsi_session *session = conn->session;
+	struct iscsi_host *ihost = shost_priv(session->host);
 	struct iscsi_task *task;
 	itt_t itt;
 
@@ -544,6 +585,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		 */
 		task = conn->login_task;
 	else {
+		if (session->state != ISCSI_STATE_LOGGED_IN)
+			return NULL;
+
 		BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
 		BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
 
@@ -559,6 +603,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 	atomic_set(&task->refcount, 1);
 	task->conn = conn;
 	task->sc = NULL;
+	INIT_LIST_HEAD(&task->running);
+	task->state = ISCSI_TASK_PENDING;
 
 	if (data_size) {
 		memcpy(task->data, data, data_size);
@@ -566,11 +612,14 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 	} else
 		task->data_count = 0;
 
-	if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
-		iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
-				 "pdu for mgmt task.\n");
-		goto requeue_task;
+	if (conn->session->tt->alloc_pdu) {
+		if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
+			iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
+					 "pdu for mgmt task.\n");
+			goto free_task;
+		}
 	}
+
 	itt = task->hdr->itt;
 	task->hdr_len = sizeof(struct iscsi_hdr);
 	memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
@@ -583,30 +632,22 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 						   task->conn->session->age);
 						   task->conn->session->age);
 	}
 	}
 
 
-	INIT_LIST_HEAD(&task->running);
-	list_add_tail(&task->running, &conn->mgmtqueue);
-
-	if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+	if (!ihost->workq) {
 		if (iscsi_prep_mgmt_task(conn, task))
 		if (iscsi_prep_mgmt_task(conn, task))
 			goto free_task;
 			goto free_task;
 
 
 		if (session->tt->xmit_task(task))
 		if (session->tt->xmit_task(task))
 			goto free_task;
 			goto free_task;
-
-	} else
+	} else {
+		list_add_tail(&task->running, &conn->mgmtqueue);
 		iscsi_conn_queue_work(conn);
 		iscsi_conn_queue_work(conn);
+	}
 
 
 	return task;
 	return task;
 
 
 free_task:
 free_task:
 	__iscsi_put_task(task);
 	__iscsi_put_task(task);
 	return NULL;
 	return NULL;
-
-requeue_task:
-	if (task != conn->login_task)
-		__kfifo_put(session->cmdpool.queue, (void*)&task,
-			    sizeof(void*));
-	return NULL;
 }
 }
 
 
 int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
 int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
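The rewritten tail of __iscsi_conn_send_pdu() above switches the dispatch test from the CAP_DATA_PATH_OFFLOAD transport capability to the presence of a per-host xmit workqueue: hosts without a workqueue (offload data paths) prep and transmit the management PDU inline, everything else queues the task on conn->mgmtqueue and kicks the worker. A minimal self-contained C sketch of that shape — the types and helpers are illustrative stand-ins, not the real libiscsi objects:

/* sketch_dispatch.c: inline-vs-worker dispatch, mirroring the hunk above */
#include <stdio.h>

struct sketch_task { int prepped, sent, queued; };
struct sketch_host { int has_workq; };	/* stands in for ihost->workq */

static int prep(struct sketch_task *t) { t->prepped = 1; return 0; }
static int xmit(struct sketch_task *t) { t->sent = 1; return 0; }
static void queue_for_worker(struct sketch_task *t) { t->queued = 1; }

static int dispatch(struct sketch_host *h, struct sketch_task *t)
{
	if (!h->has_workq) {
		/* offload path: no xmit thread, so send from this context */
		if (prep(t) || xmit(t))
			return -1;	/* caller drops its task reference */
	} else {
		/* software path: defer to the per-host xmit worker */
		queue_for_worker(t);
	}
	return 0;
}

int main(void)
{
	struct sketch_host offload = { 0 }, swhost = { 1 };
	struct sketch_task a = { 0, 0, 0 }, b = { 0, 0, 0 };

	dispatch(&offload, &a);	/* a.sent == 1, a.queued == 0 */
	dispatch(&swhost, &b);	/* b.queued == 1, b.sent == 0 */
	printf("a: sent=%d queued=%d, b: sent=%d queued=%d\n",
	       a.sent, a.queued, b.sent, b.queued);
	return 0;
}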
@@ -701,11 +742,10 @@ invalid_datalen:
 			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
 	}
 out:
-	ISCSI_DBG_SESSION(session, "done [sc %p res %d itt 0x%x]\n",
+	ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n",
 			  sc, sc->result, task->itt);
 	conn->scsirsp_pdus_cnt++;
-
-	__iscsi_put_task(task);
+	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
 }
 
 /**
@@ -724,6 +764,7 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 	if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
 		return;
 
+	iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr);
 	sc->result = (DID_OK << 16) | rhdr->cmd_status;
 	conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
 	if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
@@ -738,8 +779,11 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
 	}
 
+	ISCSI_DBG_SESSION(conn->session, "data in with status done "
+			  "[sc %p res %d itt 0x%x]\n",
+			  sc, sc->result, task->itt);
 	conn->scsirsp_pdus_cnt++;
-	__iscsi_put_task(task);
+	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
 }
 
 static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
@@ -823,7 +867,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 *
 * The session lock must be held.
 */
-static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
 {
 	struct iscsi_session *session = conn->session;
 	int i;
@@ -840,6 +884,7 @@ static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
 
 	return session->cmds[i];
 }
+EXPORT_SYMBOL_GPL(iscsi_itt_to_task);
 
 /**
  * __iscsi_complete_pdu - complete pdu
@@ -959,7 +1004,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		}
 
 		iscsi_tmf_rsp(conn, hdr);
-		__iscsi_put_task(task);
+		iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
 		break;
 	case ISCSI_OP_NOOP_IN:
 		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
@@ -977,7 +1022,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		goto recv_pdu;
 
 	mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
-	__iscsi_put_task(task);
+	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
 		break;
 	default:
 		rc = ISCSI_ERR_BAD_OPCODE;
@@ -989,7 +1034,7 @@ out:
 recv_pdu:
 	if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
 		rc = ISCSI_ERR_CONN_FAILED;
-	__iscsi_put_task(task);
+	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
@@ -1166,7 +1211,12 @@ void iscsi_requeue_task(struct iscsi_task *task)
 {
 	struct iscsi_conn *conn = task->conn;
 
-	list_move_tail(&task->running, &conn->requeue);
+	/*
+	 * this may be on the requeue list already if the xmit_task callout
+	 * is handling the r2ts while we are adding new ones
+	 */
+	if (list_empty(&task->running))
+		list_add_tail(&task->running, &conn->requeue);
 	iscsi_conn_queue_work(conn);
 }
 EXPORT_SYMBOL_GPL(iscsi_requeue_task);
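The list_empty() guard above relies on an invariant the rest of this patch establishes: tasks are initialized with INIT_LIST_HEAD() and always unlinked with list_del_init(), so a node that is on no queue points at itself and list_empty(&task->running) doubles as a cheap membership test. A self-contained toy version of that idiom (a hand-rolled circular list, not the kernel's <linux/list.h>):

#include <assert.h>

/* Minimal circular list node, mirroring struct list_head */
struct node { struct node *next, *prev; };

static void init(struct node *n) { n->next = n->prev = n; }
static int  empty(const struct node *n) { return n->next == n; }

static void add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev; n->next = head;
	head->prev->next = n; head->prev = n;
}

static void del_init(struct node *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	init(n);	/* self-linked again: empty(n) is true */
}

int main(void)
{
	struct node head, task;
	init(&head); init(&task);

	/* iscsi_requeue_task()'s guard: only add if not already queued */
	if (empty(&task))
		add_tail(&task, &head);
	assert(!empty(&task));

	del_init(&task);	/* the xmit worker's list_del_init() */
	assert(empty(&task));	/* safe to requeue again */
	return 0;
}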
@@ -1206,6 +1256,7 @@ check_mgmt:
 	while (!list_empty(&conn->mgmtqueue)) {
 		conn->task = list_entry(conn->mgmtqueue.next,
 					 struct iscsi_task, running);
+		list_del_init(&conn->task->running);
 		if (iscsi_prep_mgmt_task(conn, conn->task)) {
 			__iscsi_put_task(conn->task);
 			conn->task = NULL;
@@ -1217,23 +1268,26 @@ check_mgmt:
 	}
 
 	/* process pending command queue */
-	while (!list_empty(&conn->xmitqueue)) {
+	while (!list_empty(&conn->cmdqueue)) {
 		if (conn->tmf_state == TMF_QUEUED)
 			break;
 
-		conn->task = list_entry(conn->xmitqueue.next,
+		conn->task = list_entry(conn->cmdqueue.next,
 					 struct iscsi_task, running);
+		list_del_init(&conn->task->running);
 		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
-			fail_command(conn, conn->task, DID_IMM_RETRY << 16);
+			fail_scsi_task(conn->task, DID_IMM_RETRY);
 			continue;
 		}
 		rc = iscsi_prep_scsi_cmd_pdu(conn->task);
 		if (rc) {
 			if (rc == -ENOMEM) {
+				list_add_tail(&conn->task->running,
+					      &conn->cmdqueue);
 				conn->task = NULL;
 				goto again;
 			} else
-				fail_command(conn, conn->task, DID_ABORT << 16);
+				fail_scsi_task(conn->task, DID_ABORT);
 			continue;
 		}
 		rc = iscsi_xmit_task(conn);
@@ -1260,8 +1314,8 @@ check_mgmt:
 
 		conn->task = list_entry(conn->requeue.next,
 					 struct iscsi_task, running);
+		list_del_init(&conn->task->running);
 		conn->task->state = ISCSI_TASK_RUNNING;
-		list_move_tail(conn->requeue.next, &conn->run_list);
 		rc = iscsi_xmit_task(conn);
 		if (rc)
 			goto again;
@@ -1328,6 +1382,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 {
 	struct iscsi_cls_session *cls_session;
 	struct Scsi_Host *host;
+	struct iscsi_host *ihost;
 	int reason = 0;
 	struct iscsi_session *session;
 	struct iscsi_conn *conn;
@@ -1338,6 +1393,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 	sc->SCp.ptr = NULL;
 
 	host = sc->device->host;
+	ihost = shost_priv(host);
 	spin_unlock(host->host_lock);
 
 	cls_session = starget_to_session(scsi_target(sc->device));
@@ -1350,13 +1406,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 		goto fault;
 	}
 
-	/*
-	 * ISCSI_STATE_FAILED is a temp. state. The recovery
-	 * code will decide what is best to do with command queued
-	 * during this time
-	 */
-	if (session->state != ISCSI_STATE_LOGGED_IN &&
-	    session->state != ISCSI_STATE_FAILED) {
+	if (session->state != ISCSI_STATE_LOGGED_IN) {
 		/*
 		 * to handle the race between when we set the recovery state
 		 * and block the session we requeue here (commands could
@@ -1364,12 +1414,15 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 		 * up because the block code is not locked)
 		 */
 		switch (session->state) {
+		case ISCSI_STATE_FAILED:
 		case ISCSI_STATE_IN_RECOVERY:
 			reason = FAILURE_SESSION_IN_RECOVERY;
-			goto reject;
+			sc->result = DID_IMM_RETRY << 16;
+			break;
 		case ISCSI_STATE_LOGGING_OUT:
 			reason = FAILURE_SESSION_LOGGING_OUT;
-			goto reject;
+			sc->result = DID_IMM_RETRY << 16;
+			break;
 		case ISCSI_STATE_RECOVERY_FAILED:
 			reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
 			sc->result = DID_TRANSPORT_FAILFAST << 16;
@@ -1402,9 +1455,8 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 		reason = FAILURE_OOM;
 		goto reject;
 	}
-	list_add_tail(&task->running, &conn->xmitqueue);
 
-	if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+	if (!ihost->workq) {
 		reason = iscsi_prep_scsi_cmd_pdu(task);
 		if (reason) {
 			if (reason == -ENOMEM) {
@@ -1419,8 +1471,10 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 			reason = FAILURE_SESSION_NOT_READY;
 			goto prepd_reject;
 		}
-	} else
+	} else {
+		list_add_tail(&task->running, &conn->cmdqueue);
 		iscsi_conn_queue_work(conn);
+	}
 
 	session->queued_cmdsn++;
 	spin_unlock(&session->lock);
@@ -1429,7 +1483,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 
 prepd_reject:
 	sc->scsi_done = NULL;
-	iscsi_complete_command(task);
+	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
 reject:
 	spin_unlock(&session->lock);
 	ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
@@ -1439,7 +1493,7 @@ reject:
 
 prepd_fault:
 	sc->scsi_done = NULL;
-	iscsi_complete_command(task);
+	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
 fault:
 	spin_unlock(&session->lock);
 	ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
@@ -1608,44 +1662,24 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
 * Fail commands. session lock held and recv side suspended and xmit
 * thread flushed
 */
-static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
-			      int error)
+static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,
+			    int error)
 {
-	struct iscsi_task *task, *tmp;
-
-	if (conn->task) {
-		if (lun == -1 ||
-		    (conn->task->sc && conn->task->sc->device->lun == lun))
-			conn->task = NULL;
-	}
+	struct iscsi_task *task;
+	int i;
 
-	/* flush pending */
-	list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
-		if (lun == task->sc->device->lun || lun == -1) {
-			ISCSI_DBG_SESSION(conn->session,
-					  "failing pending sc %p itt 0x%x\n",
-					  task->sc, task->itt);
-			fail_command(conn, task, error << 16);
-		}
-	}
+	for (i = 0; i < conn->session->cmds_max; i++) {
+		task = conn->session->cmds[i];
+		if (!task->sc || task->state == ISCSI_TASK_FREE)
+			continue;
 
-	list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
-		if (lun == task->sc->device->lun || lun == -1) {
-			ISCSI_DBG_SESSION(conn->session,
-					  "failing requeued sc %p itt 0x%x\n",
-					  task->sc, task->itt);
-			fail_command(conn, task, error << 16);
-		}
-	}
+		if (lun != -1 && lun != task->sc->device->lun)
+			continue;
 
-	/* fail all other running */
-	list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
-		if (lun == task->sc->device->lun || lun == -1) {
-			ISCSI_DBG_SESSION(conn->session,
-					 "failing in progress sc %p itt 0x%x\n",
-					 task->sc, task->itt);
-			fail_command(conn, task, error << 16);
-		}
+		ISCSI_DBG_SESSION(conn->session,
+				  "failing sc %p itt 0x%x state %d\n",
+				  task->sc, task->itt, task->state);
+		fail_scsi_task(task, error);
 	}
 }
 
@@ -1655,7 +1689,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
 	struct iscsi_host *ihost = shost_priv(shost);
 
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-	if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+	if (ihost->workq)
 		flush_workqueue(ihost->workq);
 }
 EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
@@ -1663,8 +1697,23 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
 static void iscsi_start_tx(struct iscsi_conn *conn)
 {
 	clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-	if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
-		iscsi_conn_queue_work(conn);
+	iscsi_conn_queue_work(conn);
+}
+
+/*
+ * We want to make sure a ping is in flight. It has timed out.
+ * And we are not busy processing a pdu that is making
+ * progress but got started before the ping and is taking a while
+ * to complete so the ping is just stuck behind it in a queue.
+ */
+static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
+{
+	if (conn->ping_task &&
+	    time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
+			   (conn->ping_timeout * HZ), jiffies))
+		return 1;
+	else
+		return 0;
 }
 
 static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
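The new iscsi_has_ping_timed_out() helper centralizes one deadline computation that both the command timeout handler and the transport timer below now share: an outstanding nop-out ping counts as lost once jiffies reaches last_recv + recv_timeout*HZ + ping_timeout*HZ. As a rough worked example with recv_timeout = 5 s and ping_timeout = 10 s, a ping triggered by 5 s of rx silence is declared lost 15 s after the last byte arrived. A userspace re-statement with plain integers standing in for jiffies (the real time_before_eq() also copes with jiffies wraparound, which plain <= does not):

#include <stdio.h>

#define HZ 1000UL	/* illustrative tick rate */

static int ping_timed_out(unsigned long last_recv, unsigned long now,
			  unsigned long recv_timeout,
			  unsigned long ping_timeout, int ping_in_flight)
{
	unsigned long deadline = last_recv + recv_timeout * HZ +
				 ping_timeout * HZ;
	return ping_in_flight && deadline <= now;	/* time_before_eq() */
}

int main(void)
{
	/* last rx at t=0s, recv_timeout=5s, ping_timeout=10s: lost at t>=15s */
	printf("%d\n", ping_timed_out(0, 14 * HZ, 5, 10, 1));	/* 0 */
	printf("%d\n", ping_timed_out(0, 15 * HZ, 5, 10, 1));	/* 1 */
	printf("%d\n", ping_timed_out(0, 20 * HZ, 5, 10, 0));	/* 0: no ping */
	return 0;
}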
@@ -1702,16 +1751,20 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
 	 * if the ping timedout then we are in the middle of cleaning up
 	 * and can let the iscsi eh handle it
 	 */
-	if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
-			    (conn->ping_timeout * HZ), jiffies))
+	if (iscsi_has_ping_timed_out(conn)) {
 		rc = BLK_EH_RESET_TIMER;
+		goto done;
+	}
 	/*
 	 * if we are about to check the transport then give the command
 	 * more time
 	 */
 	if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
-			   jiffies))
+			   jiffies)) {
 		rc = BLK_EH_RESET_TIMER;
+		goto done;
+	}
+
 	/* if in the middle of checking the transport then give us more time */
 	if (conn->ping_task)
 		rc = BLK_EH_RESET_TIMER;
@@ -1738,13 +1791,13 @@ static void iscsi_check_transport_timeouts(unsigned long data)
 
 	recv_timeout *= HZ;
 	last_recv = conn->last_recv;
-	if (conn->ping_task &&
-	    time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
-			   jiffies)) {
+
+	if (iscsi_has_ping_timed_out(conn)) {
 		iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
-				  "expired, last rx %lu, last ping %lu, "
-				  "now %lu\n", conn->ping_timeout, last_recv,
-				  conn->last_ping, jiffies);
+				  "expired, recv timeout %d, last rx %lu, "
+				  "last ping %lu, now %lu\n",
+				  conn->ping_timeout, conn->recv_timeout,
+				  last_recv, conn->last_ping, jiffies);
 		spin_unlock(&session->lock);
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
 		return;
@@ -1788,6 +1841,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 	cls_session = starget_to_session(scsi_target(sc->device));
 	session = cls_session->dd_data;
 
+	ISCSI_DBG_SESSION(session, "aborting sc %p\n", sc);
+
 	mutex_lock(&session->eh_mutex);
 	spin_lock_bh(&session->lock);
 	/*
@@ -1810,6 +1865,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 	    sc->SCp.phase != session->age) {
 		spin_unlock_bh(&session->lock);
 		mutex_unlock(&session->eh_mutex);
+		ISCSI_DBG_SESSION(session, "failing abort due to dropped "
+				  "session.\n");
 		return FAILED;
 	}
 
@@ -1829,7 +1886,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 	}
 
 	if (task->state == ISCSI_TASK_PENDING) {
-		fail_command(conn, task, DID_ABORT << 16);
+		fail_scsi_task(task, DID_ABORT);
 		goto success;
 	}
 
@@ -1860,7 +1917,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 		 * then sent more data for the cmd.
 		 */
 		spin_lock(&session->lock);
-		fail_command(conn, task, DID_ABORT << 16);
+		fail_scsi_task(task, DID_ABORT);
 		conn->tmf_state = TMF_INITIAL;
 		spin_unlock(&session->lock);
 		iscsi_start_tx(conn);
@@ -1967,7 +2024,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
 	iscsi_suspend_tx(conn);
 
 	spin_lock_bh(&session->lock);
-	fail_all_commands(conn, sc->device->lun, DID_ERROR);
+	fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
 	conn->tmf_state = TMF_INITIAL;
 	spin_unlock_bh(&session->lock);
 
@@ -2274,6 +2331,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
 		if (cmd_task_size)
 			task->dd_data = &task[1];
 		task->itt = cmd_i;
+		task->state = ISCSI_TASK_FREE;
 		INIT_LIST_HEAD(&task->running);
 	}
 
@@ -2360,10 +2418,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
 	conn->transport_timer.data = (unsigned long)conn;
 	conn->transport_timer.function = iscsi_check_transport_timeouts;
 
-	INIT_LIST_HEAD(&conn->run_list);
-	INIT_LIST_HEAD(&conn->mgmt_run_list);
 	INIT_LIST_HEAD(&conn->mgmtqueue);
-	INIT_LIST_HEAD(&conn->xmitqueue);
+	INIT_LIST_HEAD(&conn->cmdqueue);
 	INIT_LIST_HEAD(&conn->requeue);
 	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
 
@@ -2531,27 +2587,28 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
 EXPORT_SYMBOL_GPL(iscsi_conn_start);
 
 static void
-flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
+fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
 {
-	struct iscsi_task *task, *tmp;
+	struct iscsi_task *task;
+	int i, state;
 
-	/* handle pending */
-	list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
-		ISCSI_DBG_SESSION(session, "flushing pending mgmt task "
-				  "itt 0x%x\n", task->itt);
-		/* release ref from prep task */
-		__iscsi_put_task(task);
-	}
+	for (i = 0; i < conn->session->cmds_max; i++) {
+		task = conn->session->cmds[i];
+		if (task->sc)
+			continue;
 
-	/* handle running */
-	list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
-		ISCSI_DBG_SESSION(session, "flushing running mgmt task "
-				  "itt 0x%x\n", task->itt);
-		/* release ref from prep task */
-		__iscsi_put_task(task);
-	}
+		if (task->state == ISCSI_TASK_FREE)
+			continue;
+
+		ISCSI_DBG_SESSION(conn->session,
+				  "failing mgmt itt 0x%x state %d\n",
+				  task->itt, task->state);
+		state = ISCSI_TASK_ABRT_SESS_RECOV;
+		if (task->state == ISCSI_TASK_PENDING)
+			state = ISCSI_TASK_COMPLETED;
+		iscsi_complete_task(task, state);
 
-	conn->task = NULL;
+	}
 }
 
 static void iscsi_start_session_recovery(struct iscsi_session *session,
@@ -2559,8 +2616,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
 {
 	int old_stop_stage;
 
-	del_timer_sync(&conn->transport_timer);
-
 	mutex_lock(&session->eh_mutex);
 	spin_lock_bh(&session->lock);
 	if (conn->stop_stage == STOP_CONN_TERM) {
@@ -2578,13 +2633,17 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
 		session->state = ISCSI_STATE_TERMINATE;
 	else if (conn->stop_stage != STOP_CONN_RECOVER)
 		session->state = ISCSI_STATE_IN_RECOVERY;
+	spin_unlock_bh(&session->lock);
+
+	del_timer_sync(&conn->transport_timer);
+	iscsi_suspend_tx(conn);
 
+	spin_lock_bh(&session->lock);
 	old_stop_stage = conn->stop_stage;
 	conn->stop_stage = flag;
 	conn->c_stage = ISCSI_CONN_STOPPED;
 	spin_unlock_bh(&session->lock);
 
-	iscsi_suspend_tx(conn);
 	/*
 	 * for connection level recovery we should not calculate
 	 * header digest. conn->hdr_size used for optimization
@@ -2605,11 +2664,8 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
 	 * flush queues.
 	 */
 	spin_lock_bh(&session->lock);
-	if (flag == STOP_CONN_RECOVER)
-		fail_all_commands(conn, -1, DID_TRANSPORT_DISRUPTED);
-	else
-		fail_all_commands(conn, -1, DID_ERROR);
-	flush_control_queues(session, conn);
+	fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
+	fail_mgmt_tasks(session, conn);
 	spin_unlock_bh(&session->lock);
 	mutex_unlock(&session->eh_mutex);
 }
@@ -2651,6 +2707,23 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_bind);
 
+static int iscsi_switch_str_param(char **param, char *new_val_buf)
+{
+	char *new_val;
+
+	if (*param) {
+		if (!strcmp(*param, new_val_buf))
+			return 0;
+	}
+
+	new_val = kstrdup(new_val_buf, GFP_NOIO);
+	if (!new_val)
+		return -ENOMEM;
+
+	kfree(*param);
+	*param = new_val;
+	return 0;
+}
 
 int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
 		    enum iscsi_param param, char *buf, int buflen)
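iscsi_switch_str_param() folds the per-parameter kstrdup boilerplate below into one helper, with a small semantic change: string parameters may now be replaced on a re-login, and an unchanged value costs no allocation. A userspace sketch of the same contract, with strdup/free standing in for kstrdup(GFP_NOIO)/kfree:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int switch_str_param(char **param, const char *new_val_buf)
{
	char *new_val;

	if (*param && !strcmp(*param, new_val_buf))
		return 0;		/* unchanged: keep the old buffer */

	new_val = strdup(new_val_buf);
	if (!new_val)
		return -1;		/* -ENOMEM in the kernel version */

	free(*param);			/* free(NULL) is a no-op */
	*param = new_val;
	return 0;
}

int main(void)
{
	char *targetname = NULL;	/* hypothetical parameter slot */

	switch_str_param(&targetname, "iqn.2009-06.example:disk0");
	switch_str_param(&targetname, "iqn.2009-06.example:disk0"); /* no realloc */
	printf("%s\n", targetname);
	free(targetname);
	return 0;
}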
@@ -2723,38 +2796,15 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
 		sscanf(buf, "%u", &conn->exp_statsn);
 		break;
 	case ISCSI_PARAM_USERNAME:
-		kfree(session->username);
-		session->username = kstrdup(buf, GFP_KERNEL);
-		if (!session->username)
-			return -ENOMEM;
-		break;
+		return iscsi_switch_str_param(&session->username, buf);
 	case ISCSI_PARAM_USERNAME_IN:
-		kfree(session->username_in);
-		session->username_in = kstrdup(buf, GFP_KERNEL);
-		if (!session->username_in)
-			return -ENOMEM;
-		break;
+		return iscsi_switch_str_param(&session->username_in, buf);
 	case ISCSI_PARAM_PASSWORD:
-		kfree(session->password);
-		session->password = kstrdup(buf, GFP_KERNEL);
-		if (!session->password)
-			return -ENOMEM;
-		break;
+		return iscsi_switch_str_param(&session->password, buf);
 	case ISCSI_PARAM_PASSWORD_IN:
-		kfree(session->password_in);
-		session->password_in = kstrdup(buf, GFP_KERNEL);
-		if (!session->password_in)
-			return -ENOMEM;
-		break;
+		return iscsi_switch_str_param(&session->password_in, buf);
 	case ISCSI_PARAM_TARGET_NAME:
-		/* this should not change between logins */
-		if (session->targetname)
-			break;
-
-		session->targetname = kstrdup(buf, GFP_KERNEL);
-		if (!session->targetname)
-			return -ENOMEM;
-		break;
+		return iscsi_switch_str_param(&session->targetname, buf);
 	case ISCSI_PARAM_TPGT:
 		sscanf(buf, "%d", &session->tpgt);
 		break;
@@ -2762,25 +2812,11 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
 		sscanf(buf, "%d", &conn->persistent_port);
 		break;
 	case ISCSI_PARAM_PERSISTENT_ADDRESS:
-		/*
-		 * this is the address returned in discovery so it should
-		 * not change between logins.
-		 */
-		if (conn->persistent_address)
-			break;
-
-		conn->persistent_address = kstrdup(buf, GFP_KERNEL);
-		if (!conn->persistent_address)
-			return -ENOMEM;
-		break;
+		return iscsi_switch_str_param(&conn->persistent_address, buf);
 	case ISCSI_PARAM_IFACE_NAME:
-		if (!session->ifacename)
-			session->ifacename = kstrdup(buf, GFP_KERNEL);
-		break;
+		return iscsi_switch_str_param(&session->ifacename, buf);
 	case ISCSI_PARAM_INITIATOR_NAME:
-		if (!session->initiatorname)
-			session->initiatorname = kstrdup(buf, GFP_KERNEL);
-		break;
+		return iscsi_switch_str_param(&session->initiatorname, buf);
 	default:
 		return -ENOSYS;
 	}
@@ -2851,10 +2887,7 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
 		len = sprintf(buf, "%s\n", session->ifacename);
 		break;
 	case ISCSI_PARAM_INITIATOR_NAME:
-		if (!session->initiatorname)
-			len = sprintf(buf, "%s\n", "unknown");
-		else
-			len = sprintf(buf, "%s\n", session->initiatorname);
+		len = sprintf(buf, "%s\n", session->initiatorname);
 		break;
 	default:
 		return -ENOSYS;
@@ -2920,29 +2953,16 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
 
 	switch (param) {
 	case ISCSI_HOST_PARAM_NETDEV_NAME:
-		if (!ihost->netdev)
-			len = sprintf(buf, "%s\n", "default");
-		else
-			len = sprintf(buf, "%s\n", ihost->netdev);
+		len = sprintf(buf, "%s\n", ihost->netdev);
 		break;
 	case ISCSI_HOST_PARAM_HWADDRESS:
-		if (!ihost->hwaddress)
-			len = sprintf(buf, "%s\n", "default");
-		else
-			len = sprintf(buf, "%s\n", ihost->hwaddress);
+		len = sprintf(buf, "%s\n", ihost->hwaddress);
 		break;
 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
-		if (!ihost->initiatorname)
-			len = sprintf(buf, "%s\n", "unknown");
-		else
-			len = sprintf(buf, "%s\n", ihost->initiatorname);
+		len = sprintf(buf, "%s\n", ihost->initiatorname);
 		break;
 	case ISCSI_HOST_PARAM_IPADDRESS:
-		if (!strlen(ihost->local_address))
-			len = sprintf(buf, "%s\n", "unknown");
-		else
-			len = sprintf(buf, "%s\n",
-				      ihost->local_address);
+		len = sprintf(buf, "%s\n", ihost->local_address);
 		break;
 	default:
 		return -ENOSYS;
@@ -2959,17 +2979,11 @@ int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
 
 	switch (param) {
 	case ISCSI_HOST_PARAM_NETDEV_NAME:
-		if (!ihost->netdev)
-			ihost->netdev = kstrdup(buf, GFP_KERNEL);
-		break;
+		return iscsi_switch_str_param(&ihost->netdev, buf);
 	case ISCSI_HOST_PARAM_HWADDRESS:
-		if (!ihost->hwaddress)
-			ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
-		break;
+		return iscsi_switch_str_param(&ihost->hwaddress, buf);
 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
-		if (!ihost->initiatorname)
-			ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
-		break;
+		return iscsi_switch_str_param(&ihost->initiatorname, buf);
 	default:
 		return -ENOSYS;
 	}

+ 15 - 3
drivers/scsi/libiscsi_tcp.c

@@ -440,8 +440,8 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
 	struct iscsi_tcp_task *tcp_task = task->dd_data;
 	struct iscsi_r2t_info *r2t;
 
-	/* nothing to do for mgmt or pending tasks */
-	if (!task->sc || task->state == ISCSI_TASK_PENDING)
+	/* nothing to do for mgmt */
+	if (!task->sc)
 		return;
 
 	/* flush task's r2t queues */
@@ -473,7 +473,13 @@ static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
 	int datasn = be32_to_cpu(rhdr->datasn);
 	unsigned total_in_length = scsi_in(task->sc)->length;
 
-	iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
+	/*
+	 * lib iscsi will update this in the completion handling if there
+	 * is status.
+	 */
+	if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
+		iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
+
 	if (tcp_conn->in.datalen == 0)
 		return 0;
 
@@ -857,6 +863,12 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
 	int rc = 0;
 
 	ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset);
+	/*
+	 * Update for each skb instead of pdu, because over slow networks a
+	 * data_in's data could take a while to read in. We also want to
+	 * account for r2ts.
+	 */
+	conn->last_recv = jiffies;
 
 	if (unlikely(conn->suspend_rx)) {
 		ISCSI_DBG_TCP(conn, "Rx suspended!\n");
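The conn->last_recv stamp added above is deliberately per-skb rather than per-PDU, so a large data-in that trickles in over a slow link still counts as progress for the recv/ping watchdogs in libiscsi. A small sketch of that design choice, with wall-clock time standing in for jiffies (the types are illustrative, not the libiscsi ones):

#include <stddef.h>
#include <time.h>

struct conn_sketch { time_t last_recv; };

/* Called once per received buffer: refresh the "last heard from the
 * target" stamp before PDU reassembly, not after a PDU completes. */
static void recv_buffer(struct conn_sketch *c, const void *buf, size_t len)
{
	c->last_recv = time(NULL);	/* conn->last_recv = jiffies; */
	/* ... feed buf/len into the PDU reassembly state machine ... */
	(void)buf; (void)len;
}

int main(void)
{
	struct conn_sketch c = { 0 };
	char byte = 0;
	recv_buffer(&c, &byte, 1);
	return c.last_recv ? 0 : 1;
}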

+ 114 - 9
drivers/scsi/lpfc/lpfc.h

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -23,6 +23,13 @@
 
 struct lpfc_sli2_slim;
 
+#define LPFC_PCI_DEV_LP		0x1
+#define LPFC_PCI_DEV_OC		0x2
+
+#define LPFC_SLI_REV2		2
+#define LPFC_SLI_REV3		3
+#define LPFC_SLI_REV4		4
+
 #define LPFC_MAX_TARGET		4096	/* max number of targets supported */
 #define LPFC_MAX_DISC_THREADS	64	/* max outstanding discovery els
 					   requests */
@@ -98,9 +105,11 @@ struct lpfc_dma_pool {
 };
 
 struct hbq_dmabuf {
+	struct lpfc_dmabuf hbuf;
 	struct lpfc_dmabuf dbuf;
 	uint32_t size;
 	uint32_t tag;
+	struct lpfc_rcqe rcqe;
 };
 
 /* Priority bit.  Set value to exceed low water mark in lpfc_mem. */
@@ -134,7 +143,10 @@ typedef struct lpfc_vpd {
 	} rev;
 	struct {
 #ifdef __BIG_ENDIAN_BITFIELD
-		uint32_t rsvd2  :24;  /* Reserved                             */
+		uint32_t rsvd3  :19;  /* Reserved                             */
+		uint32_t cdss	: 1;  /* Configure Data Security SLI          */
+		uint32_t rsvd2	: 3;  /* Reserved                             */
+		uint32_t cbg	: 1;  /* Configure BlockGuard                 */
 		uint32_t cmv	: 1;  /* Configure Max VPIs                   */
 		uint32_t ccrp   : 1;  /* Config Command Ring Polling          */
 		uint32_t csah   : 1;  /* Configure Synchronous Abort Handling */
@@ -152,7 +164,10 @@ typedef struct lpfc_vpd {
 		uint32_t csah   : 1;  /* Configure Synchronous Abort Handling */
 		uint32_t ccrp   : 1;  /* Config Command Ring Polling          */
 		uint32_t cmv	: 1;  /* Configure Max VPIs                   */
-		uint32_t rsvd2  :24;  /* Reserved                             */
+		uint32_t cbg	: 1;  /* Configure BlockGuard                 */
+		uint32_t rsvd2	: 3;  /* Reserved                             */
+		uint32_t cdss	: 1;  /* Configure Data Security SLI          */
+		uint32_t rsvd3  :19;  /* Reserved                             */
 #endif
 	} sli3Feat;
 } lpfc_vpd_t;
@@ -264,8 +279,8 @@ enum hba_state {
 };
 
 struct lpfc_vport {
-	struct list_head listentry;
 	struct lpfc_hba *phba;
+	struct list_head listentry;
 	uint8_t port_type;
 #define LPFC_PHYSICAL_PORT 1
 #define LPFC_NPIV_PORT  2
@@ -273,6 +288,9 @@ struct lpfc_vport {
 	enum discovery_state port_state;
 
 	uint16_t vpi;
+	uint16_t vfi;
+	uint8_t vfi_state;
+#define LPFC_VFI_REGISTERED	0x1
 
 	uint32_t fc_flag;	/* FC flags */
 /* Several of these flags are HBA centric and should be moved to
@@ -385,6 +403,9 @@ struct lpfc_vport {
 #endif
 	uint8_t stat_data_enabled;
 	uint8_t stat_data_blocked;
+	struct list_head rcv_buffer_list;
+	uint32_t vport_flag;
+#define STATIC_VPORT	1
 };
 
 struct hbq_s {
@@ -420,8 +441,66 @@ enum intr_type_t {
 };
 
 struct lpfc_hba {
+	/* SCSI interface function jump table entries */
+	int (*lpfc_new_scsi_buf)
+		(struct lpfc_vport *, int);
+	struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
+		(struct lpfc_hba *);
+	int (*lpfc_scsi_prep_dma_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	void (*lpfc_scsi_unprep_dma_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	void (*lpfc_release_scsi_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	void (*lpfc_rampdown_queue_depth)
+		(struct lpfc_hba *);
+	void (*lpfc_scsi_prep_cmnd)
+		(struct lpfc_vport *, struct lpfc_scsi_buf *,
+		 struct lpfc_nodelist *);
+	int (*lpfc_scsi_prep_task_mgmt_cmd)
+		(struct lpfc_vport *, struct lpfc_scsi_buf *,
+		 unsigned int, uint8_t);
+
+	/* IOCB interface function jump table entries */
+	int (*__lpfc_sli_issue_iocb)
+		(struct lpfc_hba *, uint32_t,
+		 struct lpfc_iocbq *, uint32_t);
+	void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
+			 struct lpfc_iocbq *);
+	int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
+
+
+	IOCB_t * (*lpfc_get_iocb_from_iocbq)
+		(struct lpfc_iocbq *);
+	void (*lpfc_scsi_cmd_iocb_cmpl)
+		(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
+
+	/* MBOX interface function jump table entries */
+	int (*lpfc_sli_issue_mbox)
+		(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+	/* Slow-path IOCB process function jump table entries */
+	void (*lpfc_sli_handle_slow_ring_event)
+		(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+		 uint32_t mask);
+	/* INIT device interface function jump table entries */
+	int (*lpfc_sli_hbq_to_firmware)
+		(struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
+	int (*lpfc_sli_brdrestart)
+		(struct lpfc_hba *);
+	int (*lpfc_sli_brdready)
+		(struct lpfc_hba *, uint32_t);
+	void (*lpfc_handle_eratt)
+		(struct lpfc_hba *);
+	void (*lpfc_stop_port)
+		(struct lpfc_hba *);
+
+
+	/* SLI4 specific HBA data structure */
+	struct lpfc_sli4_hba sli4_hba;
+
 	struct lpfc_sli sli;
-	uint32_t sli_rev;		/* SLI2 or SLI3 */
+	uint8_t pci_dev_grp;	/* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
+	uint32_t sli_rev;		/* SLI2, SLI3, or SLI4 */
 	uint32_t sli3_options;		/* Mask of enabled SLI3 options */
 #define LPFC_SLI3_HBQ_ENABLED		0x01
 #define LPFC_SLI3_NPIV_ENABLED		0x02
@@ -429,6 +508,7 @@ struct lpfc_hba {
 #define LPFC_SLI3_CRP_ENABLED		0x08
 #define LPFC_SLI3_INB_ENABLED		0x10
 #define LPFC_SLI3_BG_ENABLED		0x20
+#define LPFC_SLI3_DSS_ENABLED		0x40
 	uint32_t iocb_cmd_size;
 	uint32_t iocb_rsp_size;
 
@@ -442,8 +522,13 @@ struct lpfc_hba {
 
 	uint32_t hba_flag;	/* hba generic flags */
 #define HBA_ERATT_HANDLED	0x1 /* This flag is set when eratt handled */
-
-#define DEFER_ERATT		0x4 /* Deferred error attention in progress */
+#define DEFER_ERATT		0x2 /* Deferred error attention in progress */
+#define HBA_FCOE_SUPPORT	0x4 /* HBA function supports FCOE */
+#define HBA_RECEIVE_BUFFER	0x8 /* Rcv buffer posted to worker thread */
+#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
+#define FCP_XRI_ABORT_EVENT	0x20
+#define ELS_XRI_ABORT_EVENT	0x40
+#define ASYNC_EVENT		0x80
 	struct lpfc_dmabuf slim2p;
 
 	MAILBOX_t *mbox;
@@ -502,6 +587,9 @@ struct lpfc_hba {
 	uint32_t cfg_poll;
 	uint32_t cfg_poll_tmo;
 	uint32_t cfg_use_msi;
+	uint32_t cfg_fcp_imax;
+	uint32_t cfg_fcp_wq_count;
+	uint32_t cfg_fcp_eq_count;
 	uint32_t cfg_sg_seg_cnt;
 	uint32_t cfg_prot_sg_seg_cnt;
 	uint32_t cfg_sg_dma_buf_size;
@@ -511,6 +599,8 @@ struct lpfc_hba {
 	uint32_t cfg_enable_hba_reset;
 	uint32_t cfg_enable_hba_heartbeat;
 	uint32_t cfg_enable_bg;
+	uint32_t cfg_enable_fip;
+	uint32_t cfg_log_verbose;
 
 	lpfc_vpd_t vpd;		/* vital product data */
 
@@ -526,11 +616,12 @@ struct lpfc_hba {
 	unsigned long data_flags;
 
 	uint32_t hbq_in_use;		/* HBQs in use flag */
-	struct list_head hbqbuf_in_list;  /* in-fly hbq buffer list */
+	struct list_head rb_pend_list;  /* Received buffers to be processed */
 	uint32_t hbq_count;	        /* Count of configured HBQs */
 	struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies  */
 
 	unsigned long pci_bar0_map;     /* Physical address for PCI BAR0 */
+	unsigned long pci_bar1_map;     /* Physical address for PCI BAR1 */
 	unsigned long pci_bar2_map;     /* Physical address for PCI BAR2 */
 	void __iomem *slim_memmap_p;	/* Kernel memory mapped address for
 					   PCI BAR0 */
@@ -593,7 +684,8 @@ struct lpfc_hba {
 	/* pci_mem_pools */
 	struct pci_pool *lpfc_scsi_dma_buf_pool;
 	struct pci_pool *lpfc_mbuf_pool;
-	struct pci_pool *lpfc_hbq_pool;
+	struct pci_pool *lpfc_hrb_pool;	/* header receive buffer pool */
+	struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
 	struct lpfc_dma_pool lpfc_mbuf_safety_pool;
 
 	mempool_t *mbox_mem_pool;
@@ -609,6 +701,14 @@ struct lpfc_hba {
 	struct lpfc_vport *pport;	/* physical lpfc_vport pointer */
 	uint16_t max_vpi;		/* Maximum virtual nports */
 #define LPFC_MAX_VPI 0xFFFF		/* Max number of VPI supported */
+	uint16_t max_vports;            /*
+					 * For IOV HBAs max_vpi can change
+					 * after a reset. max_vports is max
+					 * number of vports present. This can
+					 * be greater than max_vpi.
+					 */
+	uint16_t vpi_base;
+	uint16_t vfi_base;
 	unsigned long *vpi_bmask;	/* vpi allocation table */
 
 	/* Data structure used by fabric iocb scheduler */
@@ -667,6 +767,11 @@ struct lpfc_hba {
 /* Maximum number of events that can be outstanding at any time*/
 #define LPFC_MAX_EVT_COUNT 512
 	atomic_t fast_event_count;
+	struct lpfc_fcf fcf;
+	uint8_t fc_map[3];
+	uint8_t valid_vlan;
+	uint16_t vlan_id;
+	struct list_head fcf_conn_rec_list;
 };
 
 static inline struct Scsi_Host *
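struct lpfc_hba above now front-loads per-SLI-revision behavior into function-pointer jump tables bound once at probe time, instead of sprinkling sli_rev checks through the hot paths. A self-contained sketch of the pattern — the types and names are illustrative, not the lpfc API:

#include <stdio.h>

struct hba_sketch {
	int sli_rev;
	void (*stop_port)(struct hba_sketch *);	/* one "jump table" slot */
};

static void stop_port_s3(struct hba_sketch *h) { printf("SLI-3 stop, rev %d\n", h->sli_rev); }
static void stop_port_s4(struct hba_sketch *h) { printf("SLI-4 stop, rev %d\n", h->sli_rev); }

/* Bind the revision-specific implementations once, at setup time */
static void bind_ops(struct hba_sketch *h)
{
	h->stop_port = (h->sli_rev == 4) ? stop_port_s4 : stop_port_s3;
}

int main(void)
{
	struct hba_sketch h = { .sli_rev = 4 };
	bind_ops(&h);
	h.stop_port(&h);	/* hot path dispatches without re-checking sli_rev */
	return 0;
}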

+ 167 - 83
drivers/scsi/lpfc/lpfc_attr.c

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -30,8 +30,10 @@
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -505,12 +507,14 @@ lpfc_issue_lip(struct Scsi_Host *shost)
 		return -ENOMEM;
 
 	memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
-	pmboxq->mb.mbxCommand = MBX_DOWN_LINK;
-	pmboxq->mb.mbxOwner = OWN_HOST;
+	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
+	pmboxq->u.mb.mbxOwner = OWN_HOST;
 
 	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
 
-	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) {
+	if ((mbxstatus == MBX_SUCCESS) &&
+	    (pmboxq->u.mb.mbxStatus == 0 ||
+	     pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
 		memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
 		lpfc_init_link(phba, pmboxq, phba->cfg_topology,
 			       phba->cfg_link_speed);
@@ -789,7 +793,8 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
 		  uint32_t *mrpi, uint32_t *arpi,
 		  uint32_t *mvpi, uint32_t *avpi)
 {
-	struct lpfc_sli   *psli = &phba->sli;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_mbx_read_config *rd_config;
 	LPFC_MBOXQ_t *pmboxq;
 	MAILBOX_t *pmb;
 	int rc = 0;
@@ -800,7 +805,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
 	 */
 	if (phba->link_state < LPFC_LINK_DOWN ||
 	    !phba->mbox_mem_pool ||
-	    (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
+	    (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
 		return 0;
 
 	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -811,13 +816,13 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
 		return 0;
 	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
 
-	pmb = &pmboxq->mb;
+	pmb = &pmboxq->u.mb;
 	pmb->mbxCommand = MBX_READ_CONFIG;
 	pmb->mbxOwner = OWN_HOST;
 	pmboxq->context1 = NULL;
 
 	if ((phba->pport->fc_flag & FC_OFFLINE_MODE) ||
-		(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+		(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
 		rc = MBX_NOT_FINISHED;
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -828,18 +833,37 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
 		return 0;
 	}
 
-	if (mrpi)
-		*mrpi = pmb->un.varRdConfig.max_rpi;
-	if (arpi)
-		*arpi = pmb->un.varRdConfig.avail_rpi;
-	if (mxri)
-		*mxri = pmb->un.varRdConfig.max_xri;
-	if (axri)
-		*axri = pmb->un.varRdConfig.avail_xri;
-	if (mvpi)
-		*mvpi = pmb->un.varRdConfig.max_vpi;
-	if (avpi)
-		*avpi = pmb->un.varRdConfig.avail_vpi;
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		rd_config = &pmboxq->u.mqe.un.rd_config;
+		if (mrpi)
+			*mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
+		if (arpi)
+			*arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
+					phba->sli4_hba.max_cfg_param.rpi_used;
+		if (mxri)
+			*mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
+		if (axri)
+			*axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
+					phba->sli4_hba.max_cfg_param.xri_used;
+		if (mvpi)
+			*mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
+		if (avpi)
+			*avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) -
+					phba->sli4_hba.max_cfg_param.vpi_used;
+	} else {
+		if (mrpi)
+			*mrpi = pmb->un.varRdConfig.max_rpi;
+		if (arpi)
+			*arpi = pmb->un.varRdConfig.avail_rpi;
+		if (mxri)
+			*mxri = pmb->un.varRdConfig.max_xri;
+		if (axri)
+			*axri = pmb->un.varRdConfig.avail_xri;
+		if (mvpi)
+			*mvpi = pmb->un.varRdConfig.max_vpi;
+		if (avpi)
+			*avpi = pmb->un.varRdConfig.avail_vpi;
+	}
 
 	mempool_free(pmboxq, phba->mbox_mem_pool);
 	return 1;
@@ -2021,22 +2045,9 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
 # deluged with LOTS of information.
 # You can set a bit mask to record specific types of verbose messages:
-#
-# LOG_ELS                       0x1        ELS events
-# LOG_DISCOVERY                 0x2        Link discovery events
-# LOG_MBOX                      0x4        Mailbox events
-# LOG_INIT                      0x8        Initialization events
-# LOG_LINK_EVENT                0x10       Link events
-# LOG_FCP                       0x40       FCP traffic history
-# LOG_NODE                      0x80       Node table events
-# LOG_BG                        0x200      BlockBuard events
-# LOG_MISC                      0x400      Miscellaneous events
-# LOG_SLI                       0x800      SLI events
-# LOG_FCP_ERROR                 0x1000     Only log FCP errors
-# LOG_LIBDFC                    0x2000     LIBDFC events
-# LOG_ALL_MSG                   0xffff     LOG all messages
+# See lpfc_logmsh.h for definitions.
 */
-LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff,
+LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
 		       "Verbose logging bit-mask");
 
 /*
@@ -2266,6 +2277,36 @@ lpfc_param_init(topology, 0, 0, 6)
 static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
 		lpfc_topology_show, lpfc_topology_store);
 
+/**
+ * lpfc_static_vport_show: Read callback function for
+ *   lpfc_static_vport sysfs file.
+ * @dev: Pointer to class device object.
+ * @attr: device attribute structure.
+ * @buf: Data buffer.
+ *
+ * This function is the read call back function for
+ * lpfc_static_vport sysfs file. The lpfc_static_vport
+ * sysfs file report the mageability of the vport.
+ **/
+static ssize_t
+lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	if (vport->vport_flag & STATIC_VPORT)
+		sprintf(buf, "1\n");
+	else
+		sprintf(buf, "0\n");
+
+	return strlen(buf);
+}
+
+/*
+ * Sysfs attribute to control the statistical data collection.
+ */
+static DEVICE_ATTR(lpfc_static_vport, S_IRUGO,
+		   lpfc_static_vport_show, NULL);
 
 /**
  * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
@@ -2341,7 +2382,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
 		if (vports == NULL)
 			return -ENOMEM;
 
-		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 			v_shost = lpfc_shost_from_vport(vports[i]);
 			spin_lock_irq(v_shost->host_lock);
 			/* Block and reset data collection */
@@ -2356,7 +2397,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
 		phba->bucket_base = base;
 		phba->bucket_step = step;
 
-		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 			v_shost = lpfc_shost_from_vport(vports[i]);
 
 			/* Unblock data collection */
@@ -2373,7 +2414,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
 		if (vports == NULL)
 			return -ENOMEM;
 
-		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 			v_shost = lpfc_shost_from_vport(vports[i]);
 			spin_lock_irq(shost->host_lock);
 			vports[i]->stat_data_blocked = 1;
@@ -2844,14 +2885,38 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
 /*
 /*
 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
 #		support this feature
 #		support this feature
-#       0  = MSI disabled
+#       0  = MSI disabled (default)
 #       1  = MSI enabled
 #       1  = MSI enabled
-#       2  = MSI-X enabled (default)
-# Value range is [0,2]. Default value is 2.
+#       2  = MSI-X enabled
+# Value range is [0,2]. Default value is 0.
 */
 */
-LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
+LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or "
 	    "MSI-X (2), if possible");
 	    "MSI-X (2), if possible");
 
 
+/*
+# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second
+#
+# Value range is [636,651042]. Default value is 10000.
+*/
+LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST,
+	    "Set the maximum number of fast-path FCP interrupts per second");
+
+/*
+# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
+#
+# Value range is [1,31]. Default value is 4.
+*/
+LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX,
+	    "Set the number of fast-path FCP work queues, if possible");
+
+/*
+# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues
+#
+# Value range is [1,7]. Default value is 1.
+*/
+LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX,
+	    "Set the number of fast-path FCP event queues, if possible");
+
 /*
 /*
 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
 #       0  = HBA resets disabled
 #       0  = HBA resets disabled
@@ -2876,6 +2941,14 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
 */
 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");

+/*
+# lpfc_enable_fip: When set, FIP is required to start discovery. If not
+# set, the driver will add an FCF record manually if the port has no
+# FCF records available and start discovery.
+# Value range is [0,1]. Default value is 1 (enabled)
+*/
+LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery");
+


 /*
 # lpfc_prot_mask: i
@@ -2942,6 +3015,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_peer_port_login,
 	&dev_attr_lpfc_nodev_tmo,
 	&dev_attr_lpfc_devloss_tmo,
+	&dev_attr_lpfc_enable_fip,
 	&dev_attr_lpfc_fcp_class,
 	&dev_attr_lpfc_use_adisc,
 	&dev_attr_lpfc_ack0,
@@ -2969,6 +3043,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_poll,
 	&dev_attr_lpfc_poll_tmo,
 	&dev_attr_lpfc_use_msi,
+	&dev_attr_lpfc_fcp_imax,
+	&dev_attr_lpfc_fcp_wq_count,
+	&dev_attr_lpfc_fcp_eq_count,
 	&dev_attr_lpfc_enable_bg,
 	&dev_attr_lpfc_soft_wwnn,
 	&dev_attr_lpfc_soft_wwpn,
@@ -2991,6 +3068,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
 	&dev_attr_lpfc_lun_queue_depth,
 	&dev_attr_lpfc_nodev_tmo,
 	&dev_attr_lpfc_devloss_tmo,
+	&dev_attr_lpfc_enable_fip,
 	&dev_attr_lpfc_hba_queue_depth,
 	&dev_attr_lpfc_peer_port_login,
 	&dev_attr_lpfc_restrict_login,
@@ -3003,6 +3081,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
 	&dev_attr_lpfc_enable_da_id,
 	&dev_attr_lpfc_max_scsicmpl_time,
 	&dev_attr_lpfc_stat_data_ctrl,
+	&dev_attr_lpfc_static_vport,
 	NULL,
 };

@@ -3199,7 +3278,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
 		}
 	}

-	memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off,
+	memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off,
 	       buf, count);

 	phba->sysfs_mbox.offset = off + count;
@@ -3241,6 +3320,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 	int rc;
+	MAILBOX_t *pmb;

 	if (off > MAILBOX_CMD_SIZE)
 		return -ERANGE;
@@ -3265,8 +3345,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 	if (off == 0 &&
 	    phba->sysfs_mbox.state  == SMBOX_WRITING &&
 	    phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
-
-		switch (phba->sysfs_mbox.mbox->mb.mbxCommand) {
+		pmb = &phba->sysfs_mbox.mbox->u.mb;
+		switch (pmb->mbxCommand) {
 			/* Offline only */
 		case MBX_INIT_LINK:
 		case MBX_DOWN_LINK:
@@ -3283,7 +3363,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 			if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
 				printk(KERN_WARNING "mbox_read:Command 0x%x "
 				       "is illegal in on-line state\n",
-				       phba->sysfs_mbox.mbox->mb.mbxCommand);
+				       pmb->mbxCommand);
 				sysfs_mbox_idle(phba);
 				spin_unlock_irq(&phba->hbalock);
 				return -EPERM;
@@ -3319,13 +3399,13 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 		case MBX_CONFIG_PORT:
 		case MBX_RUN_BIU_DIAG:
 			printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
-			       phba->sysfs_mbox.mbox->mb.mbxCommand);
+			       pmb->mbxCommand);
 			sysfs_mbox_idle(phba);
 			spin_unlock_irq(&phba->hbalock);
 			return -EPERM;
 		default:
 			printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
-			       phba->sysfs_mbox.mbox->mb.mbxCommand);
+			       pmb->mbxCommand);
 			sysfs_mbox_idle(phba);
 			spin_unlock_irq(&phba->hbalock);
 			return -EPERM;
@@ -3335,14 +3415,14 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 		 * or RESTART mailbox commands until the HBA is restarted.
 		 */
 		if (phba->pport->stopped &&
-		    phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY &&
-		    phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART &&
-		    phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS &&
-		    phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN)
+		    pmb->mbxCommand != MBX_DUMP_MEMORY &&
+		    pmb->mbxCommand != MBX_RESTART &&
+		    pmb->mbxCommand != MBX_WRITE_VPARMS &&
+		    pmb->mbxCommand != MBX_WRITE_WWN)
 			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
 					"1259 mbox: Issued mailbox cmd "
 					"0x%x while in stopped state.\n",
-					phba->sysfs_mbox.mbox->mb.mbxCommand);
+					pmb->mbxCommand);

 		phba->sysfs_mbox.mbox->vport = vport;

@@ -3356,7 +3436,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 		}

 		if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-		    (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){
+		    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {

 			spin_unlock_irq(&phba->hbalock);
 			rc = lpfc_sli_issue_mbox (phba,
@@ -3368,8 +3448,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 			spin_unlock_irq(&phba->hbalock);
 			rc = lpfc_sli_issue_mbox_wait (phba,
 						       phba->sysfs_mbox.mbox,
-				lpfc_mbox_tmo_val(phba,
-				    phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
+				lpfc_mbox_tmo_val(phba, pmb->mbxCommand) * HZ);
 			spin_lock_irq(&phba->hbalock);
 		}

@@ -3391,7 +3470,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 		return -EAGAIN;
 	}

-	memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
+	memcpy(buf, (uint8_t *) &pmb + off, count);

 	phba->sysfs_mbox.offset = off + count;

@@ -3585,6 +3664,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
 			case LA_8GHZ_LINK:
 				fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
 			break;
+			case LA_10GHZ_LINK:
+				fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+			break;
 			default:
 				fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
 			break;
@@ -3652,7 +3734,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
 	 */
 	if (phba->link_state < LPFC_LINK_DOWN ||
 	    !phba->mbox_mem_pool ||
-	    (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
+	    (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
 		return NULL;

 	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -3663,14 +3745,14 @@ lpfc_get_stats(struct Scsi_Host *shost)
 		return NULL;
 	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));

-	pmb = &pmboxq->mb;
+	pmb = &pmboxq->u.mb;
 	pmb->mbxCommand = MBX_READ_STATUS;
 	pmb->mbxOwner = OWN_HOST;
 	pmboxq->context1 = NULL;
 	pmboxq->vport = vport;

 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-		(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+		(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3695,7 +3777,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
 	pmboxq->vport = vport;

 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-	    (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3769,7 +3851,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
 		return;
 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

-	pmb = &pmboxq->mb;
+	pmb = &pmboxq->u.mb;
 	pmb->mbxCommand = MBX_READ_STATUS;
 	pmb->mbxOwner = OWN_HOST;
 	pmb->un.varWords[0] = 0x1; /* reset request */
@@ -3777,7 +3859,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
 	pmboxq->vport = vport;

 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-		(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+		(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3795,7 +3877,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
 	pmboxq->vport = vport;

 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-	    (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3962,6 +4044,21 @@ lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
 		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
 }

+/**
+ * lpfc_hba_log_verbose_init - Set hba's log verbose level
+ * @phba: Pointer to lpfc_hba struct.
+ *
+ * This function is called by the lpfc_get_cfgparam() routine to set the
+ * module lpfc_log_verbose into the @phba cfg_log_verbose for use with
+ * log messages according to the module's lpfc_log_verbose parameter setting
+ * before any hba port or vport is created.
+ **/
+static void
+lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
+{
+	phba->cfg_log_verbose = verbose;
+}
+
 struct fc_function_template lpfc_transport_functions = {
 	/* fixed attributes the driver supports */
 	.show_host_node_name = 1,
@@ -4105,6 +4202,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
 	lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
 	lpfc_use_msi_init(phba, lpfc_use_msi);
+	lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
+	lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
+	lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
 	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
 	lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
 	lpfc_enable_bg_init(phba, lpfc_enable_bg);
@@ -4113,26 +4213,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	phba->cfg_soft_wwpn = 0L;
 	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
 	lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
-	/*
-	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
-	 * used to create the sg_dma_buf_pool must be dynamically calculated.
-	 * 2 segments are added since the IOCB needs a command and response bde.
-	 */
-	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
-			sizeof(struct fcp_rsp) +
-			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
-
-	if (phba->cfg_enable_bg) {
-		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
-		phba->cfg_sg_dma_buf_size +=
-			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
-	}
-
-	/* Also reinitialize the host templates with new values. */
-	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-
 	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
+	lpfc_enable_fip_init(phba, lpfc_enable_fip);
+	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
+
 	return;
 }

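[Editor's note] The new lpfc_fcp_imax / lpfc_fcp_wq_count / lpfc_fcp_eq_count attributes above follow this file's LPFC_ATTR_R pattern: a module parameter with a default plus an enforced [min,max] range, surfaced read-only through sysfs and folded into the hba configuration by lpfc_get_cfgparam(). A minimal sketch of what such a bounds-checked init amounts to, in plain C with invented names (the real macro also generates the sysfs show function):

#include <stdio.h>

/* Invented stand-ins for the range macros used by the attributes above. */
#define DEMO_FCP_EQN_MIN 1
#define DEMO_FCP_EQN_MAX 7
#define DEMO_FCP_EQN_DEF 1

struct demo_hba { int cfg_fcp_eq_count; };

/* Clamp-or-default validation in the spirit of an _init handler. */
static void demo_fcp_eq_count_init(struct demo_hba *hba, int val)
{
	if (val >= DEMO_FCP_EQN_MIN && val <= DEMO_FCP_EQN_MAX) {
		hba->cfg_fcp_eq_count = val;
		return;
	}
	fprintf(stderr, "fcp_eq_count %d out of range, using %d\n",
		val, DEMO_FCP_EQN_DEF);
	hba->cfg_fcp_eq_count = DEMO_FCP_EQN_DEF;
}

int main(void)
{
	struct demo_hba hba;

	demo_fcp_eq_count_init(&hba, 9);	/* out of range -> default */
	printf("cfg_fcp_eq_count = %d\n", hba.cfg_fcp_eq_count);
	return 0;
}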

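[Editor's note] On the recurring ->mb to ->u.mb rewrites in this file: the mailbox payload inside LPFC_MBOXQ_t evidently becomes a union so one queue entry can carry either the legacy SLI-3 MAILBOX_t or the new SLI-4 mailbox-queue entry, and sysfs_mbox_read() now caches a MAILBOX_t pointer (pmb) rather than spelling out the full path at every call site. A self-contained sketch of that pattern follows; the type names and field sizes are illustrative stand-ins, not the driver's actual layout.

#include <stdio.h>

/* Illustrative stand-ins for the two mailbox wire formats. */
typedef struct { unsigned short mbxCommand; unsigned short mbxStatus; } MAILBOX_t;
typedef struct { unsigned int words[64]; } MQE_t;

/* One queue element can hold either format once the payload is a union. */
struct mboxq {
	union {
		MAILBOX_t mb;	/* legacy SLI-3 mailbox */
		MQE_t mqe;	/* SLI-4 mailbox queue entry */
	} u;
};

int main(void)
{
	struct mboxq q = { .u.mb = { .mbxCommand = 0x05 } };
	MAILBOX_t *pmb = &q.u.mb;	/* cache once, then use pmb->... */

	printf("cmd=0x%x\n", pmb->mbxCommand);
	return 0;
}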
+ 52 - 11
drivers/scsi/lpfc/lpfc_crtn.h

@@ -23,6 +23,8 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
 struct fc_rport;
 void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
 void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
+int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
 void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);

@@ -35,17 +37,19 @@ int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *);
 int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
 void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
-		   LPFC_MBOXQ_t *, uint32_t);
+int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
+		 LPFC_MBOXQ_t *, uint32_t);
 void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
 void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
-void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
 void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
 void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);

 struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
 void lpfc_cleanup_rpis(struct lpfc_vport *, int);
 int lpfc_linkdown(struct lpfc_hba *);
+void lpfc_linkdown_port(struct lpfc_vport *);
 void lpfc_port_link_failure(struct lpfc_vport *);
 void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);

@@ -54,6 +58,7 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
@@ -105,6 +110,7 @@ int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
 int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
 int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
 int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
 int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
@@ -149,15 +155,19 @@ int lpfc_online(struct lpfc_hba *);
 void lpfc_unblock_mgmt_io(struct lpfc_hba *);
 void lpfc_offline_prep(struct lpfc_hba *);
 void lpfc_offline(struct lpfc_hba *);
+void lpfc_reset_hba(struct lpfc_hba *);

 int lpfc_sli_setup(struct lpfc_hba *);
 int lpfc_sli_queue_setup(struct lpfc_hba *);

 void lpfc_handle_eratt(struct lpfc_hba *);
 void lpfc_handle_latt(struct lpfc_hba *);
-irqreturn_t lpfc_intr_handler(int, void *);
-irqreturn_t lpfc_sp_intr_handler(int, void *);
-irqreturn_t lpfc_fp_intr_handler(int, void *);
+irqreturn_t lpfc_sli_intr_handler(int, void *);
+irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
+irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_sp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);

 void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@@ -165,16 +175,32 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
 LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
+void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_mbox_dev_check(struct lpfc_hba *);
 int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
+void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
+void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
+void lpfc_init_vpi(struct lpfcMboxq *, uint16_t);
+void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t);
+void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
+void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);

 void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
 	uint32_t , LPFC_MBOXQ_t *);
 struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *);
 void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *);
+struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
+void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
+void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
+			uint16_t);
+void lpfc_unregister_unused_fcf(struct lpfc_hba *);

-int lpfc_mem_alloc(struct lpfc_hba *);
+int lpfc_mem_alloc(struct lpfc_hba *, int align);
 void lpfc_mem_free(struct lpfc_hba *);
+void lpfc_mem_free_all(struct lpfc_hba *);
 void lpfc_stop_vport_timers(struct lpfc_vport *);

 void lpfc_poll_timeout(unsigned long ptr);
@@ -186,6 +212,7 @@ void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
 uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
 void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
 			   uint32_t);
+void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *);

 void lpfc_reset_barrier(struct lpfc_hba * phba);
 int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
@@ -198,12 +225,13 @@ int lpfc_sli_host_down(struct lpfc_vport *);
 int lpfc_sli_hba_down(struct lpfc_hba *);
 int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 int lpfc_sli_handle_mb_event(struct lpfc_hba *);
-int lpfc_sli_flush_mbox_queue(struct lpfc_hba *);
+void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
 int lpfc_sli_check_eratt(struct lpfc_hba *);
-int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
+void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
 				    struct lpfc_sli_ring *, uint32_t);
+int lpfc_sli4_handle_received_buffer(struct lpfc_hba *);
 void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
+int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
 			struct lpfc_iocbq *, uint32_t);
 void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
@@ -237,7 +265,7 @@ struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,

 int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);

-int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *,
+int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
 			     struct lpfc_iocbq *, struct lpfc_iocbq *,
 			     uint32_t);
 void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
@@ -254,6 +282,12 @@ void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
 const char* lpfc_info(struct Scsi_Host *);
 int lpfc_scan_finished(struct Scsi_Host *, unsigned long);

+int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_api_table_setup(struct lpfc_hba *, uint8_t);
+
 void lpfc_get_cfgparam(struct lpfc_hba *);
 void lpfc_get_vport_cfgparam(struct lpfc_vport *);
 int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
@@ -314,8 +348,15 @@ lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
 				struct lpfc_iocbq *);
 struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *);
 void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
+void lpfc_create_static_vport(struct lpfc_hba *);
+void lpfc_stop_hba_timers(struct lpfc_hba *);
+void lpfc_stop_port(struct lpfc_hba *);
+void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
+int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
+void lpfc_start_fdiscs(struct lpfc_hba *phba);

 #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
 #define HBA_EVENT_RSCN                   5
 #define HBA_EVENT_LINK_UP                2
 #define HBA_EVENT_LINK_DOWN              3
+

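[Editor's note] The prototype churn above includes one API shift worth calling out: lpfc_sli_issue_iocb() and lpfc_sli_issue_iocb_wait() now take a uint32_t ring number instead of a struct lpfc_sli_ring pointer, so callers name the ring (LPFC_ELS_RING and friends) and the SLI layer resolves it internally; that indirection is what lets the SLI-3 and SLI-4 back ends map the same number onto different queue structures. A hedged sketch of the idea, with invented demo types rather than the real driver structures:

#include <stdint.h>
#include <stdio.h>

#define DEMO_ELS_RING 2	/* stand-in for LPFC_ELS_RING */

struct demo_ring { const char *name; };

/* Per-adapter table; each back end could populate this differently. */
static struct demo_ring demo_rings[4] = {
	{ "fcp" }, { "extra" }, { "els" }, { "ct" },
};

/* Callers pass an index, not a pointer, so the lookup is owned here. */
static int demo_issue_iocb(uint32_t ring_number)
{
	struct demo_ring *pring = &demo_rings[ring_number];

	printf("issuing iocb on %s ring\n", pring->name);
	return 0;
}

int main(void)
{
	return demo_issue_iocb(DEMO_ELS_RING);
}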
+ 9 - 6
drivers/scsi/lpfc/lpfc_ct.c

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -32,8 +32,10 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>

+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -267,8 +269,6 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 	     uint32_t tmo, uint8_t retry)
 {
 	struct lpfc_hba  *phba = vport->phba;
-	struct lpfc_sli  *psli = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	IOCB_t *icmd;
 	struct lpfc_iocbq *geniocb;
 	int rc;
@@ -331,7 +331,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 	geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
 	geniocb->vport = vport;
 	geniocb->retry = retry;
-	rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);

 	if (rc == IOCB_ERROR) {
 		lpfc_sli_release_iocbq(phba, geniocb);
@@ -1578,6 +1578,9 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
 				case LA_8GHZ_LINK:
 					ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
 				break;
+				case LA_10GHZ_LINK:
+					ae->un.PortSpeed = HBA_PORTSPEED_10GBIT;
+				break;
 				default:
 					ae->un.PortSpeed =
 						HBA_PORTSPEED_UNKNOWN;
@@ -1730,7 +1733,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
 	uint8_t *fwname;

 	if (vp->rev.rBit) {
-		if (psli->sli_flag & LPFC_SLI2_ACTIVE)
+		if (psli->sli_flag & LPFC_SLI_ACTIVE)
 			rev = vp->rev.sli2FwRev;
 		else
 			rev = vp->rev.sli1FwRev;
@@ -1756,7 +1759,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
 		}
 		b4 = (rev & 0x0000000f);

-		if (psli->sli_flag & LPFC_SLI2_ACTIVE)
+		if (psli->sli_flag & LPFC_SLI_ACTIVE)
 			fwname = vp->rev.sli2FwName;
 		else
 			fwname = vp->rev.sli1FwName;

+ 14 - 7
drivers/scsi/lpfc/lpfc_debugfs.c

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2007-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2007-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -33,8 +33,10 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>

+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -280,6 +282,8 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
 	struct lpfc_dmabuf *d_buf;
 	struct hbq_dmabuf *hbq_buf;

+	if (phba->sli_rev != 3)
+		return 0;
 	cnt = LPFC_HBQINFO_SIZE;
 	spin_lock_irq(&phba->hbalock);

@@ -489,12 +493,15 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
 				 pring->next_cmdidx, pring->local_getidx,
 				 pring->flag, pgpp->rspPutInx, pring->numRiocb);
 	}
-	word0 = readl(phba->HAregaddr);
-	word1 = readl(phba->CAregaddr);
-	word2 = readl(phba->HSregaddr);
-	word3 = readl(phba->HCregaddr);
-	len +=  snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x HC:%08x\n",
-	word0, word1, word2, word3);
+
+	if (phba->sli_rev <= LPFC_SLI_REV3) {
+		word0 = readl(phba->HAregaddr);
+		word1 = readl(phba->CAregaddr);
+		word2 = readl(phba->HSregaddr);
+		word3 = readl(phba->HCregaddr);
+		len +=  snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
+				 "HC:%08x\n", word0, word1, word2, word3);
+	}
 	spin_unlock_irq(&phba->hbalock);
 	return len;
 }

+ 1 - 0
drivers/scsi/lpfc/lpfc_disc.h

@@ -135,6 +135,7 @@ struct lpfc_nodelist {
 #define NLP_NODEV_REMOVE   0x08000000	/* Defer removal till discovery ends */
 #define NLP_TARGET_REMOVE  0x10000000   /* Target remove in process */
 #define NLP_SC_REQ         0x20000000	/* Target requires authentication */
+#define NLP_RPI_VALID      0x80000000	/* nlp_rpi is valid */

 /* ndlp usage management macros */
 #define NLP_CHK_NODE_ACT(ndlp)		(((ndlp)->nlp_usg_map \

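[Editor's note] NLP_RPI_VALID extends the nlp_flag convention already visible above: one 32-bit word of independent state bits manipulated with mask operations, here recording that nlp_rpi holds a registered RPI. A throwaway sketch of the idiom (flag values copied from the list; the structure and the usage comments are illustrative, not the driver's):

#include <stdio.h>

#define NLP_SC_REQ	0x20000000	/* Target requires authentication */
#define NLP_RPI_VALID	0x80000000	/* nlp_rpi is valid */

struct demo_node { unsigned int nlp_flag; };

int main(void)
{
	struct demo_node n = { 0 };

	n.nlp_flag |= NLP_RPI_VALID;		/* mark the RPI as registered */
	if (n.nlp_flag & NLP_RPI_VALID)
		printf("rpi valid, unregister it on teardown\n");
	n.nlp_flag &= ~NLP_RPI_VALID;		/* clear once unregistered */
	return 0;
}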
+ 190 - 85
drivers/scsi/lpfc/lpfc_els.c

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -28,8 +28,10 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>

+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -84,7 +86,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
 	uint32_t ha_copy;

 	if (vport->port_state >= LPFC_VPORT_READY ||
-	    phba->link_state == LPFC_LINK_DOWN)
+	    phba->link_state == LPFC_LINK_DOWN ||
+	    phba->sli_rev > LPFC_SLI_REV3)
 		return 0;

 	/* Read the HBA Host Attention Register */
@@ -219,7 +222,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
 		icmd->un.elsreq64.myID = vport->fc_myDID;

 		/* For ELS_REQUEST64_CR, use the VPI by default */
-		icmd->ulpContext = vport->vpi;
+		icmd->ulpContext = vport->vpi + phba->vpi_base;
 		icmd->ulpCt_h = 0;
 		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
 		if (elscmd == ELS_CMD_ECHO)
@@ -305,7 +308,7 @@ els_iocb_free_pcmb_exit:
  *   0 - successfully issued fabric registration login for @vport
  *   -ENXIO -- failed to issue fabric registration login for @vport
  **/
-static int
+int
 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
 {
 	struct lpfc_hba  *phba = vport->phba;
@@ -345,8 +348,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
 		err = 4;
 		goto fail;
 	}
-	rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
-			    0);
+	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0);
 	if (rc) {
 		err = 5;
 		goto fail_free_mbox;
@@ -385,6 +387,75 @@ fail:
 	return -ENXIO;
 }

+/**
+ * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
+ * the @vport. This mailbox command is necessary for FCoE only.
+ *
+ * Return code
+ *   0 - successfully issued REG_VFI for @vport
+ *   A failure code otherwise.
+ **/
+static int
+lpfc_issue_reg_vfi(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_nodelist *ndlp;
+	struct serv_parm *sp;
+	struct lpfc_dmabuf *dmabuf;
+	int rc = 0;
+
+	sp = &phba->fc_fabparam;
+	ndlp = lpfc_findnode_did(vport, Fabric_DID);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+		rc = -ENODEV;
+		goto fail;
+	}
+
+	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!dmabuf) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+	dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
+	if (!dmabuf->virt) {
+		rc = -ENOMEM;
+		goto fail_free_dmabuf;
+	}
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		rc = -ENOMEM;
+		goto fail_free_coherent;
+	}
+	vport->port_state = LPFC_FABRIC_CFG_LINK;
+	memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
+	lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
+	mboxq->vport = vport;
+	mboxq->context1 = dmabuf;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		rc = -ENXIO;
+		goto fail_free_mbox;
+	}
+	return 0;
+
+fail_free_mbox:
+	mempool_free(mboxq, phba->mbox_mem_pool);
+fail_free_coherent:
+	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+fail_free_dmabuf:
+	kfree(dmabuf);
+fail:
+	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+		"0289 Issue Register VFI failed: Err %d\n", rc);
+	return rc;
+}
+
 /**
  * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
  * @vport: pointer to a host virtual N_Port data structure.
@@ -497,17 +568,24 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		}
 	}

-	lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
-
-	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
-	    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) {
-		lpfc_register_new_vport(phba, vport, ndlp);
-		return 0;
+	if (phba->sli_rev < LPFC_SLI_REV4) {
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
+		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
+		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+			lpfc_register_new_vport(phba, vport, ndlp);
+		else
+			lpfc_issue_fabric_reglogin(vport);
+	} else {
+		ndlp->nlp_type |= NLP_FABRIC;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+		if (vport->vfi_state & LPFC_VFI_REGISTERED) {
+			lpfc_start_fdiscs(phba);
+			lpfc_do_scr_ns_plogi(phba, vport);
+		} else
+			lpfc_issue_reg_vfi(vport);
 	}
-	lpfc_issue_fabric_reglogin(vport);
 	return 0;
 }
-
 /**
  * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
  * @vport: pointer to a host virtual N_Port data structure.
@@ -815,9 +893,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	if (sp->cmn.fcphHigh < FC_PH3)
 		sp->cmn.fcphHigh = FC_PH3;

-	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+	if  (phba->sli_rev == LPFC_SLI_REV4) {
+		elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
+		elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
+		/* FLOGI needs to be 3 for WQE FCFI */
+		/* Set the fcfi to the fcfi we registered with */
+		elsiocb->iocb.ulpContext = phba->fcf.fcfi;
+	} else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
 		sp->cmn.request_multiple_Nport = 1;
-
 		/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
 		icmd->ulpCt_h = 1;
 		icmd->ulpCt_l = 0;
@@ -930,6 +1013,8 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
 		if (!ndlp)
 			return 0;
 		lpfc_nlp_init(vport, ndlp, Fabric_DID);
+		/* Set the node type */
+		ndlp->nlp_type |= NLP_FABRIC;
 		/* Put ndlp onto node list */
 		lpfc_enqueue_node(vport, ndlp);
 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
@@ -1350,14 +1435,12 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 	IOCB_t *icmd;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	int ret;

 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */

 	ndlp = lpfc_findnode_did(vport, did);
 	if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
@@ -1391,7 +1474,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)

 	phba->fc_stat.elsXmitPLOGI++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
-	ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);

 	if (ret == IOCB_ERROR) {
 		lpfc_els_free_iocb(phba, elsiocb);
@@ -1501,14 +1584,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	PRLI *npr;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
-	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;

-	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
-
 	cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 				     ndlp->nlp_DID, ELS_CMD_PRLI);
@@ -1550,7 +1628,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_PRLI_SND;
 	spin_unlock_irq(shost->host_lock);
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~NLP_PRLI_SND;
 		spin_unlock_irq(shost->host_lock);
@@ -1608,7 +1687,8 @@ lpfc_adisc_done(struct lpfc_vport *vport)
 	 * and continue discovery.
 	 */
 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
-	    !(vport->fc_flag & FC_RSCN_MODE)) {
+	    !(vport->fc_flag & FC_RSCN_MODE) &&
+	    (phba->sli_rev < LPFC_SLI_REV4)) {
 		lpfc_issue_reg_vpi(phba, vport);
 		return;
 	}
@@ -1788,8 +1868,6 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	ADISC *ap;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	uint8_t *pcmd;
 	uint16_t cmdsize;

@@ -1822,7 +1900,8 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_ADISC_SND;
 	spin_unlock_irq(shost->host_lock);
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~NLP_ADISC_SND;
 		spin_unlock_irq(shost->host_lock);
@@ -1937,15 +2016,10 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	struct lpfc_hba  *phba = vport->phba;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
-	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	int rc;

-	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];
-
 	spin_lock_irq(shost->host_lock);
 	if (ndlp->nlp_flag & NLP_LOGO_SND) {
 		spin_unlock_irq(shost->host_lock);
@@ -1978,7 +2052,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_LOGO_SND;
 	spin_unlock_irq(shost->host_lock);
-	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);

 	if (rc == IOCB_ERROR) {
 		spin_lock_irq(shost->host_lock);
@@ -2058,14 +2132,12 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 	struct lpfc_hba  *phba = vport->phba;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	struct lpfc_nodelist *ndlp;

 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
 	cmdsize = (sizeof(uint32_t) + sizeof(SCR));

 	ndlp = lpfc_findnode_did(vport, nportid);
@@ -2108,7 +2180,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)

 	phba->fc_stat.elsXmitSCR++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
 		/* The additional lpfc_nlp_put will cause the following
 		 * lpfc_els_free_iocb routine to trigger the release of
 		 * the node.
@@ -2152,7 +2225,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 	struct lpfc_hba  *phba = vport->phba;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	FARP *fp;
 	uint8_t *pcmd;
@@ -2162,7 +2234,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 	struct lpfc_nodelist *ndlp;

 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
 	cmdsize = (sizeof(uint32_t) + sizeof(FARP));

 	ndlp = lpfc_findnode_did(vport, nportid);
@@ -2219,7 +2290,8 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)

 	phba->fc_stat.elsXmitFARPR++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
 		/* The additional lpfc_nlp_put will cause the following
 		 * lpfc_els_free_iocb routine to trigger the release of
 		 * the node.
@@ -2949,6 +3021,14 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;

+	/*
+	 * This routine is used to register and unregister in previous SLI
+	 * modes.
+	 */
+	if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
+	    (phba->sli_rev == LPFC_SLI_REV4))
+		lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
+
 	pmb->context1 = NULL;
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
@@ -2961,6 +3041,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		 */
 		lpfc_nlp_not_used(ndlp);
 	}
+
 	return;
 }

@@ -3170,7 +3251,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
 	IOCB_t *icmd;
 	IOCB_t *oldcmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
@@ -3178,7 +3258,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
 	ELS_PKT *els_pkt_ptr;

 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
 	oldcmd = &oldiocb->iocb;

 	switch (flag) {
@@ -3266,7 +3345,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
 	}

 	phba->fc_stat.elsXmitACC++;
-	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 	if (rc == IOCB_ERROR) {
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
@@ -3305,15 +3384,12 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
 	IOCB_t *icmd;
 	IOCB_t *oldcmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	int rc;

 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
-
 	cmdsize = 2 * sizeof(uint32_t);
 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
 				     ndlp->nlp_DID, ELS_CMD_LS_RJT);
@@ -3346,7 +3422,7 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,

 	phba->fc_stat.elsXmitLSRJT++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
-	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);

 	if (rc == IOCB_ERROR) {
 		lpfc_els_free_iocb(phba, elsiocb);
@@ -3379,8 +3455,6 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
 		       struct lpfc_nodelist *ndlp)
 {
 	struct lpfc_hba  *phba = vport->phba;
-	struct lpfc_sli  *psli = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	ADISC *ap;
 	IOCB_t *icmd, *oldcmd;
 	struct lpfc_iocbq *elsiocb;
@@ -3422,7 +3496,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,

 	phba->fc_stat.elsXmitACC++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
-	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 	if (rc == IOCB_ERROR) {
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
@@ -3459,14 +3533,12 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
 	IOCB_t *icmd;
 	IOCB_t *oldcmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	int rc;

 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */

 	cmdsize = sizeof(uint32_t) + sizeof(PRLI);
 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -3520,7 +3592,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
 	phba->fc_stat.elsXmitACC++;
 	phba->fc_stat.elsXmitACC++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 
 
-	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 	if (rc == IOCB_ERROR) {
 	if (rc == IOCB_ERROR) {
 		lpfc_els_free_iocb(phba, elsiocb);
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
 		return 1;
@@ -3562,15 +3634,12 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
 	RNID *rn;
 	RNID *rn;
 	IOCB_t *icmd, *oldcmd;
 	IOCB_t *icmd, *oldcmd;
 	struct lpfc_iocbq *elsiocb;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	uint16_t cmdsize;
 	int rc;
 	int rc;
 
 
 	psli = &phba->sli;
 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];
-
 	cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
 	cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
 					+ (2 * sizeof(struct lpfc_name));
 					+ (2 * sizeof(struct lpfc_name));
 	if (format)
 	if (format)
@@ -3626,7 +3695,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
 	elsiocb->context1 = NULL;  /* Don't need ndlp for cmpl,
 	elsiocb->context1 = NULL;  /* Don't need ndlp for cmpl,
 				    * it could be freed */
 				    * it could be freed */
 
 
-	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 	if (rc == IOCB_ERROR) {
 	if (rc == IOCB_ERROR) {
 		lpfc_els_free_iocb(phba, elsiocb);
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
 		return 1;
@@ -3839,7 +3908,9 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
 			payload_len -= sizeof(uint32_t);
 			payload_len -= sizeof(uint32_t);
 			switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
 			switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
 			case RSCN_ADDRESS_FORMAT_PORT:
 			case RSCN_ADDRESS_FORMAT_PORT:
-				if (ns_did.un.word == rscn_did.un.word)
+				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
+				    && (ns_did.un.b.area == rscn_did.un.b.area)
+				    && (ns_did.un.b.id == rscn_did.un.b.id))
 					goto return_did_out;
 					goto return_did_out;
 				break;
 				break;
 			case RSCN_ADDRESS_FORMAT_AREA:
 			case RSCN_ADDRESS_FORMAT_AREA:
@@ -4300,7 +4371,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 			lpfc_init_link(phba, mbox,
 			lpfc_init_link(phba, mbox,
 				       phba->cfg_topology,
 				       phba->cfg_topology,
 				       phba->cfg_link_speed);
 				       phba->cfg_link_speed);
-			mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
+			mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
 			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 			mbox->vport = vport;
 			mbox->vport = vport;
 			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4440,8 +4511,6 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 static void
 static void
 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 {
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	MAILBOX_t *mb;
 	MAILBOX_t *mb;
 	IOCB_t *icmd;
 	IOCB_t *icmd;
 	RPS_RSP *rps_rsp;
 	RPS_RSP *rps_rsp;
@@ -4451,7 +4520,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	uint16_t xri, status;
 	uint16_t xri, status;
 	uint32_t cmdsize;
 	uint32_t cmdsize;
 
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 
 
 	ndlp = (struct lpfc_nodelist *) pmb->context2;
 	ndlp = (struct lpfc_nodelist *) pmb->context2;
 	xri = (uint16_t) ((unsigned long)(pmb->context1));
 	xri = (uint16_t) ((unsigned long)(pmb->context1));
@@ -4507,7 +4576,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 			 ndlp->nlp_rpi);
 			 ndlp->nlp_rpi);
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 	phba->fc_stat.elsXmitACC++;
 	phba->fc_stat.elsXmitACC++;
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR)
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
 		lpfc_els_free_iocb(phba, elsiocb);
 		lpfc_els_free_iocb(phba, elsiocb);
 	return;
 	return;
 }
 }
@@ -4616,8 +4685,6 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
 	IOCB_t *icmd, *oldcmd;
 	IOCB_t *icmd, *oldcmd;
 	RPL_RSP rpl_rsp;
 	RPL_RSP rpl_rsp;
 	struct lpfc_iocbq *elsiocb;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	uint8_t *pcmd;
 	uint8_t *pcmd;
 
 
 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -4654,7 +4721,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
 			 ndlp->nlp_rpi);
 			 ndlp->nlp_rpi);
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 	phba->fc_stat.elsXmitACC++;
 	phba->fc_stat.elsXmitACC++;
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
 		lpfc_els_free_iocb(phba, elsiocb);
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
 		return 1;
 	}
 	}
@@ -4883,7 +4951,10 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 		} else {
 		} else {
 			/* FAN verified - skip FLOGI */
 			/* FAN verified - skip FLOGI */
 			vport->fc_myDID = vport->fc_prevDID;
 			vport->fc_myDID = vport->fc_prevDID;
-			lpfc_issue_fabric_reglogin(vport);
+			if (phba->sli_rev < LPFC_SLI_REV4)
+				lpfc_issue_fabric_reglogin(vport);
+			else
+				lpfc_issue_reg_vfi(vport);
 		}
 		}
 	}
 	}
 	return 0;
 	return 0;
@@ -5566,11 +5637,10 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
 
 dropit:
 dropit:
 	if (vport && !(vport->load_flag & FC_UNLOADING))
 	if (vport && !(vport->load_flag & FC_UNLOADING))
-		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
-			"(%d):0111 Dropping received ELS cmd "
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			"0111 Dropping received ELS cmd "
 			"Data: x%x x%x x%x\n",
 			"Data: x%x x%x x%x\n",
-			vport->vpi, icmd->ulpStatus,
-			icmd->un.ulpWord[4], icmd->ulpTimeout);
+			icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
 	phba->fc_stat.elsRcvDrop++;
 	phba->fc_stat.elsRcvDrop++;
 }
 }
 
 
@@ -5646,10 +5716,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	     icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
 	     icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
 		if (icmd->unsli3.rcvsli3.vpi == 0xffff)
 		if (icmd->unsli3.rcvsli3.vpi == 0xffff)
 			vport = phba->pport;
 			vport = phba->pport;
-		else {
-			uint16_t vpi = icmd->unsli3.rcvsli3.vpi;
-			vport = lpfc_find_vport_by_vpid(phba, vpi);
-		}
+		else
+			vport = lpfc_find_vport_by_vpid(phba,
+				icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
 	}
 	}
 	/* If there are no BDEs associated
 	/* If there are no BDEs associated
 	 * with this IOCB, there is nothing to do.
 	 * with this IOCB, there is nothing to do.
@@ -5781,7 +5850,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	struct lpfc_vport *vport = pmb->vport;
 	struct lpfc_vport *vport = pmb->vport;
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 
 
 	spin_lock_irq(shost->host_lock);
 	spin_lock_irq(shost->host_lock);
 	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
 	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
@@ -5818,7 +5887,10 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
 
 	} else {
 	} else {
 		if (vport == phba->pport)
 		if (vport == phba->pport)
-			lpfc_issue_fabric_reglogin(vport);
+			if (phba->sli_rev < LPFC_SLI_REV4)
+				lpfc_issue_fabric_reglogin(vport);
+			else
+				lpfc_issue_reg_vfi(vport);
 		else
 		else
 			lpfc_do_scr_ns_plogi(phba, vport);
 			lpfc_do_scr_ns_plogi(phba, vport);
 	}
 	}
@@ -5850,7 +5922,7 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
 
 
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (mbox) {
 	if (mbox) {
-		lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox);
+		lpfc_reg_vpi(vport, mbox);
 		mbox->vport = vport;
 		mbox->vport = vport;
 		mbox->context2 = lpfc_nlp_get(ndlp);
 		mbox->context2 = lpfc_nlp_get(ndlp);
 		mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
 		mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
@@ -6139,7 +6211,6 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_hba  *phba = vport->phba;
 	struct lpfc_hba  *phba = vport->phba;
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	IOCB_t *icmd;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
 	struct lpfc_iocbq *elsiocb;
 	uint8_t *pcmd;
 	uint8_t *pcmd;
@@ -6169,7 +6240,8 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	spin_lock_irq(shost->host_lock);
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_LOGO_SND;
 	ndlp->nlp_flag |= NLP_LOGO_SND;
 	spin_unlock_irq(shost->host_lock);
 	spin_unlock_irq(shost->host_lock);
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
 		spin_lock_irq(shost->host_lock);
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~NLP_LOGO_SND;
 		ndlp->nlp_flag &= ~NLP_LOGO_SND;
 		spin_unlock_irq(shost->host_lock);
 		spin_unlock_irq(shost->host_lock);
@@ -6224,7 +6296,6 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
 	struct lpfc_iocbq *iocb;
 	struct lpfc_iocbq *iocb;
 	unsigned long iflags;
 	unsigned long iflags;
 	int ret;
 	int ret;
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	IOCB_t *cmd;
 	IOCB_t *cmd;
 
 
 repeat:
 repeat:
@@ -6248,7 +6319,7 @@ repeat:
 			"Fabric sched1:   ste:x%x",
 			"Fabric sched1:   ste:x%x",
 			iocb->vport->port_state, 0, 0);
 			iocb->vport->port_state, 0, 0);
 
 
-		ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
+		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
 
 
 		if (ret == IOCB_ERROR) {
 		if (ret == IOCB_ERROR) {
 			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
 			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6394,7 +6465,6 @@ static int
 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
 {
 {
 	unsigned long iflags;
 	unsigned long iflags;
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	int ready;
 	int ready;
 	int ret;
 	int ret;
 
 
@@ -6418,7 +6488,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
 			"Fabric sched2:   ste:x%x",
 			"Fabric sched2:   ste:x%x",
 			iocb->vport->port_state, 0, 0);
 			iocb->vport->port_state, 0, 0);
 
 
-		ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
+		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
 
 
 		if (ret == IOCB_ERROR) {
 		if (ret == IOCB_ERROR) {
 			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
 			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6524,3 +6594,38 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
 			      IOERR_SLI_ABORTED);
 			      IOERR_SLI_ABORTED);
 }
 }
+
+/**
+ * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the els xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 slow-path
+ * ELS aborted xri.
+ **/
+void
+lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
+			  struct sli4_wcqe_xri_aborted *axri)
+{
+	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+	unsigned long iflag = 0;
+
+	spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag);
+	list_for_each_entry_safe(sglq_entry, sglq_next,
+			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
+		if (sglq_entry->sli4_xritag == xri) {
+			list_del(&sglq_entry->list);
+			spin_unlock_irqrestore(
+					&phba->sli4_hba.abts_sgl_list_lock,
+					 iflag);
+			spin_lock_irqsave(&phba->hbalock, iflag);
+
+			list_add_tail(&sglq_entry->list,
+				&phba->sli4_hba.lpfc_sgl_list);
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag);
+}
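
A recurring change in the lpfc_els.c hunks above is that lpfc_sli_issue_iocb() now takes a ring number such as LPFC_ELS_RING instead of a struct lpfc_sli_ring pointer, which is why every caller drops its local pring lookup. A minimal stand-alone sketch of the new calling convention, with toy stand-ins for the lpfc types (stub bodies and values are illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>

#define LPFC_ELS_RING	0
#define LPFC_MAX_RING	4

struct lpfc_sli_ring { uint32_t ringno; };
struct lpfc_sli      { struct lpfc_sli_ring ring[LPFC_MAX_RING]; };
struct lpfc_hba      { struct lpfc_sli sli; };
struct lpfc_iocbq    { int iotag; };

/* New-style entry point: the routine resolves the ring from the number
 * itself, so callers no longer need a struct lpfc_sli_ring pointer. */
static int lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
			       struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
	(void)flag;
	printf("iocb %d issued on ring %u\n", piocb->iotag, pring->ringno);
	return 0;
}

int main(void)
{
	struct lpfc_hba phba = { .sli.ring = { {0}, {1}, {2}, {3} } };
	struct lpfc_iocbq elsiocb = { .iotag = 1 };
	/* Old callers: pring = &phba->sli.ring[LPFC_ELS_RING];
	 *              lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);    */
	lpfc_sli_issue_iocb(&phba, LPFC_ELS_RING, &elsiocb, 0);
	return 0;
}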

File diff suppressed because it is too large
+ 830 - 45
drivers/scsi/lpfc/lpfc_hbadisc.c


+ 99 - 43
drivers/scsi/lpfc/lpfc_hw.h

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -470,6 +470,35 @@ struct serv_parm {	/* Structure is in Big Endian format */
 	uint8_t vendorVersion[16];
 };

+/*
+ * Virtual Fabric Tagging Header
+ */
+struct fc_vft_header {
+	 uint32_t word0;
+#define fc_vft_hdr_r_ctl_SHIFT		24
+#define fc_vft_hdr_r_ctl_MASK		0xFF
+#define fc_vft_hdr_r_ctl_WORD		word0
+#define fc_vft_hdr_ver_SHIFT		22
+#define fc_vft_hdr_ver_MASK		0x3
+#define fc_vft_hdr_ver_WORD		word0
+#define fc_vft_hdr_type_SHIFT		18
+#define fc_vft_hdr_type_MASK		0xF
+#define fc_vft_hdr_type_WORD		word0
+#define fc_vft_hdr_e_SHIFT		16
+#define fc_vft_hdr_e_MASK		0x1
+#define fc_vft_hdr_e_WORD		word0
+#define fc_vft_hdr_priority_SHIFT	13
+#define fc_vft_hdr_priority_MASK	0x7
+#define fc_vft_hdr_priority_WORD	word0
+#define fc_vft_hdr_vf_id_SHIFT		1
+#define fc_vft_hdr_vf_id_MASK		0xFFF
+#define fc_vft_hdr_vf_id_WORD		word0
+	uint32_t word1;
+#define fc_vft_hdr_hopct_SHIFT		24
+#define fc_vft_hdr_hopct_MASK		0xFF
+#define fc_vft_hdr_hopct_WORD		word1
+};
+
 /*
  *  Extended Link Service LS_COMMAND codes (Payload Word 0)
  */
@@ -1152,6 +1181,9 @@ typedef struct {
 #define PCI_DEVICE_ID_HORNET        0xfe05
 #define PCI_DEVICE_ID_ZEPHYR_SCSP   0xfe11
 #define PCI_DEVICE_ID_ZEPHYR_DCSP   0xfe12
+#define PCI_VENDOR_ID_SERVERENGINE  0x19a2
+#define PCI_DEVICE_ID_TIGERSHARK    0x0704
+#define PCI_DEVICE_ID_TIGERSHARK_S  0x0705

 #define JEDEC_ID_ADDRESS            0x0080001c
 #define FIREFLY_JEDEC_ID            0x1ACC
@@ -1342,15 +1374,21 @@ typedef struct {		/* FireFly BIU registers */
 #define MBX_READ_LA64       0x95
 #define MBX_REG_VPI	    0x96
 #define MBX_UNREG_VPI	    0x97
-#define MBX_REG_VNPID	    0x96
-#define MBX_UNREG_VNPID	    0x97

 #define MBX_WRITE_WWN       0x98
 #define MBX_SET_DEBUG       0x99
 #define MBX_LOAD_EXP_ROM    0x9C
-
-#define MBX_MAX_CMDS        0x9D
+#define MBX_SLI4_CONFIG	    0x9B
+#define MBX_SLI4_REQ_FTRS   0x9D
+#define MBX_MAX_CMDS        0x9E
+#define MBX_RESUME_RPI      0x9E
 #define MBX_SLI2_CMD_MASK   0x80
+#define MBX_REG_VFI         0x9F
+#define MBX_REG_FCFI        0xA0
+#define MBX_UNREG_VFI       0xA1
+#define MBX_UNREG_FCFI	    0xA2
+#define MBX_INIT_VFI        0xA3
+#define MBX_INIT_VPI        0xA4

 /* IOCB Commands */

@@ -1440,6 +1478,16 @@ typedef struct {		/* FireFly BIU registers */
 #define CMD_IOCB_LOGENTRY_CN		0x94
 #define CMD_IOCB_LOGENTRY_ASYNC_CN	0x96

+/* Unhandled Data Security SLI Commands */
+#define DSSCMD_IWRITE64_CR 		0xD8
+#define DSSCMD_IWRITE64_CX		0xD9
+#define DSSCMD_IREAD64_CR		0xDA
+#define DSSCMD_IREAD64_CX		0xDB
+#define DSSCMD_INVALIDATE_DEK		0xDC
+#define DSSCMD_SET_KEK			0xDD
+#define DSSCMD_GET_KEK_ID		0xDE
+#define DSSCMD_GEN_XFER			0xDF
+
 #define CMD_MAX_IOCB_CMD        0xE6
 #define CMD_IOCB_MASK           0xff

@@ -1466,6 +1514,7 @@ typedef struct {		/* FireFly BIU registers */
 #define MBXERR_BAD_RCV_LENGTH       14
 #define MBXERR_DMA_ERROR            15
 #define MBXERR_ERROR                16
+#define MBXERR_LINK_DOWN            0x33
 #define MBX_NOT_FINISHED           255

 #define MBX_BUSY                   0xffffff /* Attempted cmd to busy Mailbox */
@@ -1504,32 +1553,6 @@ struct ulp_bde {
 #endif
 };

-struct ulp_bde64 {	/* SLI-2 */
-	union ULP_BDE_TUS {
-		uint32_t w;
-		struct {
-#ifdef __BIG_ENDIAN_BITFIELD
-			uint32_t bdeFlags:8;	/* BDE Flags 0 IS A SUPPORTED
-						   VALUE !! */
-			uint32_t bdeSize:24;	/* Size of buffer (in bytes) */
-#else	/*  __LITTLE_ENDIAN_BITFIELD */
-			uint32_t bdeSize:24;	/* Size of buffer (in bytes) */
-			uint32_t bdeFlags:8;	/* BDE Flags 0 IS A SUPPORTED
-						   VALUE !! */
-#endif
-#define BUFF_TYPE_BDE_64    0x00	/* BDE (Host_resident) */
-#define BUFF_TYPE_BDE_IMMED 0x01	/* Immediate Data BDE */
-#define BUFF_TYPE_BDE_64P   0x02	/* BDE (Port-resident) */
-#define BUFF_TYPE_BDE_64I   0x08	/* Input BDE (Host-resident) */
-#define BUFF_TYPE_BDE_64IP  0x0A	/* Input BDE (Port-resident) */
-#define BUFF_TYPE_BLP_64    0x40	/* BLP (Host-resident) */
-#define BUFF_TYPE_BLP_64P   0x42	/* BLP (Port-resident) */
-		} f;
-	} tus;
-	uint32_t addrLow;
-	uint32_t addrHigh;
-};
-
 typedef struct ULP_BDL {	/* SLI-2 */
 #ifdef __BIG_ENDIAN_BITFIELD
 	uint32_t bdeFlags:8;	/* BDL Flags */
@@ -2287,7 +2310,7 @@ typedef struct {
 	uint32_t rsvd3;
 	uint32_t rsvd4;
 	uint32_t rsvd5;
-	uint16_t rsvd6;
+	uint16_t vfi;
 	uint16_t vpi;
 #else	/*  __LITTLE_ENDIAN */
 	uint32_t rsvd1;
@@ -2297,7 +2320,7 @@ typedef struct {
 	uint32_t rsvd4;
 	uint32_t rsvd5;
 	uint16_t vpi;
-	uint16_t rsvd6;
+	uint16_t vfi;
 #endif
 } REG_VPI_VAR;

@@ -2457,7 +2480,7 @@ typedef struct {
 	uint32_t entry_index:16;
 #endif

-	uint32_t rsvd1;
+	uint32_t sli4_length;
 	uint32_t word_cnt;
 	uint32_t resp_offset;
 } DUMP_VAR;
@@ -2470,9 +2493,32 @@ typedef struct {
 #define  DMP_RSP_OFFSET          0x14   /* word 5 contains first word of rsp */
 #define  DMP_RSP_SIZE            0x6C   /* maximum of 27 words of rsp data */

+#define  DMP_REGION_VPORT	 0x16   /* VPort info region */
+#define  DMP_VPORT_REGION_SIZE	 0x200
+#define  DMP_MBOX_OFFSET_WORD	 0x5
+
+#define  DMP_REGION_FCOEPARAM	 0x17   /* fcoe param region */
+#define  DMP_FCOEPARAM_RGN_SIZE	 0x400
+
 #define  WAKE_UP_PARMS_REGION_ID    4
 #define  WAKE_UP_PARMS_WORD_SIZE   15

+struct vport_rec {
+	uint8_t wwpn[8];
+	uint8_t wwnn[8];
+};
+
+#define VPORT_INFO_SIG 0x32324752
+#define VPORT_INFO_REV_MASK 0xff
+#define VPORT_INFO_REV 0x1
+#define MAX_STATIC_VPORT_COUNT 16
+struct static_vport_info {
+	uint32_t 		signature;
+	uint32_t		rev;
+	struct vport_rec 	vport_list[MAX_STATIC_VPORT_COUNT];
+	uint32_t		resvd[66];
+};
+
 /* Option rom version structure */
 struct prog_id {
 #ifdef __BIG_ENDIAN_BITFIELD
@@ -2697,7 +2743,9 @@ typedef struct {
 #endif

 #ifdef __BIG_ENDIAN_BITFIELD
-	uint32_t rsvd1     : 23;  /* Reserved                             */
+	uint32_t rsvd1     : 19;  /* Reserved                             */
+	uint32_t cdss      :  1;  /* Configure Data Security SLI          */
+	uint32_t rsvd2     :  3;  /* Reserved                             */
 	uint32_t cbg       :  1;  /* Configure BlockGuard                 */
 	uint32_t cmv       :  1;  /* Configure Max VPIs                   */
 	uint32_t ccrp      :  1;  /* Config Command Ring Polling          */
@@ -2717,10 +2765,14 @@ typedef struct {
 	uint32_t ccrp      :  1;  /* Config Command Ring Polling          */
 	uint32_t cmv	   :  1;  /* Configure Max VPIs                   */
 	uint32_t cbg       :  1;  /* Configure BlockGuard                 */
-	uint32_t rsvd1     : 23;  /* Reserved                             */
+	uint32_t rsvd2     :  3;  /* Reserved                             */
+	uint32_t cdss      :  1;  /* Configure Data Security SLI          */
+	uint32_t rsvd1     : 19;  /* Reserved                             */
 #endif
 #ifdef __BIG_ENDIAN_BITFIELD
-	uint32_t rsvd2     : 23;  /* Reserved                             */
+	uint32_t rsvd3     : 19;  /* Reserved                             */
+	uint32_t gdss      :  1;  /* Configure Data Security SLI          */
+	uint32_t rsvd4     :  3;  /* Reserved                             */
 	uint32_t gbg       :  1;  /* Grant BlockGuard                     */
 	uint32_t gmv	   :  1;  /* Grant Max VPIs                       */
 	uint32_t gcrp	   :  1;  /* Grant Command Ring Polling           */
@@ -2740,7 +2792,9 @@ typedef struct {
 	uint32_t gcrp	   :  1;  /* Grant Command Ring Polling           */
 	uint32_t gmv	   :  1;  /* Grant Max VPIs                       */
 	uint32_t gbg       :  1;  /* Grant BlockGuard                     */
-	uint32_t rsvd2     : 23;  /* Reserved                             */
+	uint32_t rsvd4     :  3;  /* Reserved                             */
+	uint32_t gdss      :  1;  /* Configure Data Security SLI          */
+	uint32_t rsvd3     : 19;  /* Reserved                             */
 #endif

 #ifdef __BIG_ENDIAN_BITFIELD
@@ -2753,20 +2807,20 @@ typedef struct {

 #ifdef __BIG_ENDIAN_BITFIELD
 	uint32_t max_hbq   : 16;  /* Max HBQs Host expect to configure    */
-	uint32_t rsvd3     : 16;  /* Max HBQs Host expect to configure    */
+	uint32_t rsvd5     : 16;  /* Max HBQs Host expect to configure    */
 #else	/*  __LITTLE_ENDIAN */
-	uint32_t rsvd3     : 16;  /* Max HBQs Host expect to configure    */
+	uint32_t rsvd5     : 16;  /* Max HBQs Host expect to configure    */
 	uint32_t max_hbq   : 16;  /* Max HBQs Host expect to configure    */
 #endif

-	uint32_t rsvd4;           /* Reserved                             */
+	uint32_t rsvd6;           /* Reserved                             */

 #ifdef __BIG_ENDIAN_BITFIELD
-	uint32_t rsvd5      : 16;  /* Reserved                             */
+	uint32_t rsvd7      : 16;  /* Reserved                             */
 	uint32_t max_vpi    : 16;  /* Max number of virt N-Ports           */
 #else	/*  __LITTLE_ENDIAN */
 	uint32_t max_vpi    : 16;  /* Max number of virt N-Ports           */
-	uint32_t rsvd5      : 16;  /* Reserved                             */
+	uint32_t rsvd7      : 16;  /* Reserved                             */
 #endif

 } CONFIG_PORT_VAR;
@@ -3666,3 +3720,5 @@ lpfc_error_lost_link(IOCB_t *iocbp)
 #define MENLO_TIMEOUT 30
 #define SETVAR_MLOMNT 0x103107
 #define SETVAR_MLORST 0x103007
+
+#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */

+ 2141 - 0
drivers/scsi/lpfc/lpfc_hw4.h

@@ -0,0 +1,2141 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2009 Emulex.  All rights reserved.                *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+/* Macros to deal with bit fields. Each bit field must have 3 #defines
+ * associated with it (_SHIFT, _MASK, and _WORD).
+ * EG. For a bit field that is in the 7th bit of the "field4" field of a
+ * structure and is 2 bits in size the following #defines must exist:
+ *	struct temp {
+ *		uint32_t	field1;
+ *		uint32_t	field2;
+ *		uint32_t	field3;
+ *		uint32_t	field4;
+ *	#define example_bit_field_SHIFT		7
+ *	#define example_bit_field_MASK		0x03
+ *	#define example_bit_field_WORD		field4
+ *		uint32_t	field5;
+ *	};
+ * Then the macros below may be used to get or set the value of that field.
+ * EG. To get the value of the bit field from the above example:
+ *	struct temp t1;
+ *	value = bf_get(example_bit_field, &t1);
+ * And then to set that bit field:
+ *	bf_set(example_bit_field, &t1, 2);
+ * Or clear that bit field:
+ *	bf_set(example_bit_field, &t1, 0);
+ */
+#define bf_get(name, ptr) \
+	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
+#define bf_set(name, ptr, value) \
+	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
+		 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
+
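The bf_get()/bf_set() macros above generate the shift-and-mask accessors used for every register and queue-entry field in this file. A minimal stand-alone demonstration, reusing the struct temp example from the comment block (compiles with any C99 compiler; only the toy struct and sample values are additions):

#include <stdint.h>
#include <stdio.h>

#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
		 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

struct temp {
	uint32_t field4;
#define example_bit_field_SHIFT	7
#define example_bit_field_MASK	0x03
#define example_bit_field_WORD	field4
};

int main(void)
{
	struct temp t1 = { 0 };
	bf_set(example_bit_field, &t1, 2);	/* writes binary 10 into bits 8:7 */
	printf("field4 = 0x%08x, value = %u\n",
	       t1.field4, bf_get(example_bit_field, &t1));	/* 0x00000100, 2 */
	bf_set(example_bit_field, &t1, 0);	/* clears the field again */
	return 0;
}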
+struct dma_address {
+	uint32_t addr_lo;
+	uint32_t addr_hi;
+};
+
+#define LPFC_SLI4_BAR0		1
+#define LPFC_SLI4_BAR1		2
+#define LPFC_SLI4_BAR2		4
+
+#define LPFC_SLI4_MBX_EMBED	true
+#define LPFC_SLI4_MBX_NEMBED	false
+
+#define LPFC_SLI4_MB_WORD_COUNT		64
+#define LPFC_MAX_MQ_PAGE		8
+#define LPFC_MAX_WQ_PAGE		8
+#define LPFC_MAX_CQ_PAGE		4
+#define LPFC_MAX_EQ_PAGE		8
+
+#define LPFC_VIR_FUNC_MAX       32 /* Maximum number of virtual functions */
+#define LPFC_PCI_FUNC_MAX        5 /* Maximum number of PCI functions */
+#define LPFC_VFR_PAGE_SIZE	0x1000 /* 4KB BAR2 per-VF register page size */
+
+/* Define SLI4 Alignment requirements. */
+#define LPFC_ALIGN_16_BYTE	16
+#define LPFC_ALIGN_64_BYTE	64
+
+/* Define SLI4 specific definitions. */
+#define LPFC_MQ_CQE_BYTE_OFFSET	256
+#define LPFC_MBX_CMD_HDR_LENGTH 16
+#define LPFC_MBX_ERROR_RANGE	0x4000
+#define LPFC_BMBX_BIT1_ADDR_HI	0x2
+#define LPFC_BMBX_BIT1_ADDR_LO	0
+#define LPFC_RPI_HDR_COUNT	64
+#define LPFC_HDR_TEMPLATE_SIZE	4096
+#define LPFC_RPI_ALLOC_ERROR 	0xFFFF
+#define LPFC_FCF_RECORD_WD_CNT	132
+#define LPFC_ENTIRE_FCF_DATABASE 0
+#define LPFC_DFLT_FCF_INDEX	 0
+
+/* Virtual function numbers */
+#define LPFC_VF0		0
+#define LPFC_VF1		1
+#define LPFC_VF2		2
+#define LPFC_VF3		3
+#define LPFC_VF4		4
+#define LPFC_VF5		5
+#define LPFC_VF6		6
+#define LPFC_VF7		7
+#define LPFC_VF8		8
+#define LPFC_VF9		9
+#define LPFC_VF10		10
+#define LPFC_VF11		11
+#define LPFC_VF12		12
+#define LPFC_VF13		13
+#define LPFC_VF14		14
+#define LPFC_VF15		15
+#define LPFC_VF16		16
+#define LPFC_VF17		17
+#define LPFC_VF18		18
+#define LPFC_VF19		19
+#define LPFC_VF20		20
+#define LPFC_VF21		21
+#define LPFC_VF22		22
+#define LPFC_VF23		23
+#define LPFC_VF24		24
+#define LPFC_VF25		25
+#define LPFC_VF26		26
+#define LPFC_VF27		27
+#define LPFC_VF28		28
+#define LPFC_VF29		29
+#define LPFC_VF30		30
+#define LPFC_VF31		31
+
+/* PCI function numbers */
+#define LPFC_PCI_FUNC0		0
+#define LPFC_PCI_FUNC1		1
+#define LPFC_PCI_FUNC2		2
+#define LPFC_PCI_FUNC3		3
+#define LPFC_PCI_FUNC4		4
+
+/* Active interrupt test count */
+#define LPFC_ACT_INTR_CNT	4
+
+/* Delay Multiplier constant */
+#define LPFC_DMULT_CONST       651042
+#define LPFC_MIM_IMAX          636
+#define LPFC_FP_DEF_IMAX       10000
+#define LPFC_SP_DEF_IMAX       10000
+
+struct ulp_bde64 {
+	union ULP_BDE_TUS {
+		uint32_t w;
+		struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+			uint32_t bdeFlags:8;	/* BDE Flags 0 IS A SUPPORTED
+						   VALUE !! */
+			uint32_t bdeSize:24;	/* Size of buffer (in bytes) */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+			uint32_t bdeSize:24;	/* Size of buffer (in bytes) */
+			uint32_t bdeFlags:8;	/* BDE Flags 0 IS A SUPPORTED
+						   VALUE !! */
+#endif
+#define BUFF_TYPE_BDE_64    0x00	/* BDE (Host_resident) */
+#define BUFF_TYPE_BDE_IMMED 0x01	/* Immediate Data BDE */
+#define BUFF_TYPE_BDE_64P   0x02	/* BDE (Port-resident) */
+#define BUFF_TYPE_BDE_64I   0x08	/* Input BDE (Host-resident) */
+#define BUFF_TYPE_BDE_64IP  0x0A	/* Input BDE (Port-resident) */
+#define BUFF_TYPE_BLP_64    0x40	/* BLP (Host-resident) */
+#define BUFF_TYPE_BLP_64P   0x42	/* BLP (Port-resident) */
+		} f;
+	} tus;
+	uint32_t addrLow;
+	uint32_t addrHigh;
+};
+
+struct lpfc_sli4_flags {
+	uint32_t word0;
+#define lpfc_fip_flag_SHIFT 0
+#define lpfc_fip_flag_MASK 0x00000001
+#define lpfc_fip_flag_WORD word0
+};
+
+/* event queue entry structure */
+struct lpfc_eqe {
+	uint32_t word0;
+#define lpfc_eqe_resource_id_SHIFT	16
+#define lpfc_eqe_resource_id_MASK	0x000000FF
+#define lpfc_eqe_resource_id_WORD	word0
+#define lpfc_eqe_minor_code_SHIFT	4
+#define lpfc_eqe_minor_code_MASK	0x00000FFF
+#define lpfc_eqe_minor_code_WORD	word0
+#define lpfc_eqe_major_code_SHIFT	1
+#define lpfc_eqe_major_code_MASK	0x00000007
+#define lpfc_eqe_major_code_WORD	word0
+#define lpfc_eqe_valid_SHIFT		0
+#define lpfc_eqe_valid_MASK		0x00000001
+#define lpfc_eqe_valid_WORD		word0
+};
+
+/* completion queue entry structure (common fields for all cqe types) */
+struct lpfc_cqe {
+	uint32_t reserved0;
+	uint32_t reserved1;
+	uint32_t reserved2;
+	uint32_t word3;
+#define lpfc_cqe_valid_SHIFT		31
+#define lpfc_cqe_valid_MASK		0x00000001
+#define lpfc_cqe_valid_WORD		word3
+#define lpfc_cqe_code_SHIFT		16
+#define lpfc_cqe_code_MASK		0x000000FF
+#define lpfc_cqe_code_WORD		word3
+};
+
+/* Completion Queue Entry Status Codes */
+#define CQE_STATUS_SUCCESS		0x0
+#define CQE_STATUS_FCP_RSP_FAILURE	0x1
+#define CQE_STATUS_REMOTE_STOP		0x2
+#define CQE_STATUS_LOCAL_REJECT		0x3
+#define CQE_STATUS_NPORT_RJT		0x4
+#define CQE_STATUS_FABRIC_RJT		0x5
+#define CQE_STATUS_NPORT_BSY		0x6
+#define CQE_STATUS_FABRIC_BSY		0x7
+#define CQE_STATUS_INTERMED_RSP		0x8
+#define CQE_STATUS_LS_RJT		0x9
+#define CQE_STATUS_CMD_REJECT		0xb
+#define CQE_STATUS_FCP_TGT_LENCHECK	0xc
+#define CQE_STATUS_NEED_BUFF_ENTRY	0xf
+
+/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */
+#define CQE_HW_STATUS_NO_ERR		0x0
+#define CQE_HW_STATUS_UNDERRUN		0x1
+#define CQE_HW_STATUS_OVERRUN		0x2
+
+/* Completion Queue Entry Codes */
+#define CQE_CODE_COMPL_WQE		0x1
+#define CQE_CODE_RELEASE_WQE		0x2
+#define CQE_CODE_RECEIVE		0x4
+#define CQE_CODE_XRI_ABORTED		0x5
+
+/* completion queue entry for wqe completions */
+struct lpfc_wcqe_complete {
+	uint32_t word0;
+#define lpfc_wcqe_c_request_tag_SHIFT	16
+#define lpfc_wcqe_c_request_tag_MASK	0x0000FFFF
+#define lpfc_wcqe_c_request_tag_WORD	word0
+#define lpfc_wcqe_c_status_SHIFT	8
+#define lpfc_wcqe_c_status_MASK		0x000000FF
+#define lpfc_wcqe_c_status_WORD		word0
+#define lpfc_wcqe_c_hw_status_SHIFT	0
+#define lpfc_wcqe_c_hw_status_MASK	0x000000FF
+#define lpfc_wcqe_c_hw_status_WORD	word0
+	uint32_t total_data_placed;
+	uint32_t parameter;
+	uint32_t word3;
+#define lpfc_wcqe_c_valid_SHIFT		lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_c_valid_MASK		lpfc_cqe_valid_MASK
+#define lpfc_wcqe_c_valid_WORD		lpfc_cqe_valid_WORD
+#define lpfc_wcqe_c_xb_SHIFT		28
+#define lpfc_wcqe_c_xb_MASK		0x00000001
+#define lpfc_wcqe_c_xb_WORD		word3
+#define lpfc_wcqe_c_pv_SHIFT		27
+#define lpfc_wcqe_c_pv_MASK		0x00000001
+#define lpfc_wcqe_c_pv_WORD		word3
+#define lpfc_wcqe_c_priority_SHIFT	24
+#define lpfc_wcqe_c_priority_MASK		0x00000007
+#define lpfc_wcqe_c_priority_WORD		word3
+#define lpfc_wcqe_c_code_SHIFT		lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_c_code_MASK		lpfc_cqe_code_MASK
+#define lpfc_wcqe_c_code_WORD		lpfc_cqe_code_WORD
+};
+
+/* completion queue entry for wqe release */
+struct lpfc_wcqe_release {
+	uint32_t reserved0;
+	uint32_t reserved1;
+	uint32_t word2;
+#define lpfc_wcqe_r_wq_id_SHIFT		16
+#define lpfc_wcqe_r_wq_id_MASK		0x0000FFFF
+#define lpfc_wcqe_r_wq_id_WORD		word2
+#define lpfc_wcqe_r_wqe_index_SHIFT	0
+#define lpfc_wcqe_r_wqe_index_MASK	0x0000FFFF
+#define lpfc_wcqe_r_wqe_index_WORD	word2
+	uint32_t word3;
+#define lpfc_wcqe_r_valid_SHIFT		lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_r_valid_MASK		lpfc_cqe_valid_MASK
+#define lpfc_wcqe_r_valid_WORD		lpfc_cqe_valid_WORD
+#define lpfc_wcqe_r_code_SHIFT		lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_r_code_MASK		lpfc_cqe_code_MASK
+#define lpfc_wcqe_r_code_WORD		lpfc_cqe_code_WORD
+};
+
+struct sli4_wcqe_xri_aborted {
+	uint32_t word0;
+#define lpfc_wcqe_xa_status_SHIFT		8
+#define lpfc_wcqe_xa_status_MASK		0x000000FF
+#define lpfc_wcqe_xa_status_WORD		word0
+	uint32_t parameter;
+	uint32_t word2;
+#define lpfc_wcqe_xa_remote_xid_SHIFT	16
+#define lpfc_wcqe_xa_remote_xid_MASK	0x0000FFFF
+#define lpfc_wcqe_xa_remote_xid_WORD	word2
+#define lpfc_wcqe_xa_xri_SHIFT		0
+#define lpfc_wcqe_xa_xri_MASK		0x0000FFFF
+#define lpfc_wcqe_xa_xri_WORD		word2
+	uint32_t word3;
+#define lpfc_wcqe_xa_valid_SHIFT	lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_xa_valid_MASK		lpfc_cqe_valid_MASK
+#define lpfc_wcqe_xa_valid_WORD		lpfc_cqe_valid_WORD
+#define lpfc_wcqe_xa_ia_SHIFT		30
+#define lpfc_wcqe_xa_ia_MASK		0x00000001
+#define lpfc_wcqe_xa_ia_WORD		word3
+#define CQE_XRI_ABORTED_IA_REMOTE	0
+#define CQE_XRI_ABORTED_IA_LOCAL	1
+#define lpfc_wcqe_xa_br_SHIFT		29
+#define lpfc_wcqe_xa_br_MASK		0x00000001
+#define lpfc_wcqe_xa_br_WORD		word3
+#define CQE_XRI_ABORTED_BR_BA_ACC	0
+#define CQE_XRI_ABORTED_BR_BA_RJT	1
+#define lpfc_wcqe_xa_eo_SHIFT		28
+#define lpfc_wcqe_xa_eo_MASK		0x00000001
+#define lpfc_wcqe_xa_eo_WORD		word3
+#define CQE_XRI_ABORTED_EO_REMOTE	0
+#define CQE_XRI_ABORTED_EO_LOCAL	1
+#define lpfc_wcqe_xa_code_SHIFT		lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_xa_code_MASK		lpfc_cqe_code_MASK
+#define lpfc_wcqe_xa_code_WORD		lpfc_cqe_code_WORD
+};
+
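This sli4_wcqe_xri_aborted layout is what lpfc_sli4_els_xri_aborted() earlier in this series decodes with bf_get(lpfc_wcqe_xa_xri, axri). A stand-alone sketch of that decode, with the struct trimmed to the word2 fields above and a made-up sample CQE value:

#include <stdint.h>
#include <stdio.h>

#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

/* word2 only; the full entry also carries the word0 status, a parameter
 * word, and the word3 flag bits shown above */
struct sli4_wcqe_xri_aborted {
	uint32_t word2;
#define lpfc_wcqe_xa_remote_xid_SHIFT	16
#define lpfc_wcqe_xa_remote_xid_MASK	0x0000FFFF
#define lpfc_wcqe_xa_remote_xid_WORD	word2
#define lpfc_wcqe_xa_xri_SHIFT		0
#define lpfc_wcqe_xa_xri_MASK		0x0000FFFF
#define lpfc_wcqe_xa_xri_WORD		word2
};

int main(void)
{
	/* bits 31:16 = remote exchange id, bits 15:0 = XRI (sample values) */
	struct sli4_wcqe_xri_aborted axri = { .word2 = (0x0123u << 16) | 0x0042u };
	printf("aborted xri 0x%04x, remote xid 0x%04x\n",
	       (uint16_t)bf_get(lpfc_wcqe_xa_xri, &axri),
	       (uint16_t)bf_get(lpfc_wcqe_xa_remote_xid, &axri));
	return 0;
}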
+/* completion queue entry structure for rqe completion */
+struct lpfc_rcqe {
+	uint32_t word0;
+#define lpfc_rcqe_bindex_SHIFT		16
+#define lpfc_rcqe_bindex_MASK		0x0000FFF
+#define lpfc_rcqe_bindex_WORD		word0
+#define lpfc_rcqe_status_SHIFT		8
+#define lpfc_rcqe_status_MASK		0x000000FF
+#define lpfc_rcqe_status_WORD		word0
+#define FC_STATUS_RQ_SUCCESS		0x10 /* Async receive successful */
+#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 	0x11 /* payload truncated */
+#define FC_STATUS_INSUFF_BUF_NEED_BUF 	0x12 /* Insufficient buffers */
+#define FC_STATUS_INSUFF_BUF_FRM_DISC 	0x13 /* Frame Discard */
+	uint32_t reserved1;
+	uint32_t word2;
+#define lpfc_rcqe_length_SHIFT		16
+#define lpfc_rcqe_length_MASK		0x0000FFFF
+#define lpfc_rcqe_length_WORD		word2
+#define lpfc_rcqe_rq_id_SHIFT		6
+#define lpfc_rcqe_rq_id_MASK		0x000003FF
+#define lpfc_rcqe_rq_id_WORD		word2
+#define lpfc_rcqe_fcf_id_SHIFT		0
+#define lpfc_rcqe_fcf_id_MASK		0x0000003F
+#define lpfc_rcqe_fcf_id_WORD		word2
+	uint32_t word3;
+#define lpfc_rcqe_valid_SHIFT		lpfc_cqe_valid_SHIFT
+#define lpfc_rcqe_valid_MASK		lpfc_cqe_valid_MASK
+#define lpfc_rcqe_valid_WORD		lpfc_cqe_valid_WORD
+#define lpfc_rcqe_port_SHIFT		30
+#define lpfc_rcqe_port_MASK		0x00000001
+#define lpfc_rcqe_port_WORD		word3
+#define lpfc_rcqe_hdr_length_SHIFT	24
+#define lpfc_rcqe_hdr_length_MASK	0x0000001F
+#define lpfc_rcqe_hdr_length_WORD	word3
+#define lpfc_rcqe_code_SHIFT		lpfc_cqe_code_SHIFT
+#define lpfc_rcqe_code_MASK		lpfc_cqe_code_MASK
+#define lpfc_rcqe_code_WORD		lpfc_cqe_code_WORD
+#define lpfc_rcqe_eof_SHIFT		8
+#define lpfc_rcqe_eof_MASK		0x000000FF
+#define lpfc_rcqe_eof_WORD		word3
+#define FCOE_EOFn	0x41
+#define FCOE_EOFt	0x42
+#define FCOE_EOFni	0x49
+#define FCOE_EOFa	0x50
+#define lpfc_rcqe_sof_SHIFT		0
+#define lpfc_rcqe_sof_MASK		0x000000FF
+#define lpfc_rcqe_sof_WORD		word3
+#define FCOE_SOFi2	0x2d
+#define FCOE_SOFi3	0x2e
+#define FCOE_SOFn2	0x35
+#define FCOE_SOFn3	0x36
+};
+
+struct lpfc_wqe_generic{
+	struct ulp_bde64 bde;
+	uint32_t word3;
+	uint32_t word4;
+	uint32_t word5;
+	uint32_t word6;
+#define lpfc_wqe_gen_context_SHIFT	16
+#define lpfc_wqe_gen_context_MASK	0x0000FFFF
+#define lpfc_wqe_gen_context_WORD	word6
+#define lpfc_wqe_gen_xri_SHIFT		0
+#define lpfc_wqe_gen_xri_MASK		0x0000FFFF
+#define lpfc_wqe_gen_xri_WORD		word6
+	uint32_t word7;
+#define lpfc_wqe_gen_lnk_SHIFT		23
+#define lpfc_wqe_gen_lnk_MASK		0x00000001
+#define lpfc_wqe_gen_lnk_WORD		word7
+#define lpfc_wqe_gen_erp_SHIFT		22
+#define lpfc_wqe_gen_erp_MASK		0x00000001
+#define lpfc_wqe_gen_erp_WORD		word7
+#define lpfc_wqe_gen_pu_SHIFT		20
+#define lpfc_wqe_gen_pu_MASK		0x00000003
+#define lpfc_wqe_gen_pu_WORD		word7
+#define lpfc_wqe_gen_class_SHIFT	16
+#define lpfc_wqe_gen_class_MASK		0x00000007
+#define lpfc_wqe_gen_class_WORD		word7
+#define lpfc_wqe_gen_command_SHIFT	8
+#define lpfc_wqe_gen_command_MASK	0x000000FF
+#define lpfc_wqe_gen_command_WORD	word7
+#define lpfc_wqe_gen_status_SHIFT	4
+#define lpfc_wqe_gen_status_MASK	0x0000000F
+#define lpfc_wqe_gen_status_WORD	word7
+#define lpfc_wqe_gen_ct_SHIFT		2
+#define lpfc_wqe_gen_ct_MASK		0x00000007
+#define lpfc_wqe_gen_ct_WORD		word7
+	uint32_t abort_tag;
+	uint32_t word9;
+#define lpfc_wqe_gen_request_tag_SHIFT	0
+#define lpfc_wqe_gen_request_tag_MASK	0x0000FFFF
+#define lpfc_wqe_gen_request_tag_WORD	word9
+	uint32_t word10;
+#define lpfc_wqe_gen_ccp_SHIFT		24
+#define lpfc_wqe_gen_ccp_MASK		0x000000FF
+#define lpfc_wqe_gen_ccp_WORD		word10
+#define lpfc_wqe_gen_ccpe_SHIFT		23
+#define lpfc_wqe_gen_ccpe_MASK		0x00000001
+#define lpfc_wqe_gen_ccpe_WORD		word10
+#define lpfc_wqe_gen_pv_SHIFT		19
+#define lpfc_wqe_gen_pv_MASK		0x00000001
+#define lpfc_wqe_gen_pv_WORD		word10
+#define lpfc_wqe_gen_pri_SHIFT		16
+#define lpfc_wqe_gen_pri_MASK		0x00000007
+#define lpfc_wqe_gen_pri_WORD		word10
+	uint32_t word11;
+#define lpfc_wqe_gen_cq_id_SHIFT	16
+#define lpfc_wqe_gen_cq_id_MASK		0x000003FF
+#define lpfc_wqe_gen_cq_id_WORD		word11
+#define LPFC_WQE_CQ_ID_DEFAULT	0x3ff
+#define lpfc_wqe_gen_wqec_SHIFT		7
+#define lpfc_wqe_gen_wqec_MASK		0x00000001
+#define lpfc_wqe_gen_wqec_WORD		word11
+#define lpfc_wqe_gen_cmd_type_SHIFT	0
+#define lpfc_wqe_gen_cmd_type_MASK	0x0000000F
+#define lpfc_wqe_gen_cmd_type_WORD	word11
+	uint32_t payload[4];
+};
+
+struct lpfc_rqe {
+	uint32_t address_hi;
+	uint32_t address_lo;
+};
+
+/* buffer descriptors */
+struct lpfc_bde4 {
+	uint32_t addr_hi;
+	uint32_t addr_lo;
+	uint32_t word2;
+#define lpfc_bde4_last_SHIFT		31
+#define lpfc_bde4_last_MASK		0x00000001
+#define lpfc_bde4_last_WORD		word2
+#define lpfc_bde4_sge_offset_SHIFT	0
+#define lpfc_bde4_sge_offset_MASK	0x000003FF
+#define lpfc_bde4_sge_offset_WORD	word2
+	uint32_t word3;
+#define lpfc_bde4_length_SHIFT		0
+#define lpfc_bde4_length_MASK		0x000000FF
+#define lpfc_bde4_length_WORD		word3
+};
+
+struct lpfc_register {
+	uint32_t word0;
+};
+
+#define LPFC_UERR_STATUS_HI		0x00A4
+#define LPFC_UERR_STATUS_LO		0x00A0
+#define LPFC_ONLINE0			0x00B0
+#define LPFC_ONLINE1			0x00B4
+#define LPFC_SCRATCHPAD			0x0058
+
+/* BAR0 Registers */
+#define LPFC_HST_STATE			0x00AC
+#define lpfc_hst_state_perr_SHIFT	31
+#define lpfc_hst_state_perr_MASK	0x1
+#define lpfc_hst_state_perr_WORD	word0
+#define lpfc_hst_state_sfi_SHIFT	30
+#define lpfc_hst_state_sfi_MASK		0x1
+#define lpfc_hst_state_sfi_WORD		word0
+#define lpfc_hst_state_nip_SHIFT	29
+#define lpfc_hst_state_nip_MASK		0x1
+#define lpfc_hst_state_nip_WORD		word0
+#define lpfc_hst_state_ipc_SHIFT	28
+#define lpfc_hst_state_ipc_MASK		0x1
+#define lpfc_hst_state_ipc_WORD		word0
+#define lpfc_hst_state_xrom_SHIFT	27
+#define lpfc_hst_state_xrom_MASK	0x1
+#define lpfc_hst_state_xrom_WORD	word0
+#define lpfc_hst_state_dl_SHIFT		26
+#define lpfc_hst_state_dl_MASK		0x1
+#define lpfc_hst_state_dl_WORD		word0
+#define lpfc_hst_state_port_status_SHIFT	0
+#define lpfc_hst_state_port_status_MASK		0xFFFF
+#define lpfc_hst_state_port_status_WORD		word0
+
+#define LPFC_POST_STAGE_POWER_ON_RESET			0x0000
+#define LPFC_POST_STAGE_AWAITING_HOST_RDY		0x0001
+#define LPFC_POST_STAGE_HOST_RDY			0x0002
+#define LPFC_POST_STAGE_BE_RESET			0x0003
+#define LPFC_POST_STAGE_SEEPROM_CS_START		0x0100
+#define LPFC_POST_STAGE_SEEPROM_CS_DONE			0x0101
+#define LPFC_POST_STAGE_DDR_CONFIG_START		0x0200
+#define LPFC_POST_STAGE_DDR_CONFIG_DONE			0x0201
+#define LPFC_POST_STAGE_DDR_CALIBRATE_START		0x0300
+#define LPFC_POST_STAGE_DDR_CALIBRATE_DONE		0x0301
+#define LPFC_POST_STAGE_DDR_TEST_START			0x0400
+#define LPFC_POST_STAGE_DDR_TEST_DONE			0x0401
+#define LPFC_POST_STAGE_REDBOOT_INIT_START		0x0600
+#define LPFC_POST_STAGE_REDBOOT_INIT_DONE		0x0601
+#define LPFC_POST_STAGE_FW_IMAGE_LOAD_START		0x0700
+#define LPFC_POST_STAGE_FW_IMAGE_LOAD_DONE		0x0701
+#define LPFC_POST_STAGE_ARMFW_START			0x0800
+#define LPFC_POST_STAGE_DHCP_QUERY_START		0x0900
+#define LPFC_POST_STAGE_DHCP_QUERY_DONE			0x0901
+#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_START	0x0A00
+#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_DONE	0x0A01
+#define LPFC_POST_STAGE_RC_OPTION_SET			0x0B00
+#define LPFC_POST_STAGE_SWITCH_LINK			0x0B01
+#define LPFC_POST_STAGE_SEND_ICDS_MESSAGE		0x0B02
+#define LPFC_POST_STAGE_PERFROM_TFTP			0x0B03
+#define LPFC_POST_STAGE_PARSE_XML			0x0B04
+#define LPFC_POST_STAGE_DOWNLOAD_IMAGE			0x0B05
+#define LPFC_POST_STAGE_FLASH_IMAGE			0x0B06
+#define LPFC_POST_STAGE_RC_DONE				0x0B07
+#define LPFC_POST_STAGE_REBOOT_SYSTEM			0x0B08
+#define LPFC_POST_STAGE_MAC_ADDRESS			0x0C00
+#define LPFC_POST_STAGE_ARMFW_READY			0xC000
+#define LPFC_POST_STAGE_ARMFW_UE 			0xF000
+
+#define lpfc_scratchpad_slirev_SHIFT			4
+#define lpfc_scratchpad_slirev_MASK			0xF
+#define lpfc_scratchpad_slirev_WORD			word0
+#define lpfc_scratchpad_chiptype_SHIFT			8
+#define lpfc_scratchpad_chiptype_MASK			0xFF
+#define lpfc_scratchpad_chiptype_WORD			word0
+#define lpfc_scratchpad_featurelevel1_SHIFT		16
+#define lpfc_scratchpad_featurelevel1_MASK		0xFF
+#define lpfc_scratchpad_featurelevel1_WORD		word0
+#define lpfc_scratchpad_featurelevel2_SHIFT		24
+#define lpfc_scratchpad_featurelevel2_MASK		0xFF
+#define lpfc_scratchpad_featurelevel2_WORD		word0
+
+/* BAR1 Registers */
+#define LPFC_IMR_MASK_ALL	0xFFFFFFFF
+#define LPFC_ISCR_CLEAR_ALL	0xFFFFFFFF
+
+#define LPFC_HST_ISR0		0x0C18
+#define LPFC_HST_ISR1		0x0C1C
+#define LPFC_HST_ISR2		0x0C20
+#define LPFC_HST_ISR3		0x0C24
+#define LPFC_HST_ISR4		0x0C28
+
+#define LPFC_HST_IMR0		0x0C48
+#define LPFC_HST_IMR1		0x0C4C
+#define LPFC_HST_IMR2		0x0C50
+#define LPFC_HST_IMR3		0x0C54
+#define LPFC_HST_IMR4		0x0C58
+
+#define LPFC_HST_ISCR0		0x0C78
+#define LPFC_HST_ISCR1		0x0C7C
+#define LPFC_HST_ISCR2		0x0C80
+#define LPFC_HST_ISCR3		0x0C84
+#define LPFC_HST_ISCR4		0x0C88
+
+#define LPFC_SLI4_INTR0			BIT0
+#define LPFC_SLI4_INTR1			BIT1
+#define LPFC_SLI4_INTR2			BIT2
+#define LPFC_SLI4_INTR3			BIT3
+#define LPFC_SLI4_INTR4			BIT4
+#define LPFC_SLI4_INTR5			BIT5
+#define LPFC_SLI4_INTR6			BIT6
+#define LPFC_SLI4_INTR7			BIT7
+#define LPFC_SLI4_INTR8			BIT8
+#define LPFC_SLI4_INTR9			BIT9
+#define LPFC_SLI4_INTR10		BIT10
+#define LPFC_SLI4_INTR11		BIT11
+#define LPFC_SLI4_INTR12		BIT12
+#define LPFC_SLI4_INTR13		BIT13
+#define LPFC_SLI4_INTR14		BIT14
+#define LPFC_SLI4_INTR15		BIT15
+#define LPFC_SLI4_INTR16		BIT16
+#define LPFC_SLI4_INTR17		BIT17
+#define LPFC_SLI4_INTR18		BIT18
+#define LPFC_SLI4_INTR19		BIT19
+#define LPFC_SLI4_INTR20		BIT20
+#define LPFC_SLI4_INTR21		BIT21
+#define LPFC_SLI4_INTR22		BIT22
+#define LPFC_SLI4_INTR23		BIT23
+#define LPFC_SLI4_INTR24		BIT24
+#define LPFC_SLI4_INTR25		BIT25
+#define LPFC_SLI4_INTR26		BIT26
+#define LPFC_SLI4_INTR27		BIT27
+#define LPFC_SLI4_INTR28		BIT28
+#define LPFC_SLI4_INTR29		BIT29
+#define LPFC_SLI4_INTR30		BIT30
+#define LPFC_SLI4_INTR31		BIT31
+
+/* BAR2 Registers */
+#define LPFC_RQ_DOORBELL		0x00A0
+#define lpfc_rq_doorbell_num_posted_SHIFT	16
+#define lpfc_rq_doorbell_num_posted_MASK	0x3FFF
+#define lpfc_rq_doorbell_num_posted_WORD	word0
+#define LPFC_RQ_POST_BATCH		8	/* RQEs to post at one time */
+#define lpfc_rq_doorbell_id_SHIFT		0
+#define lpfc_rq_doorbell_id_MASK		0x03FF
+#define lpfc_rq_doorbell_id_WORD		word0
+
+#define LPFC_WQ_DOORBELL		0x0040
+#define lpfc_wq_doorbell_num_posted_SHIFT	24
+#define lpfc_wq_doorbell_num_posted_MASK	0x00FF
+#define lpfc_wq_doorbell_num_posted_WORD	word0
+#define lpfc_wq_doorbell_index_SHIFT		16
+#define lpfc_wq_doorbell_index_MASK		0x00FF
+#define lpfc_wq_doorbell_index_WORD		word0
+#define lpfc_wq_doorbell_id_SHIFT		0
+#define lpfc_wq_doorbell_id_MASK		0xFFFF
+#define lpfc_wq_doorbell_id_WORD		word0
+
+#define LPFC_EQCQ_DOORBELL		0x0120
+#define lpfc_eqcq_doorbell_arm_SHIFT		29
+#define lpfc_eqcq_doorbell_arm_MASK		0x0001
+#define lpfc_eqcq_doorbell_arm_WORD		word0
+#define lpfc_eqcq_doorbell_num_released_SHIFT	16
+#define lpfc_eqcq_doorbell_num_released_MASK	0x1FFF
+#define lpfc_eqcq_doorbell_num_released_WORD	word0
+#define lpfc_eqcq_doorbell_qt_SHIFT		10
+#define lpfc_eqcq_doorbell_qt_MASK		0x0001
+#define lpfc_eqcq_doorbell_qt_WORD		word0
+#define LPFC_QUEUE_TYPE_COMPLETION	0
+#define LPFC_QUEUE_TYPE_EVENT		1
+#define lpfc_eqcq_doorbell_eqci_SHIFT		9
+#define lpfc_eqcq_doorbell_eqci_MASK		0x0001
+#define lpfc_eqcq_doorbell_eqci_WORD		word0
+#define lpfc_eqcq_doorbell_cqid_SHIFT		0
+#define lpfc_eqcq_doorbell_cqid_MASK		0x03FF
+#define lpfc_eqcq_doorbell_cqid_WORD		word0
+#define lpfc_eqcq_doorbell_eqid_SHIFT		0
+#define lpfc_eqcq_doorbell_eqid_MASK		0x01FF
+#define lpfc_eqcq_doorbell_eqid_WORD		word0
+
+#define LPFC_BMBX			0x0160
+#define lpfc_bmbx_addr_SHIFT		2
+#define lpfc_bmbx_addr_MASK		0x3FFFFFFF
+#define lpfc_bmbx_addr_WORD		word0
+#define lpfc_bmbx_hi_SHIFT		1
+#define lpfc_bmbx_hi_MASK		0x0001
+#define lpfc_bmbx_hi_WORD		word0
+#define lpfc_bmbx_rdy_SHIFT		0
+#define lpfc_bmbx_rdy_MASK		0x0001
+#define lpfc_bmbx_rdy_WORD		word0
+
+#define LPFC_MQ_DOORBELL			0x0140
+#define lpfc_mq_doorbell_num_posted_SHIFT	16
+#define lpfc_mq_doorbell_num_posted_MASK	0x3FFF
+#define lpfc_mq_doorbell_num_posted_WORD	word0
+#define lpfc_mq_doorbell_id_SHIFT		0
+#define lpfc_mq_doorbell_id_MASK		0x03FF
+#define lpfc_mq_doorbell_id_WORD		word0
+
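These BAR2 doorbells are rung by bf_set()ing fields into a scratch struct lpfc_register and writing its word0 to the corresponding offset. A stand-alone sketch that builds an EQ doorbell value from the definitions above (the queue id and release count are arbitrary sample numbers, and the actual MMIO write is omitted):

#include <stdint.h>
#include <stdio.h>

#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
		 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

struct lpfc_register {
	uint32_t word0;
};

#define lpfc_eqcq_doorbell_arm_SHIFT		29
#define lpfc_eqcq_doorbell_arm_MASK		0x0001
#define lpfc_eqcq_doorbell_arm_WORD		word0
#define lpfc_eqcq_doorbell_num_released_SHIFT	16
#define lpfc_eqcq_doorbell_num_released_MASK	0x1FFF
#define lpfc_eqcq_doorbell_num_released_WORD	word0
#define lpfc_eqcq_doorbell_qt_SHIFT		10
#define lpfc_eqcq_doorbell_qt_MASK		0x0001
#define lpfc_eqcq_doorbell_qt_WORD		word0
#define LPFC_QUEUE_TYPE_EVENT			1
#define lpfc_eqcq_doorbell_eqid_SHIFT		0
#define lpfc_eqcq_doorbell_eqid_MASK		0x01FF
#define lpfc_eqcq_doorbell_eqid_WORD		word0

int main(void)
{
	struct lpfc_register doorbell = { 0 };
	/* release 4 consumed EQEs on event queue 7 and re-arm it */
	bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, 4);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, 7);
	printf("doorbell word0 = 0x%08x\n", doorbell.word0);	/* 0x20040407 */
	/* the driver would then write word0 to BAR2 + LPFC_EQCQ_DOORBELL */
	return 0;
}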
+struct lpfc_sli4_cfg_mhdr {
+	uint32_t word1;
+#define lpfc_mbox_hdr_emb_SHIFT		0
+#define lpfc_mbox_hdr_emb_MASK		0x00000001
+#define lpfc_mbox_hdr_emb_WORD		word1
+#define lpfc_mbox_hdr_sge_cnt_SHIFT	3
+#define lpfc_mbox_hdr_sge_cnt_MASK	0x0000001F
+#define lpfc_mbox_hdr_sge_cnt_WORD	word1
+	uint32_t payload_length;
+	uint32_t tag_lo;
+	uint32_t tag_hi;
+	uint32_t reserved5;
+};
+
+union lpfc_sli4_cfg_shdr {
+	struct {
+		uint32_t word6;
+#define lpfc_mbox_hdr_opcode_SHIFT		0
+#define lpfc_mbox_hdr_opcode_MASK		0x000000FF
+#define lpfc_mbox_hdr_opcode_WORD		word6
+#define lpfc_mbox_hdr_subsystem_SHIFT		8
+#define lpfc_mbox_hdr_subsystem_MASK		0x000000FF
+#define lpfc_mbox_hdr_subsystem_WORD		word6
+#define lpfc_mbox_hdr_port_number_SHIFT		16
+#define lpfc_mbox_hdr_port_number_MASK		0x000000FF
+#define lpfc_mbox_hdr_port_number_WORD		word6
+#define lpfc_mbox_hdr_domain_SHIFT		24
+#define lpfc_mbox_hdr_domain_MASK		0x000000FF
+#define lpfc_mbox_hdr_domain_WORD		word6
+		uint32_t timeout;
+		uint32_t request_length;
+		uint32_t reserved9;
+	} request;
+	struct {
+		uint32_t word6;
+#define lpfc_mbox_hdr_opcode_SHIFT		0
+#define lpfc_mbox_hdr_opcode_MASK		0x000000FF
+#define lpfc_mbox_hdr_opcode_WORD		word6
+#define lpfc_mbox_hdr_subsystem_SHIFT		8
+#define lpfc_mbox_hdr_subsystem_MASK		0x000000FF
+#define lpfc_mbox_hdr_subsystem_WORD		word6
+#define lpfc_mbox_hdr_domain_SHIFT		24
+#define lpfc_mbox_hdr_domain_MASK		0x000000FF
+#define lpfc_mbox_hdr_domain_WORD		word6
+		uint32_t word7;
+#define lpfc_mbox_hdr_status_SHIFT		0
+#define lpfc_mbox_hdr_status_MASK		0x000000FF
+#define lpfc_mbox_hdr_status_WORD		word7
+#define lpfc_mbox_hdr_add_status_SHIFT		8
+#define lpfc_mbox_hdr_add_status_MASK		0x000000FF
+#define lpfc_mbox_hdr_add_status_WORD		word7
+		uint32_t response_length;
+		uint32_t actual_response_length;
+	} response;
+};
+
+/* Mailbox structures */
+struct mbox_header {
+	struct lpfc_sli4_cfg_mhdr cfg_mhdr;
+	union  lpfc_sli4_cfg_shdr cfg_shdr;
+};
+
+/* Subsystem Definitions */
+#define LPFC_MBOX_SUBSYSTEM_COMMON	0x1
+#define LPFC_MBOX_SUBSYSTEM_FCOE	0xC
+
+/* Device Specific Definitions */
+
+/* The HOST ENDIAN defines are in Big Endian format. */
+#define HOST_ENDIAN_LOW_WORD0   0xFF3412FF
+#define HOST_ENDIAN_HIGH_WORD1	0xFF7856FF
+
+/* Common Opcodes */
+#define LPFC_MBOX_OPCODE_CQ_CREATE		0x0C
+#define LPFC_MBOX_OPCODE_EQ_CREATE		0x0D
+#define LPFC_MBOX_OPCODE_MQ_CREATE		0x15
+#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES	0x20
+#define LPFC_MBOX_OPCODE_NOP			0x21
+#define LPFC_MBOX_OPCODE_MQ_DESTROY		0x35
+#define LPFC_MBOX_OPCODE_CQ_DESTROY		0x36
+#define LPFC_MBOX_OPCODE_EQ_DESTROY		0x37
+#define LPFC_MBOX_OPCODE_FUNCTION_RESET		0x3D
+
+/* FCoE Opcodes */
+#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE			0x01
+#define LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY		0x02
+#define LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES		0x03
+#define LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES		0x04
+#define LPFC_MBOX_OPCODE_FCOE_RQ_CREATE			0x05
+#define LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY		0x06
+#define LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE		0x08
+#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF			0x09
+#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF		0x0A
+#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE		0x0B
+
+/* Mailbox command structures */
+struct eq_context {
+	uint32_t word0;
+#define lpfc_eq_context_size_SHIFT	31
+#define lpfc_eq_context_size_MASK	0x00000001
+#define lpfc_eq_context_size_WORD	word0
+#define LPFC_EQE_SIZE_4			0x0
+#define LPFC_EQE_SIZE_16		0x1
+#define lpfc_eq_context_valid_SHIFT	29
+#define lpfc_eq_context_valid_MASK	0x00000001
+#define lpfc_eq_context_valid_WORD	word0
+	uint32_t word1;
+#define lpfc_eq_context_count_SHIFT	26
+#define lpfc_eq_context_count_MASK	0x00000003
+#define lpfc_eq_context_count_WORD	word1
+#define LPFC_EQ_CNT_256		0x0
+#define LPFC_EQ_CNT_512		0x1
+#define LPFC_EQ_CNT_1024	0x2
+#define LPFC_EQ_CNT_2048	0x3
+#define LPFC_EQ_CNT_4096	0x4
+	uint32_t word2;
+#define lpfc_eq_context_delay_multi_SHIFT	13
+#define lpfc_eq_context_delay_multi_MASK	0x000003FF
+#define lpfc_eq_context_delay_multi_WORD	word2
+	uint32_t reserved3;
+};
+
+struct sgl_page_pairs {
+	uint32_t sgl_pg0_addr_lo;
+	uint32_t sgl_pg0_addr_hi;
+	uint32_t sgl_pg1_addr_lo;
+	uint32_t sgl_pg1_addr_hi;
+};
+
+struct lpfc_mbx_post_sgl_pages {
+	struct mbox_header header;
+	uint32_t word0;
+#define lpfc_post_sgl_pages_xri_SHIFT	0
+#define lpfc_post_sgl_pages_xri_MASK	0x0000FFFF
+#define lpfc_post_sgl_pages_xri_WORD	word0
+#define lpfc_post_sgl_pages_xricnt_SHIFT	16
+#define lpfc_post_sgl_pages_xricnt_MASK	0x0000FFFF
+#define lpfc_post_sgl_pages_xricnt_WORD	word0
+	struct sgl_page_pairs  sgl_pg_pairs[1];
+};
+
+/* word0 of page-1 struct shares the same SHIFT/MASK/WORD defines as above */
+struct lpfc_mbx_post_uembed_sgl_page1 {
+	union  lpfc_sli4_cfg_shdr cfg_shdr;
+	uint32_t word0;
+	struct sgl_page_pairs sgl_pg_pairs;
+};
+
+struct lpfc_mbx_sge {
+	uint32_t pa_lo;
+	uint32_t pa_hi;
+	uint32_t length;
+};
+
+struct lpfc_mbx_nembed_cmd {
+	struct lpfc_sli4_cfg_mhdr cfg_mhdr;
+#define LPFC_SLI4_MBX_SGE_MAX_PAGES	19
+	struct lpfc_mbx_sge sge[LPFC_SLI4_MBX_SGE_MAX_PAGES];
+};
+
+struct lpfc_mbx_nembed_sge_virt {
+	void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES];
+};
+
+struct lpfc_mbx_eq_create {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_eq_create_num_pages_SHIFT	0
+#define lpfc_mbx_eq_create_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_eq_create_num_pages_WORD	word0
+			struct eq_context context;
+			struct dma_address page[LPFC_MAX_EQ_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_eq_create_q_id_SHIFT	0
+#define lpfc_mbx_eq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_eq_create_q_id_WORD	word0
+		} response;
+	} u;
+};
+
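The queue-create mailbox commands here all follow the same request/response union pattern: the host fills in u.request (page count, context, DMA addresses) and, after the command completes, reads the firmware-assigned queue id back out of u.response with bf_get(). A stand-alone sketch of the response side, using a trimmed, hypothetical stand-in for the full lpfc_mbx_eq_create (sample word value only):

#include <stdint.h>
#include <stdio.h>

#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

/* response view only; the real struct also carries the mbox_header and
 * the request union member shown above */
struct eq_create_response {
	uint32_t word0;
#define lpfc_mbx_eq_create_q_id_SHIFT	0
#define lpfc_mbx_eq_create_q_id_MASK	0x0000FFFF
#define lpfc_mbx_eq_create_q_id_WORD	word0
};

int main(void)
{
	struct eq_create_response rsp = { .word0 = 0x0009 };	/* sample */
	printf("firmware assigned EQ id %u\n",
	       bf_get(lpfc_mbx_eq_create_q_id, &rsp));
	return 0;
}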
+struct lpfc_mbx_eq_destroy {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_eq_destroy_q_id_SHIFT	0
+#define lpfc_mbx_eq_destroy_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_eq_destroy_q_id_WORD	word0
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_nop {
+	struct mbox_header header;
+	uint32_t context[2];
+};
+
+struct cq_context {
+	uint32_t word0;
+#define lpfc_cq_context_event_SHIFT	31
+#define lpfc_cq_context_event_MASK	0x00000001
+#define lpfc_cq_context_event_WORD	word0
+#define lpfc_cq_context_valid_SHIFT	29
+#define lpfc_cq_context_valid_MASK	0x00000001
+#define lpfc_cq_context_valid_WORD	word0
+#define lpfc_cq_context_count_SHIFT	27
+#define lpfc_cq_context_count_MASK	0x00000003
+#define lpfc_cq_context_count_WORD	word0
+#define LPFC_CQ_CNT_256		0x0
+#define LPFC_CQ_CNT_512		0x1
+#define LPFC_CQ_CNT_1024	0x2
+	uint32_t word1;
+#define lpfc_cq_eq_id_SHIFT		22
+#define lpfc_cq_eq_id_MASK		0x000000FF
+#define lpfc_cq_eq_id_WORD		word1
+	uint32_t reserved0;
+	uint32_t reserved1;
+};
+
+struct lpfc_mbx_cq_create {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_cq_create_num_pages_SHIFT	0
+#define lpfc_mbx_cq_create_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_num_pages_WORD	word0
+			struct cq_context context;
+			struct dma_address page[LPFC_MAX_CQ_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_cq_create_q_id_SHIFT	0
+#define lpfc_mbx_cq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_q_id_WORD	word0
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_cq_destroy {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_cq_destroy_q_id_SHIFT	0
+#define lpfc_mbx_cq_destroy_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_cq_destroy_q_id_WORD	word0
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+struct wq_context {
+	uint32_t reserved0;
+	uint32_t reserved1;
+	uint32_t reserved2;
+	uint32_t reserved3;
+};
+
+struct lpfc_mbx_wq_create {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_wq_create_num_pages_SHIFT	0
+#define lpfc_mbx_wq_create_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_wq_create_num_pages_WORD	word0
+#define lpfc_mbx_wq_create_cq_id_SHIFT		16
+#define lpfc_mbx_wq_create_cq_id_MASK		0x0000FFFF
+#define lpfc_mbx_wq_create_cq_id_WORD		word0
+			struct dma_address page[LPFC_MAX_WQ_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_wq_create_q_id_SHIFT	0
+#define lpfc_mbx_wq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_wq_create_q_id_WORD	word0
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_wq_destroy {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_wq_destroy_q_id_SHIFT	0
+#define lpfc_mbx_wq_destroy_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_wq_destroy_q_id_WORD	word0
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+#define LPFC_HDR_BUF_SIZE 128
+#define LPFC_DATA_BUF_SIZE 4096
+struct rq_context {
+	uint32_t word0;
+#define lpfc_rq_context_rq_size_SHIFT	16
+#define lpfc_rq_context_rq_size_MASK	0x0000000F
+#define lpfc_rq_context_rq_size_WORD	word0
+#define LPFC_RQ_RING_SIZE_512		9	/* 512 entries */
+#define LPFC_RQ_RING_SIZE_1024		10	/* 1024 entries */
+#define LPFC_RQ_RING_SIZE_2048		11	/* 2048 entries */
+#define LPFC_RQ_RING_SIZE_4096		12	/* 4096 entries */
+	uint32_t reserved1;
+	uint32_t word2;
+#define lpfc_rq_context_cq_id_SHIFT	16
+#define lpfc_rq_context_cq_id_MASK	0x000003FF
+#define lpfc_rq_context_cq_id_WORD	word2
+#define lpfc_rq_context_buf_size_SHIFT	0
+#define lpfc_rq_context_buf_size_MASK	0x0000FFFF
+#define lpfc_rq_context_buf_size_WORD	word2
+	uint32_t reserved3;
+};
+
+struct lpfc_mbx_rq_create {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_rq_create_num_pages_SHIFT	0
+#define lpfc_mbx_rq_create_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_rq_create_num_pages_WORD	word0
+			struct rq_context context;
+			struct dma_address page[LPFC_MAX_WQ_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_rq_create_q_id_SHIFT	0
+#define lpfc_mbx_rq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_rq_create_q_id_WORD	word0
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_rq_destroy {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_rq_destroy_q_id_SHIFT	0
+#define lpfc_mbx_rq_destroy_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_rq_destroy_q_id_WORD	word0
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+struct mq_context {
+	uint32_t word0;
+#define lpfc_mq_context_cq_id_SHIFT	22
+#define lpfc_mq_context_cq_id_MASK	0x000003FF
+#define lpfc_mq_context_cq_id_WORD	word0
+#define lpfc_mq_context_count_SHIFT	16
+#define lpfc_mq_context_count_MASK	0x0000000F
+#define lpfc_mq_context_count_WORD	word0
+#define LPFC_MQ_CNT_16		0x5
+#define LPFC_MQ_CNT_32		0x6
+#define LPFC_MQ_CNT_64		0x7
+#define LPFC_MQ_CNT_128		0x8
+	uint32_t word1;
+#define lpfc_mq_context_valid_SHIFT	31
+#define lpfc_mq_context_valid_MASK	0x00000001
+#define lpfc_mq_context_valid_WORD	word1
+	uint32_t reserved2;
+	uint32_t reserved3;
+};
+
+struct lpfc_mbx_mq_create {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_mq_create_num_pages_SHIFT	0
+#define lpfc_mbx_mq_create_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_mq_create_num_pages_WORD	word0
+			struct mq_context context;
+			struct dma_address page[LPFC_MAX_MQ_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_mq_create_q_id_SHIFT	0
+#define lpfc_mbx_mq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_mq_create_q_id_WORD	word0
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_mq_destroy {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_mq_destroy_q_id_SHIFT	0
+#define lpfc_mbx_mq_destroy_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_mq_destroy_q_id_WORD	word0
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_post_hdr_tmpl {
+	struct mbox_header header;
+	uint32_t word10;
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_SHIFT  0
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_MASK   0x0000FFFF
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_WORD   word10
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_SHIFT   16
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_MASK    0x0000FFFF
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_WORD    word10
+	uint32_t rpi_paddr_lo;
+	uint32_t rpi_paddr_hi;
+};
+
+struct sli4_sge {	/* SLI-4 */
+	uint32_t addr_hi;
+	uint32_t addr_lo;
+
+	uint32_t word2;
+#define lpfc_sli4_sge_offset_SHIFT	0 /* Offset of buffer - Not used*/
+#define lpfc_sli4_sge_offset_MASK	0x00FFFFFF
+#define lpfc_sli4_sge_offset_WORD	word2
+#define lpfc_sli4_sge_last_SHIFT	31 /* Last SEG in the SGL sets
+						this  flag !! */
+#define lpfc_sli4_sge_last_MASK		0x00000001
+#define lpfc_sli4_sge_last_WORD		word2
+	uint32_t word3;
+#define lpfc_sli4_sge_len_SHIFT		0
+#define lpfc_sli4_sge_len_MASK		0x0001FFFF
+#define lpfc_sli4_sge_len_WORD		word3
+};
+
+struct fcf_record {
+	uint32_t max_rcv_size;
+	uint32_t fka_adv_period;
+	uint32_t fip_priority;
+	uint32_t word3;
+#define lpfc_fcf_record_mac_0_SHIFT		0
+#define lpfc_fcf_record_mac_0_MASK		0x000000FF
+#define lpfc_fcf_record_mac_0_WORD		word3
+#define lpfc_fcf_record_mac_1_SHIFT		8
+#define lpfc_fcf_record_mac_1_MASK		0x000000FF
+#define lpfc_fcf_record_mac_1_WORD		word3
+#define lpfc_fcf_record_mac_2_SHIFT		16
+#define lpfc_fcf_record_mac_2_MASK		0x000000FF
+#define lpfc_fcf_record_mac_2_WORD		word3
+#define lpfc_fcf_record_mac_3_SHIFT		24
+#define lpfc_fcf_record_mac_3_MASK		0x000000FF
+#define lpfc_fcf_record_mac_3_WORD		word3
+	uint32_t word4;
+#define lpfc_fcf_record_mac_4_SHIFT		0
+#define lpfc_fcf_record_mac_4_MASK		0x000000FF
+#define lpfc_fcf_record_mac_4_WORD		word4
+#define lpfc_fcf_record_mac_5_SHIFT		8
+#define lpfc_fcf_record_mac_5_MASK		0x000000FF
+#define lpfc_fcf_record_mac_5_WORD		word4
+#define lpfc_fcf_record_fcf_avail_SHIFT		16
+#define lpfc_fcf_record_fcf_avail_MASK		0x000000FF
+#define lpfc_fcf_record_fcf_avail_WORD		word4
+#define lpfc_fcf_record_mac_addr_prov_SHIFT	24
+#define lpfc_fcf_record_mac_addr_prov_MASK	0x000000FF
+#define lpfc_fcf_record_mac_addr_prov_WORD	word4
+#define LPFC_FCF_FPMA           1 	/* Fabric Provided MAC Address */
+#define LPFC_FCF_SPMA           2       /* Server Provided MAC Address */
+	uint32_t word5;
+#define lpfc_fcf_record_fab_name_0_SHIFT	0
+#define lpfc_fcf_record_fab_name_0_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_0_WORD		word5
+#define lpfc_fcf_record_fab_name_1_SHIFT	8
+#define lpfc_fcf_record_fab_name_1_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_1_WORD		word5
+#define lpfc_fcf_record_fab_name_2_SHIFT	16
+#define lpfc_fcf_record_fab_name_2_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_2_WORD		word5
+#define lpfc_fcf_record_fab_name_3_SHIFT	24
+#define lpfc_fcf_record_fab_name_3_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_3_WORD		word5
+	uint32_t word6;
+#define lpfc_fcf_record_fab_name_4_SHIFT	0
+#define lpfc_fcf_record_fab_name_4_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_4_WORD		word6
+#define lpfc_fcf_record_fab_name_5_SHIFT	8
+#define lpfc_fcf_record_fab_name_5_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_5_WORD		word6
+#define lpfc_fcf_record_fab_name_6_SHIFT	16
+#define lpfc_fcf_record_fab_name_6_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_6_WORD		word6
+#define lpfc_fcf_record_fab_name_7_SHIFT	24
+#define lpfc_fcf_record_fab_name_7_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_7_WORD		word6
+	uint32_t word7;
+#define lpfc_fcf_record_fc_map_0_SHIFT		0
+#define lpfc_fcf_record_fc_map_0_MASK		0x000000FF
+#define lpfc_fcf_record_fc_map_0_WORD		word7
+#define lpfc_fcf_record_fc_map_1_SHIFT		8
+#define lpfc_fcf_record_fc_map_1_MASK		0x000000FF
+#define lpfc_fcf_record_fc_map_1_WORD		word7
+#define lpfc_fcf_record_fc_map_2_SHIFT		16
+#define lpfc_fcf_record_fc_map_2_MASK		0x000000FF
+#define lpfc_fcf_record_fc_map_2_WORD		word7
+#define lpfc_fcf_record_fcf_valid_SHIFT		24
+#define lpfc_fcf_record_fcf_valid_MASK		0x000000FF
+#define lpfc_fcf_record_fcf_valid_WORD		word7
+	uint32_t word8;
+#define lpfc_fcf_record_fcf_index_SHIFT		0
+#define lpfc_fcf_record_fcf_index_MASK		0x0000FFFF
+#define lpfc_fcf_record_fcf_index_WORD		word8
+#define lpfc_fcf_record_fcf_state_SHIFT		16
+#define lpfc_fcf_record_fcf_state_MASK		0x0000FFFF
+#define lpfc_fcf_record_fcf_state_WORD		word8
+	uint8_t vlan_bitmap[512];
+};
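/*
 * Editor's note: the FCF MAC address arrives one byte per field across
 * word3/word4 of the record above. A hedged sketch of reassembly, reusing
 * the bf_get() convention sketched earlier; fcf_record_mac() is
 * illustrative, not a function of the driver.
 */
static inline void fcf_record_mac(struct fcf_record *fcf, uint8_t mac[6])
{
	mac[0] = bf_get(lpfc_fcf_record_mac_0, fcf);
	mac[1] = bf_get(lpfc_fcf_record_mac_1, fcf);
	mac[2] = bf_get(lpfc_fcf_record_mac_2, fcf);
	mac[3] = bf_get(lpfc_fcf_record_mac_3, fcf);
	mac[4] = bf_get(lpfc_fcf_record_mac_4, fcf);
	mac[5] = bf_get(lpfc_fcf_record_mac_5, fcf);
}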
+
+struct lpfc_mbx_read_fcf_tbl {
+	union lpfc_sli4_cfg_shdr cfg_shdr;
+	union {
+		struct {
+			uint32_t word10;
+#define lpfc_mbx_read_fcf_tbl_indx_SHIFT	0
+#define lpfc_mbx_read_fcf_tbl_indx_MASK		0x0000FFFF
+#define lpfc_mbx_read_fcf_tbl_indx_WORD		word10
+		} request;
+		struct {
+			uint32_t eventag;
+		} response;
+	} u;
+	uint32_t word11;
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_SHIFT	0
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_MASK	0x0000FFFF
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_WORD	word11
+};
+
+struct lpfc_mbx_add_fcf_tbl_entry {
+	union lpfc_sli4_cfg_shdr cfg_shdr;
+	uint32_t word10;
+#define lpfc_mbx_add_fcf_tbl_fcfi_SHIFT        0
+#define lpfc_mbx_add_fcf_tbl_fcfi_MASK         0x0000FFFF
+#define lpfc_mbx_add_fcf_tbl_fcfi_WORD         word10
+	struct lpfc_mbx_sge fcf_sge;
+};
+
+struct lpfc_mbx_del_fcf_tbl_entry {
+	struct mbox_header header;
+	uint32_t word10;
+#define lpfc_mbx_del_fcf_tbl_count_SHIFT	0
+#define lpfc_mbx_del_fcf_tbl_count_MASK		0x0000FFFF
+#define lpfc_mbx_del_fcf_tbl_count_WORD		word10
+#define lpfc_mbx_del_fcf_tbl_index_SHIFT	16
+#define lpfc_mbx_del_fcf_tbl_index_MASK		0x0000FFFF
+#define lpfc_mbx_del_fcf_tbl_index_WORD		word10
+};
+
+/* Status field for embedded SLI_CONFIG mailbox command */
+#define STATUS_SUCCESS					0x0
+#define STATUS_FAILED 					0x1
+#define STATUS_ILLEGAL_REQUEST				0x2
+#define STATUS_ILLEGAL_FIELD				0x3
+#define STATUS_INSUFFICIENT_BUFFER 			0x4
+#define STATUS_UNAUTHORIZED_REQUEST			0x5
+#define STATUS_FLASHROM_SAVE_FAILED			0x17
+#define STATUS_FLASHROM_RESTORE_FAILED			0x18
+#define STATUS_ICCBINDEX_ALLOC_FAILED			0x1a
+#define STATUS_IOCTLHANDLE_ALLOC_FAILED 		0x1b
+#define STATUS_INVALID_PHY_ADDR_FROM_OSM		0x1c
+#define STATUS_INVALID_PHY_ADDR_LEN_FROM_OSM		0x1d
+#define STATUS_ASSERT_FAILED				0x1e
+#define STATUS_INVALID_SESSION				0x1f
+#define STATUS_INVALID_CONNECTION			0x20
+#define STATUS_BTL_PATH_EXCEEDS_OSM_LIMIT		0x21
+#define STATUS_BTL_NO_FREE_SLOT_PATH			0x24
+#define STATUS_BTL_NO_FREE_SLOT_TGTID			0x25
+#define STATUS_OSM_DEVSLOT_NOT_FOUND			0x26
+#define STATUS_FLASHROM_READ_FAILED			0x27
+#define STATUS_POLL_IOCTL_TIMEOUT			0x28
+#define STATUS_ERROR_ACITMAIN				0x2a
+#define STATUS_REBOOT_REQUIRED				0x2c
+#define STATUS_FCF_IN_USE				0x3a
+
+struct lpfc_mbx_sli4_config {
+	struct mbox_header header;
+};
+
+struct lpfc_mbx_init_vfi {
+	uint32_t word1;
+#define lpfc_init_vfi_vr_SHIFT		31
+#define lpfc_init_vfi_vr_MASK		0x00000001
+#define lpfc_init_vfi_vr_WORD		word1
+#define lpfc_init_vfi_vt_SHIFT		30
+#define lpfc_init_vfi_vt_MASK		0x00000001
+#define lpfc_init_vfi_vt_WORD		word1
+#define lpfc_init_vfi_vf_SHIFT		29
+#define lpfc_init_vfi_vf_MASK		0x00000001
+#define lpfc_init_vfi_vf_WORD		word1
+#define lpfc_init_vfi_vfi_SHIFT		0
+#define lpfc_init_vfi_vfi_MASK		0x0000FFFF
+#define lpfc_init_vfi_vfi_WORD		word1
+	uint32_t word2;
+#define lpfc_init_vfi_fcfi_SHIFT	0
+#define lpfc_init_vfi_fcfi_MASK		0x0000FFFF
+#define lpfc_init_vfi_fcfi_WORD		word2
+	uint32_t word3;
+#define lpfc_init_vfi_pri_SHIFT		13
+#define lpfc_init_vfi_pri_MASK		0x00000007
+#define lpfc_init_vfi_pri_WORD		word3
+#define lpfc_init_vfi_vf_id_SHIFT	1
+#define lpfc_init_vfi_vf_id_MASK	0x00000FFF
+#define lpfc_init_vfi_vf_id_WORD	word3
+	uint32_t word4;
+#define lpfc_init_vfi_hop_count_SHIFT	24
+#define lpfc_init_vfi_hop_count_MASK	0x000000FF
+#define lpfc_init_vfi_hop_count_WORD	word4
+};
+
+struct lpfc_mbx_reg_vfi {
+	uint32_t word1;
+#define lpfc_reg_vfi_vp_SHIFT		28
+#define lpfc_reg_vfi_vp_MASK		0x00000001
+#define lpfc_reg_vfi_vp_WORD		word1
+#define lpfc_reg_vfi_vfi_SHIFT		0
+#define lpfc_reg_vfi_vfi_MASK		0x0000FFFF
+#define lpfc_reg_vfi_vfi_WORD		word1
+	uint32_t word2;
+#define lpfc_reg_vfi_vpi_SHIFT		16
+#define lpfc_reg_vfi_vpi_MASK		0x0000FFFF
+#define lpfc_reg_vfi_vpi_WORD		word2
+#define lpfc_reg_vfi_fcfi_SHIFT		0
+#define lpfc_reg_vfi_fcfi_MASK		0x0000FFFF
+#define lpfc_reg_vfi_fcfi_WORD		word2
+	uint32_t word3_rsvd;
+	uint32_t word4_rsvd;
+	struct ulp_bde64 bde;
+	uint32_t word8_rsvd;
+	uint32_t word9_rsvd;
+	uint32_t word10;
+#define lpfc_reg_vfi_nport_id_SHIFT		0
+#define lpfc_reg_vfi_nport_id_MASK		0x00FFFFFF
+#define lpfc_reg_vfi_nport_id_WORD		word10
+};
+
+struct lpfc_mbx_init_vpi {
+	uint32_t word1;
+#define lpfc_init_vpi_vfi_SHIFT		16
+#define lpfc_init_vpi_vfi_MASK		0x0000FFFF
+#define lpfc_init_vpi_vfi_WORD		word1
+#define lpfc_init_vpi_vpi_SHIFT		0
+#define lpfc_init_vpi_vpi_MASK		0x0000FFFF
+#define lpfc_init_vpi_vpi_WORD		word1
+};
+
+struct lpfc_mbx_read_vpi {
+	uint32_t word1_rsvd;
+	uint32_t word2;
+#define lpfc_mbx_read_vpi_vnportid_SHIFT	0
+#define lpfc_mbx_read_vpi_vnportid_MASK		0x00FFFFFF
+#define lpfc_mbx_read_vpi_vnportid_WORD		word2
+	uint32_t word3_rsvd;
+	uint32_t word4;
+#define lpfc_mbx_read_vpi_acq_alpa_SHIFT	0
+#define lpfc_mbx_read_vpi_acq_alpa_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_acq_alpa_WORD		word4
+#define lpfc_mbx_read_vpi_pb_SHIFT		15
+#define lpfc_mbx_read_vpi_pb_MASK		0x00000001
+#define lpfc_mbx_read_vpi_pb_WORD		word4
+#define lpfc_mbx_read_vpi_spec_alpa_SHIFT	16
+#define lpfc_mbx_read_vpi_spec_alpa_MASK	0x000000FF
+#define lpfc_mbx_read_vpi_spec_alpa_WORD	word4
+#define lpfc_mbx_read_vpi_ns_SHIFT		30
+#define lpfc_mbx_read_vpi_ns_MASK		0x00000001
+#define lpfc_mbx_read_vpi_ns_WORD		word4
+#define lpfc_mbx_read_vpi_hl_SHIFT		31
+#define lpfc_mbx_read_vpi_hl_MASK		0x00000001
+#define lpfc_mbx_read_vpi_hl_WORD		word4
+	uint32_t word5_rsvd;
+	uint32_t word6;
+#define lpfc_mbx_read_vpi_vpi_SHIFT		0
+#define lpfc_mbx_read_vpi_vpi_MASK		0x0000FFFF
+#define lpfc_mbx_read_vpi_vpi_WORD		word6
+	uint32_t word7;
+#define lpfc_mbx_read_vpi_mac_0_SHIFT		0
+#define lpfc_mbx_read_vpi_mac_0_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_0_WORD		word7
+#define lpfc_mbx_read_vpi_mac_1_SHIFT		8
+#define lpfc_mbx_read_vpi_mac_1_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_1_WORD		word7
+#define lpfc_mbx_read_vpi_mac_2_SHIFT		16
+#define lpfc_mbx_read_vpi_mac_2_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_2_WORD		word7
+#define lpfc_mbx_read_vpi_mac_3_SHIFT		24
+#define lpfc_mbx_read_vpi_mac_3_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_3_WORD		word7
+	uint32_t word8;
+#define lpfc_mbx_read_vpi_mac_4_SHIFT		0
+#define lpfc_mbx_read_vpi_mac_4_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_4_WORD		word8
+#define lpfc_mbx_read_vpi_mac_5_SHIFT		8
+#define lpfc_mbx_read_vpi_mac_5_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_5_WORD		word8
+#define lpfc_mbx_read_vpi_vlan_tag_SHIFT	16
+#define lpfc_mbx_read_vpi_vlan_tag_MASK		0x00000FFF
+#define lpfc_mbx_read_vpi_vlan_tag_WORD		word8
+#define lpfc_mbx_read_vpi_vv_SHIFT		28
+#define lpfc_mbx_read_vpi_vv_MASK		0x00000001
+#define lpfc_mbx_read_vpi_vv_WORD		word8
+};
+
+struct lpfc_mbx_unreg_vfi {
+	uint32_t word1_rsvd;
+	uint32_t word2;
+#define lpfc_unreg_vfi_vfi_SHIFT	0
+#define lpfc_unreg_vfi_vfi_MASK		0x0000FFFF
+#define lpfc_unreg_vfi_vfi_WORD		word2
+};
+
+struct lpfc_mbx_resume_rpi {
+	uint32_t word1;
+#define lpfc_resume_rpi_rpi_SHIFT	0
+#define lpfc_resume_rpi_rpi_MASK	0x0000FFFF
+#define lpfc_resume_rpi_rpi_WORD	word1
+	uint32_t event_tag;
+	uint32_t word3_rsvd;
+	uint32_t word4_rsvd;
+	uint32_t word5_rsvd;
+	uint32_t word6;
+#define lpfc_resume_rpi_vpi_SHIFT	0
+#define lpfc_resume_rpi_vpi_MASK	0x0000FFFF
+#define lpfc_resume_rpi_vpi_WORD	word6
+#define lpfc_resume_rpi_vfi_SHIFT	16
+#define lpfc_resume_rpi_vfi_MASK	0x0000FFFF
+#define lpfc_resume_rpi_vfi_WORD	word6
+};
+
+#define REG_FCF_INVALID_QID	0xFFFF
+struct lpfc_mbx_reg_fcfi {
+	uint32_t word1;
+#define lpfc_reg_fcfi_info_index_SHIFT	0
+#define lpfc_reg_fcfi_info_index_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_info_index_WORD	word1
+#define lpfc_reg_fcfi_fcfi_SHIFT	16
+#define lpfc_reg_fcfi_fcfi_MASK		0x0000FFFF
+#define lpfc_reg_fcfi_fcfi_WORD		word1
+	uint32_t word2;
+#define lpfc_reg_fcfi_rq_id1_SHIFT	0
+#define lpfc_reg_fcfi_rq_id1_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_rq_id1_WORD	word2
+#define lpfc_reg_fcfi_rq_id0_SHIFT	16
+#define lpfc_reg_fcfi_rq_id0_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_rq_id0_WORD	word2
+	uint32_t word3;
+#define lpfc_reg_fcfi_rq_id3_SHIFT	0
+#define lpfc_reg_fcfi_rq_id3_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_rq_id3_WORD	word3
+#define lpfc_reg_fcfi_rq_id2_SHIFT	16
+#define lpfc_reg_fcfi_rq_id2_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_rq_id2_WORD	word3
+	uint32_t word4;
+#define lpfc_reg_fcfi_type_match0_SHIFT	24
+#define lpfc_reg_fcfi_type_match0_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_match0_WORD	word4
+#define lpfc_reg_fcfi_type_mask0_SHIFT	16
+#define lpfc_reg_fcfi_type_mask0_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_mask0_WORD	word4
+#define lpfc_reg_fcfi_rctl_match0_SHIFT	8
+#define lpfc_reg_fcfi_rctl_match0_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_match0_WORD	word4
+#define lpfc_reg_fcfi_rctl_mask0_SHIFT	0
+#define lpfc_reg_fcfi_rctl_mask0_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_mask0_WORD	word4
+	uint32_t word5;
+#define lpfc_reg_fcfi_type_match1_SHIFT	24
+#define lpfc_reg_fcfi_type_match1_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_match1_WORD	word5
+#define lpfc_reg_fcfi_type_mask1_SHIFT	16
+#define lpfc_reg_fcfi_type_mask1_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_mask1_WORD	word5
+#define lpfc_reg_fcfi_rctl_match1_SHIFT	8
+#define lpfc_reg_fcfi_rctl_match1_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_match1_WORD	word5
+#define lpfc_reg_fcfi_rctl_mask1_SHIFT	0
+#define lpfc_reg_fcfi_rctl_mask1_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_mask1_WORD	word5
+	uint32_t word6;
+#define lpfc_reg_fcfi_type_match2_SHIFT	24
+#define lpfc_reg_fcfi_type_match2_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_match2_WORD	word6
+#define lpfc_reg_fcfi_type_mask2_SHIFT	16
+#define lpfc_reg_fcfi_type_mask2_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_mask2_WORD	word6
+#define lpfc_reg_fcfi_rctl_match2_SHIFT	8
+#define lpfc_reg_fcfi_rctl_match2_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_match2_WORD	word6
+#define lpfc_reg_fcfi_rctl_mask2_SHIFT	0
+#define lpfc_reg_fcfi_rctl_mask2_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_mask2_WORD	word6
+	uint32_t word7;
+#define lpfc_reg_fcfi_type_match3_SHIFT	24
+#define lpfc_reg_fcfi_type_match3_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_match3_WORD	word7
+#define lpfc_reg_fcfi_type_mask3_SHIFT	16
+#define lpfc_reg_fcfi_type_mask3_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_mask3_WORD	word7
+#define lpfc_reg_fcfi_rctl_match3_SHIFT	8
+#define lpfc_reg_fcfi_rctl_match3_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_match3_WORD	word7
+#define lpfc_reg_fcfi_rctl_mask3_SHIFT	0
+#define lpfc_reg_fcfi_rctl_mask3_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_mask3_WORD	word7
+	uint32_t word8;
+#define lpfc_reg_fcfi_mam_SHIFT		13
+#define lpfc_reg_fcfi_mam_MASK		0x00000003
+#define lpfc_reg_fcfi_mam_WORD		word8
+#define LPFC_MAM_BOTH		0	/* Both SPMA and FPMA */
+#define LPFC_MAM_SPMA		1	/* Server Provided MAC Address */
+#define LPFC_MAM_FPMA		2	/* Fabric Provided MAC Address */
+#define lpfc_reg_fcfi_vv_SHIFT		12
+#define lpfc_reg_fcfi_vv_MASK		0x00000001
+#define lpfc_reg_fcfi_vv_WORD		word8
+#define lpfc_reg_fcfi_vlan_tag_SHIFT	0
+#define lpfc_reg_fcfi_vlan_tag_MASK	0x00000FFF
+#define lpfc_reg_fcfi_vlan_tag_WORD	word8
+};
+
+struct lpfc_mbx_unreg_fcfi {
+	uint32_t word1_rsv;
+	uint32_t word2;
+#define lpfc_unreg_fcfi_SHIFT		0
+#define lpfc_unreg_fcfi_MASK		0x0000FFFF
+#define lpfc_unreg_fcfi_WORD		word2
+};
+
+struct lpfc_mbx_read_rev {
+	uint32_t word1;
+#define lpfc_mbx_rd_rev_sli_lvl_SHIFT  		16
+#define lpfc_mbx_rd_rev_sli_lvl_MASK   		0x0000000F
+#define lpfc_mbx_rd_rev_sli_lvl_WORD   		word1
+#define lpfc_mbx_rd_rev_fcoe_SHIFT		20
+#define lpfc_mbx_rd_rev_fcoe_MASK		0x00000001
+#define lpfc_mbx_rd_rev_fcoe_WORD		word1
+#define lpfc_mbx_rd_rev_vpd_SHIFT		29
+#define lpfc_mbx_rd_rev_vpd_MASK		0x00000001
+#define lpfc_mbx_rd_rev_vpd_WORD		word1
+	uint32_t first_hw_rev;
+	uint32_t second_hw_rev;
+	uint32_t word4_rsvd;
+	uint32_t third_hw_rev;
+	uint32_t word6;
+#define lpfc_mbx_rd_rev_fcph_low_SHIFT		0
+#define lpfc_mbx_rd_rev_fcph_low_MASK		0x000000FF
+#define lpfc_mbx_rd_rev_fcph_low_WORD		word6
+#define lpfc_mbx_rd_rev_fcph_high_SHIFT		8
+#define lpfc_mbx_rd_rev_fcph_high_MASK		0x000000FF
+#define lpfc_mbx_rd_rev_fcph_high_WORD		word6
+#define lpfc_mbx_rd_rev_ftr_lvl_low_SHIFT	16
+#define lpfc_mbx_rd_rev_ftr_lvl_low_MASK	0x000000FF
+#define lpfc_mbx_rd_rev_ftr_lvl_low_WORD	word6
+#define lpfc_mbx_rd_rev_ftr_lvl_high_SHIFT	24
+#define lpfc_mbx_rd_rev_ftr_lvl_high_MASK	0x000000FF
+#define lpfc_mbx_rd_rev_ftr_lvl_high_WORD	word6
+	uint32_t word7_rsvd;
+	uint32_t fw_id_rev;
+	uint8_t  fw_name[16];
+	uint32_t ulp_fw_id_rev;
+	uint8_t  ulp_fw_name[16];
+	uint32_t word18_47_rsvd[30];
+	uint32_t word48;
+#define lpfc_mbx_rd_rev_avail_len_SHIFT		0
+#define lpfc_mbx_rd_rev_avail_len_MASK		0x00FFFFFF
+#define lpfc_mbx_rd_rev_avail_len_WORD		word48
+	uint32_t vpd_paddr_low;
+	uint32_t vpd_paddr_high;
+	uint32_t avail_vpd_len;
+	uint32_t rsvd_52_63[12];
+};
+
+struct lpfc_mbx_read_config {
+	uint32_t word1;
+#define lpfc_mbx_rd_conf_max_bbc_SHIFT		0
+#define lpfc_mbx_rd_conf_max_bbc_MASK		0x000000FF
+#define lpfc_mbx_rd_conf_max_bbc_WORD		word1
+#define lpfc_mbx_rd_conf_init_bbc_SHIFT		8
+#define lpfc_mbx_rd_conf_init_bbc_MASK		0x000000FF
+#define lpfc_mbx_rd_conf_init_bbc_WORD		word1
+	uint32_t word2;
+#define lpfc_mbx_rd_conf_nport_did_SHIFT	0
+#define lpfc_mbx_rd_conf_nport_did_MASK		0x00FFFFFF
+#define lpfc_mbx_rd_conf_nport_did_WORD		word2
+#define lpfc_mbx_rd_conf_topology_SHIFT		24
+#define lpfc_mbx_rd_conf_topology_MASK		0x000000FF
+#define lpfc_mbx_rd_conf_topology_WORD		word2
+	uint32_t word3;
+#define lpfc_mbx_rd_conf_ao_SHIFT		0
+#define lpfc_mbx_rd_conf_ao_MASK		0x00000001
+#define lpfc_mbx_rd_conf_ao_WORD		word3
+#define lpfc_mbx_rd_conf_bb_scn_SHIFT		8
+#define lpfc_mbx_rd_conf_bb_scn_MASK		0x0000000F
+#define lpfc_mbx_rd_conf_bb_scn_WORD		word3
+#define lpfc_mbx_rd_conf_cbb_scn_SHIFT		12
+#define lpfc_mbx_rd_conf_cbb_scn_MASK		0x0000000F
+#define lpfc_mbx_rd_conf_cbb_scn_WORD		word3
+#define lpfc_mbx_rd_conf_mc_SHIFT		29
+#define lpfc_mbx_rd_conf_mc_MASK		0x00000001
+#define lpfc_mbx_rd_conf_mc_WORD		word3
+	uint32_t word4;
+#define lpfc_mbx_rd_conf_e_d_tov_SHIFT		0
+#define lpfc_mbx_rd_conf_e_d_tov_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_e_d_tov_WORD		word4
+	uint32_t word5;
+#define lpfc_mbx_rd_conf_lp_tov_SHIFT		0
+#define lpfc_mbx_rd_conf_lp_tov_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_lp_tov_WORD		word5
+	uint32_t word6;
+#define lpfc_mbx_rd_conf_r_a_tov_SHIFT		0
+#define lpfc_mbx_rd_conf_r_a_tov_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_r_a_tov_WORD		word6
+	uint32_t word7;
+#define lpfc_mbx_rd_conf_r_t_tov_SHIFT		0
+#define lpfc_mbx_rd_conf_r_t_tov_MASK		0x000000FF
+#define lpfc_mbx_rd_conf_r_t_tov_WORD		word7
+	uint32_t word8;
+#define lpfc_mbx_rd_conf_al_tov_SHIFT		0
+#define lpfc_mbx_rd_conf_al_tov_MASK		0x0000000F
+#define lpfc_mbx_rd_conf_al_tov_WORD		word8
+	uint32_t word9;
+#define lpfc_mbx_rd_conf_lmt_SHIFT		0
+#define lpfc_mbx_rd_conf_lmt_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_lmt_WORD		word9
+	uint32_t word10;
+#define lpfc_mbx_rd_conf_max_alpa_SHIFT		0
+#define lpfc_mbx_rd_conf_max_alpa_MASK		0x000000FF
+#define lpfc_mbx_rd_conf_max_alpa_WORD		word10
+	uint32_t word11_rsvd;
+	uint32_t word12;
+#define lpfc_mbx_rd_conf_xri_base_SHIFT		0
+#define lpfc_mbx_rd_conf_xri_base_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_xri_base_WORD		word12
+#define lpfc_mbx_rd_conf_xri_count_SHIFT	16
+#define lpfc_mbx_rd_conf_xri_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_xri_count_WORD		word12
+	uint32_t word13;
+#define lpfc_mbx_rd_conf_rpi_base_SHIFT		0
+#define lpfc_mbx_rd_conf_rpi_base_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_rpi_base_WORD		word13
+#define lpfc_mbx_rd_conf_rpi_count_SHIFT	16
+#define lpfc_mbx_rd_conf_rpi_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_rpi_count_WORD		word13
+	uint32_t word14;
+#define lpfc_mbx_rd_conf_vpi_base_SHIFT		0
+#define lpfc_mbx_rd_conf_vpi_base_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_vpi_base_WORD		word14
+#define lpfc_mbx_rd_conf_vpi_count_SHIFT	16
+#define lpfc_mbx_rd_conf_vpi_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_vpi_count_WORD		word14
+	uint32_t word15;
+#define lpfc_mbx_rd_conf_vfi_base_SHIFT         0
+#define lpfc_mbx_rd_conf_vfi_base_MASK          0x0000FFFF
+#define lpfc_mbx_rd_conf_vfi_base_WORD          word15
+#define lpfc_mbx_rd_conf_vfi_count_SHIFT        16
+#define lpfc_mbx_rd_conf_vfi_count_MASK         0x0000FFFF
+#define lpfc_mbx_rd_conf_vfi_count_WORD         word15
+	uint32_t word16;
+#define lpfc_mbx_rd_conf_fcfi_base_SHIFT	0
+#define lpfc_mbx_rd_conf_fcfi_base_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_fcfi_base_WORD		word16
+#define lpfc_mbx_rd_conf_fcfi_count_SHIFT	16
+#define lpfc_mbx_rd_conf_fcfi_count_MASK	0x0000FFFF
+#define lpfc_mbx_rd_conf_fcfi_count_WORD	word16
+	uint32_t word17;
+#define lpfc_mbx_rd_conf_rq_count_SHIFT		0
+#define lpfc_mbx_rd_conf_rq_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_rq_count_WORD		word17
+#define lpfc_mbx_rd_conf_eq_count_SHIFT		16
+#define lpfc_mbx_rd_conf_eq_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_eq_count_WORD		word17
+	uint32_t word18;
+#define lpfc_mbx_rd_conf_wq_count_SHIFT		0
+#define lpfc_mbx_rd_conf_wq_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_wq_count_WORD		word18
+#define lpfc_mbx_rd_conf_cq_count_SHIFT		16
+#define lpfc_mbx_rd_conf_cq_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_cq_count_WORD		word18
+};
+
+struct lpfc_mbx_request_features {
+	uint32_t word1;
+#define lpfc_mbx_rq_ftr_qry_SHIFT		0
+#define lpfc_mbx_rq_ftr_qry_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_qry_WORD		word1
+	uint32_t word2;
+#define lpfc_mbx_rq_ftr_rq_iaab_SHIFT		0
+#define lpfc_mbx_rq_ftr_rq_iaab_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_iaab_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_npiv_SHIFT		1
+#define lpfc_mbx_rq_ftr_rq_npiv_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_npiv_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_dif_SHIFT		2
+#define lpfc_mbx_rq_ftr_rq_dif_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_dif_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_vf_SHIFT		3
+#define lpfc_mbx_rq_ftr_rq_vf_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_vf_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_fcpi_SHIFT		4
+#define lpfc_mbx_rq_ftr_rq_fcpi_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpi_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_fcpt_SHIFT		5
+#define lpfc_mbx_rq_ftr_rq_fcpt_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpt_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_fcpc_SHIFT		6
+#define lpfc_mbx_rq_ftr_rq_fcpc_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpc_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT		7
+#define lpfc_mbx_rq_ftr_rq_ifip_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_ifip_WORD		word2
+	uint32_t word3;
+#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT		0
+#define lpfc_mbx_rq_ftr_rsp_iaab_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_iaab_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_npiv_SHIFT		1
+#define lpfc_mbx_rq_ftr_rsp_npiv_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_npiv_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_dif_SHIFT		2
+#define lpfc_mbx_rq_ftr_rsp_dif_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_dif_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_vf_SHIFT		3
+#define lpfc_mbx_rq_ftr_rsp_vf_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_vf_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_fcpi_SHIFT		4
+#define lpfc_mbx_rq_ftr_rsp_fcpi_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpi_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_fcpt_SHIFT		5
+#define lpfc_mbx_rq_ftr_rsp_fcpt_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpt_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_fcpc_SHIFT		6
+#define lpfc_mbx_rq_ftr_rsp_fcpc_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpc_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT		7
+#define lpfc_mbx_rq_ftr_rsp_ifip_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_ifip_WORD		word3
+};
+
+/* Mailbox Completion Queue Error Messages */
+#define MB_CQE_STATUS_SUCCESS 			0x0
+#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES	0x1
+#define MB_CQE_STATUS_INVALID_PARAMETER		0x2
+#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES	0x3
+#define MB_CEQ_STATUS_QUEUE_FLUSHING		0x4
+#define MB_CQE_STATUS_DMA_FAILED		0x5
+
+/* mailbox queue entry structure */
+struct lpfc_mqe {
+	uint32_t word0;
+#define lpfc_mqe_status_SHIFT		16
+#define lpfc_mqe_status_MASK		0x0000FFFF
+#define lpfc_mqe_status_WORD		word0
+#define lpfc_mqe_command_SHIFT		8
+#define lpfc_mqe_command_MASK		0x000000FF
+#define lpfc_mqe_command_WORD		word0
+	union {
+		uint32_t mb_words[LPFC_SLI4_MB_WORD_COUNT - 1];
+		/* sli4 mailbox commands */
+		struct lpfc_mbx_sli4_config sli4_config;
+		struct lpfc_mbx_init_vfi init_vfi;
+		struct lpfc_mbx_reg_vfi reg_vfi;
+		struct lpfc_mbx_reg_vfi unreg_vfi;
+		struct lpfc_mbx_init_vpi init_vpi;
+		struct lpfc_mbx_resume_rpi resume_rpi;
+		struct lpfc_mbx_read_fcf_tbl read_fcf_tbl;
+		struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry;
+		struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
+		struct lpfc_mbx_reg_fcfi reg_fcfi;
+		struct lpfc_mbx_unreg_fcfi unreg_fcfi;
+		struct lpfc_mbx_mq_create mq_create;
+		struct lpfc_mbx_eq_create eq_create;
+		struct lpfc_mbx_cq_create cq_create;
+		struct lpfc_mbx_wq_create wq_create;
+		struct lpfc_mbx_rq_create rq_create;
+		struct lpfc_mbx_mq_destroy mq_destroy;
+		struct lpfc_mbx_eq_destroy eq_destroy;
+		struct lpfc_mbx_cq_destroy cq_destroy;
+		struct lpfc_mbx_wq_destroy wq_destroy;
+		struct lpfc_mbx_rq_destroy rq_destroy;
+		struct lpfc_mbx_post_sgl_pages post_sgl_pages;
+		struct lpfc_mbx_nembed_cmd nembed_cmd;
+		struct lpfc_mbx_read_rev read_rev;
+		struct lpfc_mbx_read_vpi read_vpi;
+		struct lpfc_mbx_read_config rd_config;
+		struct lpfc_mbx_request_features req_ftrs;
+		struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
+		struct lpfc_mbx_nop nop;
+	} un;
+};
+
+struct lpfc_mcqe {
+	uint32_t word0;
+#define lpfc_mcqe_status_SHIFT		0
+#define lpfc_mcqe_status_MASK		0x0000FFFF
+#define lpfc_mcqe_status_WORD		word0
+#define lpfc_mcqe_ext_status_SHIFT	16
+#define lpfc_mcqe_ext_status_MASK  	0x0000FFFF
+#define lpfc_mcqe_ext_status_WORD 	word0
+	uint32_t mcqe_tag0;
+	uint32_t mcqe_tag1;
+	uint32_t trailer;
+#define lpfc_trailer_valid_SHIFT	31
+#define lpfc_trailer_valid_MASK		0x00000001
+#define lpfc_trailer_valid_WORD		trailer
+#define lpfc_trailer_async_SHIFT	30
+#define lpfc_trailer_async_MASK		0x00000001
+#define lpfc_trailer_async_WORD		trailer
+#define lpfc_trailer_hpi_SHIFT		29
+#define lpfc_trailer_hpi_MASK		0x00000001
+#define lpfc_trailer_hpi_WORD		trailer
+#define lpfc_trailer_completed_SHIFT	28
+#define lpfc_trailer_completed_MASK	0x00000001
+#define lpfc_trailer_completed_WORD	trailer
+#define lpfc_trailer_consumed_SHIFT	27
+#define lpfc_trailer_consumed_MASK	0x00000001
+#define lpfc_trailer_consumed_WORD	trailer
+#define lpfc_trailer_type_SHIFT		16
+#define lpfc_trailer_type_MASK		0x000000FF
+#define lpfc_trailer_type_WORD		trailer
+#define lpfc_trailer_code_SHIFT		8
+#define lpfc_trailer_code_MASK		0x000000FF
+#define lpfc_trailer_code_WORD		trailer
+#define LPFC_TRAILER_CODE_LINK	0x1
+#define LPFC_TRAILER_CODE_FCOE	0x2
+#define LPFC_TRAILER_CODE_DCBX	0x3
+};
+
+struct lpfc_acqe_link {
+	uint32_t word0;
+#define lpfc_acqe_link_speed_SHIFT		24
+#define lpfc_acqe_link_speed_MASK		0x000000FF
+#define lpfc_acqe_link_speed_WORD		word0
+#define LPFC_ASYNC_LINK_SPEED_ZERO		0x0
+#define LPFC_ASYNC_LINK_SPEED_10MBPS		0x1
+#define LPFC_ASYNC_LINK_SPEED_100MBPS		0x2
+#define LPFC_ASYNC_LINK_SPEED_1GBPS		0x3
+#define LPFC_ASYNC_LINK_SPEED_10GBPS		0x4
+#define lpfc_acqe_link_duplex_SHIFT		16
+#define lpfc_acqe_link_duplex_MASK		0x000000FF
+#define lpfc_acqe_link_duplex_WORD		word0
+#define LPFC_ASYNC_LINK_DUPLEX_NONE		0x0
+#define LPFC_ASYNC_LINK_DUPLEX_HALF		0x1
+#define LPFC_ASYNC_LINK_DUPLEX_FULL		0x2
+#define lpfc_acqe_link_status_SHIFT		8
+#define lpfc_acqe_link_status_MASK		0x000000FF
+#define lpfc_acqe_link_status_WORD		word0
+#define LPFC_ASYNC_LINK_STATUS_DOWN		0x0
+#define LPFC_ASYNC_LINK_STATUS_UP		0x1
+#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN	0x2
+#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP	0x3
+#define lpfc_acqe_link_physical_SHIFT		0
+#define lpfc_acqe_link_physical_MASK		0x000000FF
+#define lpfc_acqe_link_physical_WORD		word0
+#define LPFC_ASYNC_LINK_PORT_A			0x0
+#define LPFC_ASYNC_LINK_PORT_B			0x1
+	uint32_t word1;
+#define lpfc_acqe_link_fault_SHIFT	0
+#define lpfc_acqe_link_fault_MASK	0x000000FF
+#define lpfc_acqe_link_fault_WORD	word1
+#define LPFC_ASYNC_LINK_FAULT_NONE	0x0
+#define LPFC_ASYNC_LINK_FAULT_LOCAL	0x1
+#define LPFC_ASYNC_LINK_FAULT_REMOTE	0x2
+	uint32_t event_tag;
+	uint32_t trailer;
+};
+
+struct lpfc_acqe_fcoe {
+	uint32_t fcf_index;
+	uint32_t word1;
+#define lpfc_acqe_fcoe_fcf_count_SHIFT		0
+#define lpfc_acqe_fcoe_fcf_count_MASK		0x0000FFFF
+#define lpfc_acqe_fcoe_fcf_count_WORD		word1
+#define lpfc_acqe_fcoe_event_type_SHIFT		16
+#define lpfc_acqe_fcoe_event_type_MASK		0x0000FFFF
+#define lpfc_acqe_fcoe_event_type_WORD		word1
+#define LPFC_FCOE_EVENT_TYPE_NEW_FCF		0x1
+#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL	0x2
+#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD		0x3
+	uint32_t event_tag;
+	uint32_t trailer;
+};
+
+struct lpfc_acqe_dcbx {
+	uint32_t tlv_ttl;
+	uint32_t reserved;
+	uint32_t event_tag;
+	uint32_t trailer;
+};
+
+/*
+ * Define the bootstrap mailbox (bmbx) region used to communicate
+ * mailbox command between the host and port. The mailbox consists
+ * of a payload area of 256 bytes and a completion queue of length
+ * 16 bytes.
+ */
+struct lpfc_bmbx_create {
+	struct lpfc_mqe mqe;
+	struct lpfc_mcqe mcqe;
+};
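/*
 * Editor's note: per the comment above, the bootstrap mailbox is a fixed
 * 256-byte payload followed by a 16-byte completion entry. Assuming
 * LPFC_SLI4_MB_WORD_COUNT is 64, struct lpfc_mqe is 64 words and
 * struct lpfc_mcqe is 4 words, which a compile-time check can pin down:
 */
static inline void lpfc_bmbx_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct lpfc_mqe) != 256);	/* mailbox payload */
	BUILD_BUG_ON(sizeof(struct lpfc_mcqe) != 16);	/* completion entry */
}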
+
+#define SGL_ALIGN_SZ 64
+#define SGL_PAGE_SIZE 4096
+/* align SGL addr on a size boundary - adjust address up */
+#define NO_XRI ((uint16_t)-1)
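/*
 * Editor's note: "adjust address up" in the comment above is the usual
 * power-of-two round-up. A sketch (SGL_ALIGN_SZ is 64, a power of two,
 * so the mask form below is exact):
 */
static inline uint64_t lpfc_sgl_align_up(uint64_t addr)
{
	return (addr + SGL_ALIGN_SZ - 1) & ~(uint64_t)(SGL_ALIGN_SZ - 1);
}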
+struct wqe_common {
+	uint32_t word6;
+#define wqe_xri_SHIFT         0
+#define wqe_xri_MASK          0x0000FFFF
+#define wqe_xri_WORD          word6
+#define wqe_ctxt_tag_SHIFT    16
+#define wqe_ctxt_tag_MASK     0x0000FFFF
+#define wqe_ctxt_tag_WORD     word6
+	uint32_t word7;
+#define wqe_ct_SHIFT          2
+#define wqe_ct_MASK           0x00000003
+#define wqe_ct_WORD           word7
+#define wqe_status_SHIFT      4
+#define wqe_status_MASK       0x0000000f
+#define wqe_status_WORD       word7
+#define wqe_cmnd_SHIFT        8
+#define wqe_cmnd_MASK         0x000000ff
+#define wqe_cmnd_WORD         word7
+#define wqe_class_SHIFT       16
+#define wqe_class_MASK        0x00000007
+#define wqe_class_WORD        word7
+#define wqe_pu_SHIFT          20
+#define wqe_pu_MASK           0x00000003
+#define wqe_pu_WORD           word7
+#define wqe_erp_SHIFT         22
+#define wqe_erp_MASK          0x00000001
+#define wqe_erp_WORD          word7
+#define wqe_lnk_SHIFT         23
+#define wqe_lnk_MASK          0x00000001
+#define wqe_lnk_WORD          word7
+#define wqe_tmo_SHIFT         24
+#define wqe_tmo_MASK          0x000000ff
+#define wqe_tmo_WORD          word7
+	uint32_t abort_tag; /* word 8 in WQE */
+	uint32_t word9;
+#define wqe_reqtag_SHIFT      0
+#define wqe_reqtag_MASK       0x0000FFFF
+#define wqe_reqtag_WORD       word9
+#define wqe_rcvoxid_SHIFT     16
+#define wqe_rcvoxid_MASK       0x0000FFFF
+#define wqe_rcvoxid_WORD       word9
+	uint32_t word10;
+#define wqe_pri_SHIFT         16
+#define wqe_pri_MASK          0x00000007
+#define wqe_pri_WORD          word10
+#define wqe_pv_SHIFT          19
+#define wqe_pv_MASK           0x00000001
+#define wqe_pv_WORD           word10
+#define wqe_xc_SHIFT          21
+#define wqe_xc_MASK           0x00000001
+#define wqe_xc_WORD           word10
+#define wqe_ccpe_SHIFT        23
+#define wqe_ccpe_MASK         0x00000001
+#define wqe_ccpe_WORD         word10
+#define wqe_ccp_SHIFT         24
+#define wqe_ccp_MASK         0x000000ff
+#define wqe_ccp_WORD         word10
+	uint32_t word11;
+#define wqe_cmd_type_SHIFT  0
+#define wqe_cmd_type_MASK   0x0000000f
+#define wqe_cmd_type_WORD   word11
+#define wqe_wqec_SHIFT      7
+#define wqe_wqec_MASK       0x00000001
+#define wqe_wqec_WORD       word11
+#define wqe_cqid_SHIFT      16
+#define wqe_cqid_MASK       0x000003ff
+#define wqe_cqid_WORD       word11
+};
+
+struct wqe_did {
+	uint32_t word5;
+#define wqe_els_did_SHIFT         0
+#define wqe_els_did_MASK          0x00FFFFFF
+#define wqe_els_did_WORD          word5
+#define wqe_xmit_bls_ar_SHIFT         30
+#define wqe_xmit_bls_ar_MASK          0x00000001
+#define wqe_xmit_bls_ar_WORD          word5
+#define wqe_xmit_bls_xo_SHIFT         31
+#define wqe_xmit_bls_xo_MASK          0x00000001
+#define wqe_xmit_bls_xo_WORD          word5
+};
+
+struct els_request64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t payload_len;
+	uint32_t word4;
+#define els_req64_sid_SHIFT         0
+#define els_req64_sid_MASK          0x00FFFFFF
+#define els_req64_sid_WORD          word4
+#define els_req64_sp_SHIFT          24
+#define els_req64_sp_MASK           0x00000001
+#define els_req64_sp_WORD           word4
+#define els_req64_vf_SHIFT          25
+#define els_req64_vf_MASK           0x00000001
+#define els_req64_vf_WORD           word4
+	struct wqe_did	wqe_dest;
+	struct wqe_common wqe_com; /* words 6-11 */
+	uint32_t word12;
+#define els_req64_vfid_SHIFT        1
+#define els_req64_vfid_MASK         0x00000FFF
+#define els_req64_vfid_WORD         word12
+#define els_req64_pri_SHIFT         13
+#define els_req64_pri_MASK          0x00000007
+#define els_req64_pri_WORD          word12
+	uint32_t word13;
+#define els_req64_hopcnt_SHIFT      24
+#define els_req64_hopcnt_MASK       0x000000ff
+#define els_req64_hopcnt_WORD       word13
+	uint32_t reserved[2];
+};
+
+struct xmit_els_rsp64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t rsvd3;
+	uint32_t rsvd4;
+	struct wqe_did	wqe_dest;
+	struct wqe_common wqe_com; /* words 6-11 */
+	uint32_t rsvd_12_15[4];
+};
+
+struct xmit_bls_rsp64_wqe {
+	uint32_t payload0;
+	uint32_t word1;
+#define xmit_bls_rsp64_rxid_SHIFT  0
+#define xmit_bls_rsp64_rxid_MASK   0x0000ffff
+#define xmit_bls_rsp64_rxid_WORD   word1
+#define xmit_bls_rsp64_oxid_SHIFT  16
+#define xmit_bls_rsp64_oxid_MASK   0x0000ffff
+#define xmit_bls_rsp64_oxid_WORD   word1
+	uint32_t word2;
+#define xmit_bls_rsp64_seqcntlo_SHIFT  0
+#define xmit_bls_rsp64_seqcntlo_MASK   0x0000ffff
+#define xmit_bls_rsp64_seqcntlo_WORD   word2
+#define xmit_bls_rsp64_seqcnthi_SHIFT  16
+#define xmit_bls_rsp64_seqcnthi_MASK   0x0000ffff
+#define xmit_bls_rsp64_seqcnthi_WORD   word2
+	uint32_t rsrvd3;
+	uint32_t rsrvd4;
+	struct wqe_did	wqe_dest;
+	struct wqe_common wqe_com; /* words 6-11 */
+	uint32_t rsvd_12_15[4];
+};
+struct wqe_rctl_dfctl {
+	uint32_t word5;
+#define wqe_si_SHIFT 2
+#define wqe_si_MASK  0x00000001
+#define wqe_si_WORD  word5
+#define wqe_la_SHIFT 3
+#define wqe_la_MASK  0x00000001
+#define wqe_la_WORD  word5
+#define wqe_ls_SHIFT 7
+#define wqe_ls_MASK  0x00000001
+#define wqe_ls_WORD  word5
+#define wqe_dfctl_SHIFT 8
+#define wqe_dfctl_MASK  0x000000ff
+#define wqe_dfctl_WORD  word5
+#define wqe_type_SHIFT 16
+#define wqe_type_MASK  0x000000ff
+#define wqe_type_WORD  word5
+#define wqe_rctl_SHIFT 24
+#define wqe_rctl_MASK  0x000000ff
+#define wqe_rctl_WORD  word5
+};
+
+struct xmit_seq64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t payload_offset;
+	uint32_t relative_offset;
+	struct wqe_rctl_dfctl wge_ctl;
+	struct wqe_common wqe_com; /* words 6-11 */
+	/* Note: word10 different REVISIT */
+	uint32_t xmit_len;
+	uint32_t rsvd_12_15[3];
+};
+struct xmit_bcast64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t payload_len;
+	uint32_t rsvd4;
+	struct wqe_rctl_dfctl wge_ctl; /* word 5 */
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsvd_12_15[4];
+};
+
+struct gen_req64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t command_len;
+	uint32_t payload_len;
+	struct wqe_rctl_dfctl wge_ctl; /* word 5 */
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsvd_12_15[4];
+};
+
+struct create_xri_wqe {
+	uint32_t rsrvd[5];           /* words 0-4 */
+	struct wqe_did	wqe_dest;  /* word 5 */
+	struct wqe_common wqe_com; /* words 6-11 */
+	uint32_t rsvd_12_15[4];         /* word 12-15 */
+};
+
+#define T_REQUEST_TAG 3
+#define T_XRI_TAG 1
+
+struct abort_cmd_wqe {
+	uint32_t rsrvd[3];
+	uint32_t word3;
+#define	abort_cmd_ia_SHIFT  0
+#define	abort_cmd_ia_MASK  0x00000001
+#define	abort_cmd_ia_WORD  word3
+#define	abort_cmd_criteria_SHIFT  8
+#define	abort_cmd_criteria_MASK  0x000000ff
+#define	abort_cmd_criteria_WORD  word3
+	uint32_t rsrvd4;
+	uint32_t rsrvd5;
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsvd_12_15[4];         /* word 12-15 */
+};
+
+struct fcp_iwrite64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t payload_len;
+	uint32_t total_xfer_len;
+	uint32_t initial_xfer_len;
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsvd_12_15[4];         /* word 12-15 */
+};
+
+struct fcp_iread64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t payload_len;          /* word 3 */
+	uint32_t total_xfer_len;       /* word 4 */
+	uint32_t rsrvd5;               /* word 5 */
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsvd_12_15[4];         /* word 12-15 */
+};
+
+struct fcp_icmnd64_wqe {
+	struct ulp_bde64 bde;	 /* words 0-2 */
+	uint32_t rsrvd[3];             /* words 3-5 */
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsvd_12_15[4];         /* word 12-15 */
+};
+
+
+union lpfc_wqe {
+	uint32_t words[16];
+	struct lpfc_wqe_generic generic;
+	struct fcp_icmnd64_wqe fcp_icmd;
+	struct fcp_iread64_wqe fcp_iread;
+	struct fcp_iwrite64_wqe fcp_iwrite;
+	struct abort_cmd_wqe abort_cmd;
+	struct create_xri_wqe create_xri;
+	struct xmit_bcast64_wqe xmit_bcast64;
+	struct xmit_seq64_wqe xmit_sequence;
+	struct xmit_bls_rsp64_wqe xmit_bls_rsp;
+	struct xmit_els_rsp64_wqe xmit_els_rsp;
+	struct els_request64_wqe els_req;
+	struct gen_req64_wqe gen_req;
+};
+
+#define FCP_COMMAND 0x0
+#define FCP_COMMAND_DATA_OUT 0x1
+#define ELS_COMMAND_NON_FIP 0xC
+#define ELS_COMMAND_FIP 0xD
+#define OTHER_COMMAND 0x8
+
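/*
 * Editor's note: the command-type values above are what the driver writes
 * into the wqe_cmd_type field of word11 (see wqe_common). A sketch using
 * the bf_set() convention from earlier; the CQ id of 0 is a placeholder
 * only:
 */
static inline void wqe_mark_els_req(union lpfc_wqe *wqe)
{
	bf_set(wqe_cmd_type, &wqe->els_req.wqe_com, ELS_COMMAND_NON_FIP);
	bf_set(wqe_cqid, &wqe->els_req.wqe_com, 0);
}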

File diff suppressed because it is too large
+ 758 - 199
drivers/scsi/lpfc/lpfc_init.c


+ 30 - 24
drivers/scsi/lpfc/lpfc_logmsg.h

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -18,33 +18,39 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LOG_ELS                       0x1	/* ELS events */
-#define LOG_DISCOVERY                 0x2	/* Link discovery events */
-#define LOG_MBOX                      0x4	/* Mailbox events */
-#define LOG_INIT                      0x8	/* Initialization events */
-#define LOG_LINK_EVENT                0x10	/* Link events */
-#define LOG_IP                        0x20	/* IP traffic history */
-#define LOG_FCP                       0x40	/* FCP traffic history */
-#define LOG_NODE                      0x80	/* Node table events */
-#define LOG_TEMP                      0x100	/* Temperature sensor events */
-#define LOG_BG			      0x200	/* BlockGuard events */
-#define LOG_MISC                      0x400	/* Miscellaneous events */
-#define LOG_SLI                       0x800	/* SLI events */
-#define LOG_FCP_ERROR                 0x1000	/* log errors, not underruns */
-#define LOG_LIBDFC                    0x2000	/* Libdfc events */
-#define LOG_VPORT                     0x4000	/* NPIV events */
-#define LOG_ALL_MSG                   0xffff	/* LOG all messages */
+#define LOG_ELS		0x00000001	/* ELS events */
+#define LOG_DISCOVERY	0x00000002	/* Link discovery events */
+#define LOG_MBOX	0x00000004	/* Mailbox events */
+#define LOG_INIT	0x00000008	/* Initialization events */
+#define LOG_LINK_EVENT	0x00000010	/* Link events */
+#define LOG_IP		0x00000020	/* IP traffic history */
+#define LOG_FCP		0x00000040	/* FCP traffic history */
+#define LOG_NODE	0x00000080	/* Node table events */
+#define LOG_TEMP	0x00000100	/* Temperature sensor events */
+#define LOG_BG		0x00000200	/* BlockGuard events */
+#define LOG_MISC	0x00000400	/* Miscellaneous events */
+#define LOG_SLI		0x00000800	/* SLI events */
+#define LOG_FCP_ERROR	0x00001000	/* log errors, not underruns */
+#define LOG_LIBDFC	0x00002000	/* Libdfc events */
+#define LOG_VPORT	0x00004000	/* NPIV events */
+#define LOG_SECURITY	0x00008000	/* Security events */
+#define LOG_EVENT	0x00010000	/* CT, TEMP, DUMP logging */
+#define LOG_ALL_MSG	0xffffffff	/* LOG all messages */
 
 #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
-	do { \
-	{ if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \
+do { \
+	{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
 		dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
 			   fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \
-	} while (0)
+} while (0)
 
 #define lpfc_printf_log(phba, level, mask, fmt, arg...) \
-	do { \
-	{ if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \
+do { \
+	{ uint32_t log_verbose = (phba)->pport ? \
+				 (phba)->pport->cfg_log_verbose : \
+				 (phba)->cfg_log_verbose; \
+	  if (((mask) & log_verbose) || (level[1] <= '3')) \
 		dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
-			   fmt, phba->brd_no, ##arg); } \
-	} while (0)
+			   fmt, phba->brd_no, ##arg); \
+	} \
+} while (0)

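The reworked lpfc_printf_log() above gates each message on a 32-bit verbose mask, falling back to the HBA-level mask before any port exists, and always lets severities of KERN_ERR and worse through (level[1] <= '3'). A hedged usage sketch; the "0442" message code is illustrative only:

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_SLI,
			"0442 SLI4 queue setup complete\n");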
+ 631 - 43
drivers/scsi/lpfc/lpfc_mbox.c

@@ -1,7 +1,7 @@
 /*******************************************************************
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -28,8 +28,10 @@
 
 
 #include <scsi/scsi.h>
 #include <scsi/scsi.h>
 
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
 #include "lpfc_scsi.h"
@@ -38,6 +40,44 @@
 #include "lpfc_crtn.h"
 #include "lpfc_crtn.h"
 #include "lpfc_compat.h"
 #include "lpfc_compat.h"
 
 
+/**
+ * lpfc_dump_static_vport - Dump HBA's static vport information.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @offset: offset for dumping vport info.
+ *
+ * The dump mailbox command provides a method for the device driver to obtain
+ * various types of information from the HBA device.
+ *
+ * This routine prepares the mailbox command for dumping list of static
+ * vports to be created.
+ **/
+void
+lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
+		uint16_t offset)
+{
+	MAILBOX_t *mb;
+	void *ctx;
+
+	mb = &pmb->u.mb;
+	ctx = pmb->context2;
+
+	/* Setup to dump vport info region */
+	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+	mb->mbxCommand = MBX_DUMP_MEMORY;
+	mb->un.varDmp.cv = 1;
+	mb->un.varDmp.type = DMP_NV_PARAMS;
+	mb->un.varDmp.entry_index = offset;
+	mb->un.varDmp.region_id = DMP_REGION_VPORT;
+	mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
+	mb->un.varDmp.co = 0;
+	mb->un.varDmp.resp_offset = 0;
+	pmb->context2 = ctx;
+	mb->mbxOwner = OWN_HOST;
+
+	return;
+}
+
 /**
 /**
  * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory
  * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory
  * @phba: pointer to lpfc hba data structure.
  * @phba: pointer to lpfc hba data structure.
@@ -58,7 +98,7 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
 	MAILBOX_t *mb;
 	MAILBOX_t *mb;
 	void *ctx;
 	void *ctx;
 
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	ctx = pmb->context2;
 	ctx = pmb->context2;
 
 
 	/* Setup to dump VPD region */
 	/* Setup to dump VPD region */
@@ -90,7 +130,7 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	MAILBOX_t *mb;
 	MAILBOX_t *mb;
 	void *ctx;
 	void *ctx;
 
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	/* Save context so that we can restore after memset */
 	/* Save context so that we can restore after memset */
 	ctx = pmb->context2;
 	ctx = pmb->context2;
 
 
@@ -125,7 +165,7 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
 {
 	MAILBOX_t *mb;
 	MAILBOX_t *mb;
 
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 	mb->mbxCommand = MBX_READ_NV;
 	mb->mbxCommand = MBX_READ_NV;
 	mb->mbxOwner = OWN_HOST;
 	mb->mbxOwner = OWN_HOST;
@@ -151,7 +191,7 @@ lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
 {
 {
 	MAILBOX_t *mb;
 	MAILBOX_t *mb;
 
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 	mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
 	mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
 	mb->un.varCfgAsyncEvent.ring = ring;
 	mb->un.varCfgAsyncEvent.ring = ring;
@@ -177,7 +217,7 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
 {
 	MAILBOX_t *mb;
 	MAILBOX_t *mb;
 
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 	mb->mbxCommand = MBX_HEARTBEAT;
 	mb->mbxCommand = MBX_HEARTBEAT;
 	mb->mbxOwner = OWN_HOST;
 	mb->mbxOwner = OWN_HOST;
@@ -211,7 +251,7 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
 	struct lpfc_sli *psli;
 	struct lpfc_sli *psli;
 
 
 	psli = &phba->sli;
 	psli = &phba->sli;
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
 
 	INIT_LIST_HEAD(&mp->list);
 	INIT_LIST_HEAD(&mp->list);
@@ -248,7 +288,7 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
 {
 	MAILBOX_t *mb;
 	MAILBOX_t *mb;
 
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
 
 	mb->un.varClearLA.eventTag = phba->fc_eventTag;
 	mb->un.varClearLA.eventTag = phba->fc_eventTag;
@@ -275,7 +315,7 @@ void
 lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
 {
 	struct lpfc_vport  *vport = phba->pport;
 	struct lpfc_vport  *vport = phba->pport;
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
 
 	/* NEW_FEATURE
 	/* NEW_FEATURE
@@ -321,7 +361,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 int
 int
 lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 {
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	uint32_t attentionConditions[2];
 	uint32_t attentionConditions[2];
 
 
 	/* Sanity check */
 	/* Sanity check */
@@ -405,7 +445,7 @@ lpfc_init_link(struct lpfc_hba * phba,
 	struct lpfc_sli *psli;
 	struct lpfc_sli *psli;
 	MAILBOX_t *mb;
 	MAILBOX_t *mb;
 
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
 
 	psli = &phba->sli;
 	psli = &phba->sli;
@@ -492,7 +532,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
 	struct lpfc_sli *psli;
 	struct lpfc_sli *psli;
 
 
 	psli = &phba->sli;
 	psli = &phba->sli;
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
 
 	mb->mbxOwner = OWN_HOST;
 	mb->mbxOwner = OWN_HOST;
@@ -515,7 +555,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
 	mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
 	mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
 	mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
 	mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
 	mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
 	mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
-	mb->un.varRdSparm.vpi = vpi;
+	mb->un.varRdSparm.vpi = vpi + phba->vpi_base;
 
 
 	/* save address for completion */
 	/* save address for completion */
 	pmb->context1 = mp;
 	pmb->context1 = mp;
@@ -544,10 +584,12 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
 {
 {
 	MAILBOX_t *mb;
 	MAILBOX_t *mb;
 
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
 
 	mb->un.varUnregDID.did = did;
 	mb->un.varUnregDID.did = did;
+	if (vpi != 0xffff)
+		vpi += phba->vpi_base;
 	mb->un.varUnregDID.vpi = vpi;
 	mb->un.varUnregDID.vpi = vpi;
 
 
 	mb->mbxCommand = MBX_UNREG_D_ID;
 	mb->mbxCommand = MBX_UNREG_D_ID;
@@ -573,7 +615,7 @@ lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
 {
 	MAILBOX_t *mb;
 	MAILBOX_t *mb;
 
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
 
 	mb->mbxCommand = MBX_READ_CONFIG;
 	mb->mbxCommand = MBX_READ_CONFIG;
@@ -598,7 +640,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
 	MAILBOX_t *mb;

-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

 	mb->mbxCommand = MBX_READ_LNK_STAT;
@@ -607,7 +649,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 }

 /**
- * lpfc_reg_login - Prepare a mailbox command for registering remote login
+ * lpfc_reg_rpi - Prepare a mailbox command for registering remote login
  * @phba: pointer to lpfc hba data structure.
  * @vpi: virtual N_Port identifier.
  * @did: remote port identifier.
@@ -631,17 +673,23 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
  *    1 - DMA memory allocation failed
  **/
 int
-lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
+lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
 	       uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
 {
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	uint8_t *sparam;
 	struct lpfc_dmabuf *mp;

 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

 	mb->un.varRegLogin.rpi = 0;
-	mb->un.varRegLogin.vpi = vpi;
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		mb->un.varRegLogin.rpi = lpfc_sli4_alloc_rpi(phba);
+		if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
+			return 1;
+	}
+
+	mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
 	mb->un.varRegLogin.did = did;
 	mb->un.varWords[30] = flag;	/* Set flag to issue action on cmpl */
 
 
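A recurring pattern in these hunks: host-relative VPI numbers are biased by phba->vpi_base before they are written into any mailbox field. A minimal sketch of that mapping, with a hypothetical helper name that is not part of the patch:

	/* Hypothetical helper, not in the patch: the driver numbers VPIs from
	 * 0 per HBA, while the firmware expects them offset by vpi_base.
	 */
	static inline uint16_t example_host_vpi_to_fw(struct lpfc_hba *phba,
						      uint16_t vpi)
	{
		return vpi + phba->vpi_base;
	}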
@@ -697,15 +745,16 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
 {
 	MAILBOX_t *mb;

-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

 	mb->un.varUnregLogin.rpi = (uint16_t) rpi;
 	mb->un.varUnregLogin.rsvd1 = 0;
-	mb->un.varUnregLogin.vpi = vpi;
+	mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;

 	mb->mbxCommand = MBX_UNREG_LOGIN;
 	mb->mbxOwner = OWN_HOST;
+
 	return;
 }
 
 
@@ -725,15 +774,15 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
  * This routine prepares the mailbox command for registering a virtual N_Port.
  **/
 void
-lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
-	     LPFC_MBOXQ_t *pmb)
+lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
 {
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;

 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

-	mb->un.varRegVpi.vpi = vpi;
-	mb->un.varRegVpi.sid = sid;
+	mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
+	mb->un.varRegVpi.sid = vport->fc_myDID;
+	mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;

 	mb->mbxCommand = MBX_REG_VPI;
 	mb->mbxOwner = OWN_HOST;
@@ -760,10 +809,10 @@ lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
 void
 lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
 {
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

-	mb->un.varUnregVpi.vpi = vpi;
+	mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;

 	mb->mbxCommand = MBX_UNREG_VPI;
 	mb->mbxOwner = OWN_HOST;
@@ -852,7 +901,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
 void
 lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 	mb->un.varRdRev.cv = 1;
 	mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
@@ -945,7 +994,7 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
 		uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
 {
 	int i;
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;

 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -1020,7 +1069,7 @@ void
 lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
 {
 	int i;
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	struct lpfc_sli *psli;
 	struct lpfc_sli_ring *pring;
 
 
@@ -1075,7 +1124,7 @@ void
 lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	dma_addr_t pdma_addr;
 	uint32_t bar_low, bar_high;
 	size_t offset;
@@ -1099,21 +1148,22 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
 
 	/* If HBA supports SLI=3 ask for it */

-	if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
+	if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
 		if (phba->cfg_enable_bg)
 			mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
+		mb->un.varCfgPort.cdss = 1; /* Configure Security */
 		mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
 		mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
 		mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
 		mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
 		if (phba->max_vpi && phba->cfg_enable_npiv &&
 		    phba->vpd.sli3Feat.cmv) {
-			mb->un.varCfgPort.max_vpi = phba->max_vpi;
+			mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
 			mb->un.varCfgPort.cmv = 1;
 		} else
 			mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
 	} else
-		phba->sli_rev = 2;
+		phba->sli_rev = LPFC_SLI_REV2;
 	mb->un.varCfgPort.sli_mode = phba->sli_rev;

 	/* Now setup pcb */
@@ -1245,7 +1295,7 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 void
 lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;

 	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
 	mb->mbxCommand = MBX_KILL_BOARD;
@@ -1304,29 +1354,98 @@ lpfc_mbox_get(struct lpfc_hba * phba)
 	return mbq;
 }
 
 
+/**
+ * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list
+ * @phba: pointer to lpfc hba data structure.
+ * @mbq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine puts the completed mailbox command into the mailbox command
+ * complete list. This is the unlocked version of the routine. The mailbox
+ * complete list is used by the driver worker thread to process mailbox
+ * complete callback functions outside the driver interrupt handler.
+ **/
+void
+__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
+{
+	list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
+}
+
 /**
  * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list
  * @phba: pointer to lpfc hba data structure.
  * @mbq: pointer to the driver internal queue element for mailbox command.
  *
  * This routine puts the completed mailbox command into the mailbox command
- * complete list. This routine is called from driver interrupt handler
- * context. The mailbox complete list is used by the driver worker thread
- * to process mailbox complete callback functions outside the driver interrupt
- * handler.
+ * complete list. This is the locked version of the routine. The mailbox
+ * complete list is used by the driver worker thread to process mailbox
+ * complete callback functions outside the driver interrupt handler.
  **/
 void
-lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
+lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
 {
 	unsigned long iflag;

 	/* This function expects to be called from interrupt context */
 	spin_lock_irqsave(&phba->hbalock, iflag);
-	list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
+	__lpfc_mbox_cmpl_put(phba, mbq);
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 	return;
 }
 
 
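A sketch of the locking contract this split establishes: call the unlocked __lpfc_mbox_cmpl_put() only when hbalock is already held, otherwise use the locked wrapper. The caller and its flag below are hypothetical:

	/* Hedged example, assuming a caller that may or may not hold hbalock */
	static void example_complete_mbox(struct lpfc_hba *phba,
					  LPFC_MBOXQ_t *mbq, bool hbalock_held)
	{
		unsigned long iflag;

		if (hbalock_held) {
			__lpfc_mbox_cmpl_put(phba, mbq);  /* lock already owned */
		} else {
			spin_lock_irqsave(&phba->hbalock, iflag);
			__lpfc_mbox_cmpl_put(phba, mbq);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
		}
	}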
+/**
+ * lpfc_mbox_cmd_check - Check the validity of a mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is to check whether a mailbox command is valid to be issued.
+ * This check is performed by the mailbox issue API when a client is about
+ * to issue a mailbox command to the mailbox transport.
+ *
+ * Return 0 - pass the check, -ENODEV - fail the check
+ **/
+int
+lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	/* Mailbox command that have a completion handler must also have a
+	 * vport specified.
+	 */
+	if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
+	    mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
+		if (!mboxq->vport) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
+					"1814 Mbox x%x failed, no vport\n",
+					mboxq->u.mb.mbxCommand);
+			dump_stack();
+			return -ENODEV;
+		}
+	}
+	return 0;
+}
+
+/**
+ * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to check whether the HBA device is ready for posting a
+ * mailbox command. It is used by the mailbox transport API at the time it
+ * is about to post a mailbox command to the device.
+ *
+ * Return 0 - pass the check, -ENODEV - fail the check
+ **/
+int
+lpfc_mbox_dev_check(struct lpfc_hba *phba)
+{
+	/* If the PCI channel is in offline state, do not issue mbox */
+	if (unlikely(pci_channel_offline(phba->pcidev)))
+		return -ENODEV;
+
+	/* If the HBA is in error state, do not issue mbox */
+	if (phba->link_state == LPFC_HBA_ERROR)
+		return -ENODEV;
+
+	return 0;
+}
+
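A minimal sketch of how an issue path could gate on these two checks before posting a command; the function name and the posting step are assumptions, not driver code:

	static int example_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
	{
		int rc;

		rc = lpfc_mbox_dev_check(phba);		/* device ready to post? */
		if (rc)
			return rc;
		rc = lpfc_mbox_cmd_check(phba, mboxq);	/* command well formed? */
		if (rc)
			return rc;
		/* ... hand mboxq to the mailbox transport here ... */
		return 0;
	}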
 /**
  * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value
  * @phba: pointer to lpfc hba data structure.
@@ -1350,6 +1469,475 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
 	case MBX_WRITE_WWN:     /* 0x98 */
 	case MBX_LOAD_EXP_ROM:	/* 0x9C */
 		return LPFC_MBOX_TMO_FLASH_CMD;
+	case MBX_SLI4_CONFIG:	/* 0x9b */
+		return LPFC_MBOX_SLI4_CONFIG_TMO;
 	}
 	return LPFC_MBOX_TMO;
 }
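lpfc_mbox_tmo_val() returns seconds, so a waiter has to scale the value before arming a timer; a small sketch under that assumption:

	/* Hedged sketch: compute an expiry for a polled mailbox wait */
	static unsigned long example_mbox_expiry(struct lpfc_hba *phba, int cmd)
	{
		return jiffies +
		       msecs_to_jiffies(lpfc_mbox_tmo_val(phba, cmd) * 1000);
	}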
+
+/**
+ * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command
+ * @mbox: pointer to lpfc mbox command.
+ * @sgentry: sge entry index.
+ * @phyaddr: physical address for the sge
+ * @length: Length of the sge.
+ *
+ * This routine sets up an entry in the non-embedded mailbox command at the sge
+ * index location.
+ **/
+void
+lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
+		      dma_addr_t phyaddr, uint32_t length)
+{
+	struct lpfc_mbx_nembed_cmd *nembed_sge;
+
+	nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+				&mbox->u.mqe.un.nembed_cmd;
+	nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
+	nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
+	nembed_sge->sge[sgentry].length = length;
+}
+
+/**
+ * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command
+ * @mbox: pointer to lpfc mbox command.
+ * @sgentry: sge entry index.
+ *
+ * This routine gets an entry from the non-embedded mailbox command at the sge
+ * index location.
+ **/
+void
+lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
+		      struct lpfc_mbx_sge *sge)
+{
+	struct lpfc_mbx_nembed_cmd *nembed_sge;
+
+	nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+				&mbox->u.mqe.un.nembed_cmd;
+	sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
+	sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
+	sge->length = nembed_sge->sge[sgentry].length;
+}
+
+/**
+ * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command.
+ *
+ * This routine frees an SLI4 specific mailbox command used for an IOCTL command.
+ **/
+void
+lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+	struct lpfc_mbx_sli4_config *sli4_cfg;
+	struct lpfc_mbx_sge sge;
+	dma_addr_t phyaddr;
+	uint32_t sgecount, sgentry;
+
+	sli4_cfg = &mbox->u.mqe.un.sli4_config;
+
+	/* For embedded mbox command, just free the mbox command */
+	if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
+		mempool_free(mbox, phba->mbox_mem_pool);
+		return;
+	}
+
+	/* For non-embedded mbox command, we need to free the pages first */
+	sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
+	/* There is nothing we can do if there is no sge address array */
+	if (unlikely(!mbox->sge_array)) {
+		mempool_free(mbox, phba->mbox_mem_pool);
+		return;
+	}
+	/* Each non-embedded DMA buffer was allocated a full page in length */
+	for (sgentry = 0; sgentry < sgecount; sgentry++) {
+		lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
+		phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
+		dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE,
+				  mbox->sge_array->addr[sgentry], phyaddr);
+	}
+	/* Free the sge address array memory */
+	kfree(mbox->sge_array);
+	/* Finally, free the mailbox command itself */
+	mempool_free(mbox, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_config - Initialize the  SLI4 Config Mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command.
+ * @subsystem: The sli4 config sub mailbox subsystem.
+ * @opcode: The sli4 config sub mailbox command opcode.
+ * @length: Length of the sli4 config mailbox command.
+ *
+ * This routine sets up the header fields of an SLI4 specific mailbox command
+ * for sending an IOCTL command.
+ *
+ * Return: the actual length of the mbox command allocated (mostly useful
+ *         for non-embedded mailbox commands).
+ **/
+int
+lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
+		 uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
+{
+	struct lpfc_mbx_sli4_config *sli4_config;
+	union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
+	uint32_t alloc_len;
+	uint32_t resid_len;
+	uint32_t pagen, pcount;
+	void *viraddr;
+	dma_addr_t phyaddr;
+
+	/* Set up SLI4 mailbox command header fields */
+	memset(mbox, 0, sizeof(*mbox));
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);
+
+	/* Set up SLI4 ioctl command header fields */
+	sli4_config = &mbox->u.mqe.un.sli4_config;
+
+	/* Setup for the embedded mbox command */
+	if (emb) {
+		/* Set up main header fields */
+		bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
+		sli4_config->header.cfg_mhdr.payload_length =
+					LPFC_MBX_CMD_HDR_LENGTH + length;
+		/* Set up sub-header fields following main header */
+		bf_set(lpfc_mbox_hdr_opcode,
+			&sli4_config->header.cfg_shdr.request, opcode);
+		bf_set(lpfc_mbox_hdr_subsystem,
+			&sli4_config->header.cfg_shdr.request, subsystem);
+		sli4_config->header.cfg_shdr.request.request_length = length;
+		return length;
+	}
+
+	/* Setup for the non-embedded mbox command */
+	pcount = (PAGE_ALIGN(length))/PAGE_SIZE;
+	pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
+				LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
+	/* Allocate record for keeping SGE virtual addresses */
+	mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
+				  GFP_KERNEL);
+	if (!mbox->sge_array)
+		return 0;
+
+	for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
+		/* The DMA memory is always allocated a full page, even
+		 * though the last SGE might not fill it; the fixed
+		 * PAGE_SIZE is then assumed when the memory is freed
+		 * later.
+		 */
+		viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE,
+					     &phyaddr, GFP_KERNEL);
+		/* If the allocation fails, proceed with whatever we have */
+		if (!viraddr)
+			break;
+		mbox->sge_array->addr[pagen] = viraddr;
+		/* Keep the first page for later sub-header construction */
+		if (pagen == 0)
+			cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
+		resid_len = length - alloc_len;
+		if (resid_len > PAGE_SIZE) {
+			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
+					      PAGE_SIZE);
+			alloc_len += PAGE_SIZE;
+		} else {
+			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
+					      resid_len);
+			alloc_len = length;
+		}
+	}
+
+	/* Set up main header fields in mailbox command */
+	sli4_config->header.cfg_mhdr.payload_length = alloc_len;
+	bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);
+
+	/* Set up sub-header fields into the first page */
+	if (pagen > 0) {
+		bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
+		bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
+		cfg_shdr->request.request_length =
+				alloc_len - sizeof(union  lpfc_sli4_cfg_shdr);
+	}
+	/* The sub-header is in DMA memory, which needs endian conversion */
+	lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
+			      sizeof(union  lpfc_sli4_cfg_shdr));
+
+	return alloc_len;
+}
+
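A sketch of the non-embedded SLI4_CONFIG lifecycle implied by the routines above: size the command with lpfc_sli4_config() and release every DMA page through lpfc_sli4_mbox_cmd_free(). The subsystem/opcode arguments and the function name are placeholders:

	static int example_sli4_config_cycle(struct lpfc_hba *phba,
					     uint8_t subsys, uint8_t opcode,
					     uint32_t reqlen)
	{
		struct lpfcMboxq *mbox;
		uint32_t alloc_len;

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			return -ENOMEM;
		/* false selects the non-embedded (SGE-based) form */
		alloc_len = lpfc_sli4_config(phba, mbox, subsys, opcode,
					     reqlen, false);
		if (alloc_len < reqlen) {
			lpfc_sli4_mbox_cmd_free(phba, mbox); /* frees pages too */
			return -ENOMEM;
		}
		/* ... issue the command and consume the response here ... */
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return 0;
	}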
+/**
+ * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command.
+ *
+ * This routine gets the opcode from an SLI4 specific mailbox command used
+ * for sending an IOCTL command. If the mailbox command is not MBX_SLI4_CONFIG
+ * (0x9B) or if the IOCTL sub-header is not present, opcode 0x0 shall be
+ * returned.
+ **/
+uint8_t
+lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+	struct lpfc_mbx_sli4_config *sli4_cfg;
+	union lpfc_sli4_cfg_shdr *cfg_shdr;
+
+	if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
+		return 0;
+	sli4_cfg = &mbox->u.mqe.un.sli4_config;
+
+	/* For embedded mbox command, get opcode from embedded sub-header*/
+	if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
+		cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
+		return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
+	}
+
+	/* For non-embedded mbox command, get opcode from first dma page */
+	if (unlikely(!mbox->sge_array))
+		return 0;
+	cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
+	return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
+}
+
+/**
+ * lpfc_request_features - Configure SLI4 REQUEST_FEATURES mailbox
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to lpfc mbox command.
+ *
+ * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES
+ * mailbox command.
+ **/
+void
+lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
+{
+	/* Set up SLI4 mailbox command header fields */
+	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
+	bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
+
+	/* Set up host requested features. */
+	bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
+
+	/* Virtual fabrics and FIPs are not supported yet. */
+	bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
+
+	/* Enable DIF (block guard) only if configured to do so. */
+	if (phba->cfg_enable_bg)
+		bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
+
+	/* Enable NPIV only if configured to do so. */
+	if (phba->max_vpi && phba->cfg_enable_npiv)
+		bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
+
+	return;
+}
+
+/**
+ * lpfc_init_vfi - Initialize the INIT_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vport: Vport associated with the VF.
+ *
+ * This routine initializes @mbox to all zeros and then fills in the mailbox
+ * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI
+ * in the context of an FCF. The driver issues this command to setup a VFI
+ * before issuing a FLOGI to login to the VSAN. The driver should also issue a
+ * REG_VFI after a successful VSAN login.
+ **/
+void
+lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
+{
+	struct lpfc_mbx_init_vfi *init_vfi;
+
+	memset(mbox, 0, sizeof(*mbox));
+	init_vfi = &mbox->u.mqe.un.init_vfi;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
+	bf_set(lpfc_init_vfi_vr, init_vfi, 1);
+	bf_set(lpfc_init_vfi_vt, init_vfi, 1);
+	bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
+	bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
+}
+
+/**
+ * lpfc_reg_vfi - Initialize the REG_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vport: vport associated with the VF.
+ * @phys: BDE DMA bus address used to send the service parameters to the HBA.
+ *
+ * This routine initializes @mbox to all zeros and then fills in the mailbox
+ * fields from @vport, and uses @buf as a DMAable buffer to send the vport's
+ * fc service parameters to the HBA for this VFI. REG_VFI configures virtual
+ * fabrics identified by VFI in the context of an FCF.
+ **/
+void
+lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
+{
+	struct lpfc_mbx_reg_vfi *reg_vfi;
+
+	memset(mbox, 0, sizeof(*mbox));
+	reg_vfi = &mbox->u.mqe.un.reg_vfi;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
+	bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
+	bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
+	bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
+	bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
+	reg_vfi->bde.addrHigh = putPaddrHigh(phys);
+	reg_vfi->bde.addrLow = putPaddrLow(phys);
+	reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
+	reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+	bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
+}
+
+/**
+ * lpfc_init_vpi - Initialize the INIT_VPI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vpi: VPI to be initialized.
+ *
+ * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the
+ * command to activate a virtual N_Port. The HBA assigns a MAC address to use
+ * with the virtual N Port.  The SLI Host issues this command before issuing a
+ * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a
+ * successful virtual NPort login.
+ **/
+void
+lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi)
+{
+	memset(mbox, 0, sizeof(*mbox));
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
+	bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi);
+}
+
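The comments above fix an ordering: INIT_VFI precedes FLOGI and INIT_VPI precedes FDISC, with REG_VFI/REG_VPI following a successful login. A compressed sketch of the NPIV half of that sequence; issuing and completion handling are elided, and passing the vpi_base-adjusted VPI is an assumption based on the other callers in this file:

	static void example_npiv_bringup(struct lpfcMboxq *mbox,
					 struct lpfc_vport *vport)
	{
		lpfc_init_vpi(mbox, vport->vpi + vport->phba->vpi_base);
		/* ... issue INIT_VPI, then send FDISC to the fabric ... */
		/* ... after FDISC succeeds, build REG_VPI via lpfc_reg_vpi() ... */
	}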
+/**
+ * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vfi: VFI to be unregistered.
+ *
+ * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
+ * (logical NPort) into the inactive state. The SLI Host must have logged out
+ * and unregistered all remote N_Ports to abort any activity on the virtual
+ * fabric. The SLI Port posts the mailbox response after marking the virtual
+ * fabric inactive.
+ **/
+void
+lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi)
+{
+	memset(mbox, 0, sizeof(*mbox));
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
+	bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi);
+}
+
+/**
+ * lpfc_dump_fcoe_param - Dump config region 23 to get FCoE parameters.
+ * @phba: pointer to the hba structure.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * This function creates a SLI4 dump mailbox command to dump FCoE
+ * parameters stored in region 23.
+ **/
+int
+lpfc_dump_fcoe_param(struct lpfc_hba *phba,
+		struct lpfcMboxq *mbox)
+{
+	struct lpfc_dmabuf *mp = NULL;
+	MAILBOX_t *mb;
+
+	memset(mbox, 0, sizeof(*mbox));
+	mb = &mbox->u.mb;
+
+	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (mp)
+		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+
+	if (!mp || !mp->virt) {
+		kfree(mp);
+		/* dump_fcoe_param failed to allocate memory */
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+			"2569 lpfc_dump_fcoe_param: memory"
+			" allocation failed\n");
+		return 1;
+	}
+
+	memset(mp->virt, 0, LPFC_BPL_SIZE);
+	INIT_LIST_HEAD(&mp->list);
+
+	/* save address for completion */
+	mbox->context1 = (uint8_t *) mp;
+
+	mb->mbxCommand = MBX_DUMP_MEMORY;
+	mb->un.varDmp.type = DMP_NV_PARAMS;
+	mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM;
+	mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE;
+	mb->un.varWords[3] = putPaddrLow(mp->phys);
+	mb->un.varWords[4] = putPaddrHigh(mp->phys);
+	return 0;
+}
+
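The dump command parks its DMA buffer in context1, so the completion side must free both the buffer and the mailbox; the cleanup below is the assumed pattern, not verbatim driver code:

	static void example_dump_fcoe_cleanup(struct lpfc_hba *phba,
					      struct lpfcMboxq *mbox)
	{
		struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)mbox->context1;

		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(mbox, phba->mbox_mem_pool);
	}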
+/**
+ * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
+ * @phba: pointer to the hba structure containing the FCF index and RQ ID.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The
+ * SLI Host uses the command to activate an FCF after it has acquired FCF
+ * information via a READ_FCF mailbox command. This mailbox command also is used
+ * to indicate where received unsolicited frames from this FCF will be sent. By
+ * default this routine will set up the FCF to forward all unsolicited frames
+ * to the RQ ID passed in the @phba. This can be overridden by the caller for
+ * more complicated setups.
+ **/
+void
+lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+	struct lpfc_mbx_reg_fcfi *reg_fcfi;
+
+	memset(mbox, 0, sizeof(*mbox));
+	reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
+	bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
+	bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
+	bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
+	bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
+	bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx);
+	/* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
+	bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
+		(~phba->fcf.addr_mode) & 0x3);
+	if (phba->fcf.fcf_flag & FCF_VALID_VLAN) {
+		bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
+		bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id);
+	}
+}
+
+/**
+ * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @fcfi: FCFI to be unregistered.
+ *
+ * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs).
+ * The SLI Host uses the command to inactivate an FCFI.
+ **/
+void
+lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
+{
+	memset(mbox, 0, sizeof(*mbox));
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
+	bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
+}
+
+/**
+ * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @ndlp: The nodelist structure that describes the RPI to resume.
+ *
+ * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a
+ * link event.
+ **/
+void
+lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_mbx_resume_rpi *resume_rpi;
+
+	memset(mbox, 0, sizeof(*mbox));
+	resume_rpi = &mbox->u.mqe.un.resume_rpi;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
+	bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi);
+	bf_set(lpfc_resume_rpi_vpi, resume_rpi,
+	       ndlp->vport->vpi + ndlp->vport->phba->vpi_base);
+	bf_set(lpfc_resume_rpi_vfi, resume_rpi,
+	       ndlp->vport->vfi + ndlp->vport->phba->vfi_base);
+}

+ 162 - 44
drivers/scsi/lpfc/lpfc_mem.c

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -28,8 +28,10 @@
 
 
 #include <scsi/scsi.h>

+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -45,7 +47,7 @@
  * @phba: HBA to allocate pools for
  *
  * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
- * lpfc_mbuf_pool, lpfc_hbq_pool.  Creates and allocates kmalloc-backed mempools
+ * lpfc_mbuf_pool, lpfc_hrb_pool.  Creates and allocates kmalloc-backed mempools
  * for LPFC_MBOXQ_t and lpfc_nodelist.  Also allocates the VPI bitmask.
  *
  * Notes: Not interrupt-safe.  Must be called with no locks held.  If any
@@ -56,19 +58,30 @@
  *   -ENOMEM on failure (if any memory allocations fail)
  **/
 int
-lpfc_mem_alloc(struct lpfc_hba * phba)
+lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 {
 	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
 	int longs;
 	int i;
-	phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool",
-				phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		phba->lpfc_scsi_dma_buf_pool =
+			pci_pool_create("lpfc_scsi_dma_buf_pool",
+				phba->pcidev,
+				phba->cfg_sg_dma_buf_size,
+				phba->cfg_sg_dma_buf_size,
+				0);
+	else
+		phba->lpfc_scsi_dma_buf_pool =
+			pci_pool_create("lpfc_scsi_dma_buf_pool",
+				phba->pcidev, phba->cfg_sg_dma_buf_size,
+				align, 0);
 	if (!phba->lpfc_scsi_dma_buf_pool)
 		goto fail;

 	phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
-							LPFC_BPL_SIZE, 8,0);
+							LPFC_BPL_SIZE,
+							align, 0);
 	if (!phba->lpfc_mbuf_pool)
 		goto fail_free_dma_buf_pool;
@@ -97,23 +110,31 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
 						sizeof(struct lpfc_nodelist));
 	if (!phba->nlp_mem_pool)
 		goto fail_free_mbox_pool;
-
-	phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev,
-					      LPFC_BPL_SIZE, 8, 0);
-	if (!phba->lpfc_hbq_pool)
+	phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
+					      phba->pcidev,
+					      LPFC_HDR_BUF_SIZE, align, 0);
+	if (!phba->lpfc_hrb_pool)
 		goto fail_free_nlp_mem_pool;
+	phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
+					      phba->pcidev,
+					      LPFC_DATA_BUF_SIZE, align, 0);
+	if (!phba->lpfc_drb_pool)
+		goto fail_free_hbq_pool;
 
 
 	/* vpi zero is reserved for the physical port so add 1 to max */
 	longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
 	phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
 	if (!phba->vpi_bmask)
-		goto fail_free_hbq_pool;
+		goto fail_free_dbq_pool;

 	return 0;
+ fail_free_dbq_pool:
+	pci_pool_destroy(phba->lpfc_drb_pool);
+	phba->lpfc_drb_pool = NULL;
  fail_free_hbq_pool:
-	lpfc_sli_hbqbuf_free_all(phba);
-	pci_pool_destroy(phba->lpfc_hbq_pool);
+	pci_pool_destroy(phba->lpfc_hrb_pool);
+	phba->lpfc_hrb_pool = NULL;
  fail_free_nlp_mem_pool:
 	mempool_destroy(phba->nlp_mem_pool);
 	phba->nlp_mem_pool = NULL;
@@ -136,27 +157,73 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
 }

 /**
- * lpfc_mem_free - Frees all PCI and memory allocated by lpfc_mem_alloc
+ * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
  * @phba: HBA to free memory for
  *
- * Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool,
- * lpfc_hbq_pool.  Frees kmalloc-backed mempools for LPFC_MBOXQ_t and
- * lpfc_nodelist.  Also frees the VPI bitmask
+ * Description: Free the memory allocated by lpfc_mem_alloc routine. This
+ * routine is the counterpart of lpfc_mem_alloc.
  *
  * Returns: None
  **/
 void
-lpfc_mem_free(struct lpfc_hba * phba)
+lpfc_mem_free(struct lpfc_hba *phba)
 {
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
-	LPFC_MBOXQ_t *mbox, *next_mbox;
-	struct lpfc_dmabuf   *mp;
 	int i;
+	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;

+	/* Free VPI bitmask memory */
 	kfree(phba->vpi_bmask);
+
+	/* Free HBQ pools */
 	lpfc_sli_hbqbuf_free_all(phba);
+	pci_pool_destroy(phba->lpfc_drb_pool);
+	phba->lpfc_drb_pool = NULL;
+	pci_pool_destroy(phba->lpfc_hrb_pool);
+	phba->lpfc_hrb_pool = NULL;
+
+	/* Free NLP memory pool */
+	mempool_destroy(phba->nlp_mem_pool);
+	phba->nlp_mem_pool = NULL;
+
+	/* Free mbox memory pool */
+	mempool_destroy(phba->mbox_mem_pool);
+	phba->mbox_mem_pool = NULL;
+
+	/* Free MBUF memory pool */
+	for (i = 0; i < pool->current_count; i++)
+		pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+			      pool->elements[i].phys);
+	kfree(pool->elements);
+
+	pci_pool_destroy(phba->lpfc_mbuf_pool);
+	phba->lpfc_mbuf_pool = NULL;
 
 
+	/* Free DMA buffer memory pool */
+	pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
+	phba->lpfc_scsi_dma_buf_pool = NULL;
+
+	return;
+}
+
+/**
+ * lpfc_mem_free_all - Frees all PCI and driver memory
+ * @phba: HBA to free memory for
+ *
+ * Description: Frees memory from the PCI and driver memory pools, including
+ * lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool and lpfc_hrb_pool. Frees
+ * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees
+ * the VPI bitmask.
+ *
+ * Returns: None
+ **/
+void
+lpfc_mem_free_all(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	LPFC_MBOXQ_t *mbox, *next_mbox;
+	struct lpfc_dmabuf   *mp;
+
+	/* Free memory used in mailbox queue back to mailbox memory pool */
 	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
 		mp = (struct lpfc_dmabuf *) (mbox->context1);
 		if (mp) {
@@ -166,6 +233,7 @@ lpfc_mem_free(struct lpfc_hba * phba)
 		list_del(&mbox->list);
 		mempool_free(mbox, phba->mbox_mem_pool);
 	}
+	/* Free memory used in mailbox cmpl list back to mailbox memory pool */
 	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
 		mp = (struct lpfc_dmabuf *) (mbox->context1);
 		if (mp) {
@@ -175,8 +243,10 @@ lpfc_mem_free(struct lpfc_hba * phba)
 		list_del(&mbox->list);
 		mempool_free(mbox, phba->mbox_mem_pool);
 	}
-
+	/* Free the active mailbox command back to the mailbox memory pool */
+	spin_lock_irq(&phba->hbalock);
 	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
 	if (psli->mbox_active) {
 		mbox = psli->mbox_active;
 		mp = (struct lpfc_dmabuf *) (mbox->context1);
@@ -188,27 +258,14 @@ lpfc_mem_free(struct lpfc_hba * phba)
 		psli->mbox_active = NULL;
 	}
 
 
-	for (i = 0; i < pool->current_count; i++)
-		pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
-						 pool->elements[i].phys);
-	kfree(pool->elements);
-
-	pci_pool_destroy(phba->lpfc_hbq_pool);
-	mempool_destroy(phba->nlp_mem_pool);
-	mempool_destroy(phba->mbox_mem_pool);
-
-	pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
-	pci_pool_destroy(phba->lpfc_mbuf_pool);
-
-	phba->lpfc_hbq_pool = NULL;
-	phba->nlp_mem_pool = NULL;
-	phba->mbox_mem_pool = NULL;
-	phba->lpfc_scsi_dma_buf_pool = NULL;
-	phba->lpfc_mbuf_pool = NULL;
+	/* Free and destroy all the allocated memory pools */
+	lpfc_mem_free(phba);
 
 
 	/* Free the iocb lookup array */
 	kfree(psli->iocbq_lookup);
 	psli->iocbq_lookup = NULL;
+
+	return;
 }

 /**
@@ -305,7 +362,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
  * lpfc_els_hbq_alloc - Allocate an HBQ buffer
  * @phba: HBA to allocate HBQ buffer for
  *
- * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI
+ * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI
  * pool along with a non-DMA-mapped container for it.
  *
  * Notes: Not interrupt-safe.  Must be called with no locks held.
@@ -323,7 +380,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
 	if (!hbqbp)
 		return NULL;

-	hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
+	hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
 					  &hbqbp->dbuf.phys);
 	if (!hbqbp->dbuf.virt) {
 		kfree(hbqbp);
@@ -334,7 +391,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
 }

 /**
- * lpfc_mem_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
+ * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
  * @phba: HBA buffer was allocated for
  * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
  *
@@ -348,11 +405,72 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
 void
 lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
 {
-	pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
+	pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
 	kfree(hbqbp);
 	return;
 }
 
 
+/**
+ * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
+ * @phba: HBA to allocate a receive buffer for
+ *
+ * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
+ * pool along with a non-DMA-mapped container for it.
+ *
+ * Notes: Not interrupt-safe.  Must be called with no locks held.
+ *
+ * Returns:
+ *   pointer to HBQ on success
+ *   NULL on failure
+ **/
+struct hbq_dmabuf *
+lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
+{
+	struct hbq_dmabuf *dma_buf;
+
+	dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+	if (!dma_buf)
+		return NULL;
+
+	dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+					    &dma_buf->hbuf.phys);
+	if (!dma_buf->hbuf.virt) {
+		kfree(dma_buf);
+		return NULL;
+	}
+	dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+					    &dma_buf->dbuf.phys);
+	if (!dma_buf->dbuf.virt) {
+		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+			      dma_buf->hbuf.phys);
+		kfree(dma_buf);
+		return NULL;
+	}
+	dma_buf->size = LPFC_BPL_SIZE;
+	return dma_buf;
+}
+
+/**
+ * lpfc_sli4_rb_free - Frees a receive buffer
+ * @phba: HBA buffer was allocated for
+ * @dmab: DMA Buffer container returned by lpfc_sli4_hbq_alloc
+ *
+ * Description: Frees both the container and the DMA-mapped buffers returned by
+ * lpfc_sli4_rb_alloc.
+ *
+ * Notes: Can be called with or without locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
+{
+	pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
+	pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
+	kfree(dmab);
+	return;
+}
+
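The header/data pages of an SLI4 receive buffer travel as a pair; a sketch of the alloc/free discipline, in which the posting step is an assumption:

	static int example_post_rb(struct lpfc_hba *phba)
	{
		struct hbq_dmabuf *rb = lpfc_sli4_rb_alloc(phba);

		if (!rb)
			return -ENOMEM;
		/* ... post rb->hbuf to the header RQ, rb->dbuf to the data RQ ... */
		/* on any posting failure, return both pages together: */
		lpfc_sli4_rb_free(phba, rb);
		return 0;
	}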
 /**
  * lpfc_in_buf_free - Free a DMA buffer
  * @phba: HBA buffer is associated with

+ 37 - 14
drivers/scsi/lpfc/lpfc_nportdisc.c

@@ -1,7 +1,7 @@
  /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -28,8 +28,10 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>

+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -361,7 +363,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	if (!mbox)
 		goto out;

-	rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID,
+	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
 			    (uint8_t *) sp, mbox, 0);
 	if (rc) {
 		mempool_free(mbox, phba->mbox_mem_pool);
@@ -495,11 +497,19 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
 	else
 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+	if ((ndlp->nlp_type & NLP_FABRIC) &&
+		vport->port_type == LPFC_NPIV_PORT) {
+		lpfc_linkdown_port(vport);
+		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_DELAY_TMO;
+		spin_unlock_irq(shost->host_lock);
 
 
-	if ((!(ndlp->nlp_type & NLP_FABRIC) &&
-	     ((ndlp->nlp_type & NLP_FCP_TARGET) ||
-	      !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
-	    (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
+		ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+	} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
+		((ndlp->nlp_type & NLP_FCP_TARGET) ||
+		!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
+		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
 		/* Only try to re-login if this is NOT a Fabric Node */
 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
 		spin_lock_irq(shost->host_lock);
@@ -567,7 +577,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

-	if (!ndlp->nlp_rpi) {
+	if (!(ndlp->nlp_flag & NLP_RPI_VALID)) {
 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
 		return 0;
 	}
@@ -857,7 +867,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
 
 
 	lpfc_unreg_rpi(vport, ndlp);

-	if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID,
+	if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
 			   (uint8_t *) sp, mbox, 0) == 0) {
 		switch (ndlp->nlp_DID) {
 		case NameServer_DID:
@@ -1068,6 +1078,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
 	struct lpfc_iocbq *cmdiocb, *rspiocb;
 	IOCB_t *irsp;
 	ADISC *ap;
+	int rc;

 	cmdiocb = (struct lpfc_iocbq *) arg;
 	rspiocb = cmdiocb->context_un.rsp_iocb;
@@ -1093,6 +1104,15 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
 		return ndlp->nlp_state;
 	}
 
 
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		rc = lpfc_sli4_resume_rpi(ndlp);
+		if (rc) {
+			/* Stay in state and retry. */
+			ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+			return ndlp->nlp_state;
+		}
+	}
+
 	if (ndlp->nlp_type & NLP_FCP_TARGET) {
 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
@@ -1100,6 +1120,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 	}
+
 	return ndlp->nlp_state;
 }
 
 
@@ -1190,7 +1211,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
 
 
 	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
 	if ((mb = phba->sli.mbox_active)) {
-		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
+		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
 			lpfc_nlp_put(ndlp);
 			mb->context2 = NULL;
@@ -1200,7 +1221,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
 
 
 	spin_lock_irq(&phba->hbalock);
 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
-		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
+		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
 			mp = (struct lpfc_dmabuf *) (mb->context1);
 			if (mp) {
@@ -1251,7 +1272,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 	uint32_t did  = mb->un.varWords[1];

 	if (mb->mbxStatus) {
@@ -1283,6 +1304,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
 	}

 	ndlp->nlp_rpi = mb->un.varWords[0];
+	ndlp->nlp_flag |= NLP_RPI_VALID;

 	/* Only if we are not a fabric nport do we issue PRLI */
 	if (!(ndlp->nlp_type & NLP_FABRIC)) {
@@ -1878,11 +1900,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
 			    void *arg, uint32_t evt)
 {
 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
-	MAILBOX_t    *mb = &pmb->mb;
+	MAILBOX_t    *mb = &pmb->u.mb;

-	if (!mb->mbxStatus)
+	if (!mb->mbxStatus) {
 		ndlp->nlp_rpi = mb->un.varWords[0];
-	else {
+		ndlp->nlp_flag |= NLP_RPI_VALID;
+	} else {
 		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
 			lpfc_drop_node(vport, ndlp);
 			return NLP_STE_FREED_NODE;

Diff suppressed because the file is too large
+ 766 - 118
drivers/scsi/lpfc/lpfc_scsi.c


+ 2 - 0
drivers/scsi/lpfc/lpfc_scsi.h

@@ -140,6 +140,8 @@ struct lpfc_scsi_buf {
 	struct fcp_rsp *fcp_rsp;
 	struct ulp_bde64 *fcp_bpl;

+	dma_addr_t dma_phys_bpl;
+
 	/* cur_iocbq has phys of the dma-able buffer.
 	 * Iotag is in here
 	 */

Diff suppressed because the file is too large
+ 686 - 74
drivers/scsi/lpfc/lpfc_sli.c


+ 23 - 6
drivers/scsi/lpfc/lpfc_sli.h

@@ -29,13 +29,23 @@ typedef enum _lpfc_ctx_cmd {
 	LPFC_CTX_HOST
 } lpfc_ctx_cmd;
 
 
+/* This structure is used to carry the needed response IOCB states */
+struct lpfc_sli4_rspiocb_info {
+	uint8_t hw_status;
+	uint8_t bfield;
+#define LPFC_XB	0x1
+#define LPFC_PV	0x2
+	uint8_t priority;
+	uint8_t reserved;
+};
+
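A hedged example of consuming the carried response state through the sli4_info member added to lpfc_iocbq below; the accessor is hypothetical:

	static inline bool example_xri_busy(const struct lpfc_iocbq *iocbq)
	{
		/* LPFC_XB set means the exchange (XRI) is still busy at the port */
		return (iocbq->sli4_info.bfield & LPFC_XB) != 0;
	}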
 /* This structure is used to handle IOCB requests / responses */
 struct lpfc_iocbq {
 	/* lpfc_iocbqs are used in double linked lists */
 	struct list_head list;
 	struct list_head clist;
 	uint16_t iotag;         /* pre-assigned IO tag */
-	uint16_t rsvd1;
+	uint16_t sli4_xritag;   /* pre-assigned XRI, (OXID) tag. */

 	IOCB_t iocb;		/* IOCB cmd */
 	uint8_t retry;		/* retry counter for IOCB cmd - if needed */
@@ -65,7 +75,7 @@ struct lpfc_iocbq {
 			   struct lpfc_iocbq *);
 	void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
 			   struct lpfc_iocbq *);
-
+	struct lpfc_sli4_rspiocb_info sli4_info;
 };

 #define SLI_IOCB_RET_IOCB      1	/* Return IOCB if cmd ring full */
@@ -81,14 +91,18 @@ struct lpfc_iocbq {
 typedef struct lpfcMboxq {
 	/* MBOXQs are used in single linked lists */
 	struct list_head list;	/* ptr to next mailbox command */
-	MAILBOX_t mb;		/* Mailbox cmd */
-	struct lpfc_vport *vport;/* virutal port pointer */
+	union {
+		MAILBOX_t mb;		/* Mailbox cmd */
+		struct lpfc_mqe mqe;
+	} u;
+	struct lpfc_vport *vport;/* virtual port pointer */
 	void *context1;		/* caller context information */
 	void *context2;		/* caller context information */

 	void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
 	uint8_t mbox_flag;
-
+	struct lpfc_mcqe mcqe;
+	struct lpfc_mbx_nembed_sge_virt *sge_array;
 } LPFC_MBOXQ_t;

 #define MBX_POLL        1	/* poll mailbox till command done, then
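The union forces callers to pick the right view of the command; a sketch of the dispatch this change implies, assembled from accessors used elsewhere in the series:

	static uint32_t example_mbox_command(struct lpfc_hba *phba,
					     LPFC_MBOXQ_t *pmb)
	{
		if (phba->sli_rev == LPFC_SLI_REV4)
			return bf_get(lpfc_mqe_command, &pmb->u.mqe); /* MQE view */
		return pmb->u.mb.mbxCommand;			      /* legacy view */
	}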
@@ -230,10 +244,11 @@ struct lpfc_sli {
 
 
 	/* Additional sli_flags */
 #define LPFC_SLI_MBOX_ACTIVE      0x100	/* HBA mailbox is currently active */
-#define LPFC_SLI2_ACTIVE          0x200	/* SLI2 overlay in firmware is active */
+#define LPFC_SLI_ACTIVE           0x200	/* SLI in firmware is active */
 #define LPFC_PROCESS_LA           0x400	/* Able to process link attention */
 #define LPFC_BLOCK_MGMT_IO        0x800	/* Don't allow mgmt mbx or iocb cmds */
 #define LPFC_MENLO_MAINT          0x1000 /* need for menl fw download */
+#define LPFC_SLI_ASYNC_MBX_BLK    0x2000 /* Async mailbox is blocked */
+#define LPFC_SLI_ASYNC_MBX_BLK    0x2000 /* Async mailbox is blocked */
 
 
 	struct lpfc_sli_ring ring[LPFC_MAX_RING];
 	int fcp_ring;		/* ring used for FCP initiator commands */
@@ -261,6 +276,8 @@ struct lpfc_sli {
 
 
 #define LPFC_MBOX_TMO           30	/* Sec tmo for outstanding mbox
 					   command */
+#define LPFC_MBOX_SLI4_CONFIG_TMO 60	/* Sec tmo for outstanding mbox
+					   command */
 #define LPFC_MBOX_TMO_FLASH_CMD 300     /* Sec tmo for outstanding FLASH write
 					 * or erase cmds. This is especially
 					 * long because of the potential of

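Note: the LPFC_MBOXQ_t hunk above folds the SLI-3 mailbox and the new SLI-4 mailbox queue entry into one union, so both layouts share the same storage. A minimal sketch of what callers now do (the lpfc_vport.c hunk further down makes exactly this `pmb->mb` to `pmb->u.mb` conversion; the mempool allocation shown here is the usual lpfc idiom, included only for context):

	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	MAILBOX_t *mb = &pmb->u.mb;         /* SLI-3 view of the command */
	struct lpfc_mqe *mqe = &pmb->u.mqe; /* SLI-4 view of the same bytes */
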
+ 467 - 0
drivers/scsi/lpfc/lpfc_sli4.h

@@ -0,0 +1,467 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2009 Emulex.  All rights reserved.                *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#define LPFC_ACTIVE_MBOX_WAIT_CNT               100
+#define LPFC_RELEASE_NOTIFICATION_INTERVAL	32
+#define LPFC_GET_QE_REL_INT			32
+#define LPFC_RPI_LOW_WATER_MARK			10
+/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
+#define LPFC_NEMBED_MBOX_SGL_CNT		254
+
+/* Multi-queue arrangement for fast-path FCP work queues */
+#define LPFC_FN_EQN_MAX       8
+#define LPFC_SP_EQN_DEF       1
+#define LPFC_FP_EQN_DEF       1
+#define LPFC_FP_EQN_MIN       1
+#define LPFC_FP_EQN_MAX       (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
+
+#define LPFC_FN_WQN_MAX       32
+#define LPFC_SP_WQN_DEF       1
+#define LPFC_FP_WQN_DEF       4
+#define LPFC_FP_WQN_MIN       1
+#define LPFC_FP_WQN_MAX       (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
+
+/*
+ * Provide the default FCF Record attributes used by the driver
+ * when nonFIP mode is configured and there is no other default
+ * FCF Record attributes.
+ */
+#define LPFC_FCOE_FCF_DEF_INDEX	0
+#define LPFC_FCOE_FCF_GET_FIRST	0xFFFF
+#define LPFC_FCOE_FCF_NEXT_NONE	0xFFFF
+
+/* First 3 bytes of default FCF MAC is specified by FC_MAP */
+#define LPFC_FCOE_FCF_MAC3	0xFF
+#define LPFC_FCOE_FCF_MAC4	0xFF
+#define LPFC_FCOE_FCF_MAC5	0xFE
+#define LPFC_FCOE_FCF_MAP0	0x0E
+#define LPFC_FCOE_FCF_MAP1	0xFC
+#define LPFC_FCOE_FCF_MAP2	0x00
+#define LPFC_FCOE_MAX_RCV_SIZE	0x5AC
+#define LPFC_FCOE_FKA_ADV_PER	0
+#define LPFC_FCOE_FIP_PRIORITY	0x80
+
+enum lpfc_sli4_queue_type {
+	LPFC_EQ,
+	LPFC_GCQ,
+	LPFC_MCQ,
+	LPFC_WCQ,
+	LPFC_RCQ,
+	LPFC_MQ,
+	LPFC_WQ,
+	LPFC_HRQ,
+	LPFC_DRQ
+};
+
+/* The queue sub-type defines the functional purpose of the queue */
+enum lpfc_sli4_queue_subtype {
+	LPFC_NONE,
+	LPFC_MBOX,
+	LPFC_FCP,
+	LPFC_ELS,
+	LPFC_USOL
+};
+
+union sli4_qe {
+	void *address;
+	struct lpfc_eqe *eqe;
+	struct lpfc_cqe *cqe;
+	struct lpfc_mcqe *mcqe;
+	struct lpfc_wcqe_complete *wcqe_complete;
+	struct lpfc_wcqe_release *wcqe_release;
+	struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
+	struct lpfc_rcqe_complete *rcqe_complete;
+	struct lpfc_mqe *mqe;
+	union  lpfc_wqe *wqe;
+	struct lpfc_rqe *rqe;
+};
+
+struct lpfc_queue {
+	struct list_head list;
+	enum lpfc_sli4_queue_type type;
+	enum lpfc_sli4_queue_subtype subtype;
+	struct lpfc_hba *phba;
+	struct list_head child_list;
+	uint32_t entry_count;	/* Number of entries to support on the queue */
+	uint32_t entry_size;	/* Size of each queue entry. */
+	uint32_t queue_id;	/* Queue ID assigned by the hardware */
+	struct list_head page_list;
+	uint32_t page_count;	/* Number of pages allocated for this queue */
+
+	uint32_t host_index;	/* The host's index for putting or getting */
+	uint32_t hba_index;	/* The last known hba index for get or put */
+	union sli4_qe qe[1];	/* array to index entries (must be last) */
+};
+
+struct lpfc_cq_event {
+	struct list_head list;
+	union {
+		struct lpfc_mcqe		mcqe_cmpl;
+		struct lpfc_acqe_link		acqe_link;
+		struct lpfc_acqe_fcoe		acqe_fcoe;
+		struct lpfc_acqe_dcbx		acqe_dcbx;
+		struct lpfc_rcqe		rcqe_cmpl;
+		struct sli4_wcqe_xri_aborted	wcqe_axri;
+	} cqe;
+};
+
+struct lpfc_sli4_link {
+	uint8_t speed;
+	uint8_t duplex;
+	uint8_t status;
+	uint8_t physical;
+	uint8_t fault;
+};
+
+struct lpfc_fcf {
+	uint8_t	 fabric_name[8];
+	uint8_t  mac_addr[6];
+	uint16_t fcf_indx;
+	uint16_t fcfi;
+	uint32_t fcf_flag;
+#define FCF_AVAILABLE	0x01 /* FCF available for discovery */
+#define FCF_REGISTERED	0x02 /* FCF registered with FW */
+#define FCF_DISCOVERED	0x04 /* FCF discovery started  */
+#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */
+#define FCF_IN_USE	0x10 /* Atleast one discovery completed */
+#define FCF_VALID_VLAN	0x20 /* Use the vlan id specified */
+	uint32_t priority;
+	uint32_t addr_mode;
+	uint16_t vlan_id;
+};
+
+#define LPFC_REGION23_SIGNATURE "RG23"
+#define LPFC_REGION23_VERSION	1
+#define LPFC_REGION23_LAST_REC  0xff
+struct lpfc_fip_param_hdr {
+	uint8_t type;
+#define FCOE_PARAM_TYPE		0xA0
+	uint8_t length;
+#define FCOE_PARAM_LENGTH	2
+	uint8_t parm_version;
+#define FIPP_VERSION		0x01
+	uint8_t parm_flags;
+#define	lpfc_fip_param_hdr_fipp_mode_SHIFT	6
+#define	lpfc_fip_param_hdr_fipp_mode_MASK	0x3
+#define lpfc_fip_param_hdr_fipp_mode_WORD	parm_flags
+#define	FIPP_MODE_ON				0x2
+#define	FIPP_MODE_OFF				0x0
+#define FIPP_VLAN_VALID				0x1
+};
+
+struct lpfc_fcoe_params {
+	uint8_t fc_map[3];
+	uint8_t reserved1;
+	uint16_t vlan_tag;
+	uint8_t reserved[2];
+};
+
+struct lpfc_fcf_conn_hdr {
+	uint8_t type;
+#define FCOE_CONN_TBL_TYPE		0xA1
+	uint8_t length;   /* words */
+	uint8_t reserved[2];
+};
+
+struct lpfc_fcf_conn_rec {
+	uint16_t flags;
+#define	FCFCNCT_VALID		0x0001
+#define	FCFCNCT_BOOT		0x0002
+#define	FCFCNCT_PRIMARY		0x0004   /* if not set, Secondary */
+#define	FCFCNCT_FBNM_VALID	0x0008
+#define	FCFCNCT_SWNM_VALID	0x0010
+#define	FCFCNCT_VLAN_VALID	0x0020
+#define	FCFCNCT_AM_VALID	0x0040
+#define	FCFCNCT_AM_PREFERRED	0x0080   /* if not set, AM Required */
+#define	FCFCNCT_AM_SPMA		0x0100	 /* if not set, FPMA */
+
+	uint16_t vlan_tag;
+	uint8_t fabric_name[8];
+	uint8_t switch_name[8];
+};
+
+struct lpfc_fcf_conn_entry {
+	struct list_head list;
+	struct lpfc_fcf_conn_rec conn_rec;
+};
+
+/*
+ * Define the host's bootstrap mailbox.  This structure contains
+ * the member attributes needed to create, use, and destroy the
+ * bootstrap mailbox region.
+ *
+ * The macro definitions for the bmbx data structure are defined
+ * in lpfc_hw4.h with the register definition.
+ */
+struct lpfc_bmbx {
+	struct lpfc_dmabuf *dmabuf;
+	struct dma_address dma_address;
+	void *avirt;
+	dma_addr_t aphys;
+	uint32_t bmbx_size;
+};
+
+#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4
+
+#define LPFC_EQE_SIZE_4B 	4
+#define LPFC_EQE_SIZE_16B	16
+#define LPFC_CQE_SIZE		16
+#define LPFC_WQE_SIZE		64
+#define LPFC_MQE_SIZE		256
+#define LPFC_RQE_SIZE		8
+
+#define LPFC_EQE_DEF_COUNT	1024
+#define LPFC_CQE_DEF_COUNT      256
+#define LPFC_WQE_DEF_COUNT      64
+#define LPFC_MQE_DEF_COUNT      16
+#define LPFC_RQE_DEF_COUNT	512
+
+#define LPFC_QUEUE_NOARM	false
+#define LPFC_QUEUE_REARM	true
+
+
+/*
+ * SLI4 CT field defines
+ */
+#define SLI4_CT_RPI 0
+#define SLI4_CT_VPI 1
+#define SLI4_CT_VFI 2
+#define SLI4_CT_FCFI 3
+
+#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000
+
+/*
+ * SLI4 specific data structures
+ */
+struct lpfc_max_cfg_param {
+	uint16_t max_xri;
+	uint16_t xri_base;
+	uint16_t xri_used;
+	uint16_t max_rpi;
+	uint16_t rpi_base;
+	uint16_t rpi_used;
+	uint16_t max_vpi;
+	uint16_t vpi_base;
+	uint16_t vpi_used;
+	uint16_t max_vfi;
+	uint16_t vfi_base;
+	uint16_t vfi_used;
+	uint16_t max_fcfi;
+	uint16_t fcfi_base;
+	uint16_t fcfi_used;
+	uint16_t max_eq;
+	uint16_t max_rq;
+	uint16_t max_cq;
+	uint16_t max_wq;
+};
+
+struct lpfc_hba;
+/* SLI4 HBA multi-fcp queue handler struct */
+struct lpfc_fcp_eq_hdl {
+	uint32_t idx;
+	struct lpfc_hba *phba;
+};
+
+/* SLI4 HBA data structure entries */
+struct lpfc_sli4_hba {
+	void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
+					     PCI BAR0, config space registers */
+	void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for
+					     PCI BAR1, control registers */
+	void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
+					     PCI BAR2, doorbell registers */
+	/* BAR0 PCI config space register memory map */
+	void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */
+	void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
+	void __iomem *ONLINE0regaddr; /* Address to components of internal UE */
+	void __iomem *ONLINE1regaddr; /* Address to components of internal UE */
+#define LPFC_ONLINE_NERR	0xFFFFFFFF
+	void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */
+	/* BAR1 FCoE function CSR register memory map */
+	void __iomem *STAregaddr;    /* Address to HST_STATE register */
+	void __iomem *ISRregaddr;    /* Address to HST_ISR register */
+	void __iomem *IMRregaddr;    /* Address to HST_IMR register */
+	void __iomem *ISCRregaddr;   /* Address to HST_ISCR register */
+	/* BAR2 VF-0 doorbell register memory map */
+	void __iomem *RQDBregaddr;   /* Address to RQ_DOORBELL register */
+	void __iomem *WQDBregaddr;   /* Address to WQ_DOORBELL register */
+	void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */
+	void __iomem *MQDBregaddr;   /* Address to MQ_DOORBELL register */
+	void __iomem *BMBXregaddr;   /* Address to BootStrap MBX register */
+
+	struct msix_entry *msix_entries;
+	uint32_t cfg_eqn;
+	struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
+	/* Pointers to the constructed SLI4 queues */
+	struct lpfc_queue **fp_eq; /* Fast-path event queue */
+	struct lpfc_queue *sp_eq;  /* Slow-path event queue */
+	struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
+	struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
+	struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
+	struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
+	struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
+	struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
+	struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
+	struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
+	struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */
+
+	/* Setup information for various queue parameters */
+	int eq_esize;
+	int eq_ecount;
+	int cq_esize;
+	int cq_ecount;
+	int wq_esize;
+	int wq_ecount;
+	int mq_esize;
+	int mq_ecount;
+	int rq_esize;
+	int rq_ecount;
+#define LPFC_SP_EQ_MAX_INTR_SEC         10000
+#define LPFC_FP_EQ_MAX_INTR_SEC         10000
+
+	uint32_t intr_enable;
+	struct lpfc_bmbx bmbx;
+	struct lpfc_max_cfg_param max_cfg_param;
+	uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
+	uint16_t next_rpi;
+	uint16_t scsi_xri_max;
+	uint16_t scsi_xri_cnt;
+	struct list_head lpfc_free_sgl_list;
+	struct list_head lpfc_sgl_list;
+	struct lpfc_sglq **lpfc_els_sgl_array;
+	struct list_head lpfc_abts_els_sgl_list;
+	struct lpfc_scsi_buf **lpfc_scsi_psb_array;
+	struct list_head lpfc_abts_scsi_buf_list;
+	uint32_t total_sglq_bufs;
+	struct lpfc_sglq **lpfc_sglq_active_list;
+	struct list_head lpfc_rpi_hdr_list;
+	unsigned long *rpi_bmask;
+	uint16_t rpi_count;
+	struct lpfc_sli4_flags sli4_flags;
+	struct list_head sp_rspiocb_work_queue;
+	struct list_head sp_cqe_event_pool;
+	struct list_head sp_asynce_work_queue;
+	struct list_head sp_fcp_xri_aborted_work_queue;
+	struct list_head sp_els_xri_aborted_work_queue;
+	struct list_head sp_unsol_work_queue;
+	struct lpfc_sli4_link link_state;
+	spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
+	spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
+};
+
+enum lpfc_sge_type {
+	GEN_BUFF_TYPE,
+	SCSI_BUFF_TYPE
+};
+
+struct lpfc_sglq {
+	/* lpfc_sglqs are used in double linked lists */
+	struct list_head list;
+	struct list_head clist;
+	enum lpfc_sge_type buff_type; /* is this a scsi sgl */
+	uint16_t iotag;         /* pre-assigned IO tag */
+	uint16_t sli4_xritag;   /* pre-assigned XRI, (OXID) tag. */
+	struct sli4_sge *sgl;	/* pre-assigned SGL */
+	void *virt;		/* virtual address. */
+	dma_addr_t phys;	/* physical address */
+};
+
+struct lpfc_rpi_hdr {
+	struct list_head list;
+	uint32_t len;
+	struct lpfc_dmabuf *dmabuf;
+	uint32_t page_count;
+	uint32_t start_rpi;
+};
+
+/*
+ * SLI4 specific function prototypes
+ */
+int lpfc_pci_function_reset(struct lpfc_hba *);
+int lpfc_sli4_hba_setup(struct lpfc_hba *);
+int lpfc_sli4_hba_down(struct lpfc_hba *);
+int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
+		     uint8_t, uint32_t, bool);
+void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
+void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
+			   struct lpfc_mbx_sge *);
+
+void lpfc_sli4_hba_reset(struct lpfc_hba *);
+struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
+			uint32_t);
+void lpfc_sli4_queue_free(struct lpfc_queue *);
+uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
+uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
+			struct lpfc_queue *, uint32_t, uint32_t);
+uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
+			struct lpfc_queue *, uint32_t);
+uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
+			struct lpfc_queue *, uint32_t);
+uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
+			struct lpfc_queue *, struct lpfc_queue *, uint32_t);
+uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
+			 struct lpfc_queue *);
+int lpfc_sli4_queue_setup(struct lpfc_hba *);
+void lpfc_sli4_queue_unset(struct lpfc_hba *);
+int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
+int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
+int lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *);
+uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
+int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
+int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba);
+int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
+struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
+struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
+void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
+void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
+int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *);
+int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *);
+int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
+struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
+void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
+int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
+void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
+void lpfc_sli4_remove_rpis(struct lpfc_hba *);
+void lpfc_sli4_async_event_proc(struct lpfc_hba *);
+int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
+void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
+			       struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
+			       struct sli4_wcqe_xri_aborted *);
+int lpfc_sli4_brdreset(struct lpfc_hba *);
+int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
+void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
+int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
+int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
+uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
+uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
+void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t);
+void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_post_status_check(struct lpfc_hba *);
+uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
+

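Note: in the new lpfc_sli4.h above, `union sli4_qe qe[1]` must remain the last member of struct lpfc_queue: the driver over-allocates the structure so the one-element array extends to entry_count slots. A sketch of the sizing under that idiom (the real allocation lives in lpfc_sli4_queue_alloc(), which is only prototyped here; the kzalloc form below is an assumption, not the driver's code):

	struct lpfc_queue *queue;

	/* one struct plus entry_count-1 extra queue-entry slots */
	queue = kzalloc(sizeof(struct lpfc_queue) +
			(entry_count - 1) * sizeof(union sli4_qe), GFP_KERNEL);
	queue->entry_count = entry_count;
	/* queue->qe[i].eqe, queue->qe[i].wqe, ... now alias entry i */
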
+ 1 - 1
drivers/scsi/lpfc/lpfc_version.h

@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.1"
+#define LPFC_DRIVER_VERSION "8.3.2"
 
 #define LPFC_DRIVER_NAME		"lpfc"
 #define LPFC_SP_DRIVER_HANDLER_NAME	"lpfc:sp"

+ 46 - 16
drivers/scsi/lpfc/lpfc_vport.c

@@ -32,8 +32,10 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -89,6 +91,8 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
 		vpi = 0;
 	else
 		set_bit(vpi, phba->vpi_bmask);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		phba->sli4_hba.max_cfg_param.vpi_used++;
 	spin_unlock_irq(&phba->hbalock);
 	return vpi;
 }
@@ -96,8 +100,12 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
 static void
 lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
 {
+	if (vpi == 0)
+		return;
 	spin_lock_irq(&phba->hbalock);
 	clear_bit(vpi, phba->vpi_bmask);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		phba->sli4_hba.max_cfg_param.vpi_used--;
 	spin_unlock_irq(&phba->hbalock);
 }
 
@@ -113,7 +121,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
 	if (!pmb) {
 		return -ENOMEM;
 	}
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 
 	lpfc_read_sparam(phba, pmb, vport->vpi);
 	/*
@@ -243,23 +251,22 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
 		    (vport->fc_flag & wait_flags)  ||
 		    ((vport->port_state > LPFC_VPORT_FAILED) &&
 		     (vport->port_state < LPFC_VPORT_READY))) {
-			lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
 					"1833 Vport discovery quiesce Wait:"
-					" vpi x%x state x%x fc_flags x%x"
+					" state x%x fc_flags x%x"
 					" num_nodes x%x, waiting 1000 msecs"
 					" total wait msecs x%x\n",
-					vport->vpi, vport->port_state,
-					vport->fc_flag, vport->num_disc_nodes,
+					vport->port_state, vport->fc_flag,
+					vport->num_disc_nodes,
 					jiffies_to_msecs(jiffies - start_time));
 			msleep(1000);
 		} else {
 			/* Base case.  Wait variants satisfied.  Break out */
-			lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
 					 "1834 Vport discovery quiesced:"
-					 " vpi x%x state x%x fc_flags x%x"
+					 " state x%x fc_flags x%x"
 					 " wait msecs x%x\n",
-					 vport->vpi, vport->port_state,
-					 vport->fc_flag,
+					 vport->port_state, vport->fc_flag,
 					 jiffies_to_msecs(jiffies
 						- start_time));
 			break;
@@ -267,12 +274,10 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
 	}
 
 	if (time_after(jiffies, wait_time_max))
-		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
 				"1835 Vport discovery quiesce failed:"
-				" vpi x%x state x%x fc_flags x%x"
-				" wait msecs x%x\n",
-				vport->vpi, vport->port_state,
-				vport->fc_flag,
+				" state x%x fc_flags x%x wait msecs x%x\n",
+				vport->port_state, vport->fc_flag,
 				jiffies_to_msecs(jiffies - start_time));
 }
 
@@ -308,6 +313,21 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
 		goto error_out;
 	}
 
+	/*
+	 * In SLI4, the vpi must be activated before it can be used
+	 * by the port.
+	 */
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		rc = lpfc_sli4_init_vpi(phba, vpi);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+					"1838 Failed to INIT_VPI on vpi %d "
+					"status %d\n", vpi, rc);
+			rc = VPORT_NORESOURCES;
+			lpfc_free_vpi(phba, vpi);
+			goto error_out;
+		}
+	}
 
 	/* Assign an unused board number */
 	if ((instance = lpfc_get_instance()) < 0) {
@@ -535,6 +555,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
 				 "physical host\n");
 				 "physical host\n");
 		return VPORT_ERROR;
 		return VPORT_ERROR;
 	}
 	}
+
+	/* If the vport is a static vport fail the deletion. */
+	if ((vport->vport_flag & STATIC_VPORT) &&
+		!(phba->pport->load_flag & FC_UNLOADING)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+				 "1837 vport_delete failed: Cannot delete "
+				 "static vport.\n");
+		return VPORT_ERROR;
+	}
+
 	/*
 	/*
 	 * If we are not unloading the driver then prevent the vport_delete
 	 * If we are not unloading the driver then prevent the vport_delete
 	 * from happening until after this vport's discovery is finished.
 	 * from happening until after this vport's discovery is finished.
@@ -710,7 +740,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
 	struct lpfc_vport *port_iterator;
 	struct lpfc_vport **vports;
 	int index = 0;
-	vports = kzalloc((phba->max_vpi + 1) * sizeof(struct lpfc_vport *),
+	vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *),
 			 GFP_KERNEL);
 	if (vports == NULL)
 		return NULL;
@@ -734,7 +764,7 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
 	int i;
 	if (vports == NULL)
 		return;
-	for (i=0; vports[i] != NULL && i <= phba->max_vpi; i++)
+	for (i = 0; vports[i] != NULL && i <= phba->max_vports; i++)
 		scsi_host_put(lpfc_shost_from_vport(vports[i]));
 	kfree(vports);
 }

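Note: two SLI-4 rules show up in the lpfc_vport.c hunks above: vpi 0 belongs to the physical port (hence the early return added to lpfc_free_vpi()), and a vpi must be activated with INIT_VPI before a vport may use it. A condensed sketch of the create path under those rules, assembled from the hunks (not a verbatim excerpt):

	vpi = lpfc_alloc_vpi(phba);          /* bumps vpi_used under hbalock */
	if (phba->sli_rev == LPFC_SLI_REV4 &&
	    lpfc_sli4_init_vpi(phba, vpi)) { /* mailbox INIT_VPI */
		lpfc_free_vpi(phba, vpi);    /* no-op for vpi 0 */
		return VPORT_NORESOURCES;
	}
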
+ 3 - 2
drivers/scsi/mpt2sas/mpt2sas_base.h

@@ -61,6 +61,7 @@
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_transport_sas.h>
 #include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
 
 #include "mpt2sas_debug.h"
 
@@ -68,10 +69,10 @@
 #define MPT2SAS_DRIVER_NAME		"mpt2sas"
 #define MPT2SAS_AUTHOR	"LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION	"LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION		"01.100.02.00"
+#define MPT2SAS_DRIVER_VERSION		"01.100.03.00"
 #define MPT2SAS_MAJOR_VERSION		01
 #define MPT2SAS_MINOR_VERSION		100
-#define MPT2SAS_BUILD_VERSION		02
+#define MPT2SAS_BUILD_VERSION		03
 #define MPT2SAS_RELEASE_VERSION		00
 
 /*

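Note: the new <scsi/scsi_eh.h> include above supplies the sense-buffer helper that the EEDP error path added to mpt2sas_scsih.c (below) relies on; the call in question, for reference:

	scsi_build_sense_buffer(0, scmd->sense_buffer, sk, 0x10, ascq);
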
+ 21 - 11
drivers/scsi/mpt2sas/mpt2sas_ctl.c

@@ -473,7 +473,7 @@ _ctl_poll(struct file *filep, poll_table *wait)
 }
 
 /**
- * _ctl_do_task_abort - assign an active smid to the abort_task
+ * _ctl_set_task_mid - assign an active smid to tm request
  * @ioc: per adapter object
  * @karg - (struct mpt2_ioctl_command)
  * @tm_request - pointer to mf from user space
@@ -482,7 +482,7 @@ _ctl_poll(struct file *filep, poll_table *wait)
  * during failure, the reply frame is filled.
  */
 static int
-_ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
+_ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
     Mpi2SCSITaskManagementRequest_t *tm_request)
 {
 	u8 found = 0;
@@ -494,6 +494,14 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
 	Mpi2SCSITaskManagementReply_t *tm_reply;
 	u32 sz;
 	u32 lun;
+	char *desc = NULL;
+
+	if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
+		desc = "abort_task";
+	else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
+		desc = "query_task";
+	else
+		return 0;
 
 	lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
 
@@ -517,13 +525,13 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 
 	if (!found) {
-		dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: "
-		    "DevHandle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
-		    tm_request->DevHandle, lun));
+		dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
+		    "handle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
+		    desc, tm_request->DevHandle, lun));
 		tm_reply = ioc->ctl_cmds.reply;
 		tm_reply->DevHandle = tm_request->DevHandle;
 		tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
-		tm_reply->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
+		tm_reply->TaskType = tm_request->TaskType;
 		tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
 		tm_reply->VP_ID = tm_request->VP_ID;
 		tm_reply->VF_ID = tm_request->VF_ID;
@@ -535,9 +543,9 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
 		return 1;
 	}
 
-	dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: "
-	    "DevHandle(0x%04x), lun(%d), smid(%d)\n", ioc->name,
-	    tm_request->DevHandle, lun, tm_request->TaskMID));
+	dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
+	    "handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
+	    desc, tm_request->DevHandle, lun, tm_request->TaskMID));
 	return 0;
 }
 
@@ -739,8 +747,10 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
 		    (Mpi2SCSITaskManagementRequest_t *)mpi_request;
 
 		if (tm_request->TaskType ==
-		    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
-			if (_ctl_do_task_abort(ioc, &karg, tm_request)) {
+		    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
+		    tm_request->TaskType ==
+		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
+			if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
 				mpt2sas_base_free_smid(ioc, smid);
 				goto out;
 			}

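Note: the mpt2sas_ctl.c change generalizes the old abort-only helper: _ctl_set_task_mid() now resolves the active mid for both abort_task and query_task, and returns success untouched for every other task type. The dispatch in _ctl_do_mpt_command() is equivalent to this sketch (a switch restatement of the if-chain in the hunk above, not driver code):

	switch (tm_request->TaskType) {
	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
		if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
			/* no active mid: reply frame already filled */
			mpt2sas_base_free_smid(ioc, smid);
			goto out;
		}
		break;
	default:
		break;	/* other TM types need no mid lookup */
	}
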
+ 294 - 69
drivers/scsi/mpt2sas/mpt2sas_scsih.c

@@ -197,12 +197,12 @@ static struct pci_device_id scsih_pci_table[] = {
 MODULE_DEVICE_TABLE(pci, scsih_pci_table);
 
 /**
- * scsih_set_debug_level - global setting of ioc->logging_level.
+ * _scsih_set_debug_level - global setting of ioc->logging_level.
  *
  * Note: The logging levels are defined in mpt2sas_debug.h.
  */
 static int
-scsih_set_debug_level(const char *val, struct kernel_param *kp)
+_scsih_set_debug_level(const char *val, struct kernel_param *kp)
 {
 	int ret = param_set_int(val, kp);
 	struct MPT2SAS_ADAPTER *ioc;
@@ -215,7 +215,7 @@ scsih_set_debug_level(const char *val, struct kernel_param *kp)
 		ioc->logging_level = logging_level;
 	return 0;
 }
-module_param_call(logging_level, scsih_set_debug_level, param_get_int,
+module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
     &logging_level, 0644);
 
 /**
@@ -883,6 +883,41 @@ _scsih_scsi_lookup_find_by_target(struct MPT2SAS_ADAPTER *ioc, int id,
 	return found;
 }
 
+/**
+ * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
+ * @ioc: per adapter object
+ * @id: target id
+ * @lun: lun number
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id:lun in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_lun(struct MPT2SAS_ADAPTER *ioc, int id,
+    unsigned int lun, int channel)
+{
+	u8 found;
+	unsigned long	flags;
+	int i;
+
+	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+	found = 0;
+	for (i = 0 ; i < ioc->request_depth; i++) {
+		if (ioc->scsi_lookup[i].scmd &&
+		    (ioc->scsi_lookup[i].scmd->device->id == id &&
+		    ioc->scsi_lookup[i].scmd->device->channel == channel &&
+		    ioc->scsi_lookup[i].scmd->device->lun == lun)) {
+			found = 1;
+			goto out;
+		}
+	}
+ out:
+	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+	return found;
+}
+
 /**
  * _scsih_get_chain_buffer_dma - obtain block of chains (dma address)
  * @ioc: per adapter object
@@ -1047,14 +1082,14 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
 }
 
 /**
- * scsih_change_queue_depth - setting device queue depth
+ * _scsih_change_queue_depth - setting device queue depth
  * @sdev: scsi device struct
  * @qdepth: requested queue depth
  *
  * Returns queue depth.
  */
 static int
-scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
+_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
 {
 	struct Scsi_Host *shost = sdev->host;
 	int max_depth;
@@ -1079,14 +1114,14 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
 }
 
 /**
- * scsih_change_queue_depth - changing device queue tag type
+ * _scsih_change_queue_depth - changing device queue tag type
  * @sdev: scsi device struct
  * @tag_type: requested tag type
  *
  * Returns queue tag type.
  */
 static int
-scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
+_scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
 {
 	if (sdev->tagged_supported) {
 		scsi_set_tag_type(sdev, tag_type);
@@ -1101,14 +1136,14 @@ scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
 }
 
 /**
- * scsih_target_alloc - target add routine
+ * _scsih_target_alloc - target add routine
  * @starget: scsi target struct
  *
  * Returns 0 if ok. Any other return is assumed to be an error and
  * the device is ignored.
  */
 static int
-scsih_target_alloc(struct scsi_target *starget)
+_scsih_target_alloc(struct scsi_target *starget)
 {
 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1163,13 +1198,13 @@ scsih_target_alloc(struct scsi_target *starget)
 }
 
 /**
- * scsih_target_destroy - target destroy routine
+ * _scsih_target_destroy - target destroy routine
  * @starget: scsi target struct
  *
  * Returns nothing.
  */
 static void
-scsih_target_destroy(struct scsi_target *starget)
+_scsih_target_destroy(struct scsi_target *starget)
 {
 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1212,14 +1247,14 @@ scsih_target_destroy(struct scsi_target *starget)
 }
 
 /**
- * scsih_slave_alloc - device add routine
+ * _scsih_slave_alloc - device add routine
  * @sdev: scsi device struct
  *
  * Returns 0 if ok. Any other return is assumed to be an error and
  * the device is ignored.
  */
 static int
-scsih_slave_alloc(struct scsi_device *sdev)
+_scsih_slave_alloc(struct scsi_device *sdev)
 {
 	struct Scsi_Host *shost;
 	struct MPT2SAS_ADAPTER *ioc;
@@ -1273,13 +1308,13 @@ scsih_slave_alloc(struct scsi_device *sdev)
 }
 
 /**
- * scsih_slave_destroy - device destroy routine
+ * _scsih_slave_destroy - device destroy routine
  * @sdev: scsi device struct
  *
  * Returns nothing.
  */
 static void
-scsih_slave_destroy(struct scsi_device *sdev)
+_scsih_slave_destroy(struct scsi_device *sdev)
 {
 	struct MPT2SAS_TARGET *sas_target_priv_data;
 	struct scsi_target *starget;
@@ -1295,13 +1330,13 @@ scsih_slave_destroy(struct scsi_device *sdev)
 }
 
 /**
- * scsih_display_sata_capabilities - sata capabilities
+ * _scsih_display_sata_capabilities - sata capabilities
  * @ioc: per adapter object
  * @sas_device: the sas_device object
  * @sdev: scsi device struct
  */
 static void
-scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
+_scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
     struct _sas_device *sas_device, struct scsi_device *sdev)
 {
 	Mpi2ConfigReply_t mpi_reply;
@@ -1401,14 +1436,14 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
 }
 
 /**
- * scsih_slave_configure - device configure routine.
+ * _scsih_slave_configure - device configure routine.
  * @sdev: scsi device struct
  *
  * Returns 0 if ok. Any other return is assumed to be an error and
  * the device is ignored.
  */
 static int
-scsih_slave_configure(struct scsi_device *sdev)
+_scsih_slave_configure(struct scsi_device *sdev)
 {
 	struct Scsi_Host *shost = sdev->host;
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1489,7 +1524,7 @@ scsih_slave_configure(struct scsi_device *sdev)
 		    r_level, raid_device->handle,
 		    (unsigned long long)raid_device->wwid,
 		    raid_device->num_pds, ds);
-		scsih_change_queue_depth(sdev, qdepth);
+		_scsih_change_queue_depth(sdev, qdepth);
 		return 0;
 	}
 
@@ -1532,10 +1567,10 @@ scsih_slave_configure(struct scsi_device *sdev)
 		    sas_device->slot);
 
 		if (!ssp_target)
-			scsih_display_sata_capabilities(ioc, sas_device, sdev);
+			_scsih_display_sata_capabilities(ioc, sas_device, sdev);
 	}
 
-	scsih_change_queue_depth(sdev, qdepth);
+	_scsih_change_queue_depth(sdev, qdepth);
 
 	if (ssp_target)
 		sas_read_port_mode_page(sdev);
@@ -1543,7 +1578,7 @@ scsih_slave_configure(struct scsi_device *sdev)
 }
 
 /**
- * scsih_bios_param - fetch head, sector, cylinder info for a disk
+ * _scsih_bios_param - fetch head, sector, cylinder info for a disk
  * @sdev: scsi device struct
  * @bdev: pointer to block device context
  * @capacity: device size (in 512 byte sectors)
@@ -1555,7 +1590,7 @@ scsih_slave_configure(struct scsi_device *sdev)
  * Return nothing.
  */
 static int
-scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+_scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
     sector_t capacity, int params[])
 {
 	int		heads;
@@ -1636,7 +1671,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
 }
 
 /**
- * scsih_tm_done - tm completion routine
+ * _scsih_tm_done - tm completion routine
  * @ioc: per adapter object
  * @smid: system request message index
  * @VF_ID: virtual function id
@@ -1648,7 +1683,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
  * Return nothing.
  */
 static void
-scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
+_scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
 {
 	MPI2DefaultReply_t *mpi_reply;
 
@@ -1823,13 +1858,13 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
 }
 
 /**
- * scsih_abort - eh threads main abort routine
+ * _scsih_abort - eh threads main abort routine
  * @sdev: scsi device struct
  *
  * Returns SUCCESS if command aborted else FAILED
  */
 static int
-scsih_abort(struct scsi_cmnd *scmd)
+_scsih_abort(struct scsi_cmnd *scmd)
 {
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
 	struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -1889,15 +1924,86 @@ scsih_abort(struct scsi_cmnd *scmd)
 	return r;
 }
 
+/**
+ * _scsih_dev_reset - eh threads main device reset routine
+ * @sdev: scsi device struct
+ *
+ * Returns SUCCESS if command aborted else FAILED
+ */
+static int
+_scsih_dev_reset(struct scsi_cmnd *scmd)
+{
+	struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+	struct MPT2SAS_DEVICE *sas_device_priv_data;
+	struct _sas_device *sas_device;
+	unsigned long flags;
+	u16	handle;
+	int r;
+
+	printk(MPT2SAS_INFO_FMT "attempting device reset! scmd(%p)\n",
+	    ioc->name, scmd);
+	scsi_print_command(scmd);
+
+	sas_device_priv_data = scmd->device->hostdata;
+	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+		printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n",
+		    ioc->name, scmd);
+		scmd->result = DID_NO_CONNECT << 16;
+		scmd->scsi_done(scmd);
+		r = SUCCESS;
+		goto out;
+	}
+
+	/* for hidden raid components obtain the volume_handle */
+	handle = 0;
+	if (sas_device_priv_data->sas_target->flags &
+	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
+		spin_lock_irqsave(&ioc->sas_device_lock, flags);
+		sas_device = _scsih_sas_device_find_by_handle(ioc,
+		   sas_device_priv_data->sas_target->handle);
+		if (sas_device)
+			handle = sas_device->volume_handle;
+		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+	} else
+		handle = sas_device_priv_data->sas_target->handle;
+
+	if (!handle) {
+		scmd->result = DID_RESET << 16;
+		r = FAILED;
+		goto out;
+	}
+
+	mutex_lock(&ioc->tm_cmds.mutex);
+	mpt2sas_scsih_issue_tm(ioc, handle, 0,
+	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, scmd->device->lun,
+	    30);
+
+	/*
+	 *  sanity check see whether all commands to this device been
+	 *  completed
+	 */
+	if (_scsih_scsi_lookup_find_by_lun(ioc, scmd->device->id,
+	    scmd->device->lun, scmd->device->channel))
+		r = FAILED;
+	else
+		r = SUCCESS;
+	ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+	mutex_unlock(&ioc->tm_cmds.mutex);
+
+ out:
+	printk(MPT2SAS_INFO_FMT "device reset: %s scmd(%p)\n",
+	    ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+	return r;
+}
 
 /**
- * scsih_dev_reset - eh threads main device reset routine
+ * _scsih_target_reset - eh threads main target reset routine
  * @sdev: scsi device struct
  *
  * Returns SUCCESS if command aborted else FAILED
  */
 static int
-scsih_dev_reset(struct scsi_cmnd *scmd)
+_scsih_target_reset(struct scsi_cmnd *scmd)
 {
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
 	struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -1912,7 +2018,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
 
 	sas_device_priv_data = scmd->device->hostdata;
 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
-		printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n",
+		printk(MPT2SAS_INFO_FMT "target been deleted! scmd(%p)\n",
 		    ioc->name, scmd);
 		scmd->result = DID_NO_CONNECT << 16;
 		scmd->scsi_done(scmd);
@@ -1962,13 +2068,13 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
 }
 
 /**
- * scsih_abort - eh threads main host reset routine
+ * _scsih_abort - eh threads main host reset routine
  * @sdev: scsi device struct
  *
  * Returns SUCCESS if command aborted else FAILED
  */
 static int
-scsih_host_reset(struct scsi_cmnd *scmd)
+_scsih_host_reset(struct scsi_cmnd *scmd)
 {
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
 	int r, retval;
@@ -2390,7 +2496,107 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
 }
 
 /**
- * scsih_qcmd - main scsi request entry point
+ * _scsih_setup_eedp - setup MPI request for EEDP transfer
+ * @scmd: pointer to scsi command object
+ * @mpi_request: pointer to the SCSI_IO reqest message frame
+ *
+ * Supporting protection 1 and 3.
+ *
+ * Returns nothing
+ */
+static void
+_scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
+{
+	u16 eedp_flags;
+	unsigned char prot_op = scsi_get_prot_op(scmd);
+	unsigned char prot_type = scsi_get_prot_type(scmd);
+
+	if (prot_type == SCSI_PROT_DIF_TYPE0 ||
+	   prot_type == SCSI_PROT_DIF_TYPE2 ||
+	   prot_op == SCSI_PROT_NORMAL)
+		return;
+
+	if (prot_op ==  SCSI_PROT_READ_STRIP)
+		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
+	else if (prot_op ==  SCSI_PROT_WRITE_INSERT)
+		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
+	else
+		return;
+
+	mpi_request->EEDPBlockSize = scmd->device->sector_size;
+
+	switch (prot_type) {
+	case SCSI_PROT_DIF_TYPE1:
+
+		/*
+		* enable ref/guard checking
+		* auto increment ref tag
+		*/
+		mpi_request->EEDPFlags = eedp_flags |
+		    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
+		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
+		    cpu_to_be32(scsi_get_lba(scmd));
+
+		break;
+
+	case SCSI_PROT_DIF_TYPE3:
+
+		/*
+		* enable guard checking
+		*/
+		mpi_request->EEDPFlags = eedp_flags |
+		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+
+		break;
+	}
+}
+
+/**
+ * _scsih_eedp_error_handling - return sense code for EEDP errors
+ * @scmd: pointer to scsi command object
+ * @ioc_status: ioc status
+ *
+ * Returns nothing
+ */
+static void
+_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
+{
+	u8 ascq;
+	u8 sk;
+	u8 host_byte;
+
+	switch (ioc_status) {
+	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+		ascq = 0x01;
+		break;
+	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+		ascq = 0x02;
+		break;
+	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+		ascq = 0x03;
+		break;
+	default:
+		ascq = 0x00;
+		break;
+	}
+
+	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
+		sk = ILLEGAL_REQUEST;
+		host_byte = DID_ABORT;
+	} else {
+		sk = ABORTED_COMMAND;
+		host_byte = DID_OK;
+	}
+
+	scsi_build_sense_buffer(0, scmd->sense_buffer, sk, 0x10, ascq);
+	scmd->result = DRIVER_SENSE << 24 | (host_byte << 16) |
+	    SAM_STAT_CHECK_CONDITION;
+}
+
+/**
+ * _scsih_qcmd - main scsi request entry point
  * @scmd: pointer to scsi command object
  * @scmd: pointer to scsi command object
  * @done: function pointer to be invoked on completion
  * @done: function pointer to be invoked on completion
  *
  *
@@ -2401,7 +2607,7 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
  * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
  */
 static int
-scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+_scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
 {
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
 	struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -2470,6 +2676,7 @@ scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
 	}
 	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
 	memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
+	_scsih_setup_eedp(scmd, mpi_request);
 	mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
 	if (sas_device_priv_data->sas_target->flags &
 	    MPT_TARGET_FLAGS_RAID_COMPONENT)
@@ -2604,6 +2811,15 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
 		desc_ioc_state = "scsi ext terminated";
 		break;
+	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+		desc_ioc_state = "eedp guard error";
+		break;
+	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+		desc_ioc_state = "eedp ref tag error";
+		break;
+	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+		desc_ioc_state = "eedp app tag error";
+		break;
 	default:
 		desc_ioc_state = "unknown";
 		break;
@@ -2783,7 +2999,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 }
 
 /**
- * scsih_io_done - scsi request callback
+ * _scsih_io_done - scsi request callback
  * @ioc: per adapter object
  * @smid: system request message index
  * @VF_ID: virtual function id
@@ -2794,7 +3010,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
  * Return nothing.
  */
 static void
-scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
+_scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
 {
 	Mpi2SCSIIORequest_t *mpi_request;
 	Mpi2SCSIIOReply_t *mpi_reply;
@@ -2939,6 +3155,11 @@ scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
 			scmd->result = DID_RESET << 16;
 		break;
 
+	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+		_scsih_eedp_error_handling(scmd, ioc_status);
+		break;
 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
 	case MPI2_IOCSTATUS_INVALID_SGL:
@@ -5130,18 +5351,19 @@ static struct scsi_host_template scsih_driver_template = {
 	.module				= THIS_MODULE,
 	.name				= "Fusion MPT SAS Host",
 	.proc_name			= MPT2SAS_DRIVER_NAME,
-	.queuecommand			= scsih_qcmd,
-	.target_alloc			= scsih_target_alloc,
-	.slave_alloc			= scsih_slave_alloc,
-	.slave_configure		= scsih_slave_configure,
-	.target_destroy			= scsih_target_destroy,
-	.slave_destroy			= scsih_slave_destroy,
-	.change_queue_depth 		= scsih_change_queue_depth,
-	.change_queue_type		= scsih_change_queue_type,
-	.eh_abort_handler		= scsih_abort,
-	.eh_device_reset_handler	= scsih_dev_reset,
-	.eh_host_reset_handler		= scsih_host_reset,
-	.bios_param			= scsih_bios_param,
+	.queuecommand			= _scsih_qcmd,
+	.target_alloc			= _scsih_target_alloc,
+	.slave_alloc			= _scsih_slave_alloc,
+	.slave_configure		= _scsih_slave_configure,
+	.target_destroy			= _scsih_target_destroy,
+	.slave_destroy			= _scsih_slave_destroy,
+	.change_queue_depth 		= _scsih_change_queue_depth,
+	.change_queue_type		= _scsih_change_queue_type,
+	.eh_abort_handler		= _scsih_abort,
+	.eh_device_reset_handler	= _scsih_dev_reset,
+	.eh_target_reset_handler	= _scsih_target_reset,
+	.eh_host_reset_handler		= _scsih_host_reset,
+	.bios_param			= _scsih_bios_param,
 	.can_queue			= 1,
 	.this_id			= -1,
 	.sg_tablesize			= MPT2SAS_SG_DEPTH,
@@ -5228,13 +5450,13 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
 }
 
 /**
- * scsih_remove - detach and remove add host
+ * _scsih_remove - detach and remove add host
  * @pdev: PCI device struct
  *
  * Return nothing.
  */
 static void __devexit
-scsih_remove(struct pci_dev *pdev)
+_scsih_remove(struct pci_dev *pdev)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5442,14 +5664,14 @@ _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc)
 }
 
 /**
- * scsih_probe - attach and add scsi host
+ * _scsih_probe - attach and add scsi host
  * @pdev: PCI device struct
  * @id: pci device id
  *
  * Returns 0 success, anything else error.
  */
 static int
-scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct MPT2SAS_ADAPTER *ioc;
 	struct Scsi_Host *shost;
@@ -5503,6 +5725,9 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto out_add_shost_fail;
 	}
 
+	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
+	    | SHOST_DIF_TYPE3_PROTECTION);
+
 	/* event thread */
 	snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
 	    "fw_event%d", ioc->id);
@@ -5536,14 +5761,14 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 #ifdef CONFIG_PM
 /**
- * scsih_suspend - power management suspend main entry point
+ * _scsih_suspend - power management suspend main entry point
  * @pdev: PCI device struct
  * @state: PM state change to (usually PCI_D3)
  *
  * Returns 0 success, anything else error.
  */
 static int
-scsih_suspend(struct pci_dev *pdev, pm_message_t state)
+_scsih_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5564,13 +5789,13 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
 }
 
 /**
- * scsih_resume - power management resume main entry point
+ * _scsih_resume - power management resume main entry point
  * @pdev: PCI device struct
  *
  * Returns 0 success, anything else error.
  */
 static int
-scsih_resume(struct pci_dev *pdev)
+_scsih_resume(struct pci_dev *pdev)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5599,22 +5824,22 @@ scsih_resume(struct pci_dev *pdev)
 static struct pci_driver scsih_driver = {
 	.name		= MPT2SAS_DRIVER_NAME,
 	.id_table	= scsih_pci_table,
-	.probe		= scsih_probe,
-	.remove		= __devexit_p(scsih_remove),
+	.probe		= _scsih_probe,
+	.remove		= __devexit_p(_scsih_remove),
 #ifdef CONFIG_PM
-	.suspend	= scsih_suspend,
-	.resume		= scsih_resume,
+	.suspend	= _scsih_suspend,
+	.resume		= _scsih_resume,
 #endif
 };
 
 
 /**
- * scsih_init - main entry point for this driver.
+ * _scsih_init - main entry point for this driver.
  *
  * Returns 0 success, anything else error.
  */
 static int __init
-scsih_init(void)
+_scsih_init(void)
 {
 	int error;
 
@@ -5630,10 +5855,10 @@ scsih_init(void)
 	mpt2sas_base_initialize_callback_handler();
 	mpt2sas_base_initialize_callback_handler();
 
 
 	 /* queuecommand callback hander */
 	 /* queuecommand callback hander */
-	scsi_io_cb_idx = mpt2sas_base_register_callback_handler(scsih_io_done);
+	scsi_io_cb_idx = mpt2sas_base_register_callback_handler(_scsih_io_done);
 
 
 	/* task managment callback handler */
 	/* task managment callback handler */
-	tm_cb_idx = mpt2sas_base_register_callback_handler(scsih_tm_done);
+	tm_cb_idx = mpt2sas_base_register_callback_handler(_scsih_tm_done);

 	/* base internal commands callback handler */
 	base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done);
@@ -5659,12 +5884,12 @@ scsih_init(void)
 }

 /**
- * scsih_exit - exit point for this driver (when it is a module).
+ * _scsih_exit - exit point for this driver (when it is a module).
  *
  * Returns 0 success, anything else error.
  */
 static void __exit
-scsih_exit(void)
+_scsih_exit(void)
 {
 	printk(KERN_INFO "mpt2sas version %s unloading\n",
 	    MPT2SAS_DRIVER_VERSION);
@@ -5682,5 +5907,5 @@ scsih_exit(void)
 	mpt2sas_ctl_exit();
 }

-module_init(scsih_init);
-module_exit(scsih_exit);
+module_init(_scsih_init);
+module_exit(_scsih_exit);

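For context on the probe hunk above: scsi_host_set_prot() advertises which T10 DIF protection-information types the HBA can generate and verify, so the SCSI mid-layer attaches protection metadata only for formats the hardware supports. A minimal sketch of the pattern; example_probe_fragment is a hypothetical name, not part of this patch:

#include <scsi/scsi_host.h>

/* Hypothetical fragment: tell the mid-layer the HBA handles Type 1 and
 * Type 3 protection information.  The mask is any OR of the
 * SHOST_DIF_TYPE*_PROTECTION / SHOST_DIX_TYPE*_PROTECTION flags.
 */
static void example_probe_fragment(struct Scsi_Host *shost)
{
	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
	    SHOST_DIF_TYPE3_PROTECTION);
}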
+ 18 - 18
drivers/scsi/mpt2sas/mpt2sas_transport.c

@@ -264,7 +264,7 @@ struct rep_manu_reply{
 };

 /**
- * transport_expander_report_manufacture - obtain SMP report_manufacture
+ * _transport_expander_report_manufacture - obtain SMP report_manufacture
  * @ioc: per adapter object
  * @sas_address: expander sas address
  * @edev: the sas_expander_device object
@@ -274,7 +274,7 @@ struct rep_manu_reply{
  * Returns 0 for success, non-zero for failure.
  */
 static int
-transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
+_transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
     u64 sas_address, struct sas_expander_device *edev)
 {
 	Mpi2SmpPassthroughRequest_t *mpi_request;
@@ -578,7 +578,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
 	    MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
 	    mpt2sas_port->remote_identify.device_type ==
 	    MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER)
-		transport_expander_report_manufacture(ioc,
+		_transport_expander_report_manufacture(ioc,
 		    mpt2sas_port->remote_identify.sas_address,
 		    rphy_to_expander_device(rphy));

@@ -852,7 +852,7 @@ rphy_to_ioc(struct sas_rphy *rphy)
 }

 /**
- * transport_get_linkerrors -
+ * _transport_get_linkerrors -
  * @phy: The sas phy object
  *
  * Only support sas_host direct attached phys.
@@ -860,7 +860,7 @@ rphy_to_ioc(struct sas_rphy *rphy)
  *
  */
 static int
-transport_get_linkerrors(struct sas_phy *phy)
+_transport_get_linkerrors(struct sas_phy *phy)
 {
 	struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
 	struct _sas_phy *mpt2sas_phy;
@@ -903,14 +903,14 @@ transport_get_linkerrors(struct sas_phy *phy)
 }

 /**
- * transport_get_enclosure_identifier -
+ * _transport_get_enclosure_identifier -
  * @phy: The sas phy object
  *
  * Obtain the enclosure logical id for an expander.
  * Returns 0 for success, non-zero for failure.
  */
 static int
-transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
+_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
 {
 	struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
 	struct _sas_node *sas_expander;
@@ -929,13 +929,13 @@ transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
 }

 /**
- * transport_get_bay_identifier -
+ * _transport_get_bay_identifier -
  * @phy: The sas phy object
  *
  * Returns the slot id for a device that resides inside an enclosure.
  */
 static int
-transport_get_bay_identifier(struct sas_rphy *rphy)
+_transport_get_bay_identifier(struct sas_rphy *rphy)
 {
 	struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
 	struct _sas_device *sas_device;
@@ -953,7 +953,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy)
 }

 /**
- * transport_phy_reset -
+ * _transport_phy_reset -
  * @phy: The sas phy object
  * @hard_reset:
  *
@@ -961,7 +961,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy)
  * Returns 0 for success, non-zero for failure.
  */
 static int
-transport_phy_reset(struct sas_phy *phy, int hard_reset)
+_transport_phy_reset(struct sas_phy *phy, int hard_reset)
 {
 	struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
 	struct _sas_phy *mpt2sas_phy;
@@ -1002,7 +1002,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset)
 }

 /**
- * transport_smp_handler - transport portal for smp passthru
+ * _transport_smp_handler - transport portal for smp passthru
  * @shost: shost object
  * @rphy: sas transport rphy object
  * @req:
@@ -1012,7 +1012,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset)
  *           smp_rep_general /sys/class/bsg/expander-5:0
  */
 static int
-transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
+_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
     struct request *req)
 {
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1200,11 +1200,11 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 }

 struct sas_function_template mpt2sas_transport_functions = {
-	.get_linkerrors		= transport_get_linkerrors,
-	.get_enclosure_identifier = transport_get_enclosure_identifier,
-	.get_bay_identifier	= transport_get_bay_identifier,
-	.phy_reset		= transport_phy_reset,
-	.smp_handler		= transport_smp_handler,
+	.get_linkerrors		= _transport_get_linkerrors,
+	.get_enclosure_identifier = _transport_get_enclosure_identifier,
+	.get_bay_identifier	= _transport_get_bay_identifier,
+	.phy_reset		= _transport_phy_reset,
+	.smp_handler		= _transport_smp_handler,
 };

 struct scsi_transport_template *mpt2sas_transport_template;

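Aside on the renames above: they only prefix the file-local callbacks with an underscore; the registration flow is unchanged, a sas_function_template handed to the SAS transport class once at module init. A minimal sketch of that flow, under hypothetical example_* names:

#include <linux/module.h>
#include <scsi/scsi_transport_sas.h>

static int example_get_linkerrors(struct sas_phy *phy)
{
	return 0;	/* stub: a real driver reads the phy error counters */
}

static struct sas_function_template example_transport_functions = {
	.get_linkerrors	= example_get_linkerrors,
};

static struct scsi_transport_template *example_transport_template;

static int __init example_init(void)
{
	/* attach once; the LLDD later assigns the returned template to
	 * shost->transportt before calling scsi_add_host() */
	example_transport_template =
	    sas_attach_transport(&example_transport_functions);
	return example_transport_template ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	sas_release_transport(example_transport_template);
}

module_init(example_init);
module_exit(example_exit);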
+ 0 - 3222
drivers/scsi/mvsas.c

@@ -1,3222 +0,0 @@
-/*
-	mvsas.c - Marvell 88SE6440 SAS/SATA support
-
-	Copyright 2007 Red Hat, Inc.
-	Copyright 2008 Marvell. <kewei@marvell.com>
-
-	This program is free software; you can redistribute it and/or
-	modify it under the terms of the GNU General Public License as
-	published by the Free Software Foundation; either version 2,
-	or (at your option) any later version.
-
-	This program is distributed in the hope that it will be useful,
-	but WITHOUT ANY WARRANTY; without even the implied warranty
-	of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-	See the GNU General Public License for more details.
-
-	You should have received a copy of the GNU General Public
-	License along with this program; see the file COPYING.	If not,
-	write to the Free Software Foundation, 675 Mass Ave, Cambridge,
-	MA 02139, USA.
-
-	---------------------------------------------------------------
-
-	Random notes:
-	* hardware supports controlling the endian-ness of data
-	  structures.  this permits elimination of all the le32_to_cpu()
-	  and cpu_to_le32() conversions.
-
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/ctype.h>
-#include <scsi/libsas.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/sas_ata.h>
-#include <asm/io.h>
-
-#define DRV_NAME	"mvsas"
-#define DRV_VERSION	"0.5.2"
-#define _MV_DUMP	0
-#define MVS_DISABLE_NVRAM
-#define MVS_DISABLE_MSI
-
-#define mr32(reg)	readl(regs + MVS_##reg)
-#define mw32(reg,val)	writel((val), regs + MVS_##reg)
-#define mw32_f(reg,val)	do {			\
-	writel((val), regs + MVS_##reg);	\
-	readl(regs + MVS_##reg);		\
-	} while (0)
-
-#define MVS_ID_NOT_MAPPED	0x7f
-#define MVS_CHIP_SLOT_SZ	(1U << mvi->chip->slot_width)
-
-/* offset for D2H FIS in the Received FIS List Structure */
-#define SATA_RECEIVED_D2H_FIS(reg_set)	\
-	((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
-#define SATA_RECEIVED_PIO_FIS(reg_set)	\
-	((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
-#define UNASSOC_D2H_FIS(id)		\
-	((void *) mvi->rx_fis + 0x100 * id)
-
-#define for_each_phy(__lseq_mask, __mc, __lseq, __rest)			\
-	for ((__mc) = (__lseq_mask), (__lseq) = 0;			\
-					(__mc) != 0 && __rest;		\
-					(++__lseq), (__mc) >>= 1)
-
-/* driver compile-time configuration */
-enum driver_configuration {
-	MVS_TX_RING_SZ		= 1024,	/* TX ring size (12-bit) */
-	MVS_RX_RING_SZ		= 1024, /* RX ring size (12-bit) */
-					/* software requires power-of-2
-					   ring size */
-
-	MVS_SLOTS		= 512,	/* command slots */
-	MVS_SLOT_BUF_SZ		= 8192, /* cmd tbl + IU + status + PRD */
-	MVS_SSP_CMD_SZ		= 64,	/* SSP command table buffer size */
-	MVS_ATA_CMD_SZ		= 96,	/* SATA command table buffer size */
-	MVS_OAF_SZ		= 64,	/* Open address frame buffer size */
-
-	MVS_RX_FIS_COUNT	= 17,	/* Optional rx'd FISs (max 17) */
-
-	MVS_QUEUE_SIZE		= 30,	/* Support Queue depth */
-	MVS_CAN_QUEUE		= MVS_SLOTS - 1,	/* SCSI Queue depth */
-};
-
-/* unchangeable hardware details */
-enum hardware_details {
-	MVS_MAX_PHYS		= 8,	/* max. possible phys */
-	MVS_MAX_PORTS		= 8,	/* max. possible ports */
-	MVS_RX_FISL_SZ		= 0x400 + (MVS_RX_FIS_COUNT * 0x100),
-};
-
-/* peripheral registers (BAR2) */
-enum peripheral_registers {
-	SPI_CTL			= 0x10,	/* EEPROM control */
-	SPI_CMD			= 0x14,	/* EEPROM command */
-	SPI_DATA		= 0x18, /* EEPROM data */
-};
-
-enum peripheral_register_bits {
-	TWSI_RDY		= (1U << 7),	/* EEPROM interface ready */
-	TWSI_RD			= (1U << 4),	/* EEPROM read access */
-
-	SPI_ADDR_MASK		= 0x3ffff,	/* bits 17:0 */
-};
-
-/* enhanced mode registers (BAR4) */
-enum hw_registers {
-	MVS_GBL_CTL		= 0x04,  /* global control */
-	MVS_GBL_INT_STAT	= 0x08,  /* global irq status */
-	MVS_GBL_PI		= 0x0C,  /* ports implemented bitmask */
-	MVS_GBL_PORT_TYPE	= 0xa0,  /* port type */
-
-	MVS_CTL			= 0x100, /* SAS/SATA port configuration */
-	MVS_PCS			= 0x104, /* SAS/SATA port control/status */
-	MVS_CMD_LIST_LO		= 0x108, /* cmd list addr */
-	MVS_CMD_LIST_HI		= 0x10C,
-	MVS_RX_FIS_LO		= 0x110, /* RX FIS list addr */
-	MVS_RX_FIS_HI		= 0x114,
-
-	MVS_TX_CFG		= 0x120, /* TX configuration */
-	MVS_TX_LO		= 0x124, /* TX (delivery) ring addr */
-	MVS_TX_HI		= 0x128,
-
-	MVS_TX_PROD_IDX		= 0x12C, /* TX producer pointer */
-	MVS_TX_CONS_IDX		= 0x130, /* TX consumer pointer (RO) */
-	MVS_RX_CFG		= 0x134, /* RX configuration */
-	MVS_RX_LO		= 0x138, /* RX (completion) ring addr */
-	MVS_RX_HI		= 0x13C,
-	MVS_RX_CONS_IDX		= 0x140, /* RX consumer pointer (RO) */
-
-	MVS_INT_COAL		= 0x148, /* Int coalescing config */
-	MVS_INT_COAL_TMOUT	= 0x14C, /* Int coalescing timeout */
-	MVS_INT_STAT		= 0x150, /* Central int status */
-	MVS_INT_MASK		= 0x154, /* Central int enable */
-	MVS_INT_STAT_SRS	= 0x158, /* SATA register set status */
-	MVS_INT_MASK_SRS	= 0x15C,
-
-					 /* ports 1-3 follow after this */
-	MVS_P0_INT_STAT		= 0x160, /* port0 interrupt status */
-	MVS_P0_INT_MASK		= 0x164, /* port0 interrupt mask */
-	MVS_P4_INT_STAT		= 0x200, /* Port 4 interrupt status */
-	MVS_P4_INT_MASK		= 0x204, /* Port 4 interrupt enable mask */
-
-					 /* ports 1-3 follow after this */
-	MVS_P0_SER_CTLSTAT	= 0x180, /* port0 serial control/status */
-	MVS_P4_SER_CTLSTAT	= 0x220, /* port4 serial control/status */
-
-	MVS_CMD_ADDR		= 0x1B8, /* Command register port (addr) */
-	MVS_CMD_DATA		= 0x1BC, /* Command register port (data) */
-
-					 /* ports 1-3 follow after this */
-	MVS_P0_CFG_ADDR		= 0x1C0, /* port0 phy register address */
-	MVS_P0_CFG_DATA		= 0x1C4, /* port0 phy register data */
-	MVS_P4_CFG_ADDR		= 0x230, /* Port 4 config address */
-	MVS_P4_CFG_DATA		= 0x234, /* Port 4 config data */
-
-					 /* ports 1-3 follow after this */
-	MVS_P0_VSR_ADDR		= 0x1E0, /* port0 VSR address */
-	MVS_P0_VSR_DATA		= 0x1E4, /* port0 VSR data */
-	MVS_P4_VSR_ADDR		= 0x250, /* port 4 VSR addr */
-	MVS_P4_VSR_DATA		= 0x254, /* port 4 VSR data */
-};
-
-enum hw_register_bits {
-	/* MVS_GBL_CTL */
-	INT_EN			= (1U << 1),	/* Global int enable */
-	HBA_RST			= (1U << 0),	/* HBA reset */
-
-	/* MVS_GBL_INT_STAT */
-	INT_XOR			= (1U << 4),	/* XOR engine event */
-	INT_SAS_SATA		= (1U << 0),	/* SAS/SATA event */
-
-	/* MVS_GBL_PORT_TYPE */			/* shl for ports 1-3 */
-	SATA_TARGET		= (1U << 16),	/* port0 SATA target enable */
-	MODE_AUTO_DET_PORT7 = (1U << 15),	/* port0 SAS/SATA autodetect */
-	MODE_AUTO_DET_PORT6 = (1U << 14),
-	MODE_AUTO_DET_PORT5 = (1U << 13),
-	MODE_AUTO_DET_PORT4 = (1U << 12),
-	MODE_AUTO_DET_PORT3 = (1U << 11),
-	MODE_AUTO_DET_PORT2 = (1U << 10),
-	MODE_AUTO_DET_PORT1 = (1U << 9),
-	MODE_AUTO_DET_PORT0 = (1U << 8),
-	MODE_AUTO_DET_EN    =	MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
-				MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
-				MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
-				MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
-	MODE_SAS_PORT7_MASK = (1U << 7),  /* port0 SAS(1), SATA(0) mode */
-	MODE_SAS_PORT6_MASK = (1U << 6),
-	MODE_SAS_PORT5_MASK = (1U << 5),
-	MODE_SAS_PORT4_MASK = (1U << 4),
-	MODE_SAS_PORT3_MASK = (1U << 3),
-	MODE_SAS_PORT2_MASK = (1U << 2),
-	MODE_SAS_PORT1_MASK = (1U << 1),
-	MODE_SAS_PORT0_MASK = (1U << 0),
-	MODE_SAS_SATA	=	MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
-				MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
-				MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
-				MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
-
-				/* SAS_MODE value may be
-				 * dictated (in hw) by values
-				 * of SATA_TARGET & AUTO_DET
-				 */
-
-	/* MVS_TX_CFG */
-	TX_EN			= (1U << 16),	/* Enable TX */
-	TX_RING_SZ_MASK		= 0xfff,	/* TX ring size, bits 11:0 */
-
-	/* MVS_RX_CFG */
-	RX_EN			= (1U << 16),	/* Enable RX */
-	RX_RING_SZ_MASK		= 0xfff,	/* RX ring size, bits 11:0 */
-
-	/* MVS_INT_COAL */
-	COAL_EN			= (1U << 16),	/* Enable int coalescing */
-
-	/* MVS_INT_STAT, MVS_INT_MASK */
-	CINT_I2C		= (1U << 31),	/* I2C event */
-	CINT_SW0		= (1U << 30),	/* software event 0 */
-	CINT_SW1		= (1U << 29),	/* software event 1 */
-	CINT_PRD_BC		= (1U << 28),	/* PRD BC err for read cmd */
-	CINT_DMA_PCIE		= (1U << 27),	/* DMA to PCIE timeout */
-	CINT_MEM		= (1U << 26),	/* int mem parity err */
-	CINT_I2C_SLAVE		= (1U << 25),	/* slave I2C event */
-	CINT_SRS		= (1U << 3),	/* SRS event */
-	CINT_CI_STOP		= (1U << 1),	/* cmd issue stopped */
-	CINT_DONE		= (1U << 0),	/* cmd completion */
-
-						/* shl for ports 1-3 */
-	CINT_PORT_STOPPED	= (1U << 16),	/* port0 stopped */
-	CINT_PORT		= (1U << 8),	/* port0 event */
-	CINT_PORT_MASK_OFFSET	= 8,
-	CINT_PORT_MASK		= (0xFF << CINT_PORT_MASK_OFFSET),
-
-	/* TX (delivery) ring bits */
-	TXQ_CMD_SHIFT		= 29,
-	TXQ_CMD_SSP		= 1,		/* SSP protocol */
-	TXQ_CMD_SMP		= 2,		/* SMP protocol */
-	TXQ_CMD_STP		= 3,		/* STP/SATA protocol */
-	TXQ_CMD_SSP_FREE_LIST	= 4,		/* add to SSP targ free list */
-	TXQ_CMD_SLOT_RESET	= 7,		/* reset command slot */
-	TXQ_MODE_I		= (1U << 28),	/* mode: 0=target,1=initiator */
-	TXQ_PRIO_HI		= (1U << 27),	/* priority: 0=normal, 1=high */
-	TXQ_SRS_SHIFT		= 20,		/* SATA register set */
-	TXQ_SRS_MASK		= 0x7f,
-	TXQ_PHY_SHIFT		= 12,		/* PHY bitmap */
-	TXQ_PHY_MASK		= 0xff,
-	TXQ_SLOT_MASK		= 0xfff,	/* slot number */
-
-	/* RX (completion) ring bits */
-	RXQ_GOOD		= (1U << 23),	/* Response good */
-	RXQ_SLOT_RESET		= (1U << 21),	/* Slot reset complete */
-	RXQ_CMD_RX		= (1U << 20),	/* target cmd received */
-	RXQ_ATTN		= (1U << 19),	/* attention */
-	RXQ_RSP			= (1U << 18),	/* response frame xfer'd */
-	RXQ_ERR			= (1U << 17),	/* err info rec xfer'd */
-	RXQ_DONE		= (1U << 16),	/* cmd complete */
-	RXQ_SLOT_MASK		= 0xfff,	/* slot number */
-
-	/* mvs_cmd_hdr bits */
-	MCH_PRD_LEN_SHIFT	= 16,		/* 16-bit PRD table len */
-	MCH_SSP_FR_TYPE_SHIFT	= 13,		/* SSP frame type */
-
-						/* SSP initiator only */
-	MCH_SSP_FR_CMD		= 0x0,		/* COMMAND frame */
-
-						/* SSP initiator or target */
-	MCH_SSP_FR_TASK		= 0x1,		/* TASK frame */
-
-						/* SSP target only */
-	MCH_SSP_FR_XFER_RDY	= 0x4,		/* XFER_RDY frame */
-	MCH_SSP_FR_RESP		= 0x5,		/* RESPONSE frame */
-	MCH_SSP_FR_READ		= 0x6,		/* Read DATA frame(s) */
-	MCH_SSP_FR_READ_RESP	= 0x7,		/* ditto, plus RESPONSE */
-
-	MCH_PASSTHRU		= (1U << 12),	/* pass-through (SSP) */
-	MCH_FBURST		= (1U << 11),	/* first burst (SSP) */
-	MCH_CHK_LEN		= (1U << 10),	/* chk xfer len (SSP) */
-	MCH_RETRY		= (1U << 9),	/* tport layer retry (SSP) */
-	MCH_PROTECTION		= (1U << 8),	/* protection info rec (SSP) */
-	MCH_RESET		= (1U << 7),	/* Reset (STP/SATA) */
-	MCH_FPDMA		= (1U << 6),	/* First party DMA (STP/SATA) */
-	MCH_ATAPI		= (1U << 5),	/* ATAPI (STP/SATA) */
-	MCH_BIST		= (1U << 4),	/* BIST activate (STP/SATA) */
-	MCH_PMP_MASK		= 0xf,		/* PMP from cmd FIS (STP/SATA)*/
-
-	CCTL_RST		= (1U << 5),	/* port logic reset */
-
-						/* 0(LSB first), 1(MSB first) */
-	CCTL_ENDIAN_DATA	= (1U << 3),	/* PRD data */
-	CCTL_ENDIAN_RSP		= (1U << 2),	/* response frame */
-	CCTL_ENDIAN_OPEN	= (1U << 1),	/* open address frame */
-	CCTL_ENDIAN_CMD		= (1U << 0),	/* command table */
-
-	/* MVS_Px_SER_CTLSTAT (per-phy control) */
-	PHY_SSP_RST		= (1U << 3),	/* reset SSP link layer */
-	PHY_BCAST_CHG		= (1U << 2),	/* broadcast(change) notif */
-	PHY_RST_HARD		= (1U << 1),	/* hard reset + phy reset */
-	PHY_RST			= (1U << 0),	/* phy reset */
-	PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
-	PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
-	PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
-	PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
-			(0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
-	PHY_READY_MASK		= (1U << 20),
-
-	/* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
-	PHYEV_DEC_ERR		= (1U << 24),	/* Phy Decoding Error */
-	PHYEV_UNASSOC_FIS	= (1U << 19),	/* unassociated FIS rx'd */
-	PHYEV_AN		= (1U << 18),	/* SATA async notification */
-	PHYEV_BIST_ACT		= (1U << 17),	/* BIST activate FIS */
-	PHYEV_SIG_FIS		= (1U << 16),	/* signature FIS */
-	PHYEV_POOF		= (1U << 12),	/* phy ready from 1 -> 0 */
-	PHYEV_IU_BIG		= (1U << 11),	/* IU too long err */
-	PHYEV_IU_SMALL		= (1U << 10),	/* IU too short err */
-	PHYEV_UNK_TAG		= (1U << 9),	/* unknown tag */
-	PHYEV_BROAD_CH		= (1U << 8),	/* broadcast(CHANGE) */
-	PHYEV_COMWAKE		= (1U << 7),	/* COMWAKE rx'd */
-	PHYEV_PORT_SEL		= (1U << 6),	/* port selector present */
-	PHYEV_HARD_RST		= (1U << 5),	/* hard reset rx'd */
-	PHYEV_ID_TMOUT		= (1U << 4),	/* identify timeout */
-	PHYEV_ID_FAIL		= (1U << 3),	/* identify failed */
-	PHYEV_ID_DONE		= (1U << 2),	/* identify done */
-	PHYEV_HARD_RST_DONE	= (1U << 1),	/* hard reset done */
-	PHYEV_RDY_CH		= (1U << 0),	/* phy ready changed state */
-
-	/* MVS_PCS */
-	PCS_EN_SATA_REG_SHIFT	= (16),		/* Enable SATA Register Set */
-	PCS_EN_PORT_XMT_SHIFT	= (12),		/* Enable Port Transmit */
-	PCS_EN_PORT_XMT_SHIFT2	= (8),		/* For 6480 */
-	PCS_SATA_RETRY		= (1U << 8),	/* retry ctl FIS on R_ERR */
-	PCS_RSP_RX_EN		= (1U << 7),	/* raw response rx */
-	PCS_SELF_CLEAR		= (1U << 5),	/* self-clearing int mode */
-	PCS_FIS_RX_EN		= (1U << 4),	/* FIS rx enable */
-	PCS_CMD_STOP_ERR	= (1U << 3),	/* cmd stop-on-err enable */
-	PCS_CMD_RST		= (1U << 1),	/* reset cmd issue */
-	PCS_CMD_EN		= (1U << 0),	/* enable cmd issue */
-
-	/* Port n Attached Device Info */
-	PORT_DEV_SSP_TRGT	= (1U << 19),
-	PORT_DEV_SMP_TRGT	= (1U << 18),
-	PORT_DEV_STP_TRGT	= (1U << 17),
-	PORT_DEV_SSP_INIT	= (1U << 11),
-	PORT_DEV_SMP_INIT	= (1U << 10),
-	PORT_DEV_STP_INIT	= (1U << 9),
-	PORT_PHY_ID_MASK	= (0xFFU << 24),
-	PORT_DEV_TRGT_MASK	= (0x7U << 17),
-	PORT_DEV_INIT_MASK	= (0x7U << 9),
-	PORT_DEV_TYPE_MASK	= (0x7U << 0),
-
-	/* Port n PHY Status */
-	PHY_RDY			= (1U << 2),
-	PHY_DW_SYNC		= (1U << 1),
-	PHY_OOB_DTCTD		= (1U << 0),
-
-	/* VSR */
-	/* PHYMODE 6 (CDB) */
-	PHY_MODE6_LATECLK	= (1U << 29),	/* Lock Clock */
-	PHY_MODE6_DTL_SPEED	= (1U << 27),	/* Digital Loop Speed */
-	PHY_MODE6_FC_ORDER	= (1U << 26),	/* Fibre Channel Mode Order*/
-	PHY_MODE6_MUCNT_EN	= (1U << 24),	/* u Count Enable */
-	PHY_MODE6_SEL_MUCNT_LEN	= (1U << 22),	/* Training Length Select */
-	PHY_MODE6_SELMUPI	= (1U << 20),	/* Phase Multi Select (init) */
-	PHY_MODE6_SELMUPF	= (1U << 18),	/* Phase Multi Select (final) */
-	PHY_MODE6_SELMUFF	= (1U << 16),	/* Freq Loop Multi Sel(final) */
-	PHY_MODE6_SELMUFI	= (1U << 14),	/* Freq Loop Multi Sel(init) */
-	PHY_MODE6_FREEZE_LOOP	= (1U << 12),	/* Freeze Rx CDR Loop */
-	PHY_MODE6_INT_RXFOFFS	= (1U << 3),	/* Rx CDR Freq Loop Enable */
-	PHY_MODE6_FRC_RXFOFFS	= (1U << 2),	/* Initial Rx CDR Offset */
-	PHY_MODE6_STAU_0D8	= (1U << 1),	/* Rx CDR Freq Loop Saturate */
-	PHY_MODE6_RXSAT_DIS	= (1U << 0),	/* Saturate Ctl */
-};
-
-enum mvs_info_flags {
-	MVF_MSI			= (1U << 0),	/* MSI is enabled */
-	MVF_PHY_PWR_FIX		= (1U << 1),	/* bug workaround */
-};
-
-enum sas_cmd_port_registers {
-	CMD_CMRST_OOB_DET	= 0x100, /* COMRESET OOB detect register */
-	CMD_CMWK_OOB_DET	= 0x104, /* COMWAKE OOB detect register */
-	CMD_CMSAS_OOB_DET	= 0x108, /* COMSAS OOB detect register */
-	CMD_BRST_OOB_DET	= 0x10c, /* burst OOB detect register */
-	CMD_OOB_SPACE		= 0x110, /* OOB space control register */
-	CMD_OOB_BURST		= 0x114, /* OOB burst control register */
-	CMD_PHY_TIMER		= 0x118, /* PHY timer control register */
-	CMD_PHY_CONFIG0		= 0x11c, /* PHY config register 0 */
-	CMD_PHY_CONFIG1		= 0x120, /* PHY config register 1 */
-	CMD_SAS_CTL0		= 0x124, /* SAS control register 0 */
-	CMD_SAS_CTL1		= 0x128, /* SAS control register 1 */
-	CMD_SAS_CTL2		= 0x12c, /* SAS control register 2 */
-	CMD_SAS_CTL3		= 0x130, /* SAS control register 3 */
-	CMD_ID_TEST		= 0x134, /* ID test register */
-	CMD_PL_TIMER		= 0x138, /* PL timer register */
-	CMD_WD_TIMER		= 0x13c, /* WD timer register */
-	CMD_PORT_SEL_COUNT	= 0x140, /* port selector count register */
-	CMD_APP_MEM_CTL		= 0x144, /* Application Memory Control */
-	CMD_XOR_MEM_CTL		= 0x148, /* XOR Block Memory Control */
-	CMD_DMA_MEM_CTL		= 0x14c, /* DMA Block Memory Control */
-	CMD_PORT_MEM_CTL0	= 0x150, /* Port Memory Control 0 */
-	CMD_PORT_MEM_CTL1	= 0x154, /* Port Memory Control 1 */
-	CMD_SATA_PORT_MEM_CTL0	= 0x158, /* SATA Port Memory Control 0 */
-	CMD_SATA_PORT_MEM_CTL1	= 0x15c, /* SATA Port Memory Control 1 */
-	CMD_XOR_MEM_BIST_CTL	= 0x160, /* XOR Memory BIST Control */
-	CMD_XOR_MEM_BIST_STAT	= 0x164, /* XOR Memory BIST Status */
-	CMD_DMA_MEM_BIST_CTL	= 0x168, /* DMA Memory BIST Control */
-	CMD_DMA_MEM_BIST_STAT	= 0x16c, /* DMA Memory BIST Status */
-	CMD_PORT_MEM_BIST_CTL	= 0x170, /* Port Memory BIST Control */
-	CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
-	CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
-	CMD_STP_MEM_BIST_CTL	= 0x17c, /* STP Memory BIST Control */
-	CMD_STP_MEM_BIST_STAT0	= 0x180, /* STP Memory BIST Status 0 */
-	CMD_STP_MEM_BIST_STAT1	= 0x184, /* STP Memory BIST Status 1 */
-	CMD_RESET_COUNT		= 0x188, /* Reset Count */
-	CMD_MONTR_DATA_SEL	= 0x18C, /* Monitor Data/Select */
-	CMD_PLL_PHY_CONFIG	= 0x190, /* PLL/PHY Configuration */
-	CMD_PHY_CTL		= 0x194, /* PHY Control and Status */
-	CMD_PHY_TEST_COUNT0	= 0x198, /* Phy Test Count 0 */
-	CMD_PHY_TEST_COUNT1	= 0x19C, /* Phy Test Count 1 */
-	CMD_PHY_TEST_COUNT2	= 0x1A0, /* Phy Test Count 2 */
-	CMD_APP_ERR_CONFIG	= 0x1A4, /* Application Error Configuration */
-	CMD_PND_FIFO_CTL0	= 0x1A8, /* Pending FIFO Control 0 */
-	CMD_HOST_CTL		= 0x1AC, /* Host Control Status */
-	CMD_HOST_WR_DATA	= 0x1B0, /* Host Write Data */
-	CMD_HOST_RD_DATA	= 0x1B4, /* Host Read Data */
-	CMD_PHY_MODE_21		= 0x1B8, /* Phy Mode 21 */
-	CMD_SL_MODE0		= 0x1BC, /* SL Mode 0 */
-	CMD_SL_MODE1		= 0x1C0, /* SL Mode 1 */
-	CMD_PND_FIFO_CTL1	= 0x1C4, /* Pending FIFO Control 1 */
-};
-
-/* SAS/SATA configuration port registers, aka phy registers */
-enum sas_sata_config_port_regs {
-	PHYR_IDENTIFY		= 0x00,	/* info for IDENTIFY frame */
-	PHYR_ADDR_LO		= 0x04,	/* my SAS address (low) */
-	PHYR_ADDR_HI		= 0x08,	/* my SAS address (high) */
-	PHYR_ATT_DEV_INFO	= 0x0C,	/* attached device info */
-	PHYR_ATT_ADDR_LO	= 0x10,	/* attached dev SAS addr (low) */
-	PHYR_ATT_ADDR_HI	= 0x14,	/* attached dev SAS addr (high) */
-	PHYR_SATA_CTL		= 0x18,	/* SATA control */
-	PHYR_PHY_STAT		= 0x1C,	/* PHY status */
-	PHYR_SATA_SIG0		= 0x20,	/*port SATA signature FIS(Byte 0-3) */
-	PHYR_SATA_SIG1		= 0x24,	/*port SATA signature FIS(Byte 4-7) */
-	PHYR_SATA_SIG2		= 0x28,	/*port SATA signature FIS(Byte 8-11) */
-	PHYR_SATA_SIG3		= 0x2c,	/*port SATA signature FIS(Byte 12-15) */
-	PHYR_R_ERR_COUNT	= 0x30, /* port R_ERR count register */
-	PHYR_CRC_ERR_COUNT	= 0x34, /* port CRC error count register */
-	PHYR_WIDE_PORT		= 0x38,	/* wide port participating */
-	PHYR_CURRENT0		= 0x80,	/* current connection info 0 */
-	PHYR_CURRENT1		= 0x84,	/* current connection info 1 */
-	PHYR_CURRENT2		= 0x88,	/* current connection info 2 */
-};
-
-/*  SAS/SATA Vendor Specific Port Registers */
-enum sas_sata_vsp_regs {
-	VSR_PHY_STAT		= 0x00, /* Phy Status */
-	VSR_PHY_MODE1		= 0x01, /* phy tx */
-	VSR_PHY_MODE2		= 0x02, /* tx scc */
-	VSR_PHY_MODE3		= 0x03, /* pll */
-	VSR_PHY_MODE4		= 0x04, /* VCO */
-	VSR_PHY_MODE5		= 0x05, /* Rx */
-	VSR_PHY_MODE6		= 0x06, /* CDR */
-	VSR_PHY_MODE7		= 0x07, /* Impedance */
-	VSR_PHY_MODE8		= 0x08, /* Voltage */
-	VSR_PHY_MODE9		= 0x09, /* Test */
-	VSR_PHY_MODE10		= 0x0A, /* Power */
-	VSR_PHY_MODE11		= 0x0B, /* Phy Mode */
-	VSR_PHY_VS0		= 0x0C, /* Vendor Specific 0 */
-	VSR_PHY_VS1		= 0x0D, /* Vendor Specific 1 */
-};
-
-enum pci_cfg_registers {
-	PCR_PHY_CTL	= 0x40,
-	PCR_PHY_CTL2	= 0x90,
-	PCR_DEV_CTRL	= 0xE8,
-};
-
-enum pci_cfg_register_bits {
-	PCTL_PWR_ON	= (0xFU << 24),
-	PCTL_OFF	= (0xFU << 12),
-	PRD_REQ_SIZE	= (0x4000),
-	PRD_REQ_MASK	= (0x00007000),
-};
-
-enum nvram_layout_offsets {
-	NVR_SIG		= 0x00,		/* 0xAA, 0x55 */
-	NVR_SAS_ADDR	= 0x02,		/* 8-byte SAS address */
-};
-
-enum chip_flavors {
-	chip_6320,
-	chip_6440,
-	chip_6480,
-};
-
-enum port_type {
-	PORT_TYPE_SAS	=  (1L << 1),
-	PORT_TYPE_SATA	=  (1L << 0),
-};
-
-/* Command Table Format */
-enum ct_format {
-	/* SSP */
-	SSP_F_H		=  0x00,
-	SSP_F_IU	=  0x18,
-	SSP_F_MAX	=  0x4D,
-	/* STP */
-	STP_CMD_FIS	=  0x00,
-	STP_ATAPI_CMD	=  0x40,
-	STP_F_MAX	=  0x10,
-	/* SMP */
-	SMP_F_T		=  0x00,
-	SMP_F_DEP	=  0x01,
-	SMP_F_MAX	=  0x101,
-};
-
-enum status_buffer {
-	SB_EIR_OFF	=  0x00,	/* Error Information Record */
-	SB_RFB_OFF	=  0x08,	/* Response Frame Buffer */
-	SB_RFB_MAX	=  0x400,	/* RFB size*/
-};
-
-enum error_info_rec {
-	CMD_ISS_STPD	= (1U << 31),	/* Cmd Issue Stopped */
-	CMD_PI_ERR	= (1U << 30),	/* Protection info error.  see flags2 */
-	RSP_OVER	= (1U << 29),	/* rsp buffer overflow */
-	RETRY_LIM	= (1U << 28),	/* FIS/frame retry limit exceeded */
-	UNK_FIS 	= (1U << 27),	/* unknown FIS */
-	DMA_TERM	= (1U << 26),	/* DMA terminate primitive rx'd */
-	SYNC_ERR	= (1U << 25),	/* SYNC rx'd during frame xmit */
-	TFILE_ERR	= (1U << 24),	/* SATA taskfile Error bit set */
-	R_ERR		= (1U << 23),	/* SATA returned R_ERR prim */
-	RD_OFS		= (1U << 20),	/* Read DATA frame invalid offset */
-	XFER_RDY_OFS	= (1U << 19),	/* XFER_RDY offset error */
-	UNEXP_XFER_RDY	= (1U << 18),	/* unexpected XFER_RDY error */
-	DATA_OVER_UNDER = (1U << 16),	/* data overflow/underflow */
-	INTERLOCK	= (1U << 15),	/* interlock error */
-	NAK		= (1U << 14),	/* NAK rx'd */
-	ACK_NAK_TO	= (1U << 13),	/* ACK/NAK timeout */
-	CXN_CLOSED	= (1U << 12),	/* cxn closed w/out ack/nak */
-	OPEN_TO 	= (1U << 11),	/* I_T nexus lost, open cxn timeout */
-	PATH_BLOCKED	= (1U << 10),	/* I_T nexus lost, pathway blocked */
-	NO_DEST 	= (1U << 9),	/* I_T nexus lost, no destination */
-	STP_RES_BSY	= (1U << 8),	/* STP resources busy */
-	BREAK		= (1U << 7),	/* break received */
-	BAD_DEST	= (1U << 6),	/* bad destination */
-	BAD_PROTO	= (1U << 5),	/* protocol not supported */
-	BAD_RATE	= (1U << 4),	/* cxn rate not supported */
-	WRONG_DEST	= (1U << 3),	/* wrong destination error */
-	CREDIT_TO	= (1U << 2),	/* credit timeout */
-	WDOG_TO 	= (1U << 1),	/* watchdog timeout */
-	BUF_PAR 	= (1U << 0),	/* buffer parity error */
-};
-
-enum error_info_rec_2 {
-	SLOT_BSY_ERR	= (1U << 31),	/* Slot Busy Error */
-	GRD_CHK_ERR	= (1U << 14),	/* Guard Check Error */
-	APP_CHK_ERR	= (1U << 13),	/* Application Check error */
-	REF_CHK_ERR	= (1U << 12),	/* Reference Check Error */
-	USR_BLK_NM	= (1U << 0),	/* User Block Number */
-};
-
-struct mvs_chip_info {
-	u32		n_phy;
-	u32		srs_sz;
-	u32		slot_width;
-};
-
-struct mvs_err_info {
-	__le32			flags;
-	__le32			flags2;
-};
-
-struct mvs_prd {
-	__le64			addr;		/* 64-bit buffer address */
-	__le32			reserved;
-	__le32			len;		/* 16-bit length */
-};
-
-struct mvs_cmd_hdr {
-	__le32			flags;		/* PRD tbl len; SAS, SATA ctl */
-	__le32			lens;		/* cmd, max resp frame len */
-	__le32			tags;		/* targ port xfer tag; tag */
-	__le32			data_len;	/* data xfer len */
-	__le64			cmd_tbl;	/* command table address */
-	__le64			open_frame;	/* open addr frame address */
-	__le64			status_buf;	/* status buffer address */
-	__le64			prd_tbl;	/* PRD tbl address */
-	__le32			reserved[4];
-};
-
-struct mvs_port {
-	struct asd_sas_port	sas_port;
-	u8			port_attached;
-	u8			taskfileset;
-	u8			wide_port_phymap;
-	struct list_head	list;
-};
-
-struct mvs_phy {
-	struct mvs_port		*port;
-	struct asd_sas_phy	sas_phy;
-	struct sas_identify	identify;
-	struct scsi_device	*sdev;
-	u64		dev_sas_addr;
-	u64		att_dev_sas_addr;
-	u32		att_dev_info;
-	u32		dev_info;
-	u32		phy_type;
-	u32		phy_status;
-	u32		irq_status;
-	u32		frame_rcvd_size;
-	u8		frame_rcvd[32];
-	u8		phy_attached;
-	enum sas_linkrate	minimum_linkrate;
-	enum sas_linkrate	maximum_linkrate;
-};
-
-struct mvs_slot_info {
-	struct list_head	list;
-	struct sas_task		*task;
-	u32			n_elem;
-	u32			tx;
-
-	/* DMA buffer for storing cmd tbl, open addr frame, status buffer,
-	 * and PRD table
-	 */
-	void			*buf;
-	dma_addr_t		buf_dma;
-#if _MV_DUMP
-	u32			cmd_size;
-#endif
-
-	void			*response;
-	struct mvs_port		*port;
-};
-
-struct mvs_info {
-	unsigned long		flags;
-
-	spinlock_t		lock;		/* host-wide lock */
-	struct pci_dev		*pdev;		/* our device */
-	void __iomem		*regs;		/* enhanced mode registers */
-	void __iomem		*peri_regs;	/* peripheral registers */
-
-	u8			sas_addr[SAS_ADDR_SIZE];
-	struct sas_ha_struct	sas;		/* SCSI/SAS glue */
-	struct Scsi_Host	*shost;
-
-	__le32			*tx;		/* TX (delivery) DMA ring */
-	dma_addr_t		tx_dma;
-	u32			tx_prod;	/* cached next-producer idx */
-
-	__le32			*rx;		/* RX (completion) DMA ring */
-	dma_addr_t		rx_dma;
-	u32			rx_cons;	/* RX consumer idx */
-
-	__le32			*rx_fis;	/* RX'd FIS area */
-	dma_addr_t		rx_fis_dma;
-
-	struct mvs_cmd_hdr	*slot;	/* DMA command header slots */
-	dma_addr_t		slot_dma;
-
-	const struct mvs_chip_info *chip;
-
-	u8			tags[MVS_SLOTS];
-	struct mvs_slot_info	slot_info[MVS_SLOTS];
-				/* further per-slot information */
-	struct mvs_phy		phy[MVS_MAX_PHYS];
-	struct mvs_port		port[MVS_MAX_PHYS];
-#ifdef MVS_USE_TASKLET
-	struct tasklet_struct	tasklet;
-#endif
-};
-
-static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
-			   void *funcdata);
-static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port);
-static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val);
-static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port);
-static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val);
-static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val);
-static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port);
-
-static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
-static void mvs_detect_porttype(struct mvs_info *mvi, int i);
-static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
-static void mvs_release_task(struct mvs_info *mvi, int phy_no);
-
-static int mvs_scan_finished(struct Scsi_Host *, unsigned long);
-static void mvs_scan_start(struct Scsi_Host *);
-static int mvs_slave_configure(struct scsi_device *sdev);
-
-static struct scsi_transport_template *mvs_stt;
-
-static const struct mvs_chip_info mvs_chips[] = {
-	[chip_6320] =		{ 2, 16, 9  },
-	[chip_6440] =		{ 4, 16, 9  },
-	[chip_6480] =		{ 8, 32, 10 },
-};
-
-static struct scsi_host_template mvs_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.queuecommand		= sas_queuecommand,
-	.target_alloc		= sas_target_alloc,
-	.slave_configure	= mvs_slave_configure,
-	.slave_destroy		= sas_slave_destroy,
-	.scan_finished		= mvs_scan_finished,
-	.scan_start		= mvs_scan_start,
-	.change_queue_depth	= sas_change_queue_depth,
-	.change_queue_type	= sas_change_queue_type,
-	.bios_param		= sas_bios_param,
-	.can_queue		= 1,
-	.cmd_per_lun		= 1,
-	.this_id		= -1,
-	.sg_tablesize		= SG_ALL,
-	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
-	.use_clustering		= ENABLE_CLUSTERING,
-	.eh_device_reset_handler	= sas_eh_device_reset_handler,
-	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
-	.slave_alloc		= sas_slave_alloc,
-	.target_destroy		= sas_target_destroy,
-	.ioctl			= sas_ioctl,
-};
-
-static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
-{
-	u32 i;
-	u32 run;
-	u32 offset;
-
-	offset = 0;
-	while (size) {
-		printk("%08X : ", baseaddr + offset);
-		if (size >= 16)
-			run = 16;
-		else
-			run = size;
-		size -= run;
-		for (i = 0; i < 16; i++) {
-			if (i < run)
-				printk("%02X ", (u32)data[i]);
-			else
-				printk("   ");
-		}
-		printk(": ");
-		for (i = 0; i < run; i++)
-			printk("%c", isalnum(data[i]) ? data[i] : '.');
-		printk("\n");
-		data = &data[16];
-		offset += run;
-	}
-	printk("\n");
-}
-
-#if _MV_DUMP
-static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
-				   enum sas_protocol proto)
-{
-	u32 offset;
-	struct pci_dev *pdev = mvi->pdev;
-	struct mvs_slot_info *slot = &mvi->slot_info[tag];
-
-	offset = slot->cmd_size + MVS_OAF_SZ +
-	    sizeof(struct mvs_prd) * slot->n_elem;
-	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n",
-			tag);
-	mvs_hexdump(32, (u8 *) slot->response,
-		    (u32) slot->buf_dma + offset);
-}
-#endif
-
-static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
-				enum sas_protocol proto)
-{
-#if _MV_DUMP
-	u32 sz, w_ptr;
-	u64 addr;
-	void __iomem *regs = mvi->regs;
-	struct pci_dev *pdev = mvi->pdev;
-	struct mvs_slot_info *slot = &mvi->slot_info[tag];
-
-	/*Delivery Queue */
-	sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
-	w_ptr = slot->tx;
-	addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO);
-	dev_printk(KERN_DEBUG, &pdev->dev,
-		"Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
-	dev_printk(KERN_DEBUG, &pdev->dev,
-		"Delivery Queue Base Address=0x%llX (PA)"
-		"(tx_dma=0x%llX), Entry=%04d\n",
-		addr, mvi->tx_dma, w_ptr);
-	mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
-			(u32) mvi->tx_dma + sizeof(u32) * w_ptr);
-	/*Command List */
-	addr = mvi->slot_dma;
-	dev_printk(KERN_DEBUG, &pdev->dev,
-		"Command List Base Address=0x%llX (PA)"
-		"(slot_dma=0x%llX), Header=%03d\n",
-		addr, slot->buf_dma, tag);
-	dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
-	/*mvs_cmd_hdr */
-	mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
-		(u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
-	/*1.command table area */
-	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n");
-	mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
-	/*2.open address frame area */
-	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n");
-	mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
-				(u32) slot->buf_dma + slot->cmd_size);
-	/*3.status buffer */
-	mvs_hba_sb_dump(mvi, tag, proto);
-	/*4.PRD table */
-	dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n");
-	mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem,
-		(u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
-		(u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
-#endif
-}
-
-static void mvs_hba_cq_dump(struct mvs_info *mvi)
-{
-#if (_MV_DUMP > 2)
-	u64 addr;
-	void __iomem *regs = mvi->regs;
-	struct pci_dev *pdev = mvi->pdev;
-	u32 entry = mvi->rx_cons + 1;
-	u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
-
-	/*Completion Queue */
-	addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
-	dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n",
-		   mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
-	dev_printk(KERN_DEBUG, &pdev->dev,
-		"Completion List Base Address=0x%llX (PA), "
-		"CQ_Entry=%04d, CQ_WP=0x%08X\n",
-		addr, entry - 1, mvi->rx[0]);
-	mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
-		    mvi->rx_dma + sizeof(u32) * entry);
-#endif
-}
-
-static void mvs_hba_interrupt_enable(struct mvs_info *mvi)
-{
-	void __iomem *regs = mvi->regs;
-	u32 tmp;
-
-	tmp = mr32(GBL_CTL);
-
-	mw32(GBL_CTL, tmp | INT_EN);
-}
-
-static void mvs_hba_interrupt_disable(struct mvs_info *mvi)
-{
-	void __iomem *regs = mvi->regs;
-	u32 tmp;
-
-	tmp = mr32(GBL_CTL);
-
-	mw32(GBL_CTL, tmp & ~INT_EN);
-}
-
-static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
-
-/* move to PCI layer or libata core? */
-static int pci_go_64(struct pci_dev *pdev)
-{
-	int rc;
-
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-		if (rc) {
-			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-			if (rc) {
-				dev_printk(KERN_ERR, &pdev->dev,
-					   "64-bit DMA enable failed\n");
-				return rc;
-			}
-		}
-	} else {
-		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (rc) {
-			dev_printk(KERN_ERR, &pdev->dev,
-				   "32-bit DMA enable failed\n");
-			return rc;
-		}
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (rc) {
-			dev_printk(KERN_ERR, &pdev->dev,
-				   "32-bit consistent DMA enable failed\n");
-			return rc;
-		}
-	}
-
-	return rc;
-}
-
-static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
-{
-	if (task->lldd_task) {
-		struct mvs_slot_info *slot;
-		slot = (struct mvs_slot_info *) task->lldd_task;
-		*tag = slot - mvi->slot_info;
-		return 1;
-	}
-	return 0;
-}
-
-static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
-{
-	void *bitmap = (void *) &mvi->tags;
-	clear_bit(tag, bitmap);
-}
-
-static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
-{
-	mvs_tag_clear(mvi, tag);
-}
-
-static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
-{
-	void *bitmap = (void *) &mvi->tags;
-	set_bit(tag, bitmap);
-}
-
-static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
-{
-	unsigned int index, tag;
-	void *bitmap = (void *) &mvi->tags;
-
-	index = find_first_zero_bit(bitmap, MVS_SLOTS);
-	tag = index;
-	if (tag >= MVS_SLOTS)
-		return -SAS_QUEUE_FULL;
-	mvs_tag_set(mvi, tag);
-	*tag_out = tag;
-	return 0;
-}
-
-static void mvs_tag_init(struct mvs_info *mvi)
-{
-	int i;
-	for (i = 0; i < MVS_SLOTS; ++i)
-		mvs_tag_clear(mvi, i);
-}
-
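The tag functions deleted above are the stock kernel bitmap-allocator idiom: find_first_zero_bit() locates a free slot, set_bit()/clear_bit() claim and release it. A standalone sketch with hypothetical ex_* names; callers must serialize, as mvsas did under mvi->lock:

#include <linux/bitops.h>

#define EX_SLOTS	512

static unsigned long ex_tags[BITS_TO_LONGS(EX_SLOTS)];

/* claim the first free tag; negative when all slots are busy */
static int ex_tag_alloc(void)
{
	unsigned int tag = find_first_zero_bit(ex_tags, EX_SLOTS);

	if (tag >= EX_SLOTS)
		return -EBUSY;
	set_bit(tag, ex_tags);
	return tag;
}

static void ex_tag_free(unsigned int tag)
{
	clear_bit(tag, ex_tags);
}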
-#ifndef MVS_DISABLE_NVRAM
-static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
-{
-	int timeout = 1000;
-
-	if (addr & ~SPI_ADDR_MASK)
-		return -EINVAL;
-
-	writel(addr, regs + SPI_CMD);
-	writel(TWSI_RD, regs + SPI_CTL);
-
-	while (timeout-- > 0) {
-		if (readl(regs + SPI_CTL) & TWSI_RDY) {
-			*data = readl(regs + SPI_DATA);
-			return 0;
-		}
-
-		udelay(10);
-	}
-
-	return -EBUSY;
-}
-
-static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
-			    void *buf, u32 buflen)
-{
-	u32 addr_end, tmp_addr, i, j;
-	u32 tmp = 0;
-	int rc;
-	u8 *tmp8, *buf8 = buf;
-
-	addr_end = addr + buflen;
-	tmp_addr = ALIGN(addr, 4);
-	if (addr > 0xff)
-		return -EINVAL;
-
-	j = addr & 0x3;
-	if (j) {
-		rc = mvs_eep_read(regs, tmp_addr, &tmp);
-		if (rc)
-			return rc;
-
-		tmp8 = (u8 *)&tmp;
-		for (i = j; i < 4; i++)
-			*buf8++ = tmp8[i];
-
-		tmp_addr += 4;
-	}
-
-	for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
-		rc = mvs_eep_read(regs, tmp_addr, &tmp);
-		if (rc)
-			return rc;
-
-		memcpy(buf8, &tmp, 4);
-		buf8 += 4;
-	}
-
-	if (tmp_addr < addr_end) {
-		rc = mvs_eep_read(regs, tmp_addr, &tmp);
-		if (rc)
-			return rc;
-
-		tmp8 = (u8 *)&tmp;
-		j = addr_end - tmp_addr;
-		for (i = 0; i < j; i++)
-			*buf8++ = tmp8[i];
-
-		tmp_addr += 4;
-	}
-
-	return 0;
-}
-#endif
-
-static int mvs_nvram_read(struct mvs_info *mvi, u32 addr,
-			  void *buf, u32 buflen)
-{
-#ifndef MVS_DISABLE_NVRAM
-	void __iomem *regs = mvi->regs;
-	int rc, i;
-	u32 sum;
-	u8 hdr[2], *tmp;
-	const char *msg;
-
-	rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
-	if (rc) {
-		msg = "nvram hdr read failed";
-		goto err_out;
-	}
-	rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
-	if (rc) {
-		msg = "nvram read failed";
-		goto err_out;
-	}
-
-	if (hdr[0] != 0x5A) {
-		/* entry id */
-		msg = "invalid nvram entry id";
-		rc = -ENOENT;
-		goto err_out;
-	}
-
-	tmp = buf;
-	sum = ((u32)hdr[0]) + ((u32)hdr[1]);
-	for (i = 0; i < buflen; i++)
-		sum += ((u32)tmp[i]);
-
-	if (sum) {
-		msg = "nvram checksum failure";
-		rc = -EILSEQ;
-		goto err_out;
-	}
-
-	return 0;
-
-err_out:
-	dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg);
-	return rc;
-#else
-	/* FIXME , For SAS target mode */
-	memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8);
-	return 0;
-#endif
-}
-
-static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
-{
-	struct mvs_phy *phy = &mvi->phy[i];
-	struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
-
-	if (!phy->phy_attached)
-		return;
-
-	if (sas_phy->phy) {
-		struct sas_phy *sphy = sas_phy->phy;
-
-		sphy->negotiated_linkrate = sas_phy->linkrate;
-		sphy->minimum_linkrate = phy->minimum_linkrate;
-		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
-		sphy->maximum_linkrate = phy->maximum_linkrate;
-		sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
-	}
-
-	if (phy->phy_type & PORT_TYPE_SAS) {
-		struct sas_identify_frame *id;
-
-		id = (struct sas_identify_frame *)phy->frame_rcvd;
-		id->dev_type = phy->identify.device_type;
-		id->initiator_bits = SAS_PROTOCOL_ALL;
-		id->target_bits = phy->identify.target_port_protocols;
-	} else if (phy->phy_type & PORT_TYPE_SATA) {
-		/* TODO */
-	}
-	mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size;
-	mvi->sas.notify_port_event(mvi->sas.sas_phy[i],
-				   PORTE_BYTES_DMAED);
-}
-
-static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
-{
-	/* give the phy enabling interrupt event time to come in (1s
-	 * is empirically about all it takes) */
-	if (time < HZ)
-		return 0;
-	/* Wait for discovery to finish */
-	scsi_flush_work(shost);
-	return 1;
-}
-
-static void mvs_scan_start(struct Scsi_Host *shost)
-{
-	int i;
-	struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha;
-
-	for (i = 0; i < mvi->chip->n_phy; ++i) {
-		mvs_bytes_dmaed(mvi, i);
-	}
-}
-
-static int mvs_slave_configure(struct scsi_device *sdev)
-{
-	struct domain_device *dev = sdev_to_domain_dev(sdev);
-	int ret = sas_slave_configure(sdev);
-
-	if (ret)
-		return ret;
-
-	if (dev_is_sata(dev)) {
-		/* struct ata_port *ap = dev->sata_dev.ap; */
-		/* struct ata_device *adev = ap->link.device; */
-
-		/* clamp at no NCQ for the time being */
-		/* adev->flags |= ATA_DFLAG_NCQ_OFF; */
-		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
-	}
-	return 0;
-}
-
-static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
-{
-	struct pci_dev *pdev = mvi->pdev;
-	struct sas_ha_struct *sas_ha = &mvi->sas;
-	struct mvs_phy *phy = &mvi->phy[phy_no];
-	struct asd_sas_phy *sas_phy = &phy->sas_phy;
-
-	phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no);
-	/*
-	* events now carries the port event; check the per-port
-	* interrupt status it refers to.
-	*/
-	dev_printk(KERN_DEBUG, &pdev->dev,
-		"Port %d Event = %X\n",
-		phy_no, phy->irq_status);
-
-	if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
-		mvs_release_task(mvi, phy_no);
-		if (!mvs_is_phy_ready(mvi, phy_no)) {
-			sas_phy_disconnected(sas_phy);
-			sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
-			dev_printk(KERN_INFO, &pdev->dev,
-				"Port %d Unplug Notice\n", phy_no);
-
-		} else
-			mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
-	}
-	if (!(phy->irq_status & PHYEV_DEC_ERR)) {
-		if (phy->irq_status & PHYEV_COMWAKE) {
-			u32 tmp = mvs_read_port_irq_mask(mvi, phy_no);
-			mvs_write_port_irq_mask(mvi, phy_no,
-						tmp | PHYEV_SIG_FIS);
-		}
-		if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
-			phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
-			if (phy->phy_status) {
-				mvs_detect_porttype(mvi, phy_no);
-
-				if (phy->phy_type & PORT_TYPE_SATA) {
-					u32 tmp = mvs_read_port_irq_mask(mvi,
-								phy_no);
-					tmp &= ~PHYEV_SIG_FIS;
-					mvs_write_port_irq_mask(mvi,
-								phy_no, tmp);
-				}
-
-				mvs_update_phyinfo(mvi, phy_no, 0);
-				sas_ha->notify_phy_event(sas_phy,
-							PHYE_OOB_DONE);
-				mvs_bytes_dmaed(mvi, phy_no);
-			} else {
-				dev_printk(KERN_DEBUG, &pdev->dev,
-					"plugin interrupt but phy is gone\n");
-				mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
-							NULL);
-			}
-		} else if (phy->irq_status & PHYEV_BROAD_CH) {
-			mvs_release_task(mvi, phy_no);
-			sas_ha->notify_port_event(sas_phy,
-						PORTE_BROADCAST_RCVD);
-		}
-	}
-	mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status);
-}
-
-static void mvs_int_sata(struct mvs_info *mvi)
-{
-	u32 tmp;
-	void __iomem *regs = mvi->regs;
-	tmp = mr32(INT_STAT_SRS);
-	mw32(INT_STAT_SRS, tmp & 0xFFFF);
-}
-
-static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task,
-				u32 slot_idx)
-{
-	void __iomem *regs = mvi->regs;
-	struct domain_device *dev = task->dev;
-	struct asd_sas_port *sas_port = dev->port;
-	struct mvs_port *port = mvi->slot_info[slot_idx].port;
-	u32 reg_set, phy_mask;
-
-	if (!sas_protocol_ata(task->task_proto)) {
-		reg_set = 0;
-		phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
-				sas_port->phy_mask;
-	} else {
-		reg_set = port->taskfileset;
-		phy_mask = sas_port->phy_mask;
-	}
-	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx |
-					(TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) |
-					(phy_mask << TXQ_PHY_SHIFT) |
-					(reg_set << TXQ_SRS_SHIFT));
-
-	mw32(TX_PROD_IDX, mvi->tx_prod);
-	mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
-}
-
-static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
-			u32 slot_idx, int err)
-{
-	struct mvs_port *port = mvi->slot_info[slot_idx].port;
-	struct task_status_struct *tstat = &task->task_status;
-	struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
-	int stat = SAM_GOOD;
-
-	resp->frame_len = sizeof(struct dev_to_host_fis);
-	memcpy(&resp->ending_fis[0],
-	       SATA_RECEIVED_D2H_FIS(port->taskfileset),
-	       sizeof(struct dev_to_host_fis));
-	tstat->buf_valid_size = sizeof(*resp);
-	if (unlikely(err))
-		stat = SAS_PROTO_RESPONSE;
-	return stat;
-}
-
-static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
-{
-	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
-	mvs_tag_clear(mvi, slot_idx);
-}
-
-static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
-			  struct mvs_slot_info *slot, u32 slot_idx)
-{
-	if (!sas_protocol_ata(task->task_proto))
-		if (slot->n_elem)
-			pci_unmap_sg(mvi->pdev, task->scatter,
-				     slot->n_elem, task->data_dir);
-
-	switch (task->task_proto) {
-	case SAS_PROTOCOL_SMP:
-		pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
-			     PCI_DMA_FROMDEVICE);
-		pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
-			     PCI_DMA_TODEVICE);
-		break;
-
-	case SAS_PROTOCOL_SATA:
-	case SAS_PROTOCOL_STP:
-	case SAS_PROTOCOL_SSP:
-	default:
-		/* do nothing */
-		break;
-	}
-	list_del(&slot->list);
-	task->lldd_task = NULL;
-	slot->task = NULL;
-	slot->port = NULL;
-}
-
-static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
-			 u32 slot_idx)
-{
-	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
-	u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
-	u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4));
-	int stat = SAM_CHECK_COND;
-
-	if (err_dw1 & SLOT_BSY_ERR) {
-		stat = SAS_QUEUE_FULL;
-		mvs_slot_reset(mvi, task, slot_idx);
-	}
-	switch (task->task_proto) {
-	case SAS_PROTOCOL_SSP:
-		break;
-	case SAS_PROTOCOL_SMP:
-		break;
-	case SAS_PROTOCOL_SATA:
-	case SAS_PROTOCOL_STP:
-	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
-		if (err_dw0 & TFILE_ERR)
-			stat = mvs_sata_done(mvi, task, slot_idx, 1);
-		break;
-	default:
-		break;
-	}
-
-	mvs_hexdump(16, (u8 *) slot->response, 0);
-	return stat;
-}
-
-static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
-{
-	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
-	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
-	struct sas_task *task = slot->task;
-	struct task_status_struct *tstat;
-	struct mvs_port *port;
-	bool aborted;
-	void *to;
-
-	if (unlikely(!task || !task->lldd_task))
-		return -1;
-
-	mvs_hba_cq_dump(mvi);
-
-	spin_lock(&task->task_state_lock);
-	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
-	if (!aborted) {
-		task->task_state_flags &=
-		    ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
-		task->task_state_flags |= SAS_TASK_STATE_DONE;
-	}
-	spin_unlock(&task->task_state_lock);
-
-	if (aborted) {
-		mvs_slot_task_free(mvi, task, slot, slot_idx);
-		mvs_slot_free(mvi, rx_desc);
-		return -1;
-	}
-
-	port = slot->port;
-	tstat = &task->task_status;
-	memset(tstat, 0, sizeof(*tstat));
-	tstat->resp = SAS_TASK_COMPLETE;
-
-	if (unlikely(!port->port_attached || flags)) {
-		mvs_slot_err(mvi, task, slot_idx);
-		if (!sas_protocol_ata(task->task_proto))
-			tstat->stat = SAS_PHY_DOWN;
-		goto out;
-	}
-
-	/* error info record present */
-	if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
-		tstat->stat = mvs_slot_err(mvi, task, slot_idx);
-		goto out;
-	}
-
-	switch (task->task_proto) {
-	case SAS_PROTOCOL_SSP:
-		/* hw says status == 0, datapres == 0 */
-		if (rx_desc & RXQ_GOOD) {
-			tstat->stat = SAM_GOOD;
-			tstat->resp = SAS_TASK_COMPLETE;
-		}
-		/* response frame present */
-		else if (rx_desc & RXQ_RSP) {
-			struct ssp_response_iu *iu =
-			    slot->response + sizeof(struct mvs_err_info);
-			sas_ssp_task_response(&mvi->pdev->dev, task, iu);
-		}
-
-		/* should never happen? */
-		else
-			tstat->stat = SAM_CHECK_COND;
-		break;
-
-	case SAS_PROTOCOL_SMP: {
-			struct scatterlist *sg_resp = &task->smp_task.smp_resp;
-			tstat->stat = SAM_GOOD;
-			to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
-			memcpy(to + sg_resp->offset,
-				slot->response + sizeof(struct mvs_err_info),
-				sg_dma_len(sg_resp));
-			kunmap_atomic(to, KM_IRQ0);
-			break;
-		}
-
-	case SAS_PROTOCOL_SATA:
-	case SAS_PROTOCOL_STP:
-	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
-			tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
-			break;
-		}
-
-	default:
-		tstat->stat = SAM_CHECK_COND;
-		break;
-	}
-
-out:
-	mvs_slot_task_free(mvi, task, slot, slot_idx);
-	if (unlikely(tstat->stat != SAS_QUEUE_FULL))
-		mvs_slot_free(mvi, rx_desc);
-
-	spin_unlock(&mvi->lock);
-	task->task_done(task);
-	spin_lock(&mvi->lock);
-	return tstat->stat;
-}
-
-static void mvs_release_task(struct mvs_info *mvi, int phy_no)
-{
-	struct list_head *pos, *n;
-	struct mvs_slot_info *slot;
-	struct mvs_phy *phy = &mvi->phy[phy_no];
-	struct mvs_port *port = phy->port;
-	u32 rx_desc;
-
-	if (!port)
-		return;
-
-	list_for_each_safe(pos, n, &port->list) {
-		slot = container_of(pos, struct mvs_slot_info, list);
-		rx_desc = (u32) (slot - mvi->slot_info);
-		mvs_slot_complete(mvi, rx_desc, 1);
-	}
-}
-
-static void mvs_int_full(struct mvs_info *mvi)
-{
-	void __iomem *regs = mvi->regs;
-	u32 tmp, stat;
-	int i;
-
-	stat = mr32(INT_STAT);
-
-	mvs_int_rx(mvi, false);
-
-	for (i = 0; i < MVS_MAX_PORTS; i++) {
-		tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
-		if (tmp)
-			mvs_int_port(mvi, i, tmp);
-	}
-
-	if (stat & CINT_SRS)
-		mvs_int_sata(mvi);
-
-	mw32(INT_STAT, stat);
-}
-
-static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
-{
-	void __iomem *regs = mvi->regs;
-	u32 rx_prod_idx, rx_desc;
-	bool attn = false;
-	struct pci_dev *pdev = mvi->pdev;
-
-	/* the first dword in the RX ring is special: it contains
-	 * a mirror of the hardware's RX producer index, so that
-	 * we don't have to stall the CPU reading that register.
-	 * The actual RX ring is offset by one dword, due to this.
-	 */
-	rx_prod_idx = mvi->rx_cons;
-	mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
-	if (mvi->rx_cons == 0xfff)	/* h/w hasn't touched RX ring yet */
-		return 0;
-
-	/* The completion queue update may arrive late; read the register
-	* and try again.  Note: if interrupt coalescing is enabled, the
-	* register must be read every time.
-	*/
-	if (mvi->rx_cons == rx_prod_idx)
-		mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;
-
-	if (mvi->rx_cons == rx_prod_idx)
-		return 0;
-
-	while (mvi->rx_cons != rx_prod_idx) {
-
-		/* increment our internal RX consumer pointer */
-		rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
-
-		rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
-
-		if (likely(rx_desc & RXQ_DONE))
-			mvs_slot_complete(mvi, rx_desc, 0);
-		if (rx_desc & RXQ_ATTN) {
-			attn = true;
-			dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
-				rx_desc);
-		} else if (rx_desc & RXQ_ERR) {
-			if (!(rx_desc & RXQ_DONE))
-				mvs_slot_complete(mvi, rx_desc, 0);
-			dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
-				rx_desc);
-		} else if (rx_desc & RXQ_SLOT_RESET) {
-			dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n",
-				rx_desc);
-			mvs_slot_free(mvi, rx_desc);
-		}
-	}
-
-	if (attn && self_clear)
-		mvs_int_full(mvi);
-
-	return 0;
-}
-
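The mvs_int_rx() loop above leans on the trick its opening comment describes: the controller mirrors its RX producer index into dword 0 of the completion ring, so the CPU polls coherent DMA memory instead of stalling on an MMIO read. A minimal sketch of that consumer loop, with hypothetical ex_* names and a power-of-two ring:

#include <linux/kernel.h>

#define EX_RING_SZ	1024		/* must be a power of two */

static void ex_handle_desc(u32 desc);	/* hypothetical completion hook */

/* ring[0] mirrors the hardware producer index; descriptors start at
 * ring[1], so entry i lives at ring[i + 1] */
static void ex_poll_rx(__le32 *ring, u32 *cons)
{
	u32 prod = le32_to_cpu(ring[0]) & (EX_RING_SZ - 1);

	while (*cons != prod) {
		ex_handle_desc(le32_to_cpu(ring[*cons + 1]));
		*cons = (*cons + 1) & (EX_RING_SZ - 1);
	}
}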
-#ifdef MVS_USE_TASKLET
-static void mvs_tasklet(unsigned long data)
-{
-	struct mvs_info *mvi = (struct mvs_info *) data;
-	unsigned long flags;
-
-	spin_lock_irqsave(&mvi->lock, flags);
-
-#ifdef MVS_DISABLE_MSI
-	mvs_int_full(mvi);
-#else
-	mvs_int_rx(mvi, true);
-#endif
-	spin_unlock_irqrestore(&mvi->lock, flags);
-}
-#endif
-
-static irqreturn_t mvs_interrupt(int irq, void *opaque)
-{
-	struct mvs_info *mvi = opaque;
-	void __iomem *regs = mvi->regs;
-	u32 stat;
-
-	stat = mr32(GBL_INT_STAT);
-
-	if (stat == 0 || stat == 0xffffffff)
-		return IRQ_NONE;
-
-	/* clear CMD_CMPLT ASAP */
-	mw32_f(INT_STAT, CINT_DONE);
-
-#ifndef MVS_USE_TASKLET
-	spin_lock(&mvi->lock);
-
-	mvs_int_full(mvi);
-
-	spin_unlock(&mvi->lock);
-#else
-	tasklet_schedule(&mvi->tasklet);
-#endif
-	return IRQ_HANDLED;
-}
-
-#ifndef MVS_DISABLE_MSI
-static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
-{
-	struct mvs_info *mvi = opaque;
-
-#ifndef MVS_USE_TASKLET
-	spin_lock(&mvi->lock);
-
-	mvs_int_rx(mvi, true);
-
-	spin_unlock(&mvi->lock);
-#else
-	tasklet_schedule(&mvi->tasklet);
-#endif
-	return IRQ_HANDLED;
-}
-#endif
-
-struct mvs_task_exec_info {
-	struct sas_task *task;
-	struct mvs_cmd_hdr *hdr;
-	struct mvs_port *port;
-	u32 tag;
-	int n_elem;
-};
-
-static int mvs_task_prep_smp(struct mvs_info *mvi,
-			     struct mvs_task_exec_info *tei)
-{
-	int elem, rc, i;
-	struct sas_task *task = tei->task;
-	struct mvs_cmd_hdr *hdr = tei->hdr;
-	struct scatterlist *sg_req, *sg_resp;
-	u32 req_len, resp_len, tag = tei->tag;
-	void *buf_tmp;
-	u8 *buf_oaf;
-	dma_addr_t buf_tmp_dma;
-	struct mvs_prd *buf_prd;
-	struct scatterlist *sg;
-	struct mvs_slot_info *slot = &mvi->slot_info[tag];
-	struct asd_sas_port *sas_port = task->dev->port;
-	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
-#if _MV_DUMP
-	u8 *buf_cmd;
-	void *from;
-#endif
-	/*
-	 * DMA-map SMP request, response buffers
-	 */
-	sg_req = &task->smp_task.smp_req;
-	elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
-	if (!elem)
-		return -ENOMEM;
-	req_len = sg_dma_len(sg_req);
-
-	sg_resp = &task->smp_task.smp_resp;
-	elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
-	if (!elem) {
-		rc = -ENOMEM;
-		goto err_out;
-	}
-	resp_len = sg_dma_len(sg_resp);
-
-	/* must be in dwords */
-	if ((req_len & 0x3) || (resp_len & 0x3)) {
-		rc = -EINVAL;
-		goto err_out_2;
-	}
-
-	/*
-	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
-	 */
-
-	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
-	buf_tmp = slot->buf;
-	buf_tmp_dma = slot->buf_dma;
-
-#if _MV_DUMP
-	buf_cmd = buf_tmp;
-	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
-	buf_tmp += req_len;
-	buf_tmp_dma += req_len;
-	slot->cmd_size = req_len;
-#else
-	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
-#endif
-
-	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
-	buf_oaf = buf_tmp;
-	hdr->open_frame = cpu_to_le64(buf_tmp_dma);
-
-	buf_tmp += MVS_OAF_SZ;
-	buf_tmp_dma += MVS_OAF_SZ;
-
-	/* region 3: PRD table ********************************************* */
-	buf_prd = buf_tmp;
-	if (tei->n_elem)
-		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
-	else
-		hdr->prd_tbl = 0;
-
-	i = sizeof(struct mvs_prd) * tei->n_elem;
-	buf_tmp += i;
-	buf_tmp_dma += i;
-
-	/* region 4: status buffer (the larger the PRD, the smaller this buf) */
-	slot->response = buf_tmp;
-	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
-
-	/*
-	 * Fill in TX ring and command slot header
-	 */
-	slot->tx = mvi->tx_prod;
-	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
-					TXQ_MODE_I | tag |
-					(sas_port->phy_mask << TXQ_PHY_SHIFT));
-
-	hdr->flags |= flags;
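-	/* lens: high 16 bits hold the response length in dwords; the low
-	 * 16 bits hold the request length in dwords minus one trailing
-	 * dword (presumably the CRC)
-	 */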
-	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
-	hdr->tags = cpu_to_le32(tag);
-	hdr->data_len = 0;
-
-	/* generate open address frame hdr (first 12 bytes) */
-	buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */
-	buf_oaf[1] = task->dev->linkrate & 0xf;
-	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
-	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
-
-	/* fill in PRD (scatter/gather) table, if any */
-	for_each_sg(task->scatter, sg, tei->n_elem, i) {
-		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
-		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
-		buf_prd++;
-	}
-
-#if _MV_DUMP
-	/* copy cmd table */
-	from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
-	memcpy(buf_cmd, from + sg_req->offset, req_len);
-	kunmap_atomic(from, KM_IRQ0);
-#endif
-	return 0;
-
-err_out_2:
-	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1,
-		     PCI_DMA_FROMDEVICE);
-err_out:
-	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1,
-		     PCI_DMA_TODEVICE);
-	return rc;
-}
-
-static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port)
-{
-	void __iomem *regs = mvi->regs;
-	u32 tmp, offs;
-	u8 *tfs = &port->taskfileset;
-
-	if (*tfs == MVS_ID_NOT_MAPPED)
-		return;
-
-	offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
-	if (*tfs < 16) {
-		tmp = mr32(PCS);
-		mw32(PCS, tmp & ~offs);
-	} else {
-		tmp = mr32(CTL);
-		mw32(CTL, tmp & ~offs);
-	}
-
-	tmp = mr32(INT_STAT_SRS) & (1U << *tfs);
-	if (tmp)
-		mw32(INT_STAT_SRS, tmp);
-
-	*tfs = MVS_ID_NOT_MAPPED;
-}
-
-static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port)
-{
-	int i;
-	u32 tmp, offs;
-	void __iomem *regs = mvi->regs;
-
-	if (port->taskfileset != MVS_ID_NOT_MAPPED)
-		return 0;
-
-	tmp = mr32(PCS);
-
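-	/* SATA register sets 0..15 are enabled via PCS, 16 and up via CTL */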
-	for (i = 0; i < mvi->chip->srs_sz; i++) {
-		if (i == 16)
-			tmp = mr32(CTL);
-		offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
-		if (!(tmp & offs)) {
-			port->taskfileset = i;
-
-			if (i < 16)
-				mw32(PCS, tmp | offs);
-			else
-				mw32(CTL, tmp | offs);
-			tmp = mr32(INT_STAT_SRS) & (1U << i);
-			if (tmp)
-				mw32(INT_STAT_SRS, tmp);
-			return 0;
-		}
-	}
-	return MVS_ID_NOT_MAPPED;
-}
-
-static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
-{
-	struct ata_queued_cmd *qc = task->uldd_task;
-
-	if (qc) {
-		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
-			qc->tf.command == ATA_CMD_FPDMA_READ) {
-			*tag = qc->tag;
-			return 1;
-		}
-	}
-
-	return 0;
-}
-
-static int mvs_task_prep_ata(struct mvs_info *mvi,
-			     struct mvs_task_exec_info *tei)
-{
-	struct sas_task *task = tei->task;
-	struct domain_device *dev = task->dev;
-	struct mvs_cmd_hdr *hdr = tei->hdr;
-	struct asd_sas_port *sas_port = dev->port;
-	struct mvs_slot_info *slot;
-	struct scatterlist *sg;
-	struct mvs_prd *buf_prd;
-	struct mvs_port *port = tei->port;
-	u32 tag = tei->tag;
-	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
-	void *buf_tmp;
-	u8 *buf_cmd, *buf_oaf;
-	dma_addr_t buf_tmp_dma;
-	u32 i, req_len, resp_len;
-	const u32 max_resp_len = SB_RFB_MAX;
-
-	if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
-		return -EBUSY;
-
-	slot = &mvi->slot_info[tag];
-	slot->tx = mvi->tx_prod;
-	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
-					(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
-					(sas_port->phy_mask << TXQ_PHY_SHIFT) |
-					(port->taskfileset << TXQ_SRS_SHIFT));
-
-	if (task->ata_task.use_ncq)
-		flags |= MCH_FPDMA;
-	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
-		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
-			flags |= MCH_ATAPI;
-	}
-
-	/* FIXME: fill in port multiplier number */
-
-	hdr->flags = cpu_to_le32(flags);
-
-	/* FIXME: the low-order 5 bits carry the TAG when NCQ is enabled */
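-	/* the NCQ tag is carried in bits 7:3 of the FIS sector count field */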
-	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags))
-		task->ata_task.fis.sector_count |= hdr->tags << 3;
-	else
-		hdr->tags = cpu_to_le32(tag);
-	hdr->data_len = cpu_to_le32(task->total_xfer_len);
-
-	/*
-	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
-	 */
-
-	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
-	buf_cmd = buf_tmp = slot->buf;
-	buf_tmp_dma = slot->buf_dma;
-
-	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
-
-	buf_tmp += MVS_ATA_CMD_SZ;
-	buf_tmp_dma += MVS_ATA_CMD_SZ;
-#if _MV_DUMP
-	slot->cmd_size = MVS_ATA_CMD_SZ;
-#endif
-
-	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
-	/* used for STP.  unused for SATA? */
-	buf_oaf = buf_tmp;
-	hdr->open_frame = cpu_to_le64(buf_tmp_dma);
-
-	buf_tmp += MVS_OAF_SZ;
-	buf_tmp_dma += MVS_OAF_SZ;
-
-	/* region 3: PRD table ********************************************* */
-	buf_prd = buf_tmp;
-	if (tei->n_elem)
-		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
-	else
-		hdr->prd_tbl = 0;
-
-	i = sizeof(struct mvs_prd) * tei->n_elem;
-	buf_tmp += i;
-	buf_tmp_dma += i;
-
-	/* region 4: status buffer (the larger the PRD, the smaller this buf) */
-	/* FIXME: probably unused, for SATA.  kept here just in case
-	 * we get a STP/SATA error information record
-	 */
-	slot->response = buf_tmp;
-	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
-
-	req_len = sizeof(struct host_to_dev_fis);
-	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
-	    sizeof(struct mvs_err_info) - i;
-
-	/* request, response lengths */
-	resp_len = min(resp_len, max_resp_len);
-	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
-
-	task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
-	/* fill in command FIS and ATAPI CDB */
-	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
-	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
-		memcpy(buf_cmd + STP_ATAPI_CMD,
-			task->ata_task.atapi_packet, 16);
-
-	/* generate open address frame hdr (first 12 bytes) */
-	buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1;	/* initiator, STP, ftype 1h */
-	buf_oaf[1] = task->dev->linkrate & 0xf;
-	*(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
-	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
-
-	/* fill in PRD (scatter/gather) table, if any */
-	for_each_sg(task->scatter, sg, tei->n_elem, i) {
-		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
-		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
-		buf_prd++;
-	}
-
-	return 0;
-}
-
-static int mvs_task_prep_ssp(struct mvs_info *mvi,
-			     struct mvs_task_exec_info *tei)
-{
-	struct sas_task *task = tei->task;
-	struct mvs_cmd_hdr *hdr = tei->hdr;
-	struct mvs_port *port = tei->port;
-	struct mvs_slot_info *slot;
-	struct scatterlist *sg;
-	struct mvs_prd *buf_prd;
-	struct ssp_frame_hdr *ssp_hdr;
-	void *buf_tmp;
-	u8 *buf_cmd, *buf_oaf, fburst = 0;
-	dma_addr_t buf_tmp_dma;
-	u32 flags;
-	u32 resp_len, req_len, i, tag = tei->tag;
-	const u32 max_resp_len = SB_RFB_MAX;
-	u8 phy_mask;
-
-	slot = &mvi->slot_info[tag];
-
-	phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
-		task->dev->port->phy_mask;
-	slot->tx = mvi->tx_prod;
-	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
-				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
-				(phy_mask << TXQ_PHY_SHIFT));
-
-	flags = MCH_RETRY;
-	if (task->ssp_task.enable_first_burst) {
-		flags |= MCH_FBURST;
-		fburst = (1 << 7);
-	}
-	hdr->flags = cpu_to_le32(flags |
-				 (tei->n_elem << MCH_PRD_LEN_SHIFT) |
-				 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));
-
-	hdr->tags = cpu_to_le32(tag);
-	hdr->data_len = cpu_to_le32(task->total_xfer_len);
-
-	/*
-	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
-	 */
-
-	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
-	buf_cmd = buf_tmp = slot->buf;
-	buf_tmp_dma = slot->buf_dma;
-
-	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
-
-	buf_tmp += MVS_SSP_CMD_SZ;
-	buf_tmp_dma += MVS_SSP_CMD_SZ;
-#if _MV_DUMP
-	slot->cmd_size = MVS_SSP_CMD_SZ;
-#endif
-
-	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
-	buf_oaf = buf_tmp;
-	hdr->open_frame = cpu_to_le64(buf_tmp_dma);
-
-	buf_tmp += MVS_OAF_SZ;
-	buf_tmp_dma += MVS_OAF_SZ;
-
-	/* region 3: PRD table ********************************************* */
-	buf_prd = buf_tmp;
-	if (tei->n_elem)
-		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
-	else
-		hdr->prd_tbl = 0;
-
-	i = sizeof(struct mvs_prd) * tei->n_elem;
-	buf_tmp += i;
-	buf_tmp_dma += i;
-
-	/* region 4: status buffer (the larger the PRD, the smaller this buf) */
-	slot->response = buf_tmp;
-	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
-
-	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
-	    sizeof(struct mvs_err_info) - i;
-	resp_len = min(resp_len, max_resp_len);
-
-	req_len = sizeof(struct ssp_frame_hdr) + 28;
-
-	/* request, response lengths */
-	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
-
-	/* generate open address frame hdr (first 12 bytes) */
-	buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1;	/* initiator, SSP, ftype 1h */
-	buf_oaf[1] = task->dev->linkrate & 0xf;
-	*(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
-	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
-
-	/* fill in SSP frame header (Command Table.SSP frame header) */
-	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
-	ssp_hdr->frame_type = SSP_COMMAND;
-	memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
-	       HASHED_SAS_ADDR_SIZE);
-	memcpy(ssp_hdr->hashed_src_addr,
-	       task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
-	ssp_hdr->tag = cpu_to_be16(tag);
-
-	/* fill in command frame IU */
-	buf_cmd += sizeof(*ssp_hdr);
-	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
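-	/* byte 9 of the command IU: first-burst flag, task attribute and
-	 * task priority
-	 */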
-	buf_cmd[9] = fburst | task->ssp_task.task_attr |
-			(task->ssp_task.task_prio << 3);
-	memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
-
-	/* fill in PRD (scatter/gather) table, if any */
-	for_each_sg(task->scatter, sg, tei->n_elem, i) {
-		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
-		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
-		buf_prd++;
-	}
-
-	return 0;
-}
-
-static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
-{
-	struct domain_device *dev = task->dev;
-	struct mvs_info *mvi = dev->port->ha->lldd_ha;
-	struct pci_dev *pdev = mvi->pdev;
-	void __iomem *regs = mvi->regs;
-	struct mvs_task_exec_info tei;
-	struct sas_task *t = task;
-	struct mvs_slot_info *slot;
-	u32 tag = 0xdeadbeef, rc, n_elem = 0;
-	unsigned long flags;
-	u32 n = num, pass = 0;
-
-	spin_lock_irqsave(&mvi->lock, flags);
-	do {
-		dev = t->dev;
-		tei.port = &mvi->port[dev->port->id];
-
-		if (!tei.port->port_attached) {
-			if (sas_protocol_ata(t->task_proto)) {
-				rc = SAS_PHY_DOWN;
-				goto out_done;
-			} else {
-				struct task_status_struct *ts = &t->task_status;
-				ts->resp = SAS_TASK_UNDELIVERED;
-				ts->stat = SAS_PHY_DOWN;
-				t->task_done(t);
-				if (n > 1)
-					t = list_entry(t->list.next,
-							struct sas_task, list);
-				continue;
-			}
-		}
-
-		if (!sas_protocol_ata(t->task_proto)) {
-			if (t->num_scatter) {
-				n_elem = pci_map_sg(mvi->pdev, t->scatter,
-						    t->num_scatter,
-						    t->data_dir);
-				if (!n_elem) {
-					rc = -ENOMEM;
-					goto err_out;
-				}
-			}
-		} else {
-			n_elem = t->num_scatter;
-		}
-
-		rc = mvs_tag_alloc(mvi, &tag);
-		if (rc)
-			goto err_out;
-
-		slot = &mvi->slot_info[tag];
-		t->lldd_task = NULL;
-		slot->n_elem = n_elem;
-		memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
-		tei.task = t;
-		tei.hdr = &mvi->slot[tag];
-		tei.tag = tag;
-		tei.n_elem = n_elem;
-
-		switch (t->task_proto) {
-		case SAS_PROTOCOL_SMP:
-			rc = mvs_task_prep_smp(mvi, &tei);
-			break;
-		case SAS_PROTOCOL_SSP:
-			rc = mvs_task_prep_ssp(mvi, &tei);
-			break;
-		case SAS_PROTOCOL_SATA:
-		case SAS_PROTOCOL_STP:
-		case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
-			rc = mvs_task_prep_ata(mvi, &tei);
-			break;
-		default:
-			dev_printk(KERN_ERR, &pdev->dev,
-				"unknown sas_task proto: 0x%x\n",
-				t->task_proto);
-			rc = -EINVAL;
-			break;
-		}
-
-		if (rc)
-			goto err_out_tag;
-
-		slot->task = t;
-		slot->port = tei.port;
-		t->lldd_task = (void *) slot;
-		list_add_tail(&slot->list, &slot->port->list);
-		/* TODO: select normal or high priority */
-
-		spin_lock(&t->task_state_lock);
-		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
-		spin_unlock(&t->task_state_lock);
-
-		mvs_hba_memory_dump(mvi, tag, t->task_proto);
-
-		++pass;
-		mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
-		if (n > 1)
-			t = list_entry(t->list.next, struct sas_task, list);
-	} while (--n);
-
-	rc = 0;
-	goto out_done;
-
-err_out_tag:
-	mvs_tag_free(mvi, tag);
-err_out:
-	dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
-	if (!sas_protocol_ata(t->task_proto))
-		if (n_elem)
-			pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
-				     t->data_dir);
-out_done:
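-	/* write the HW TX producer index only once, for the whole batch */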
-	if (pass)
-		mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
-	spin_unlock_irqrestore(&mvi->lock, flags);
-	return rc;
-}
-
-static int mvs_task_abort(struct sas_task *task)
-{
-	int rc;
-	unsigned long flags;
-	struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
-	struct pci_dev *pdev = mvi->pdev;
-	int tag;
-
-	spin_lock_irqsave(&task->task_state_lock, flags);
-	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
-		rc = TMF_RESP_FUNC_COMPLETE;
-		spin_unlock_irqrestore(&task->task_state_lock, flags);
-		goto out_done;
-	}
-	spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-	switch (task->task_proto) {
-	case SAS_PROTOCOL_SMP:
-		dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n");
-		break;
-	case SAS_PROTOCOL_SSP:
-		dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n");
-		break;
-	case SAS_PROTOCOL_SATA:
-	case SAS_PROTOCOL_STP:
-	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{
-		dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! \n");
-#if _MV_DUMP
-		dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n");
-		mvs_hexdump(sizeof(struct host_to_dev_fis),
-				(void *)&task->ata_task.fis, 0);
-		dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n");
-		mvs_hexdump(16, task->ata_task.atapi_packet, 0);
-#endif
-		spin_lock_irqsave(&task->task_state_lock, flags);
-		if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
-			/* TODO */
-			;
-		}
-		spin_unlock_irqrestore(&task->task_state_lock, flags);
-		break;
-	}
-	default:
-		break;
-	}
-
-	if (mvs_find_tag(mvi, task, &tag)) {
-		spin_lock_irqsave(&mvi->lock, flags);
-		mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag);
-		spin_unlock_irqrestore(&mvi->lock, flags);
-	}
-	if (!mvs_task_exec(task, 1, GFP_ATOMIC))
-		rc = TMF_RESP_FUNC_COMPLETE;
-	else
-		rc = TMF_RESP_FUNC_FAILED;
-out_done:
-	return rc;
-}
-
-static void mvs_free(struct mvs_info *mvi)
-{
-	int i;
-
-	if (!mvi)
-		return;
-
-	for (i = 0; i < MVS_SLOTS; i++) {
-		struct mvs_slot_info *slot = &mvi->slot_info[i];
-
-		if (slot->buf)
-			dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ,
-					  slot->buf, slot->buf_dma);
-	}
-
-	if (mvi->tx)
-		dma_free_coherent(&mvi->pdev->dev,
-				  sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
-				  mvi->tx, mvi->tx_dma);
-	if (mvi->rx_fis)
-		dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ,
-				  mvi->rx_fis, mvi->rx_fis_dma);
-	if (mvi->rx)
-		dma_free_coherent(&mvi->pdev->dev,
-				  sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
-				  mvi->rx, mvi->rx_dma);
-	if (mvi->slot)
-		dma_free_coherent(&mvi->pdev->dev,
-				  sizeof(*mvi->slot) * MVS_SLOTS,
-				  mvi->slot, mvi->slot_dma);
-#ifdef MVS_ENABLE_PERI
-	if (mvi->peri_regs)
-		iounmap(mvi->peri_regs);
-#endif
-	if (mvi->regs)
-		iounmap(mvi->regs);
-	if (mvi->shost)
-		scsi_host_put(mvi->shost);
-	kfree(mvi->sas.sas_port);
-	kfree(mvi->sas.sas_phy);
-	kfree(mvi);
-}
-
-/* FIXME: locking? */
-static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
-			   void *funcdata)
-{
-	struct mvs_info *mvi = sas_phy->ha->lldd_ha;
-	int rc = 0, phy_id = sas_phy->id;
-	u32 tmp;
-
-	tmp = mvs_read_phy_ctl(mvi, phy_id);
-
-	switch (func) {
-	case PHY_FUNC_SET_LINK_RATE:{
-			struct sas_phy_linkrates *rates = funcdata;
-			u32 lrmin = 0, lrmax = 0;
-
-			lrmin = (rates->minimum_linkrate << 8);
-			lrmax = (rates->maximum_linkrate << 12);
-
-			if (lrmin) {
-				tmp &= ~(0xf << 8);
-				tmp |= lrmin;
-			}
-			if (lrmax) {
-				tmp &= ~(0xf << 12);
-				tmp |= lrmax;
-			}
-			mvs_write_phy_ctl(mvi, phy_id, tmp);
-			break;
-		}
-
-	case PHY_FUNC_HARD_RESET:
-		if (tmp & PHY_RST_HARD)
-			break;
-		mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD);
-		break;
-
-	case PHY_FUNC_LINK_RESET:
-		mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST);
-		break;
-
-	case PHY_FUNC_DISABLE:
-	case PHY_FUNC_RELEASE_SPINUP_HOLD:
-	default:
-		rc = -EOPNOTSUPP;
-	}
-
-	return rc;
-}
-
-static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
-{
-	struct mvs_phy *phy = &mvi->phy[phy_id];
-	struct asd_sas_phy *sas_phy = &phy->sas_phy;
-
-	sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
-	sas_phy->class = SAS;
-	sas_phy->iproto = SAS_PROTOCOL_ALL;
-	sas_phy->tproto = 0;
-	sas_phy->type = PHY_TYPE_PHYSICAL;
-	sas_phy->role = PHY_ROLE_INITIATOR;
-	sas_phy->oob_mode = OOB_NOT_CONNECTED;
-	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
-
-	sas_phy->id = phy_id;
-	sas_phy->sas_addr = &mvi->sas_addr[0];
-	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
-	sas_phy->ha = &mvi->sas;
-	sas_phy->lldd_phy = phy;
-}
-
-static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
-					    const struct pci_device_id *ent)
-{
-	struct mvs_info *mvi;
-	unsigned long res_start, res_len, res_flag;
-	struct asd_sas_phy **arr_phy;
-	struct asd_sas_port **arr_port;
-	const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
-	int i;
-
-	/*
-	 * alloc and init our per-HBA mvs_info struct
-	 */
-
-	mvi = kzalloc(sizeof(*mvi), GFP_KERNEL);
-	if (!mvi)
-		return NULL;
-
-	spin_lock_init(&mvi->lock);
-#ifdef MVS_USE_TASKLET
-	tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi);
-#endif
-	mvi->pdev = pdev;
-	mvi->chip = chip;
-
-	if (pdev->device == 0x6440 && pdev->revision == 0)
-		mvi->flags |= MVF_PHY_PWR_FIX;
-
-	/*
-	 * alloc and init SCSI, SAS glue
-	 */
-
-	mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
-	if (!mvi->shost)
-		goto err_out;
-
-	arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
-	arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
-	if (!arr_phy || !arr_port)
-		goto err_out;
-
-	for (i = 0; i < MVS_MAX_PHYS; i++) {
-		mvs_phy_init(mvi, i);
-		arr_phy[i] = &mvi->phy[i].sas_phy;
-		arr_port[i] = &mvi->port[i].sas_port;
-		mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED;
-		mvi->port[i].wide_port_phymap = 0;
-		mvi->port[i].port_attached = 0;
-		INIT_LIST_HEAD(&mvi->port[i].list);
-	}
-
-	SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
-	mvi->shost->transportt = mvs_stt;
-	mvi->shost->max_id = 21;
-	mvi->shost->max_lun = ~0;
-	mvi->shost->max_channel = 0;
-	mvi->shost->max_cmd_len = 16;
-
-	mvi->sas.sas_ha_name = DRV_NAME;
-	mvi->sas.dev = &pdev->dev;
-	mvi->sas.lldd_module = THIS_MODULE;
-	mvi->sas.sas_addr = &mvi->sas_addr[0];
-	mvi->sas.sas_phy = arr_phy;
-	mvi->sas.sas_port = arr_port;
-	mvi->sas.num_phys = chip->n_phy;
-	mvi->sas.lldd_max_execute_num = 1;
-	mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
-	mvi->shost->can_queue = MVS_CAN_QUEUE;
-	mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys;
-	mvi->sas.lldd_ha = mvi;
-	mvi->sas.core.shost = mvi->shost;
-
-	mvs_tag_init(mvi);
-
-	/*
-	 * ioremap main and peripheral registers
-	 */
-
-#ifdef MVS_ENABLE_PERI
-	res_start = pci_resource_start(pdev, 2);
-	res_len = pci_resource_len(pdev, 2);
-	if (!res_start || !res_len)
-		goto err_out;
-
-	mvi->peri_regs = ioremap_nocache(res_start, res_len);
-	if (!mvi->peri_regs)
-		goto err_out;
-#endif
-
-	res_start = pci_resource_start(pdev, 4);
-	res_len = pci_resource_len(pdev, 4);
-	if (!res_start || !res_len)
-		goto err_out;
-
-	res_flag = pci_resource_flags(pdev, 4);
-	if (res_flag & IORESOURCE_CACHEABLE)
-		mvi->regs = ioremap(res_start, res_len);
-	else
-		mvi->regs = ioremap_nocache(res_start, res_len);
-
-	if (!mvi->regs)
-		goto err_out;
-
-	/*
-	 * alloc and init our DMA areas
-	 */
-
-	mvi->tx = dma_alloc_coherent(&pdev->dev,
-				     sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
-				     &mvi->tx_dma, GFP_KERNEL);
-	if (!mvi->tx)
-		goto err_out;
-	memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
-
-	mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
-					 &mvi->rx_fis_dma, GFP_KERNEL);
-	if (!mvi->rx_fis)
-		goto err_out;
-	memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
-
-	mvi->rx = dma_alloc_coherent(&pdev->dev,
-				     sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
-				     &mvi->rx_dma, GFP_KERNEL);
-	if (!mvi->rx)
-		goto err_out;
-	memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
-
-	mvi->rx[0] = cpu_to_le32(0xfff);
-	mvi->rx_cons = 0xfff;
-
-	mvi->slot = dma_alloc_coherent(&pdev->dev,
-				       sizeof(*mvi->slot) * MVS_SLOTS,
-				       &mvi->slot_dma, GFP_KERNEL);
-	if (!mvi->slot)
-		goto err_out;
-	memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS);
-
-	for (i = 0; i < MVS_SLOTS; i++) {
-		struct mvs_slot_info *slot = &mvi->slot_info[i];
-
-		slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ,
-					       &slot->buf_dma, GFP_KERNEL);
-		if (!slot->buf)
-			goto err_out;
-		memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
-	}
-
-	/* finally, read NVRAM to get our SAS address */
-	if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8))
-		goto err_out;
-	return mvi;
-
-err_out:
-	mvs_free(mvi);
-	return NULL;
-}
-
-static u32 mvs_cr32(void __iomem *regs, u32 addr)
-{
-	mw32(CMD_ADDR, addr);
-	return mr32(CMD_DATA);
-}
-
-static void mvs_cw32(void __iomem *regs, u32 addr, u32 val)
-{
-	mw32(CMD_ADDR, addr);
-	mw32(CMD_DATA, val);
-}
-
-static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
-{
-	void __iomem *regs = mvi->regs;
-	return (port < 4) ? mr32(P0_SER_CTLSTAT + port * 4) :
-		mr32(P4_SER_CTLSTAT + (port - 4) * 4);
-}
-
-static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
-{
-	void __iomem *regs = mvi->regs;
-	if (port < 4)
-		mw32(P0_SER_CTLSTAT + port * 4, val);
-	else
-		mw32(P4_SER_CTLSTAT + (port - 4) * 4, val);
-}
-
-static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port)
-{
-	void __iomem *regs = mvi->regs + off;
-	void __iomem *regs2 = mvi->regs + off2;
-	return (port < 4) ? readl(regs + port * 8) :
-		readl(regs2 + (port - 4) * 8);
-}
-
-static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
-				u32 port, u32 val)
-{
-	void __iomem *regs = mvi->regs + off;
-	void __iomem *regs2 = mvi->regs + off2;
-	if (port < 4)
-		writel(val, regs + port * 8);
-	else
-		writel(val, regs2 + (port - 4) * 8);
-}
-
-static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
-{
-	return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port);
-}
-
-static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val)
-{
-	mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val);
-}
-
-static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr)
-{
-	mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr);
-}
-
-static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
-{
-	return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port);
-}
-
-static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val)
-{
-	mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val);
-}
-
-static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr)
-{
-	mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr);
-}
-
-static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
-{
-	return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port);
-}
-
-static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val)
-{
-	mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val);
-}
-
-static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
-{
-	return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port);
-}
-
-static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val)
-{
-	mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val);
-}
-
-static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
-{
-	void __iomem *regs = mvi->regs;
-	u32 tmp;
-
-	/* workaround for SATA R-ERR, to ignore phy glitch */
-	tmp = mvs_cr32(regs, CMD_PHY_TIMER);
-	tmp &= ~(1 << 9);
-	tmp |= (1 << 10);
-	mvs_cw32(regs, CMD_PHY_TIMER, tmp);
-
-	/* enable retry 127 times */
-	mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f);
-
-	/* extend open frame timeout to max */
-	tmp = mvs_cr32(regs, CMD_SAS_CTL0);
-	tmp &= ~0xffff;
-	tmp |= 0x3fff;
-	mvs_cw32(regs, CMD_SAS_CTL0, tmp);
-
-	/* workaround for WDTIMEOUT, set to 550 ms */
-	mvs_cw32(regs, CMD_WD_TIMER, 0x86470);
-
-	/* not to halt for different port op during wideport link change */
-	mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);
-
-	/* workaround for Seagate disk not-found OOB sequence: receive
-	 * COMINIT before sending out COMWAKE */
-	tmp = mvs_cr32(regs, CMD_PHY_MODE_21);
-	tmp &= 0x0000ffff;
-	tmp |= 0x00fa0000;
-	mvs_cw32(regs, CMD_PHY_MODE_21, tmp);
-
-	tmp = mvs_cr32(regs, CMD_PHY_TIMER);
-	tmp &= 0x1fffffff;
-	tmp |= (2U << 29);	/* 8 ms retry */
-	mvs_cw32(regs, CMD_PHY_TIMER, tmp);
-
-	/* TEST - for phy decoding error, adjust voltage levels */
-	mw32(P0_VSR_ADDR + 0, 0x8);
-	mw32(P0_VSR_DATA + 0, 0x2F0);
-
-	mw32(P0_VSR_ADDR + 8, 0x8);
-	mw32(P0_VSR_DATA + 8, 0x2F0);
-
-	mw32(P0_VSR_ADDR + 16, 0x8);
-	mw32(P0_VSR_DATA + 16, 0x2F0);
-
-	mw32(P0_VSR_ADDR + 24, 0x8);
-	mw32(P0_VSR_DATA + 24, 0x2F0);
-
-}
-
-static void mvs_enable_xmt(struct mvs_info *mvi, int phy_id)
-{
-	void __iomem *regs = mvi->regs;
-	u32 tmp;
-
-	tmp = mr32(PCS);
-	if (mvi->chip->n_phy <= 4)
-		tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
-	else
-		tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
-	mw32(PCS, tmp);
-}
-
-static void mvs_detect_porttype(struct mvs_info *mvi, int i)
-{
-	void __iomem *regs = mvi->regs;
-	u32 reg;
-	struct mvs_phy *phy = &mvi->phy[i];
-
-	/* TODO check & save device type */
-	reg = mr32(GBL_PORT_TYPE);
-
-	if (reg & MODE_SAS_SATA & (1 << i))
-		phy->phy_type |= PORT_TYPE_SAS;
-	else
-		phy->phy_type |= PORT_TYPE_SATA;
-}
-
-static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
-{
-	u32 *s = (u32 *) buf;
-
-	if (!s)
-		return NULL;
-
-	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
-	s[3] = mvs_read_port_cfg_data(mvi, i);
-
-	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
-	s[2] = mvs_read_port_cfg_data(mvi, i);
-
-	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
-	s[1] = mvs_read_port_cfg_data(mvi, i);
-
-	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
-	s[0] = mvs_read_port_cfg_data(mvi, i);
-
-	return (void *)s;
-}
-
-static u32 mvs_is_sig_fis_received(u32 irq_status)
-{
-	return irq_status & PHYEV_SIG_FIS;
-}
-
-static void mvs_update_wideport(struct mvs_info *mvi, int i)
-{
-	struct mvs_phy *phy = &mvi->phy[i];
-	struct mvs_port *port = phy->port;
-	int j, no;
-
-	for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
-		if (no & 1) {
-			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
-			mvs_write_port_cfg_data(mvi, no,
-						port->wide_port_phymap);
-		} else {
-			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
-			mvs_write_port_cfg_data(mvi, no, 0);
-		}
-}
-
-static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
-{
-	u32 tmp;
-	struct mvs_phy *phy = &mvi->phy[i];
-	struct mvs_port *port = phy->port;
-
-	tmp = mvs_read_phy_ctl(mvi, i);
-
-	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
-		if (!port)
-			phy->phy_attached = 1;
-		return tmp;
-	}
-
-	if (port) {
-		if (phy->phy_type & PORT_TYPE_SAS) {
-			port->wide_port_phymap &= ~(1U << i);
-			if (!port->wide_port_phymap)
-				port->port_attached = 0;
-			mvs_update_wideport(mvi, i);
-		} else if (phy->phy_type & PORT_TYPE_SATA)
-			port->port_attached = 0;
-		mvs_free_reg_set(mvi, phy->port);
-		phy->port = NULL;
-		phy->phy_attached = 0;
-		phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
-	}
-	return 0;
-}
-
-static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
-					int get_st)
-{
-	struct mvs_phy *phy = &mvi->phy[i];
-	struct pci_dev *pdev = mvi->pdev;
-	u32 tmp;
-	u64 tmp64;
-
-	mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
-	phy->dev_info = mvs_read_port_cfg_data(mvi, i);
-
-	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
-	phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;
-
-	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
-	phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
-
-	if (get_st) {
-		phy->irq_status = mvs_read_port_irq_stat(mvi, i);
-		phy->phy_status = mvs_is_phy_ready(mvi, i);
-	}
-
-	if (phy->phy_status) {
-		u32 phy_st;
-		struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
-
-		mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
-		phy_st = mvs_read_port_cfg_data(mvi, i);
-
-		sas_phy->linkrate =
-			(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
-				PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
-		phy->minimum_linkrate =
-			(phy->phy_status &
-				PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
-		phy->maximum_linkrate =
-			(phy->phy_status &
-				PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
-
-		if (phy->phy_type & PORT_TYPE_SAS) {
-			/* Updated attached_sas_addr */
-			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
-			phy->att_dev_sas_addr =
-				(u64) mvs_read_port_cfg_data(mvi, i) << 32;
-			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
-			phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
-			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
-			phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
-			phy->identify.device_type =
-			    phy->att_dev_info & PORT_DEV_TYPE_MASK;
-
-			if (phy->identify.device_type == SAS_END_DEV)
-				phy->identify.target_port_protocols =
-							SAS_PROTOCOL_SSP;
-			else if (phy->identify.device_type != NO_DEVICE)
-				phy->identify.target_port_protocols =
-							SAS_PROTOCOL_SMP;
-			if (phy_st & PHY_OOB_DTCTD)
-				sas_phy->oob_mode = SAS_OOB_MODE;
-			phy->frame_rcvd_size =
-			    sizeof(struct sas_identify_frame);
-		} else if (phy->phy_type & PORT_TYPE_SATA) {
-			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
-			if (mvs_is_sig_fis_received(phy->irq_status)) {
-				phy->att_dev_sas_addr = i;	/* temp */
-				if (phy_st & PHY_OOB_DTCTD)
-					sas_phy->oob_mode = SATA_OOB_MODE;
-				phy->frame_rcvd_size =
-				    sizeof(struct dev_to_host_fis);
-				mvs_get_d2h_reg(mvi, i,
-						(void *)sas_phy->frame_rcvd);
-			} else {
-				dev_printk(KERN_DEBUG, &pdev->dev,
-					"No sig fis\n");
-				phy->phy_type &= ~(PORT_TYPE_SATA);
-				goto out_done;
-			}
-		}
-		tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
-		memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);
-
-		dev_printk(KERN_DEBUG, &pdev->dev,
-			"phy[%d] Get Attached Address 0x%llX ,"
-			" SAS Address 0x%llX\n",
-			i,
-			(unsigned long long)phy->att_dev_sas_addr,
-			(unsigned long long)phy->dev_sas_addr);
-		dev_printk(KERN_DEBUG, &pdev->dev,
-			"Rate = %x , type = %d\n",
-			sas_phy->linkrate, phy->phy_type);
-
-		/* workaround for HW phy decoding error on 1.5g disk drive */
-		mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
-		tmp = mvs_read_port_vsr_data(mvi, i);
-		if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
-		     PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
-			SAS_LINK_RATE_1_5_GBPS)
-			tmp &= ~PHY_MODE6_LATECLK;
-		else
-			tmp |= PHY_MODE6_LATECLK;
-		mvs_write_port_vsr_data(mvi, i, tmp);
-
-	}
-out_done:
-	if (get_st)
-		mvs_write_port_irq_stat(mvi, i, phy->irq_status);
-}
-
-static void mvs_port_formed(struct asd_sas_phy *sas_phy)
-{
-	struct sas_ha_struct *sas_ha = sas_phy->ha;
-	struct mvs_info *mvi = sas_ha->lldd_ha;
-	struct asd_sas_port *sas_port = sas_phy->port;
-	struct mvs_phy *phy = sas_phy->lldd_phy;
-	struct mvs_port *port = &mvi->port[sas_port->id];
-	unsigned long flags;
-
-	spin_lock_irqsave(&mvi->lock, flags);
-	port->port_attached = 1;
-	phy->port = port;
-	port->taskfileset = MVS_ID_NOT_MAPPED;
-	if (phy->phy_type & PORT_TYPE_SAS) {
-		port->wide_port_phymap = sas_port->phy_mask;
-		mvs_update_wideport(mvi, sas_phy->id);
-	}
-	spin_unlock_irqrestore(&mvi->lock, flags);
-}
-
-static int mvs_I_T_nexus_reset(struct domain_device *dev)
-{
-	return TMF_RESP_FUNC_FAILED;
-}
-
-static int __devinit mvs_hw_init(struct mvs_info *mvi)
-{
-	void __iomem *regs = mvi->regs;
-	int i;
-	u32 tmp, cctl;
-
-	/* make sure interrupts are masked immediately (paranoia) */
-	mw32(GBL_CTL, 0);
-	tmp = mr32(GBL_CTL);
-
-	/* Reset Controller */
-	if (!(tmp & HBA_RST)) {
-		if (mvi->flags & MVF_PHY_PWR_FIX) {
-			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
-			tmp &= ~PCTL_PWR_ON;
-			tmp |= PCTL_OFF;
-			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
-
-			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
-			tmp &= ~PCTL_PWR_ON;
-			tmp |= PCTL_OFF;
-			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
-		}
-
-		/* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
-		mw32_f(GBL_CTL, HBA_RST);
-	}
-
-	/* wait for reset to finish; timeout is just a guess */
-	i = 1000;
-	while (i-- > 0) {
-		msleep(10);
-
-		if (!(mr32(GBL_CTL) & HBA_RST))
-			break;
-	}
-	if (mr32(GBL_CTL) & HBA_RST) {
-		dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
-		return -EBUSY;
-	}
-
-	/* Init Chip */
-	/* make sure RST is set; HBA_RST /should/ have done that for us */
-	cctl = mr32(CTL);
-	if (cctl & CCTL_RST)
-		cctl &= ~CCTL_RST;
-	else
-		mw32_f(CTL, cctl | CCTL_RST);
-
-	/* write to device control _AND_ device status register? - A.C. */
-	pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
-	tmp &= ~PRD_REQ_MASK;
-	tmp |= PRD_REQ_SIZE;
-	pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
-
-	pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
-	tmp |= PCTL_PWR_ON;
-	tmp &= ~PCTL_OFF;
-	pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
-
-	pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
-	tmp |= PCTL_PWR_ON;
-	tmp &= ~PCTL_OFF;
-	pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
-
-	mw32_f(CTL, cctl);
-
-	/* reset control */
-	mw32(PCS, 0);		/*MVS_PCS */
-
-	mvs_phy_hacks(mvi);
-
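-	/* the split 16+16 shift extracts the high 32 bits safely even
-	 * when dma_addr_t is only 32 bits wide
-	 */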
-	mw32(CMD_LIST_LO, mvi->slot_dma);
-	mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
-
-	mw32(RX_FIS_LO, mvi->rx_fis_dma);
-	mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
-
-	mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
-	mw32(TX_LO, mvi->tx_dma);
-	mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);
-
-	mw32(RX_CFG, MVS_RX_RING_SZ);
-	mw32(RX_LO, mvi->rx_dma);
-	mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);
-
-	/* enable auto port detection */
-	mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
-	msleep(1100);
-	/* init and reset phys */
-	for (i = 0; i < mvi->chip->n_phy; i++) {
-		u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
-		u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]);
-
-		mvs_detect_porttype(mvi, i);
-
-		/* set phy local SAS address */
-		mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
-		mvs_write_port_cfg_data(mvi, i, lo);
-		mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
-		mvs_write_port_cfg_data(mvi, i, hi);
-
-		/* reset phy */
-		tmp = mvs_read_phy_ctl(mvi, i);
-		tmp |= PHY_RST;
-		mvs_write_phy_ctl(mvi, i, tmp);
-	}
-
-	msleep(100);
-
-	for (i = 0; i < mvi->chip->n_phy; i++) {
-		/* clear phy int status */
-		tmp = mvs_read_port_irq_stat(mvi, i);
-		tmp &= ~PHYEV_SIG_FIS;
-		mvs_write_port_irq_stat(mvi, i, tmp);
-
-		/* set phy int mask */
-		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
-			PHYEV_ID_DONE | PHYEV_DEC_ERR;
-		mvs_write_port_irq_mask(mvi, i, tmp);
-
-		msleep(100);
-		mvs_update_phyinfo(mvi, i, 1);
-		mvs_enable_xmt(mvi, i);
-	}
-
-	/* FIXME: update wide port bitmaps */
-
-	/* little endian for open address and command table, etc. */
-	/* A.C.
-	 * it seems that (from the spec) turning on big-endian won't
-	 * do us any good on big-endian machines; needs further confirmation
-	 */
-	cctl = mr32(CTL);
-	cctl |= CCTL_ENDIAN_CMD;
-	cctl |= CCTL_ENDIAN_DATA;
-	cctl &= ~CCTL_ENDIAN_OPEN;
-	cctl |= CCTL_ENDIAN_RSP;
-	mw32_f(CTL, cctl);
-
-	/* reset CMD queue */
-	tmp = mr32(PCS);
-	tmp |= PCS_CMD_RST;
-	mw32(PCS, tmp);
-	/* interrupt coalescing may cause a missed HW interrupt in some
-	 * cases, and the max coalesce count is 0x1ff while our max slot
-	 * count is 0x200, so leave the count at 0 (disabled).
-	 */
-	tmp = 0;
-	mw32(INT_COAL, tmp);
-
-	tmp = 0x100;
-	mw32(INT_COAL_TMOUT, tmp);
-
-	/* ladies and gentlemen, start your engines */
-	mw32(TX_CFG, 0);
-	mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
-	mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
-	/* enable CMD/CMPL_Q/RESP mode */
-	mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);
-
-	/* enable completion queue interrupt */
-	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS);
-	mw32(INT_MASK, tmp);
-
-	/* Enable SRS interrupt */
-	mw32(INT_MASK_SRS, 0xFF);
-	return 0;
-}
-
-static void __devinit mvs_print_info(struct mvs_info *mvi)
-{
-	struct pci_dev *pdev = mvi->pdev;
-	static int printed_version;
-
-	if (!printed_version++)
-		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
-
-	dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
-		   mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
-}
-
-static int __devinit mvs_pci_init(struct pci_dev *pdev,
-				  const struct pci_device_id *ent)
-{
-	int rc;
-	struct mvs_info *mvi;
-	irq_handler_t irq_handler = mvs_interrupt;
-
-	rc = pci_enable_device(pdev);
-	if (rc)
-		return rc;
-
-	pci_set_master(pdev);
-
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc)
-		goto err_out_disable;
-
-	rc = pci_go_64(pdev);
-	if (rc)
-		goto err_out_regions;
-
-	mvi = mvs_alloc(pdev, ent);
-	if (!mvi) {
-		rc = -ENOMEM;
-		goto err_out_regions;
-	}
-
-	rc = mvs_hw_init(mvi);
-	if (rc)
-		goto err_out_mvi;
-
-#ifndef MVS_DISABLE_MSI
-	if (!pci_enable_msi(pdev)) {
-		u32 tmp;
-		void __iomem *regs = mvi->regs;
-		mvi->flags |= MVF_MSI;
-		irq_handler = mvs_msi_interrupt;
-		tmp = mr32(PCS);
-		mw32(PCS, tmp | PCS_SELF_CLEAR);
-	}
-#endif
-
-	rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
-	if (rc)
-		goto err_out_msi;
-
-	rc = scsi_add_host(mvi->shost, &pdev->dev);
-	if (rc)
-		goto err_out_irq;
-
-	rc = sas_register_ha(&mvi->sas);
-	if (rc)
-		goto err_out_shost;
-
-	pci_set_drvdata(pdev, mvi);
-
-	mvs_print_info(mvi);
-
-	mvs_hba_interrupt_enable(mvi);
-
-	scsi_scan_host(mvi->shost);
-
-	return 0;
-
-err_out_shost:
-	scsi_remove_host(mvi->shost);
-err_out_irq:
-	free_irq(pdev->irq, mvi);
-err_out_msi:
-	if (mvi->flags & MVF_MSI)
-		pci_disable_msi(pdev);
-err_out_mvi:
-	mvs_free(mvi);
-err_out_regions:
-	pci_release_regions(pdev);
-err_out_disable:
-	pci_disable_device(pdev);
-	return rc;
-}
-
-static void __devexit mvs_pci_remove(struct pci_dev *pdev)
-{
-	struct mvs_info *mvi = pci_get_drvdata(pdev);
-
-	pci_set_drvdata(pdev, NULL);
-
-	if (mvi) {
-		sas_unregister_ha(&mvi->sas);
-		mvs_hba_interrupt_disable(mvi);
-		sas_remove_host(mvi->shost);
-		scsi_remove_host(mvi->shost);
-
-		free_irq(pdev->irq, mvi);
-		if (mvi->flags & MVF_MSI)
-			pci_disable_msi(pdev);
-		mvs_free(mvi);
-		pci_release_regions(pdev);
-	}
-	pci_disable_device(pdev);
-}
-
-static struct sas_domain_function_template mvs_transport_ops = {
-	.lldd_execute_task	= mvs_task_exec,
-	.lldd_control_phy	= mvs_phy_control,
-	.lldd_abort_task	= mvs_task_abort,
-	.lldd_port_formed	= mvs_port_formed,
-	.lldd_I_T_nexus_reset	= mvs_I_T_nexus_reset,
-};
-
-static struct pci_device_id __devinitdata mvs_pci_table[] = {
-	{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
-	{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
-	{
-		.vendor 	= PCI_VENDOR_ID_MARVELL,
-		.device 	= 0x6440,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= 0x6480,
-		.class		= 0,
-		.class_mask	= 0,
-		.driver_data	= chip_6480,
-	},
-	{ PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
-	{ PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },
-
-	{ }	/* terminate list */
-};
-
-static struct pci_driver mvs_pci_driver = {
-	.name		= DRV_NAME,
-	.id_table	= mvs_pci_table,
-	.probe		= mvs_pci_init,
-	.remove		= __devexit_p(mvs_pci_remove),
-};
-
-static int __init mvs_init(void)
-{
-	int rc;
-
-	mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
-	if (!mvs_stt)
-		return -ENOMEM;
-
-	rc = pci_register_driver(&mvs_pci_driver);
-	if (rc)
-		goto err_out;
-
-	return 0;
-
-err_out:
-	sas_release_transport(mvs_stt);
-	return rc;
-}
-
-static void __exit mvs_exit(void)
-{
-	pci_unregister_driver(&mvs_pci_driver);
-	sas_release_transport(mvs_stt);
-}
-
-module_init(mvs_init);
-module_exit(mvs_exit);
-
-MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
-MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
-MODULE_VERSION(DRV_VERSION);
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(pci, mvs_pci_table);

+ 42 - 0
drivers/scsi/mvsas/Kconfig

@@ -0,0 +1,42 @@
+#
+# Kernel configuration file for 88SE64XX/88SE94XX SAS/SATA driver.
+#
+# Copyright 2007 Red Hat, Inc.
+# Copyright 2008 Marvell. <kewei@marvell.com>
+#
+# This file is licensed under GPLv2.
+#
+# This file is part of the 88SE64XX/88SE94XX driver.
+#
+# The 88SE64XX/88SE94XX driver is free software; you can redistribute
+# it and/or modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2 of the
+# License.
+#
+# The 88SE64XX/88SE94XX driver is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with 88SE64XX/88SE94XX Driver; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+#
+
+config SCSI_MVSAS
+	tristate "Marvell 88SE64XX/88SE94XX SAS/SATA support"
+	depends on PCI
+	select SCSI_SAS_LIBSAS
+	select FW_LOADER
+	help
+		This driver supports Marvell's SAS/SATA 3Gb/s PCI-E 88SE64XX and 6Gb/s
+		PCI-E 88SE94XX chip-based host adapters.
+
+config SCSI_MVSAS_DEBUG
+	bool "Compile in debug mode"
+	default y
+	depends on SCSI_MVSAS
+	help
+		Compiles the 88SE64XX/88SE94XX driver in debug mode.  In debug mode,
+		the driver prints some messages to the console.

+ 32 - 0
drivers/scsi/mvsas/Makefile

@@ -0,0 +1,32 @@
+#
+# Makefile for Marvell 88SE64xx/88SE94xx SAS/SATA driver.
+#
+# Copyright 2007 Red Hat, Inc.
+# Copyright 2008 Marvell. <kewei@marvell.com>
+#
+# This file is licensed under GPLv2.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 of the
+# License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+# USA
+
+ifeq ($(CONFIG_SCSI_MVSAS_DEBUG),y)
+	EXTRA_CFLAGS += -DMV_DEBUG
+endif
+
+obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
+mvsas-y +=  mv_init.o  \
+           mv_sas.o   \
+           mv_64xx.o  \
+           mv_94xx.o

+ 793 - 0
drivers/scsi/mvsas/mv_64xx.c

@@ -0,0 +1,793 @@
+/*
+ * Marvell 88SE64xx hardware specific
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#include "mv_sas.h"
+#include "mv_64xx.h"
+#include "mv_chips.h"
+
+static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
+{
+	void __iomem *regs = mvi->regs;
+	u32 reg;
+	struct mvs_phy *phy = &mvi->phy[i];
+
+	/* TODO check & save device type */
+	reg = mr32(MVS_GBL_PORT_TYPE);
+	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+	if (reg & MODE_SAS_SATA & (1 << i))
+		phy->phy_type |= PORT_TYPE_SAS;
+	else
+		phy->phy_type |= PORT_TYPE_SATA;
+}
+
+static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp;
+
+	tmp = mr32(MVS_PCS);
+	if (mvi->chip->n_phy <= 4)
+		tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
+	else
+		tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
+	mw32(MVS_PCS, tmp);
+}
+
+static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs;
+
+	mvs_phy_hacks(mvi);
+
+	if (!(mvi->flags & MVF_FLAG_SOC)) {
+		/* TEST - for phy decoding error, adjust voltage levels */
+		mw32(MVS_P0_VSR_ADDR + 0, 0x8);
+		mw32(MVS_P0_VSR_DATA + 0, 0x2F0);
+
+		mw32(MVS_P0_VSR_ADDR + 8, 0x8);
+		mw32(MVS_P0_VSR_DATA + 8, 0x2F0);
+
+		mw32(MVS_P0_VSR_ADDR + 16, 0x8);
+		mw32(MVS_P0_VSR_DATA + 16, 0x2F0);
+
+		mw32(MVS_P0_VSR_ADDR + 24, 0x8);
+		mw32(MVS_P0_VSR_DATA + 24, 0x2F0);
+	} else {
+		int i;
+		/* disable auto port detection */
+		mw32(MVS_GBL_PORT_TYPE, 0);
+		for (i = 0; i < mvi->chip->n_phy; i++) {
+			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7);
+			mvs_write_port_vsr_data(mvi, i, 0x90000000);
+			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9);
+			mvs_write_port_vsr_data(mvi, i, 0x50f2);
+			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11);
+			mvs_write_port_vsr_data(mvi, i, 0x0e);
+		}
+	}
+}
+
+static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
+{
+	void __iomem *regs = mvi->regs;
+	u32 reg, tmp;
+
+	if (!(mvi->flags & MVF_FLAG_SOC)) {
+		if (phy_id < 4)
+			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
+		else
+			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);
+
+	} else
+		reg = mr32(MVS_PHY_CTL);
+
+	tmp = reg;
+	if (phy_id < 4)
+		tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
+	else
+		tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS;
+
+	if (!(mvi->flags & MVF_FLAG_SOC)) {
+		if (phy_id < 4) {
+			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
+			mdelay(10);
+			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
+		} else {
+			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
+			mdelay(10);
+			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg);
+		}
+	} else {
+		mw32(MVS_PHY_CTL, tmp);
+		mdelay(10);
+		mw32(MVS_PHY_CTL, reg);
+	}
+}
+
+static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
+{
+	u32 tmp;
+	tmp = mvs_read_port_irq_stat(mvi, phy_id);
+	tmp &= ~PHYEV_RDY_CH;
+	mvs_write_port_irq_stat(mvi, phy_id, tmp);
+	tmp = mvs_read_phy_ctl(mvi, phy_id);
+	if (hard)
+		tmp |= PHY_RST_HARD;
+	else
+		tmp |= PHY_RST;
+	mvs_write_phy_ctl(mvi, phy_id, tmp);
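+	/* a hard reset is self-clearing: wait for the HW to drop the bit */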
+	if (hard) {
+		do {
+			tmp = mvs_read_phy_ctl(mvi, phy_id);
+		} while (tmp & PHY_RST_HARD);
+	}
+}
+
+static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp;
+	int i;
+
+	/* make sure interrupts are masked immediately (paranoia) */
+	mw32(MVS_GBL_CTL, 0);
+	tmp = mr32(MVS_GBL_CTL);
+
+	/* Reset Controller */
+	if (!(tmp & HBA_RST)) {
+		if (mvi->flags & MVF_PHY_PWR_FIX) {
+			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
+			tmp &= ~PCTL_PWR_OFF;
+			tmp |= PCTL_PHY_DSBL;
+			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
+
+			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
+			tmp &= ~PCTL_PWR_OFF;
+			tmp |= PCTL_PHY_DSBL;
+			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
+		}
+	}
+
+	/* make sure interrupts are masked immediately (paranoia) */
+	mw32(MVS_GBL_CTL, 0);
+	tmp = mr32(MVS_GBL_CTL);
+
+	/* Reset Controller */
+	if (!(tmp & HBA_RST)) {
+		/* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
+		mw32_f(MVS_GBL_CTL, HBA_RST);
+	}
+
+	/* wait for reset to finish; timeout is just a guess */
+	i = 1000;
+	while (i-- > 0) {
+		msleep(10);
+
+		if (!(mr32(MVS_GBL_CTL) & HBA_RST))
+			break;
+	}
+	if (mr32(MVS_GBL_CTL) & HBA_RST) {
+		dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n");
+		return -EBUSY;
+	}
+	return 0;
+}
+
+static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp;
+	if (!(mvi->flags & MVF_FLAG_SOC)) {
+		u32 offs;
+		if (phy_id < 4)
+			offs = PCR_PHY_CTL;
+		else {
+			offs = PCR_PHY_CTL2;
+			phy_id -= 4;
+		}
+		pci_read_config_dword(mvi->pdev, offs, &tmp);
+		tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
+		pci_write_config_dword(mvi->pdev, offs, tmp);
+	} else {
+		tmp = mr32(MVS_PHY_CTL);
+		tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
+		mw32(MVS_PHY_CTL, tmp);
+	}
+}
+
+static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp;
+	if (!(mvi->flags & MVF_FLAG_SOC)) {
+		u32 offs;
+		if (phy_id < 4)
+			offs = PCR_PHY_CTL;
+		else {
+			offs = PCR_PHY_CTL2;
+			phy_id -= 4;
+		}
+		pci_read_config_dword(mvi->pdev, offs, &tmp);
+		tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
+		pci_write_config_dword(mvi->pdev, offs, tmp);
+	} else {
+		tmp = mr32(MVS_PHY_CTL);
+		tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
+		mw32(MVS_PHY_CTL, tmp);
+	}
+}
+
+static int __devinit mvs_64xx_init(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs;
+	int i;
+	u32 tmp, cctl;
+
+	if (mvi->pdev && mvi->pdev->revision == 0)
+		mvi->flags |= MVF_PHY_PWR_FIX;
+	if (!(mvi->flags & MVF_FLAG_SOC)) {
+		mvs_show_pcie_usage(mvi);
+		tmp = mvs_64xx_chip_reset(mvi);
+		if (tmp)
+			return tmp;
+	} else {
+		tmp = mr32(MVS_PHY_CTL);
+		tmp &= ~PCTL_PWR_OFF;
+		tmp |= PCTL_PHY_DSBL;
+		mw32(MVS_PHY_CTL, tmp);
+	}
+
+	/* Init Chip */
+	/* make sure RST is set; HBA_RST /should/ have done that for us */
+	cctl = mr32(MVS_CTL) & 0xFFFF;
+	if (cctl & CCTL_RST)
+		cctl &= ~CCTL_RST;
+	else
+		mw32_f(MVS_CTL, cctl | CCTL_RST);
+
+	if (!(mvi->flags & MVF_FLAG_SOC)) {
+		/* write to device control _AND_ device status register */
+		pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
+		tmp &= ~PRD_REQ_MASK;
+		tmp |= PRD_REQ_SIZE;
+		pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
+
+		pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
+		tmp &= ~PCTL_PWR_OFF;
+		tmp &= ~PCTL_PHY_DSBL;
+		pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
+
+		pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
+		tmp &= ~PCTL_PWR_OFF;
+		tmp &= ~PCTL_PHY_DSBL;
+		pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
+	} else {
+		tmp = mr32(MVS_PHY_CTL);
+		tmp &= ~PCTL_PWR_OFF;
+		tmp |= PCTL_COM_ON;
+		tmp &= ~PCTL_PHY_DSBL;
+		tmp |= PCTL_LINK_RST;
+		mw32(MVS_PHY_CTL, tmp);
+		msleep(100);
+		tmp &= ~PCTL_LINK_RST;
+		mw32(MVS_PHY_CTL, tmp);
+		msleep(100);
+	}
+
+	/* reset control */
+	mw32(MVS_PCS, 0);		/* MVS_PCS */
+	/* init phys */
+	mvs_64xx_phy_hacks(mvi);
+
+	/* enable auto port detection */
+	mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);
+
+	mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
+	mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
+
+	mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
+	mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
+
+	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
+	mw32(MVS_TX_LO, mvi->tx_dma);
+	mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
+
+	mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
+	mw32(MVS_RX_LO, mvi->rx_dma);
+	mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
+
+	for (i = 0; i < mvi->chip->n_phy; i++) {
+		/* set phy local SAS address */
+		/* should set little endian SAS address to 64xx chip */
+		mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI,
+				cpu_to_be64(mvi->phy[i].dev_sas_addr));
+
+		mvs_64xx_enable_xmt(mvi, i);
+
+		mvs_64xx_phy_reset(mvi, i, 1);
+		msleep(500);
+		mvs_64xx_detect_porttype(mvi, i);
+	}
+	if (mvi->flags & MVF_FLAG_SOC) {
+		/* set select registers */
+		writel(0x0E008000, regs + 0x000);
+		writel(0x59000008, regs + 0x004);
+		writel(0x20, regs + 0x008);
+		writel(0x20, regs + 0x00c);
+		writel(0x20, regs + 0x010);
+		writel(0x20, regs + 0x014);
+		writel(0x20, regs + 0x018);
+		writel(0x20, regs + 0x01c);
+	}
+	for (i = 0; i < mvi->chip->n_phy; i++) {
+		/* clear phy int status */
+		tmp = mvs_read_port_irq_stat(mvi, i);
+		tmp &= ~PHYEV_SIG_FIS;
+		mvs_write_port_irq_stat(mvi, i, tmp);
+
+		/* set phy int mask */
+		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
+			PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR |
+			PHYEV_DEC_ERR;
+		mvs_write_port_irq_mask(mvi, i, tmp);
+
+		msleep(100);
+		mvs_update_phyinfo(mvi, i, 1);
+	}
+
+	/* FIXME: update wide port bitmaps */
+
+	/* little endian for open address and command table, etc. */
+	/*
+	 * it seems that (from the spec) turning on big-endian won't
+	 * do us any good on big-endian machines; needs further confirmation
+	 */
+	cctl = mr32(MVS_CTL);
+	cctl |= CCTL_ENDIAN_CMD;
+	cctl |= CCTL_ENDIAN_DATA;
+	cctl &= ~CCTL_ENDIAN_OPEN;
+	cctl |= CCTL_ENDIAN_RSP;
+	mw32_f(MVS_CTL, cctl);
+
+	/* reset CMD queue */
+	tmp = mr32(MVS_PCS);
+	tmp |= PCS_CMD_RST;
+	mw32(MVS_PCS, tmp);
+	/* interrupt coalescing may cause a missed HW interrupt in some
+	 * cases, and the max coalesce count is 0x1ff while our max slot
+	 * count is 0x200, so leave the count at 0 (disabled).
+	 */
+	tmp = 0;
+	mw32(MVS_INT_COAL, tmp);
+
+	tmp = 0x100;
+	mw32(MVS_INT_COAL_TMOUT, tmp);
+
+	/* ladies and gentlemen, start your engines */
+	mw32(MVS_TX_CFG, 0);
+	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
+	mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
+	/* enable CMD/CMPL_Q/RESP mode */
+	mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN |
+		PCS_CMD_EN | PCS_CMD_STOP_ERR);
+
+	/* enable completion queue interrupt */
+	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
+		CINT_DMA_PCIE);
+
+	mw32(MVS_INT_MASK, tmp);
+
+	/* Enable SRS interrupt */
+	mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
+
+	return 0;
+}
+
+static int mvs_64xx_ioremap(struct mvs_info *mvi)
+{
+	if (!mvs_ioremap(mvi, 4, 2))
+		return 0;
+	return -1;
+}
+
+static void mvs_64xx_iounmap(struct mvs_info *mvi)
+{
+	mvs_iounmap(mvi->regs);
+	mvs_iounmap(mvi->regs_ex);
+}
+
+static void mvs_64xx_interrupt_enable(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp;
+
+	tmp = mr32(MVS_GBL_CTL);
+	mw32(MVS_GBL_CTL, tmp | INT_EN);
+}
+
+static void mvs_64xx_interrupt_disable(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp;
+
+	tmp = mr32(MVS_GBL_CTL);
+	mw32(MVS_GBL_CTL, tmp & ~INT_EN);
+}
+
+static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq)
+{
+	void __iomem *regs = mvi->regs;
+	u32 stat;
+
+	if (!(mvi->flags & MVF_FLAG_SOC)) {
+		stat = mr32(MVS_GBL_INT_STAT);
+
+		if (stat == 0 || stat == 0xffffffff)
+			return 0;
+	} else
+		stat = 1;
+	return stat;
+}
+
+static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
+{
+	void __iomem *regs = mvi->regs;
+
+	/* clear CMD_CMPLT ASAP */
+	mw32_f(MVS_INT_STAT, CINT_DONE);
+#ifndef MVS_USE_TASKLET
+	spin_lock(&mvi->lock);
+#endif
+	mvs_int_full(mvi);
+#ifndef MVS_USE_TASKLET
+	spin_unlock(&mvi->lock);
+#endif
+	return IRQ_HANDLED;
+}
+
+static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx)
+{
+	u32 tmp;
+	mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32));
+	mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32));
+	do {
+		tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3));
+	} while (tmp & 1 << (slot_idx % 32));
+	do {
+		tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3));
+	} while (tmp & 1 << (slot_idx % 32));
+}
+
+static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
+				u32 tfs)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp;
+
+	if (type == PORT_TYPE_SATA) {
+		tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
+		mw32(MVS_INT_STAT_SRS_0, tmp);
+	}
+	mw32(MVS_INT_STAT, CINT_CI_STOP);
+	tmp = mr32(MVS_PCS) | 0xFF00;
+	mw32(MVS_PCS, tmp);
+}
+
+static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp, offs;
+
+	if (*tfs == MVS_ID_NOT_MAPPED)
+		return;
+
+	offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
+	if (*tfs < 16) {
+		tmp = mr32(MVS_PCS);
+		mw32(MVS_PCS, tmp & ~offs);
+	} else {
+		tmp = mr32(MVS_CTL);
+		mw32(MVS_CTL, tmp & ~offs);
+	}
+
+	tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs);
+	if (tmp)
+		mw32(MVS_INT_STAT_SRS_0, tmp);
+
+	*tfs = MVS_ID_NOT_MAPPED;
+	return;
+}
+
+static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
+{
+	int i;
+	u32 tmp, offs;
+	void __iomem *regs = mvi->regs;
+
+	if (*tfs != MVS_ID_NOT_MAPPED)
+		return 0;
+
+	tmp = mr32(MVS_PCS);
+
+	for (i = 0; i < mvi->chip->srs_sz; i++) {
+		if (i == 16)
+			tmp = mr32(MVS_CTL);
+		offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
+		if (!(tmp & offs)) {
+			*tfs = i;
+
+			if (i < 16)
+				mw32(MVS_PCS, tmp | offs);
+			else
+				mw32(MVS_CTL, tmp | offs);
+			tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i);
+			if (tmp)
+				mw32(MVS_INT_STAT_SRS_0, tmp);
+			return 0;
+		}
+	}
+	return MVS_ID_NOT_MAPPED;
+}
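
Note: the 64xx allocator above walks the per-chip srs_sz SATA register sets, whose enable bits live in two banks (sets 0-15 in MVS_PCS, 16 and up in MVS_CTL), and claims the first clear bit. A minimal user-space sketch of the same two-bank scan; the bank contents, EN_SHIFT value, and NOT_MAPPED sentinel are mock assumptions, not the real registers:

    #include <stdint.h>
    #include <stdio.h>

    #define EN_SHIFT   16            /* mirrors PCS_EN_SATA_REG_SHIFT */
    #define NOT_MAPPED 0xff          /* mirrors MVS_ID_NOT_MAPPED */

    static unsigned assign_reg_set(uint32_t bank[2], int srs_sz)
    {
        uint32_t tmp = bank[0];
        for (int i = 0; i < srs_sz; i++) {
            if (i == 16)
                tmp = bank[1];       /* sets 16+ live in the second bank */
            uint32_t offs = 1U << ((i & 0x0f) + EN_SHIFT);
            if (!(tmp & offs)) {
                bank[i < 16 ? 0 : 1] = tmp | offs;  /* claim the set */
                return i;
            }
        }
        return NOT_MAPPED;
    }

    int main(void)
    {
        uint32_t bank[2] = { 0x7U << EN_SHIFT, 0 };  /* sets 0-2 busy */
        printf("first free register set: %u\n", assign_reg_set(bank, 32));
        return 0;
    }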
+
+void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
+{
+	int i;
+	struct scatterlist *sg;
+	struct mvs_prd *buf_prd = prd;
+	for_each_sg(scatter, sg, nr, i) {
+		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
+		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
+		buf_prd++;
+	}
+}
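
Note: mvs_64xx_make_prd emits one PRD entry per scatterlist segment, a 64-bit DMA address plus a 32-bit length, both little-endian. A standalone sketch of the same fill loop; the seg struct stands in for the kernel scatterlist, and the cpu_to_le64/32 conversion is elided:

    #include <stdint.h>
    #include <stdio.h>

    struct seg   { uint64_t dma; uint32_t len; };        /* mock scatterlist entry */
    struct prd64 { uint64_t addr; uint32_t rsvd, len; }; /* mirrors struct mvs_prd */

    static void make_prd(const struct seg *sg, int nr, struct prd64 *prd)
    {
        for (int i = 0; i < nr; i++, prd++) {
            prd->addr = sg[i].dma;   /* kernel code wraps these in cpu_to_le64/32 */
            prd->len  = sg[i].len;
        }
    }

    int main(void)
    {
        struct seg sg[2] = { { 0x1000, 512 }, { 0x2000, 4096 } };
        struct prd64 prd[2];
        make_prd(sg, 2, prd);
        printf("prd[1]: addr=0x%llx len=%u\n",
               (unsigned long long)prd[1].addr, prd[1].len);
        return 0;
    }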
+
+static int mvs_64xx_oob_done(struct mvs_info *mvi, int i)
+{
+	u32 phy_st;
+	mvs_write_port_cfg_addr(mvi, i,
+			PHYR_PHY_STAT);
+	phy_st = mvs_read_port_cfg_data(mvi, i);
+	if (phy_st & PHY_OOB_DTCTD)
+		return 1;
+	return 0;
+}
+
+static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i,
+				struct sas_identify_frame *id)
+{
+	struct mvs_phy *phy = &mvi->phy[i];
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+	sas_phy->linkrate =
+		(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
+			PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
+
+	phy->minimum_linkrate =
+		(phy->phy_status &
+			PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
+	phy->maximum_linkrate =
+		(phy->phy_status &
+			PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
+
+	mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
+	phy->dev_info = mvs_read_port_cfg_data(mvi, i);
+
+	mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
+	phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
+
+	mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
+	phy->att_dev_sas_addr =
+	     (u64) mvs_read_port_cfg_data(mvi, i) << 32;
+	mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
+	phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
+	phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr);
+}
+
+static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
+{
+	u32 tmp;
+	struct mvs_phy *phy = &mvi->phy[i];
+	/* workaround for HW phy decoding error on 1.5g disk drive */
+	mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
+	tmp = mvs_read_port_vsr_data(mvi, i);
+	if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
+	     PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
+		SAS_LINK_RATE_1_5_GBPS)
+		tmp &= ~PHY_MODE6_LATECLK;
+	else
+		tmp |= PHY_MODE6_LATECLK;
+	mvs_write_port_vsr_data(mvi, i, tmp);
+}
+
+void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
+			struct sas_phy_linkrates *rates)
+{
+	u32 lrmin = 0, lrmax = 0;
+	u32 tmp;
+
+	tmp = mvs_read_phy_ctl(mvi, phy_id);
+	lrmin = (rates->minimum_linkrate << 8);
+	lrmax = (rates->maximum_linkrate << 12);
+
+	if (lrmin) {
+		tmp &= ~(0xf << 8);
+		tmp |= lrmin;
+	}
+	if (lrmax) {
+		tmp &= ~(0xf << 12);
+		tmp |= lrmax;
+	}
+	mvs_write_phy_ctl(mvi, phy_id, tmp);
+	mvs_64xx_phy_reset(mvi, phy_id, 1);
+}
+
+static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
+{
+	u32 tmp;
+	void __iomem *regs = mvi->regs;
+	tmp = mr32(MVS_PCS);
+	mw32(MVS_PCS, tmp & 0xFFFF);
+	mw32(MVS_PCS, tmp);
+	tmp = mr32(MVS_CTL);
+	mw32(MVS_CTL, tmp & 0xFFFF);
+	mw32(MVS_CTL, tmp);
+}
+
+
+u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs_ex;
+	return ior32(SPI_DATA_REG_64XX);
+}
+
+void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
+{
+	void __iomem *regs = mvi->regs_ex;
+	iow32(SPI_DATA_REG_64XX, data);
+}
+
+
+int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
+			u32      *dwCmd,
+			u8       cmd,
+			u8       read,
+			u8       length,
+			u32      addr
+			)
+{
+	u32  dwTmp;
+
+	dwTmp = ((u32)cmd << 24) | ((u32)length << 19);
+	if (read)
+		dwTmp |= 1U<<23;
+
+	if (addr != MV_MAX_U32) {
+		dwTmp |= 1U<<22;
+		dwTmp |= (addr & 0x0003FFFF);
+	}
+
+	*dwCmd = dwTmp;
+	return 0;
+}
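
Note: the SPI command word built above packs the opcode into bits 31:24, a read flag at bit 23, an address-valid flag at bit 22, the transfer length starting at bit 19, and the 18-bit flash address in the low bits. A worked sketch of the packing; the opcode 0x03 and the operand values are arbitrary examples, not values the driver is known to use:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t spi_buildcmd(uint8_t cmd, int read, uint8_t len, uint32_t addr)
    {
        uint32_t w = ((uint32_t)cmd << 24) | ((uint32_t)len << 19);
        if (read)
            w |= 1U << 23;
        if (addr != UINT32_MAX) {    /* MV_MAX_U32 sentinel in the driver */
            w |= 1U << 22;
            w |= addr & 0x0003FFFF;
        }
        return w;
    }

    int main(void)
    {
        /* cmd 0x03 / read / len 4 / addr 0x10 -> 0x03E00010 */
        printf("0x%08X\n", spi_buildcmd(0x03, 1, 4, 0x10));
        return 0;
    }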
+
+
+int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
+{
+	void __iomem *regs = mvi->regs_ex;
+	int     retry;
+
+	for (retry = 0; retry < 1; retry++) {
+		iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE);
+		iow32(SPI_CMD_REG_64XX, cmd);
+		iow32(SPI_CTRL_REG_64XX,
+			SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART);
+	}
+
+	return 0;
+}
+
+int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
+{
+	void __iomem *regs = mvi->regs_ex;
+	u32 i, dwTmp;
+
+	for (i = 0; i < timeout; i++) {
+		dwTmp = ior32(SPI_CTRL_REG_64XX);
+		if (!(dwTmp & SPI_CTRL_SPISTART))
+			return 0;
+		msleep(10);
+	}
+
+	return -1;
+}
+
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
+{
+	int i;
+	struct mvs_prd *buf_prd = prd;
+	buf_prd	+= from;
+	for (i = 0; i < MAX_SG_ENTRY - from; i++) {
+		buf_prd->addr = cpu_to_le64(buf_dma);
+		buf_prd->len = cpu_to_le32(buf_len);
+		++buf_prd;
+	}
+}
+#endif
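
Note: the hotplug DMA fix pads every PRD slot past the real segments with a single known-safe dummy buffer; the intent, as far as one can infer from the code (the commit text does not say), is that hardware reading beyond the live entries never touches an unmapped address. A user-space sketch of the padding loop with a mocked struct layout:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_SG_ENTRY 64          /* matches the 64xx header */

    struct prd64 { uint64_t addr; uint32_t rsvd, len; };

    static void fix_dma(uint64_t dummy_dma, uint32_t dummy_len,
                        int from, struct prd64 *prd)
    {
        for (int i = from; i < MAX_SG_ENTRY; i++) {
            prd[i].addr = dummy_dma;  /* every unused slot -> safe buffer */
            prd[i].len  = dummy_len;
        }
    }

    int main(void)
    {
        static struct prd64 table[MAX_SG_ENTRY];
        fix_dma(0xdead000, 8, 2, table);   /* pad slots 2..63 */
        printf("slot 63 addr 0x%llx\n", (unsigned long long)table[63].addr);
        return 0;
    }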
+
+const struct mvs_dispatch mvs_64xx_dispatch = {
+	"mv64xx",
+	mvs_64xx_init,
+	NULL,
+	mvs_64xx_ioremap,
+	mvs_64xx_iounmap,
+	mvs_64xx_isr,
+	mvs_64xx_isr_status,
+	mvs_64xx_interrupt_enable,
+	mvs_64xx_interrupt_disable,
+	mvs_read_phy_ctl,
+	mvs_write_phy_ctl,
+	mvs_read_port_cfg_data,
+	mvs_write_port_cfg_data,
+	mvs_write_port_cfg_addr,
+	mvs_read_port_vsr_data,
+	mvs_write_port_vsr_data,
+	mvs_write_port_vsr_addr,
+	mvs_read_port_irq_stat,
+	mvs_write_port_irq_stat,
+	mvs_read_port_irq_mask,
+	mvs_write_port_irq_mask,
+	mvs_get_sas_addr,
+	mvs_64xx_command_active,
+	mvs_64xx_issue_stop,
+	mvs_start_delivery,
+	mvs_rx_update,
+	mvs_int_full,
+	mvs_64xx_assign_reg_set,
+	mvs_64xx_free_reg_set,
+	mvs_get_prd_size,
+	mvs_get_prd_count,
+	mvs_64xx_make_prd,
+	mvs_64xx_detect_porttype,
+	mvs_64xx_oob_done,
+	mvs_64xx_fix_phy_info,
+	mvs_64xx_phy_work_around,
+	mvs_64xx_phy_set_link_rate,
+	mvs_hw_max_link_rate,
+	mvs_64xx_phy_disable,
+	mvs_64xx_phy_enable,
+	mvs_64xx_phy_reset,
+	mvs_64xx_stp_reset,
+	mvs_64xx_clear_active_cmds,
+	mvs_64xx_spi_read_data,
+	mvs_64xx_spi_write_data,
+	mvs_64xx_spi_buildcmd,
+	mvs_64xx_spi_issuecmd,
+	mvs_64xx_spi_waitdataready,
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+	mvs_64xx_fix_dma,
+#endif
+};
+
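Note: mvs_64xx_dispatch is a vtable-style hardware abstraction: one ops table per chip family, mixing shared helpers (mvs_*) with 64xx-specific routines, selected once at probe time. A minimal sketch of the pattern; the names are illustrative, not the driver's real struct layout:

    #include <stdio.h>

    struct chip_ops {
        const char *name;
        int  (*init)(void);
        void (*intr_enable)(void);
    };

    static int  gen1_init(void)        { puts("gen1 init"); return 0; }
    static void gen1_intr_enable(void) { puts("gen1 irq on"); }

    static const struct chip_ops gen1_ops = {
        "gen1", gen1_init, gen1_intr_enable,
    };

    int main(void)
    {
        const struct chip_ops *ops = &gen1_ops;  /* chosen by PCI ID at probe */
        ops->init();
        ops->intr_enable();
        return 0;
    }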

+ 151 - 0
drivers/scsi/mvsas/mv_64xx.h

@@ -0,0 +1,151 @@
+/*
+ * Marvell 88SE64xx hardware specific header file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MVS64XX_REG_H_
+#define _MVS64XX_REG_H_
+
+#include <linux/types.h>
+
+#define MAX_LINK_RATE		SAS_LINK_RATE_3_0_GBPS
+
+/* enhanced mode registers (BAR4) */
+enum hw_registers {
+	MVS_GBL_CTL		= 0x04,  /* global control */
+	MVS_GBL_INT_STAT	= 0x08,  /* global irq status */
+	MVS_GBL_PI		= 0x0C,  /* ports implemented bitmask */
+
+	MVS_PHY_CTL		= 0x40,  /* SOC PHY Control */
+	MVS_PORTS_IMP		= 0x9C,  /* SOC Port Implemented */
+
+	MVS_GBL_PORT_TYPE	= 0xa0,  /* port type */
+
+	MVS_CTL			= 0x100, /* SAS/SATA port configuration */
+	MVS_PCS			= 0x104, /* SAS/SATA port control/status */
+	MVS_CMD_LIST_LO		= 0x108, /* cmd list addr */
+	MVS_CMD_LIST_HI		= 0x10C,
+	MVS_RX_FIS_LO		= 0x110, /* RX FIS list addr */
+	MVS_RX_FIS_HI		= 0x114,
+
+	MVS_TX_CFG		= 0x120, /* TX configuration */
+	MVS_TX_LO		= 0x124, /* TX (delivery) ring addr */
+	MVS_TX_HI		= 0x128,
+
+	MVS_TX_PROD_IDX		= 0x12C, /* TX producer pointer */
+	MVS_TX_CONS_IDX		= 0x130, /* TX consumer pointer (RO) */
+	MVS_RX_CFG		= 0x134, /* RX configuration */
+	MVS_RX_LO		= 0x138, /* RX (completion) ring addr */
+	MVS_RX_HI		= 0x13C,
+	MVS_RX_CONS_IDX		= 0x140, /* RX consumer pointer (RO) */
+
+	MVS_INT_COAL		= 0x148, /* Int coalescing config */
+	MVS_INT_COAL_TMOUT	= 0x14C, /* Int coalescing timeout */
+	MVS_INT_STAT		= 0x150, /* Central int status */
+	MVS_INT_MASK		= 0x154, /* Central int enable */
+	MVS_INT_STAT_SRS_0	= 0x158, /* SATA register set status */
+	MVS_INT_MASK_SRS_0	= 0x15C,
+
+					 /* ports 1-3 follow after this */
+	MVS_P0_INT_STAT		= 0x160, /* port0 interrupt status */
+	MVS_P0_INT_MASK		= 0x164, /* port0 interrupt mask */
+					 /* ports 5-7 follow after this */
+	MVS_P4_INT_STAT		= 0x200, /* Port4 interrupt status */
+	MVS_P4_INT_MASK		= 0x204, /* Port4 interrupt enable mask */
+
+					 /* ports 1-3 follow after this */
+	MVS_P0_SER_CTLSTAT	= 0x180, /* port0 serial control/status */
+					 /* ports 5-7 follow after this */
+	MVS_P4_SER_CTLSTAT	= 0x220, /* port4 serial control/status */
+
+	MVS_CMD_ADDR		= 0x1B8, /* Command register port (addr) */
+	MVS_CMD_DATA		= 0x1BC, /* Command register port (data) */
+
+					 /* ports 1-3 follow after this */
+	MVS_P0_CFG_ADDR		= 0x1C0, /* port0 phy register address */
+	MVS_P0_CFG_DATA		= 0x1C4, /* port0 phy register data */
+					 /* ports 5-7 follow after this */
+	MVS_P4_CFG_ADDR		= 0x230, /* Port4 config address */
+	MVS_P4_CFG_DATA		= 0x234, /* Port4 config data */
+
+					 /* ports 1-3 follow after this */
+	MVS_P0_VSR_ADDR		= 0x1E0, /* port0 VSR address */
+	MVS_P0_VSR_DATA		= 0x1E4, /* port0 VSR data */
+					 /* ports 5-7 follow after this */
+	MVS_P4_VSR_ADDR		= 0x250, /* port4 VSR addr */
+	MVS_P4_VSR_DATA		= 0x254, /* port4 VSR data */
+};
+
+enum pci_cfg_registers {
+	PCR_PHY_CTL		= 0x40,
+	PCR_PHY_CTL2		= 0x90,
+	PCR_DEV_CTRL		= 0xE8,
+	PCR_LINK_STAT		= 0xF2,
+};
+
+/*  SAS/SATA Vendor Specific Port Registers */
+enum sas_sata_vsp_regs {
+	VSR_PHY_STAT		= 0x00, /* Phy Status */
+	VSR_PHY_MODE1		= 0x01, /* phy tx */
+	VSR_PHY_MODE2		= 0x02, /* tx scc */
+	VSR_PHY_MODE3		= 0x03, /* pll */
+	VSR_PHY_MODE4		= 0x04, /* VCO */
+	VSR_PHY_MODE5		= 0x05, /* Rx */
+	VSR_PHY_MODE6		= 0x06, /* CDR */
+	VSR_PHY_MODE7		= 0x07, /* Impedance */
+	VSR_PHY_MODE8		= 0x08, /* Voltage */
+	VSR_PHY_MODE9		= 0x09, /* Test */
+	VSR_PHY_MODE10		= 0x0A, /* Power */
+	VSR_PHY_MODE11		= 0x0B, /* Phy Mode */
+	VSR_PHY_VS0		= 0x0C, /* Vendor Specific 0 */
+	VSR_PHY_VS1		= 0x0D, /* Vendor Specific 1 */
+};
+
+enum chip_register_bits {
+	PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
+	PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
+	PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
+	PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
+			(0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
+};
+
+#define MAX_SG_ENTRY		64
+
+struct mvs_prd {
+	__le64			addr;		/* 64-bit buffer address */
+	__le32			reserved;
+	__le32			len;		/* 16-bit length */
+};
+
+#define SPI_CTRL_REG				0xc0
+#define SPI_CTRL_VENDOR_ENABLE		(1U<<29)
+#define SPI_CTRL_SPIRDY			(1U<<22)
+#define SPI_CTRL_SPISTART		(1U<<20)
+
+#define SPI_CMD_REG		0xc4
+#define SPI_DATA_REG		0xc8
+
+#define SPI_CTRL_REG_64XX		0x10
+#define SPI_CMD_REG_64XX		0x14
+#define SPI_DATA_REG_64XX		0x18
+
+#endif

+ 672 - 0
drivers/scsi/mvsas/mv_94xx.c

@@ -0,0 +1,672 @@
+/*
+ * Marvell 88SE94xx hardware specific
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#include "mv_sas.h"
+#include "mv_94xx.h"
+#include "mv_chips.h"
+
+static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
+{
+	u32 reg;
+	struct mvs_phy *phy = &mvi->phy[i];
+	u32 phy_status;
+
+	mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3);
+	reg = mvs_read_port_vsr_data(mvi, i);
+	phy_status = ((reg & 0x3f0000) >> 16) & 0xff;
+	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+	switch (phy_status) {
+	case 0x10:
+		phy->phy_type |= PORT_TYPE_SAS;
+		break;
+	case 0x1d:
+	default:
+		phy->phy_type |= PORT_TYPE_SATA;
+		break;
+	}
+}
+
+static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp;
+
+	tmp = mr32(MVS_PCS);
+	tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
+	mw32(MVS_PCS, tmp);
+}
+
+static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
+{
+	u32 tmp;
+
+	tmp = mvs_read_port_irq_stat(mvi, phy_id);
+	tmp &= ~PHYEV_RDY_CH;
+	mvs_write_port_irq_stat(mvi, phy_id, tmp);
+	if (hard) {
+		tmp = mvs_read_phy_ctl(mvi, phy_id);
+		tmp |= PHY_RST_HARD;
+		mvs_write_phy_ctl(mvi, phy_id, tmp);
+		do {
+			tmp = mvs_read_phy_ctl(mvi, phy_id);
+		} while (tmp & PHY_RST_HARD);
+	} else {
+		mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT);
+		tmp = mvs_read_port_vsr_data(mvi, phy_id);
+		tmp |= PHY_RST;
+		mvs_write_port_vsr_data(mvi, phy_id, tmp);
+	}
+}
+
+static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
+{
+	u32 tmp;
+	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
+	tmp = mvs_read_port_vsr_data(mvi, phy_id);
+	mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000);
+}
+
+static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
+{
+	mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4);
+	mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
+	mvs_write_port_vsr_addr(mvi, phy_id, 0x104);
+	mvs_write_port_vsr_data(mvi, phy_id, 0x00018080);
+	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
+	mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff);
+}
+
+static int __devinit mvs_94xx_init(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs;
+	int i;
+	u32 tmp, cctl;
+
+	mvs_show_pcie_usage(mvi);
+	if (mvi->flags & MVF_FLAG_SOC) {
+		tmp = mr32(MVS_PHY_CTL);
+		tmp &= ~PCTL_PWR_OFF;
+		tmp |= PCTL_PHY_DSBL;
+		mw32(MVS_PHY_CTL, tmp);
+	}
+
+	/* Init Chip */
+	/* make sure RST is set; HBA_RST /should/ have done that for us */
+	cctl = mr32(MVS_CTL) & 0xFFFF;
+	if (cctl & CCTL_RST)
+		cctl &= ~CCTL_RST;
+	else
+		mw32_f(MVS_CTL, cctl | CCTL_RST);
+
+	if (mvi->flags & MVF_FLAG_SOC) {
+		tmp = mr32(MVS_PHY_CTL);
+		tmp &= ~PCTL_PWR_OFF;
+		tmp |= PCTL_COM_ON;
+		tmp &= ~PCTL_PHY_DSBL;
+		tmp |= PCTL_LINK_RST;
+		mw32(MVS_PHY_CTL, tmp);
+		msleep(100);
+		tmp &= ~PCTL_LINK_RST;
+		mw32(MVS_PHY_CTL, tmp);
+		msleep(100);
+	}
+
+	/* reset control */
+	mw32(MVS_PCS, 0);		/* MVS_PCS */
+	mw32(MVS_STP_REG_SET_0, 0);
+	mw32(MVS_STP_REG_SET_1, 0);
+
+	/* init phys */
+	mvs_phy_hacks(mvi);
+
+	/* disable Multiplexing, enable phy implemented */
+	mw32(MVS_PORTS_IMP, 0xFF);
+
+
+	mw32(MVS_PA_VSR_ADDR, 0x00000104);
+	mw32(MVS_PA_VSR_PORT, 0x00018080);
+	mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8);
+	mw32(MVS_PA_VSR_PORT, 0x0084ffff);
+
+	/* set LED blink when IO*/
+	mw32(MVS_PA_VSR_ADDR, 0x00000030);
+	tmp = mr32(MVS_PA_VSR_PORT);
+	tmp &= 0xFFFF00FF;
+	tmp |= 0x00003300;
+	mw32(MVS_PA_VSR_PORT, tmp);
+
+	mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
+	mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
+
+	mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
+	mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
+
+	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
+	mw32(MVS_TX_LO, mvi->tx_dma);
+	mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
+
+	mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
+	mw32(MVS_RX_LO, mvi->rx_dma);
+	mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
+
+	for (i = 0; i < mvi->chip->n_phy; i++) {
+		mvs_94xx_phy_disable(mvi, i);
+		/* set phy local SAS address */
+		mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
+						(mvi->phy[i].dev_sas_addr));
+
+		mvs_94xx_enable_xmt(mvi, i);
+		mvs_94xx_phy_enable(mvi, i);
+
+		mvs_94xx_phy_reset(mvi, i, 1);
+		msleep(500);
+		mvs_94xx_detect_porttype(mvi, i);
+	}
+
+	if (mvi->flags & MVF_FLAG_SOC) {
+		/* set select registers */
+		writel(0x0E008000, regs + 0x000);
+		writel(0x59000008, regs + 0x004);
+		writel(0x20, regs + 0x008);
+		writel(0x20, regs + 0x00c);
+		writel(0x20, regs + 0x010);
+		writel(0x20, regs + 0x014);
+		writel(0x20, regs + 0x018);
+		writel(0x20, regs + 0x01c);
+	}
+	for (i = 0; i < mvi->chip->n_phy; i++) {
+		/* clear phy int status */
+		tmp = mvs_read_port_irq_stat(mvi, i);
+		tmp &= ~PHYEV_SIG_FIS;
+		mvs_write_port_irq_stat(mvi, i, tmp);
+
+		/* set phy int mask */
+		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH |
+			PHYEV_ID_DONE  | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ;
+		mvs_write_port_irq_mask(mvi, i, tmp);
+
+		msleep(100);
+		mvs_update_phyinfo(mvi, i, 1);
+	}
+
+	/* FIXME: update wide port bitmaps */
+
+	/* little endian for open address and command table, etc. */
+	/*
+	 * it seems (from the spec) that turning on big-endian won't do us
+	 * any good even on big-endian machines; needs further confirmation
+	 */
+	cctl = mr32(MVS_CTL);
+	cctl |= CCTL_ENDIAN_CMD;
+	cctl |= CCTL_ENDIAN_DATA;
+	cctl &= ~CCTL_ENDIAN_OPEN;
+	cctl |= CCTL_ENDIAN_RSP;
+	mw32_f(MVS_CTL, cctl);
+
+	/* reset CMD queue */
+	tmp = mr32(MVS_PCS);
+	tmp |= PCS_CMD_RST;
+	mw32(MVS_PCS, tmp);
+	/* interrupt coalescing may cause a missed HW interrupt in some
+	 * cases; also the max coalescing count is 0x1ff while our max slot
+	 * count is 0x200, which would truncate the count to 0, so just
+	 * disable coalescing here.
+	 */
+	tmp = 0;
+	mw32(MVS_INT_COAL, tmp);
+
+	tmp = 0x100;
+	mw32(MVS_INT_COAL_TMOUT, tmp);
+
+	/* ladies and gentlemen, start your engines */
+	mw32(MVS_TX_CFG, 0);
+	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
+	mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
+	/* enable CMD/CMPL_Q/RESP mode */
+	mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN |
+		PCS_CMD_EN | PCS_CMD_STOP_ERR);
+
+	/* enable completion queue interrupt */
+	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
+		CINT_DMA_PCIE);
+	tmp |= CINT_PHY_MASK;
+	mw32(MVS_INT_MASK, tmp);
+
+	/* Enable SRS interrupt */
+	mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
+
+	return 0;
+}
+
+static int mvs_94xx_ioremap(struct mvs_info *mvi)
+{
+	if (!mvs_ioremap(mvi, 2, -1)) {
+		mvi->regs_ex = mvi->regs + 0x10200;
+		mvi->regs += 0x20000;
+		if (mvi->id == 1)
+			mvi->regs += 0x4000;
+		return 0;
+	}
+	return -1;
+}
+
+static void mvs_94xx_iounmap(struct mvs_info *mvi)
+{
+	if (mvi->regs) {
+		mvi->regs -= 0x20000;
+		if (mvi->id == 1)
+			mvi->regs -= 0x4000;
+		mvs_iounmap(mvi->regs);
+	}
+}
+
+static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs_ex;
+	u32 tmp;
+
+	tmp = mr32(MVS_GBL_CTL);
+	tmp |= (IRQ_SAS_A | IRQ_SAS_B);
+	mw32(MVS_GBL_INT_STAT, tmp);
+	writel(tmp, regs + 0x0C);
+	writel(tmp, regs + 0x10);
+	writel(tmp, regs + 0x14);
+	writel(tmp, regs + 0x18);
+	mw32(MVS_GBL_CTL, tmp);
+}
+
+static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs_ex;
+	u32 tmp;
+
+	tmp = mr32(MVS_GBL_CTL);
+
+	tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
+	mw32(MVS_GBL_INT_STAT, tmp);
+	writel(tmp, regs + 0x0C);
+	writel(tmp, regs + 0x10);
+	writel(tmp, regs + 0x14);
+	writel(tmp, regs + 0x18);
+	mw32(MVS_GBL_CTL, tmp);
+}
+
+static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
+{
+	void __iomem *regs = mvi->regs_ex;
+	u32 stat = 0;
+	if (!(mvi->flags & MVF_FLAG_SOC)) {
+		stat = mr32(MVS_GBL_INT_STAT);
+
+		if (!(stat & (IRQ_SAS_A | IRQ_SAS_B)))
+			return 0;
+	}
+	return stat;
+}
+
+static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
+{
+	void __iomem *regs = mvi->regs;
+
+	if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
+			((stat & IRQ_SAS_B) && mvi->id == 1)) {
+		mw32_f(MVS_INT_STAT, CINT_DONE);
+	#ifndef MVS_USE_TASKLET
+		spin_lock(&mvi->lock);
+	#endif
+		mvs_int_full(mvi);
+	#ifndef MVS_USE_TASKLET
+		spin_unlock(&mvi->lock);
+	#endif
+	}
+	return IRQ_HANDLED;
+}
+
+static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
+{
+	u32 tmp;
+	mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32));
+	do {
+		tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3));
+	} while (tmp & 1 << (slot_idx % 32));
+}
+
+static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
+				u32 tfs)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp;
+
+	if (type == PORT_TYPE_SATA) {
+		tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
+		mw32(MVS_INT_STAT_SRS_0, tmp);
+	}
+	mw32(MVS_INT_STAT, CINT_CI_STOP);
+	tmp = mr32(MVS_PCS) | 0xFF00;
+	mw32(MVS_PCS, tmp);
+}
+
+static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp;
+	u8 reg_set = *tfs;
+
+	if (*tfs == MVS_ID_NOT_MAPPED)
+		return;
+
+	mvi->sata_reg_set &= ~bit(reg_set);
+	if (reg_set < 32) {
+		w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
+		tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set;
+		if (tmp)
+			mw32(MVS_INT_STAT_SRS_0, tmp);
+	} else {
+		w_reg_set_enable(reg_set, mvi->sata_reg_set);
+		tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set;
+		if (tmp)
+			mw32(MVS_INT_STAT_SRS_1, tmp);
+	}
+
+	*tfs = MVS_ID_NOT_MAPPED;
+
+	return;
+}
+
+static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
+{
+	int i;
+	void __iomem *regs = mvi->regs;
+
+	if (*tfs != MVS_ID_NOT_MAPPED)
+		return 0;
+
+	i = mv_ffc64(mvi->sata_reg_set);
+	if (i > 32) {
+		mvi->sata_reg_set |= bit(i);
+		w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
+		*tfs = i;
+		return 0;
+	} else if (i >= 0) {
+		mvi->sata_reg_set |= bit(i);
+		w_reg_set_enable(i, (u32)mvi->sata_reg_set);
+		*tfs = i;
+		return 0;
+	}
+	return MVS_ID_NOT_MAPPED;
+}
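
Note: unlike the 64xx two-bank register scan, the 94xx path tracks SATA register sets in a 64-bit software bitmap (mvi->sata_reg_set) and claims the first clear bit via mv_ffc64, writing only the affected 32-bit half back to hardware. A self-contained sketch of the bitmap side, with the hardware writeback omitted and a naive find-first-clear standing in for mv_ffc64:

    #include <stdint.h>
    #include <stdio.h>

    static int ffc64(uint64_t v)              /* find first clear bit, or -1 */
    {
        for (int i = 0; i < 64; i++)
            if (!(v & (1ULL << i)))
                return i;
        return -1;
    }

    static int assign_reg_set(uint64_t *map)
    {
        int i = ffc64(*map);
        if (i < 0)
            return -1;                        /* all 64 register sets busy */
        *map |= 1ULL << i;                    /* claim it */
        return i;
    }

    int main(void)
    {
        uint64_t map = 0xFULL;                /* sets 0-3 already in use */
        int set = assign_reg_set(&map);
        printf("claimed set %d, map now 0x%llx\n",
               set, (unsigned long long)map);
        return 0;
    }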
+
+static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
+{
+	int i;
+	struct scatterlist *sg;
+	struct mvs_prd *buf_prd = prd;
+	for_each_sg(scatter, sg, nr, i) {
+		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
+		buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
+		buf_prd++;
+	}
+}
+
+static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
+{
+	u32 phy_st;
+	phy_st = mvs_read_phy_ctl(mvi, i);
+	if (phy_st & PHY_READY_MASK)	/* phy ready */
+		return 1;
+	return 0;
+}
+
+static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
+					struct sas_identify_frame *id)
+{
+	int i;
+	u32 id_frame[7];
+
+	for (i = 0; i < 7; i++) {
+		mvs_write_port_cfg_addr(mvi, port_id,
+					CONFIG_ID_FRAME0 + i * 4);
+		id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
+	}
+	memcpy(id, id_frame, 28);
+}
+
+static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
+					struct sas_identify_frame *id)
+{
+	int i;
+	u32 id_frame[7];
+
+	/* mvs_hexdump(28, (u8 *)id_frame, 0); */
+	for (i = 0; i < 7; i++) {
+		mvs_write_port_cfg_addr(mvi, port_id,
+					CONFIG_ATT_ID_FRAME0 + i * 4);
+		id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
+		mv_dprintk("94xx phy %d attached frame %d %x.\n",
+			port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
+	}
+	/* mvs_hexdump(28, (u8 *)id_frame, 0); */
+	memcpy(id, id_frame, 28);
+}
+
+static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id)
+{
+	u32 att_dev_info = 0;
+
+	att_dev_info |= id->dev_type;
+	if (id->stp_iport)
+		att_dev_info |= PORT_DEV_STP_INIT;
+	if (id->smp_iport)
+		att_dev_info |= PORT_DEV_SMP_INIT;
+	if (id->ssp_iport)
+		att_dev_info |= PORT_DEV_SSP_INIT;
+	if (id->stp_tport)
+		att_dev_info |= PORT_DEV_STP_TRGT;
+	if (id->smp_tport)
+		att_dev_info |= PORT_DEV_SMP_TRGT;
+	if (id->ssp_tport)
+		att_dev_info |= PORT_DEV_SSP_TRGT;
+
+	att_dev_info |= (u32)id->phy_id<<24;
+	return att_dev_info;
+}
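
Note: mvs_94xx_make_dev_info folds the capability bits of a received IDENTIFY frame into the attached-device info word, with the phy id in the top byte. A sketch with a mocked identify frame; the bit values echo two of the PORT_DEV_* enums, and the struct is not the real sas_identify_frame:

    #include <stdint.h>
    #include <stdio.h>

    #define DEV_STP_INIT (1U << 9)   /* mirrors PORT_DEV_STP_INIT */
    #define DEV_SSP_TRGT (1U << 19)  /* mirrors PORT_DEV_SSP_TRGT */

    struct mock_id { unsigned dev_type:3, stp_iport:1, ssp_tport:1, phy_id:8; };

    static uint32_t make_dev_info(const struct mock_id *id)
    {
        uint32_t info = id->dev_type;
        if (id->stp_iport) info |= DEV_STP_INIT;
        if (id->ssp_tport) info |= DEV_SSP_TRGT;
        info |= (uint32_t)id->phy_id << 24;   /* phy id in the top byte */
        return info;
    }

    int main(void)
    {
        struct mock_id id = { 1, 1, 1, 3 };
        printf("dev_info = 0x%08X\n", make_dev_info(&id));  /* 0x03080201 */
        return 0;
    }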
+
+static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id)
+{
+	return mvs_94xx_make_dev_info(id);
+}
+
+static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
+				struct sas_identify_frame *id)
+{
+	struct mvs_phy *phy = &mvi->phy[i];
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+	mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status);
+	sas_phy->linkrate =
+		(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
+			PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
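+	/* the hw field encodes the negotiated rate as a small integer,
+	 * while the SAS_LINK_RATE_* enum starts at 8 for 1.5 Gbps; hence
+	 * the 0x8 bias below (assumed mapping, inferred from context) */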
+	sas_phy->linkrate += 0x8;
+	mv_dprintk("get link rate is %d\n", sas_phy->linkrate);
+	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+	phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
+	mvs_94xx_get_dev_identify_frame(mvi, i, id);
+	phy->dev_info = mvs_94xx_make_dev_info(id);
+
+	if (phy->phy_type & PORT_TYPE_SAS) {
+		mvs_94xx_get_att_identify_frame(mvi, i, id);
+		phy->att_dev_info = mvs_94xx_make_att_info(id);
+		phy->att_dev_sas_addr = *(u64 *)id->sas_addr;
+	} else {
+		phy->att_dev_info = PORT_DEV_STP_TRGT | 1;
+	}
+}
+
+void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
+			struct sas_phy_linkrates *rates)
+{
+	/* TODO */
+}
+
+static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
+{
+	u32 tmp;
+	void __iomem *regs = mvi->regs;
+	tmp = mr32(MVS_STP_REG_SET_0);
+	mw32(MVS_STP_REG_SET_0, 0);
+	mw32(MVS_STP_REG_SET_0, tmp);
+	tmp = mr32(MVS_STP_REG_SET_1);
+	mw32(MVS_STP_REG_SET_1, 0);
+	mw32(MVS_STP_REG_SET_1, tmp);
+}
+
+
+u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs_ex - 0x10200;
+	return mr32(SPI_RD_DATA_REG_94XX);
+}
+
+void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
+{
+	void __iomem *regs = mvi->regs_ex - 0x10200;
+	mw32(SPI_RD_DATA_REG_94XX, data);
+}
+
+
+int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
+				u32      *dwCmd,
+				u8       cmd,
+				u8       read,
+				u8       length,
+				u32      addr
+				)
+{
+	void __iomem *regs = mvi->regs_ex - 0x10200;
+	u32  dwTmp;
+
+	dwTmp = ((u32)cmd << 8) | ((u32)length << 4);
+	if (read)
+		dwTmp |= SPI_CTRL_READ_94XX;
+
+	if (addr != MV_MAX_U32) {
+		mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL));
+		dwTmp |= SPI_ADDR_VLD_94XX;
+	}
+
+	*dwCmd = dwTmp;
+	return 0;
+}
+
+
+int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
+{
+	void __iomem *regs = mvi->regs_ex - 0x10200;
+	mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX);
+
+	return 0;
+}
+
+int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
+{
+	void __iomem *regs = mvi->regs_ex - 0x10200;
+	u32   i, dwTmp;
+
+	for (i = 0; i < timeout; i++) {
+		dwTmp = mr32(SPI_CTRL_REG_94XX);
+		if (!(dwTmp & SPI_CTRL_SpiStart_94XX))
+			return 0;
+		msleep(10);
+	}
+
+	return -1;
+}
+
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
+{
+	int i;
+	struct mvs_prd *buf_prd = prd;
+	buf_prd += from;
+	for (i = 0; i < MAX_SG_ENTRY - from; i++) {
+		buf_prd->addr = cpu_to_le64(buf_dma);
+		buf_prd->im_len.len = cpu_to_le32(buf_len);
+		++buf_prd;
+	}
+}
+#endif
+
+const struct mvs_dispatch mvs_94xx_dispatch = {
+	"mv94xx",
+	mvs_94xx_init,
+	NULL,
+	mvs_94xx_ioremap,
+	mvs_94xx_iounmap,
+	mvs_94xx_isr,
+	mvs_94xx_isr_status,
+	mvs_94xx_interrupt_enable,
+	mvs_94xx_interrupt_disable,
+	mvs_read_phy_ctl,
+	mvs_write_phy_ctl,
+	mvs_read_port_cfg_data,
+	mvs_write_port_cfg_data,
+	mvs_write_port_cfg_addr,
+	mvs_read_port_vsr_data,
+	mvs_write_port_vsr_data,
+	mvs_write_port_vsr_addr,
+	mvs_read_port_irq_stat,
+	mvs_write_port_irq_stat,
+	mvs_read_port_irq_mask,
+	mvs_write_port_irq_mask,
+	mvs_get_sas_addr,
+	mvs_94xx_command_active,
+	mvs_94xx_issue_stop,
+	mvs_start_delivery,
+	mvs_rx_update,
+	mvs_int_full,
+	mvs_94xx_assign_reg_set,
+	mvs_94xx_free_reg_set,
+	mvs_get_prd_size,
+	mvs_get_prd_count,
+	mvs_94xx_make_prd,
+	mvs_94xx_detect_porttype,
+	mvs_94xx_oob_done,
+	mvs_94xx_fix_phy_info,
+	NULL,
+	mvs_94xx_phy_set_link_rate,
+	mvs_hw_max_link_rate,
+	mvs_94xx_phy_disable,
+	mvs_94xx_phy_enable,
+	mvs_94xx_phy_reset,
+	NULL,
+	mvs_94xx_clear_active_cmds,
+	mvs_94xx_spi_read_data,
+	mvs_94xx_spi_write_data,
+	mvs_94xx_spi_buildcmd,
+	mvs_94xx_spi_issuecmd,
+	mvs_94xx_spi_waitdataready,
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+	mvs_94xx_fix_dma,
+#endif
+};
+

+ 222 - 0
drivers/scsi/mvsas/mv_94xx.h

@@ -0,0 +1,222 @@
+/*
+ * Marvell 88SE94xx hardware specific header file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MVS94XX_REG_H_
+#define _MVS94XX_REG_H_
+
+#include <linux/types.h>
+
+#define MAX_LINK_RATE		SAS_LINK_RATE_6_0_GBPS
+
+enum hw_registers {
+	MVS_GBL_CTL		= 0x04,  /* global control */
+	MVS_GBL_INT_STAT	= 0x00,  /* global irq status */
+	MVS_GBL_PI		= 0x0C,  /* ports implemented bitmask */
+
+	MVS_PHY_CTL		= 0x40,  /* SOC PHY Control */
+	MVS_PORTS_IMP		= 0x9C,  /* SOC Port Implemented */
+
+	MVS_GBL_PORT_TYPE	= 0xa0,  /* port type */
+
+	MVS_CTL			= 0x100, /* SAS/SATA port configuration */
+	MVS_PCS			= 0x104, /* SAS/SATA port control/status */
+	MVS_CMD_LIST_LO		= 0x108, /* cmd list addr */
+	MVS_CMD_LIST_HI		= 0x10C,
+	MVS_RX_FIS_LO		= 0x110, /* RX FIS list addr */
+	MVS_RX_FIS_HI		= 0x114,
+	MVS_STP_REG_SET_0	= 0x118, /* STP/SATA Register Set Enable */
+	MVS_STP_REG_SET_1	= 0x11C,
+	MVS_TX_CFG		= 0x120, /* TX configuration */
+	MVS_TX_LO		= 0x124, /* TX (delivery) ring addr */
+	MVS_TX_HI		= 0x128,
+
+	MVS_TX_PROD_IDX		= 0x12C, /* TX producer pointer */
+	MVS_TX_CONS_IDX		= 0x130, /* TX consumer pointer (RO) */
+	MVS_RX_CFG		= 0x134, /* RX configuration */
+	MVS_RX_LO		= 0x138, /* RX (completion) ring addr */
+	MVS_RX_HI		= 0x13C,
+	MVS_RX_CONS_IDX		= 0x140, /* RX consumer pointer (RO) */
+
+	MVS_INT_COAL		= 0x148, /* Int coalescing config */
+	MVS_INT_COAL_TMOUT	= 0x14C, /* Int coalescing timeout */
+	MVS_INT_STAT		= 0x150, /* Central int status */
+	MVS_INT_MASK		= 0x154, /* Central int enable */
+	MVS_INT_STAT_SRS_0	= 0x158, /* SATA register set status */
+	MVS_INT_MASK_SRS_0	= 0x15C,
+	MVS_INT_STAT_SRS_1	= 0x160,
+	MVS_INT_MASK_SRS_1	= 0x164,
+	MVS_NON_NCQ_ERR_0	= 0x168, /* SRS Non-specific NCQ Error */
+	MVS_NON_NCQ_ERR_1	= 0x16C,
+	MVS_CMD_ADDR		= 0x170, /* Command register port (addr) */
+	MVS_CMD_DATA		= 0x174, /* Command register port (data) */
+	MVS_MEM_PARITY_ERR	= 0x178, /* Memory parity error */
+
+					 /* ports 1-3 follow after this */
+	MVS_P0_INT_STAT		= 0x180, /* port0 interrupt status */
+	MVS_P0_INT_MASK		= 0x184, /* port0 interrupt mask */
+					 /* ports 5-7 follow after this */
+	MVS_P4_INT_STAT		= 0x1A0, /* Port4 interrupt status */
+	MVS_P4_INT_MASK		= 0x1A4, /* Port4 interrupt enable mask */
+
+					 /* ports 1-3 follow after this */
+	MVS_P0_SER_CTLSTAT	= 0x1D0, /* port0 serial control/status */
+					 /* ports 5-7 follow after this */
+	MVS_P4_SER_CTLSTAT	= 0x1E0, /* port4 serial control/status */
+
+					 /* ports 1-3 follow after this */
+	MVS_P0_CFG_ADDR		= 0x200, /* port0 phy register address */
+	MVS_P0_CFG_DATA		= 0x204, /* port0 phy register data */
+					 /* ports 5-7 follow after this */
+	MVS_P4_CFG_ADDR		= 0x220, /* Port4 config address */
+	MVS_P4_CFG_DATA		= 0x224, /* Port4 config data */
+
+					 /* phys 1-3 follow after this */
+	MVS_P0_VSR_ADDR		= 0x250, /* phy0 VSR address */
+	MVS_P0_VSR_DATA		= 0x254, /* phy0 VSR data */
+					 /* phys 5-7 follow after this */
+					 /* multiplexing */
+	MVS_P4_VSR_ADDR 	= 0x250, /* phy4 VSR address */
+	MVS_P4_VSR_DATA 	= 0x254, /* phy4 VSR data */
+	MVS_PA_VSR_ADDR		= 0x290, /* All port VSR addr */
+	MVS_PA_VSR_PORT		= 0x294, /* All port VSR data */
+};
+
+enum pci_cfg_registers {
+	PCR_PHY_CTL		= 0x40,
+	PCR_PHY_CTL2		= 0x90,
+	PCR_DEV_CTRL		= 0x78,
+	PCR_LINK_STAT		= 0x82,
+};
+
+/*  SAS/SATA Vendor Specific Port Registers */
+enum sas_sata_vsp_regs {
+	VSR_PHY_STAT		= 0x00 * 4, /* Phy Status */
+	VSR_PHY_MODE1		= 0x01 * 4, /* phy tx */
+	VSR_PHY_MODE2		= 0x02 * 4, /* tx scc */
+	VSR_PHY_MODE3		= 0x03 * 4, /* pll */
+	VSR_PHY_MODE4		= 0x04 * 4, /* VCO */
+	VSR_PHY_MODE5		= 0x05 * 4, /* Rx */
+	VSR_PHY_MODE6		= 0x06 * 4, /* CDR */
+	VSR_PHY_MODE7		= 0x07 * 4, /* Impedance */
+	VSR_PHY_MODE8		= 0x08 * 4, /* Voltage */
+	VSR_PHY_MODE9		= 0x09 * 4, /* Test */
+	VSR_PHY_MODE10		= 0x0A * 4, /* Power */
+	VSR_PHY_MODE11		= 0x0B * 4, /* Phy Mode */
+	VSR_PHY_VS0		= 0x0C * 4, /* Vendor Specific 0 */
+	VSR_PHY_VS1		= 0x0D * 4, /* Vendor Specific 1 */
+};
+
+enum chip_register_bits {
+	PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
+	PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
+	PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12),
+	PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
+			(0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
+};
+
+enum pci_interrupt_cause {
+	/*  MAIN_IRQ_CAUSE (R10200) Bits*/
+	IRQ_COM_IN_I2O_IOP0            = (1 << 0),
+	IRQ_COM_IN_I2O_IOP1            = (1 << 1),
+	IRQ_COM_IN_I2O_IOP2            = (1 << 2),
+	IRQ_COM_IN_I2O_IOP3            = (1 << 3),
+	IRQ_COM_OUT_I2O_HOS0           = (1 << 4),
+	IRQ_COM_OUT_I2O_HOS1           = (1 << 5),
+	IRQ_COM_OUT_I2O_HOS2           = (1 << 6),
+	IRQ_COM_OUT_I2O_HOS3           = (1 << 7),
+	IRQ_PCIF_TO_CPU_DRBL0          = (1 << 8),
+	IRQ_PCIF_TO_CPU_DRBL1          = (1 << 9),
+	IRQ_PCIF_TO_CPU_DRBL2          = (1 << 10),
+	IRQ_PCIF_TO_CPU_DRBL3          = (1 << 11),
+	IRQ_PCIF_DRBL0                 = (1 << 12),
+	IRQ_PCIF_DRBL1                 = (1 << 13),
+	IRQ_PCIF_DRBL2                 = (1 << 14),
+	IRQ_PCIF_DRBL3                 = (1 << 15),
+	IRQ_XOR_A                      = (1 << 16),
+	IRQ_XOR_B                      = (1 << 17),
+	IRQ_SAS_A                      = (1 << 18),
+	IRQ_SAS_B                      = (1 << 19),
+	IRQ_CPU_CNTRL                  = (1 << 20),
+	IRQ_GPIO                       = (1 << 21),
+	IRQ_UART                       = (1 << 22),
+	IRQ_SPI                        = (1 << 23),
+	IRQ_I2C                        = (1 << 24),
+	IRQ_SGPIO                      = (1 << 25),
+	IRQ_COM_ERR                    = (1 << 29),
+	IRQ_I2O_ERR                    = (1 << 30),
+	IRQ_PCIE_ERR                   = (1 << 31),
+};
+
+#define MAX_SG_ENTRY		255
+
+struct mvs_prd_imt {
+	__le32			len:22;
+	u8			_r_a:2;
+	u8			misc_ctl:4;
+	u8			inter_sel:4;
+};
+
+struct mvs_prd {
+	/* 64-bit buffer address */
+	__le64			addr;
+	/* 22-bit length */
+	struct mvs_prd_imt	im_len;
+} __attribute__ ((packed));
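
Note: on 94xx the PRD length is a 22-bit bitfield sharing a dword with two control nibbles, and the kernel relies on little-endian bitfield layout here. A portable sketch that does the same packing by hand; the field positions follow the struct above, the masks are assumptions:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pack_im_len(uint32_t len, uint8_t misc_ctl, uint8_t inter_sel)
    {
        return (len & 0x3FFFFF)                       |  /* bits 21:0  length    */
               ((uint32_t)(misc_ctl  & 0xF) << 24)    |  /* bits 27:24 misc_ctl  */
               ((uint32_t)(inter_sel & 0xF) << 28);      /* bits 31:28 inter_sel */
    }

    int main(void)
    {
        printf("im_len = 0x%08X\n", pack_im_len(4096, 0, 0));
        return 0;
    }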
+
+#define SPI_CTRL_REG_94XX		0xc800
+#define SPI_ADDR_REG_94XX		0xc804
+#define SPI_WR_DATA_REG_94XX		0xc808
+#define SPI_RD_DATA_REG_94XX		0xc80c
+#define SPI_CTRL_READ_94XX		(1U << 2)
+#define SPI_ADDR_VLD_94XX		(1U << 1)
+#define SPI_CTRL_SpiStart_94XX		(1U << 0)
+
+#define mv_ffc(x)   ffz(x)
+
+static inline int
+mv_ffc64(u64 v)
+{
+	int i;
+	i = mv_ffc((u32)v);
+	if (i >= 0)
+		return i;
+	i = mv_ffc((u32)(v>>32));
+
+	if (i != 0)
+		return 32 + i;
+
+	return -1;
+}
+
+#define r_reg_set_enable(i) \
+	(((i) > 31) ? mr32(MVS_STP_REG_SET_1) : \
+	mr32(MVS_STP_REG_SET_0))
+
+#define w_reg_set_enable(i, tmp) \
+	(((i) > 31) ? mw32(MVS_STP_REG_SET_1, tmp) : \
+	mw32(MVS_STP_REG_SET_0, tmp))
+
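Note: the r_reg_set_enable/w_reg_set_enable macros above just select between the two 32-bit STP register-set enable banks by index (0-31 vs 32-63). A trivial mock of the bank selection with plain arrays in place of the registers:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t bank[2];                 /* [0]=sets 0-31, [1]=sets 32-63 */

    #define r_reg_set_enable(i)     (bank[(i) > 31])
    #define w_reg_set_enable(i, v)  (bank[(i) > 31] = (v))

    int main(void)
    {
        w_reg_set_enable(40, 0x100);         /* index 40 lands in the high bank */
        printf("bank1 = 0x%x\n", (unsigned)r_reg_set_enable(40));
        return 0;
    }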
+extern const struct mvs_dispatch mvs_94xx_dispatch;
+#endif
+

+ 280 - 0
drivers/scsi/mvsas/mv_chips.h

@@ -0,0 +1,280 @@
+/*
+ * Marvell 88SE64xx/88SE94xx register IO interface
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+
+#ifndef _MV_CHIPS_H_
+#define _MV_CHIPS_H_
+
+#define mr32(reg)	readl(regs + reg)
+#define mw32(reg, val)	writel((val), regs + reg)
+#define mw32_f(reg, val)	do {			\
+				mw32(reg, val);	\
+				mr32(reg);	\
+			} while (0)
+
+#define iow32(reg, val)		outl(val, (unsigned long)(regs + reg))
+#define ior32(reg)		inl((unsigned long)(regs + reg))
+#define iow16(reg, val)		outw(val, (unsigned long)(regs + reg))
+#define ior16(reg)		inw((unsigned long)(regs + reg))
+#define iow8(reg, val)		outb(val, (unsigned long)(regs + reg))
+#define ior8(reg)		inb((unsigned long)(regs + reg))
+
+static inline u32 mvs_cr32(struct mvs_info *mvi, u32 addr)
+{
+	void __iomem *regs = mvi->regs;
+	mw32(MVS_CMD_ADDR, addr);
+	return mr32(MVS_CMD_DATA);
+}
+
+static inline void mvs_cw32(struct mvs_info *mvi, u32 addr, u32 val)
+{
+	void __iomem *regs = mvi->regs;
+	mw32(MVS_CMD_ADDR, addr);
+	mw32(MVS_CMD_DATA, val);
+}
+
+static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
+{
+	void __iomem *regs = mvi->regs;
+	return (port < 4) ? mr32(MVS_P0_SER_CTLSTAT + port * 4) :
+		mr32(MVS_P4_SER_CTLSTAT + (port - 4) * 4);
+}
+
+static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
+{
+	void __iomem *regs = mvi->regs;
+	if (port < 4)
+		mw32(MVS_P0_SER_CTLSTAT + port * 4, val);
+	else
+		mw32(MVS_P4_SER_CTLSTAT + (port - 4) * 4, val);
+}
+
+static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off,
+				u32 off2, u32 port)
+{
+	void __iomem *regs = mvi->regs + off;
+	void __iomem *regs2 = mvi->regs + off2;
+	return (port < 4) ? readl(regs + port * 8) :
+		readl(regs2 + (port - 4) * 8);
+}
+
+static inline void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
+				u32 port, u32 val)
+{
+	void __iomem *regs = mvi->regs + off;
+	void __iomem *regs2 = mvi->regs + off2;
+	if (port < 4)
+		writel(val, regs + port * 8);
+	else
+		writel(val, regs2 + (port - 4) * 8);
+}
+
+static inline u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
+{
+	return mvs_read_port(mvi, MVS_P0_CFG_DATA,
+			MVS_P4_CFG_DATA, port);
+}
+
+static inline void mvs_write_port_cfg_data(struct mvs_info *mvi,
+						u32 port, u32 val)
+{
+	mvs_write_port(mvi, MVS_P0_CFG_DATA,
+			MVS_P4_CFG_DATA, port, val);
+}
+
+static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi,
+						u32 port, u32 addr)
+{
+	mvs_write_port(mvi, MVS_P0_CFG_ADDR,
+			MVS_P4_CFG_ADDR, port, addr);
+	mdelay(10);
+}
+
+static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
+{
+	return mvs_read_port(mvi, MVS_P0_VSR_DATA,
+			MVS_P4_VSR_DATA, port);
+}
+
+static inline void mvs_write_port_vsr_data(struct mvs_info *mvi,
+						u32 port, u32 val)
+{
+	mvs_write_port(mvi, MVS_P0_VSR_DATA,
+			MVS_P4_VSR_DATA, port, val);
+}
+
+static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi,
+						u32 port, u32 addr)
+{
+	mvs_write_port(mvi, MVS_P0_VSR_ADDR,
+			MVS_P4_VSR_ADDR, port, addr);
+	mdelay(10);
+}
+
+static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
+{
+	return mvs_read_port(mvi, MVS_P0_INT_STAT,
+			MVS_P4_INT_STAT, port);
+}
+
+static inline void mvs_write_port_irq_stat(struct mvs_info *mvi,
+						u32 port, u32 val)
+{
+	mvs_write_port(mvi, MVS_P0_INT_STAT,
+			MVS_P4_INT_STAT, port, val);
+}
+
+static inline u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
+{
+	return mvs_read_port(mvi, MVS_P0_INT_MASK,
+			MVS_P4_INT_MASK, port);
+
+}
+
+static inline void mvs_write_port_irq_mask(struct mvs_info *mvi,
+						u32 port, u32 val)
+{
+	mvs_write_port(mvi, MVS_P0_INT_MASK,
+			MVS_P4_INT_MASK, port, val);
+}
+
+static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
+{
+	u32 tmp;
+
+	/* workaround for SATA R-ERR, to ignore phy glitch */
+	tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
+	tmp &= ~(1 << 9);
+	tmp |= (1 << 10);
+	mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
+
+	/* enable retry 127 times */
+	mvs_cw32(mvi, CMD_SAS_CTL1, 0x7f7f);
+
+	/* extend open frame timeout to max */
+	tmp = mvs_cr32(mvi, CMD_SAS_CTL0);
+	tmp &= ~0xffff;
+	tmp |= 0x3fff;
+	mvs_cw32(mvi, CMD_SAS_CTL0, tmp);
+
+	/* workaround for WDTIMEOUT , set to 550 ms */
+	mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000);
+
+	/* not to halt for different port op during wideport link change */
+	mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d);
+
+	/* workaround for Seagate disk not-found OOB sequence: receive
+	 * COMINIT before sending out COMWAKE */
+	tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
+	tmp &= 0x0000ffff;
+	tmp |= 0x00fa0000;
+	mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
+
+	tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
+	tmp &= 0x1fffffff;
+	tmp |= (2U << 29);	/* 8 ms retry */
+	mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
+}
+
+static inline void mvs_int_sata(struct mvs_info *mvi)
+{
+	u32 tmp;
+	void __iomem *regs = mvi->regs;
+	tmp = mr32(MVS_INT_STAT_SRS_0);
+	if (tmp)
+		mw32(MVS_INT_STAT_SRS_0, tmp);
+	MVS_CHIP_DISP->clear_active_cmds(mvi);
+}
+
+static inline void mvs_int_full(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp, stat;
+	int i;
+
+	stat = mr32(MVS_INT_STAT);
+	mvs_int_rx(mvi, false);
+
+	for (i = 0; i < mvi->chip->n_phy; i++) {
+		tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
+		if (tmp)
+			mvs_int_port(mvi, i, tmp);
+	}
+
+	if (stat & CINT_SRS)
+		mvs_int_sata(mvi);
+
+	mw32(MVS_INT_STAT, stat);
+}
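
Note: mvs_int_full demultiplexes one central status word; each port owns a (CINT_PORT, CINT_PORT_STOPPED) bit pair, found by shifting the word right by the port index. A standalone sketch of the demux loop, with handle_port as a stand-in for mvs_int_port:

    #include <stdint.h>
    #include <stdio.h>

    #define CINT_PORT          (1U << 8)
    #define CINT_PORT_STOPPED  (1U << 16)

    static void handle_port(int port, uint32_t events)
    {
        printf("port %d events 0x%x\n", port, events);
    }

    static void int_full(uint32_t stat, int n_phy)
    {
        for (int i = 0; i < n_phy; i++) {
            uint32_t tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
            if (tmp)
                handle_port(i, tmp);
        }
    }

    int main(void)
    {
        int_full(CINT_PORT << 2, 8);   /* simulate an event on port 2 */
        return 0;
    }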
+
+static inline void mvs_start_delivery(struct mvs_info *mvi, u32 tx)
+{
+	void __iomem *regs = mvi->regs;
+	mw32(MVS_TX_PROD_IDX, tx);
+}
+
+static inline u32 mvs_rx_update(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs;
+	return mr32(MVS_RX_CONS_IDX);
+}
+
+static inline u32 mvs_get_prd_size(void)
+{
+	return sizeof(struct mvs_prd);
+}
+
+static inline u32 mvs_get_prd_count(void)
+{
+	return MAX_SG_ENTRY;
+}
+
+static inline void mvs_show_pcie_usage(struct mvs_info *mvi)
+{
+	u16 link_stat, link_spd;
+	const char *spd[] = {
+		"Unknown",
+		"2.5",
+		"5.0",
+	};
+	if (mvi->flags & MVF_FLAG_SOC || mvi->id > 0)
+		return;
+
+	pci_read_config_word(mvi->pdev, PCR_LINK_STAT, &link_stat);
+	link_spd = (link_stat & PLS_LINK_SPD) >> PLS_LINK_SPD_OFFS;
+	if (link_spd >= 3)
+		link_spd = 0;
+	dev_printk(KERN_INFO, mvi->dev,
+		"mvsas: PCI-E x%u, Bandwidth Usage: %s Gbps\n",
+	       (link_stat & PLS_NEG_LINK_WD) >> PLS_NEG_LINK_WD_OFFS,
+	       spd[link_spd]);
+}
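
Note: mvs_show_pcie_usage reads the PCIe Link Status word and decodes the negotiated speed and width; in the standard register layout those are bits 3:0 and 9:4 respectively, which is presumably what the PLS_* masks encode. A sketch of the decode on a fabricated status value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const char *spd[] = { "Unknown", "2.5", "5.0" };
        uint16_t link_stat = 0x0041;              /* fabricated: speed=1, width=x4 */
        unsigned speed = link_stat & 0xF;         /* Link Status bits 3:0 */
        unsigned width = (link_stat >> 4) & 0x3F; /* Link Status bits 9:4 */

        if (speed >= 3)
            speed = 0;                /* clamp to "Unknown", as the driver does */
        printf("PCI-E x%u, Bandwidth Usage: %s Gbps\n", width, spd[speed]);
        return 0;
    }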
+
+static inline u32 mvs_hw_max_link_rate(void)
+{
+	return MAX_LINK_RATE;
+}
+
+#endif  /* _MV_CHIPS_H_ */
+

+ 502 - 0
drivers/scsi/mvsas/mv_defs.h

@@ -0,0 +1,502 @@
+/*
+ * Marvell 88SE64xx/88SE94xx constants header file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MV_DEFS_H_
+#define _MV_DEFS_H_
+
+
+enum chip_flavors {
+	chip_6320,
+	chip_6440,
+	chip_6485,
+	chip_9480,
+	chip_9180,
+};
+
+/* driver compile-time configuration */
+enum driver_configuration {
+	MVS_SLOTS		= 512,	/* command slots */
+	MVS_TX_RING_SZ		= 1024,	/* TX ring size (12-bit) */
+	MVS_RX_RING_SZ		= 1024, /* RX ring size (12-bit) */
+					/* software requires power-of-2
+					   ring size */
+	MVS_SOC_SLOTS		= 64,
+	MVS_SOC_TX_RING_SZ	= MVS_SOC_SLOTS * 2,
+	MVS_SOC_RX_RING_SZ	= MVS_SOC_SLOTS * 2,
+
+	MVS_SLOT_BUF_SZ		= 8192, /* cmd tbl + IU + status + PRD */
+	MVS_SSP_CMD_SZ		= 64,	/* SSP command table buffer size */
+	MVS_ATA_CMD_SZ		= 96,	/* SATA command table buffer size */
+	MVS_OAF_SZ		= 64,	/* Open address frame buffer size */
+	MVS_QUEUE_SIZE	= 32,	/* Support Queue depth */
+	MVS_CAN_QUEUE		= MVS_SLOTS - 2,	/* SCSI Queue depth */
+	MVS_SOC_CAN_QUEUE	= MVS_SOC_SLOTS - 2,
+};
+
+/* unchangeable hardware details */
+enum hardware_details {
+	MVS_MAX_PHYS		= 8,	/* max. possible phys */
+	MVS_MAX_PORTS		= 8,	/* max. possible ports */
+	MVS_SOC_PHYS		= 4,	/* soc phys */
+	MVS_SOC_PORTS		= 4,	/* soc ports */
+	MVS_MAX_DEVICES	= 1024,	/* max supported devices */
+};
+
+/* peripheral registers (BAR2) */
+enum peripheral_registers {
+	SPI_CTL			= 0x10,	/* EEPROM control */
+	SPI_CMD			= 0x14,	/* EEPROM command */
+	SPI_DATA		= 0x18, /* EEPROM data */
+};
+
+enum peripheral_register_bits {
+	TWSI_RDY		= (1U << 7),	/* EEPROM interface ready */
+	TWSI_RD			= (1U << 4),	/* EEPROM read access */
+
+	SPI_ADDR_MASK		= 0x3ffff,	/* bits 17:0 */
+};
+
+enum hw_register_bits {
+	/* MVS_GBL_CTL */
+	INT_EN			= (1U << 1),	/* Global int enable */
+	HBA_RST			= (1U << 0),	/* HBA reset */
+
+	/* MVS_GBL_INT_STAT */
+	INT_XOR			= (1U << 4),	/* XOR engine event */
+	INT_SAS_SATA		= (1U << 0),	/* SAS/SATA event */
+
+	/* MVS_GBL_PORT_TYPE */			/* shl for ports 1-3 */
+	SATA_TARGET		= (1U << 16),	/* port0 SATA target enable */
+	MODE_AUTO_DET_PORT7 = (1U << 15),	/* port0 SAS/SATA autodetect */
+	MODE_AUTO_DET_PORT6 = (1U << 14),
+	MODE_AUTO_DET_PORT5 = (1U << 13),
+	MODE_AUTO_DET_PORT4 = (1U << 12),
+	MODE_AUTO_DET_PORT3 = (1U << 11),
+	MODE_AUTO_DET_PORT2 = (1U << 10),
+	MODE_AUTO_DET_PORT1 = (1U << 9),
+	MODE_AUTO_DET_PORT0 = (1U << 8),
+	MODE_AUTO_DET_EN    =	MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
+				MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
+				MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
+				MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
+	MODE_SAS_PORT7_MASK = (1U << 7),  /* port0 SAS(1), SATA(0) mode */
+	MODE_SAS_PORT6_MASK = (1U << 6),
+	MODE_SAS_PORT5_MASK = (1U << 5),
+	MODE_SAS_PORT4_MASK = (1U << 4),
+	MODE_SAS_PORT3_MASK = (1U << 3),
+	MODE_SAS_PORT2_MASK = (1U << 2),
+	MODE_SAS_PORT1_MASK = (1U << 1),
+	MODE_SAS_PORT0_MASK = (1U << 0),
+	MODE_SAS_SATA	=	MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
+				MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
+				MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
+				MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
+
+				/* SAS_MODE value may be
+				 * dictated (in hw) by values
+				 * of SATA_TARGET & AUTO_DET
+				 */
+
+	/* MVS_TX_CFG */
+	TX_EN			= (1U << 16),	/* Enable TX */
+	TX_RING_SZ_MASK		= 0xfff,	/* TX ring size, bits 11:0 */
+
+	/* MVS_RX_CFG */
+	RX_EN			= (1U << 16),	/* Enable RX */
+	RX_RING_SZ_MASK		= 0xfff,	/* RX ring size, bits 11:0 */
+
+	/* MVS_INT_COAL */
+	COAL_EN			= (1U << 16),	/* Enable int coalescing */
+
+	/* MVS_INT_STAT, MVS_INT_MASK */
+	CINT_I2C		= (1U << 31),	/* I2C event */
+	CINT_SW0		= (1U << 30),	/* software event 0 */
+	CINT_SW1		= (1U << 29),	/* software event 1 */
+	CINT_PRD_BC		= (1U << 28),	/* PRD BC err for read cmd */
+	CINT_DMA_PCIE		= (1U << 27),	/* DMA to PCIE timeout */
+	CINT_MEM		= (1U << 26),	/* int mem parity err */
+	CINT_I2C_SLAVE		= (1U << 25),	/* slave I2C event */
+	CINT_SRS		= (1U << 3),	/* SRS event */
+	CINT_CI_STOP		= (1U << 1),	/* cmd issue stopped */
+	CINT_DONE		= (1U << 0),	/* cmd completion */
+
+						/* shl for ports 1-3 */
+	CINT_PORT_STOPPED	= (1U << 16),	/* port0 stopped */
+	CINT_PORT		= (1U << 8),	/* port0 event */
+	CINT_PORT_MASK_OFFSET	= 8,
+	CINT_PORT_MASK		= (0xFF << CINT_PORT_MASK_OFFSET),
+	CINT_PHY_MASK_OFFSET	= 4,
+	CINT_PHY_MASK		= (0x0F << CINT_PHY_MASK_OFFSET),
+
+	/* TX (delivery) ring bits */
+	TXQ_CMD_SHIFT		= 29,
+	TXQ_CMD_SSP		= 1,		/* SSP protocol */
+	TXQ_CMD_SMP		= 2,		/* SMP protocol */
+	TXQ_CMD_STP		= 3,		/* STP/SATA protocol */
+	TXQ_CMD_SSP_FREE_LIST	= 4,		/* add to SSP targ free list */
+	TXQ_CMD_SLOT_RESET	= 7,		/* reset command slot */
+	TXQ_MODE_I		= (1U << 28),	/* mode: 0=target,1=initiator */
+	TXQ_MODE_TARGET 	= 0,
+	TXQ_MODE_INITIATOR	= 1,
+	TXQ_PRIO_HI		= (1U << 27),	/* priority: 0=normal, 1=high */
+	TXQ_PRI_NORMAL		= 0,
+	TXQ_PRI_HIGH		= 1,
+	TXQ_SRS_SHIFT		= 20,		/* SATA register set */
+	TXQ_SRS_MASK		= 0x7f,
+	TXQ_PHY_SHIFT		= 12,		/* PHY bitmap */
+	TXQ_PHY_MASK		= 0xff,
+	TXQ_SLOT_MASK		= 0xfff,	/* slot number */
+
+	/* RX (completion) ring bits */
+	RXQ_GOOD		= (1U << 23),	/* Response good */
+	RXQ_SLOT_RESET		= (1U << 21),	/* Slot reset complete */
+	RXQ_CMD_RX		= (1U << 20),	/* target cmd received */
+	RXQ_ATTN		= (1U << 19),	/* attention */
+	RXQ_RSP			= (1U << 18),	/* response frame xfer'd */
+	RXQ_ERR			= (1U << 17),	/* err info rec xfer'd */
+	RXQ_DONE		= (1U << 16),	/* cmd complete */
+	RXQ_SLOT_MASK		= 0xfff,	/* slot number */
+
+	/* mvs_cmd_hdr bits */
+	MCH_PRD_LEN_SHIFT	= 16,		/* 16-bit PRD table len */
+	MCH_SSP_FR_TYPE_SHIFT	= 13,		/* SSP frame type */
+
+						/* SSP initiator only */
+	MCH_SSP_FR_CMD		= 0x0,		/* COMMAND frame */
+
+						/* SSP initiator or target */
+	MCH_SSP_FR_TASK		= 0x1,		/* TASK frame */
+
+						/* SSP target only */
+	MCH_SSP_FR_XFER_RDY	= 0x4,		/* XFER_RDY frame */
+	MCH_SSP_FR_RESP		= 0x5,		/* RESPONSE frame */
+	MCH_SSP_FR_READ		= 0x6,		/* Read DATA frame(s) */
+	MCH_SSP_FR_READ_RESP	= 0x7,		/* ditto, plus RESPONSE */
+
+	MCH_SSP_MODE_PASSTHRU	= 1,
+	MCH_SSP_MODE_NORMAL	= 0,
+	MCH_PASSTHRU		= (1U << 12),	/* pass-through (SSP) */
+	MCH_FBURST		= (1U << 11),	/* first burst (SSP) */
+	MCH_CHK_LEN		= (1U << 10),	/* chk xfer len (SSP) */
+	MCH_RETRY		= (1U << 9),	/* tport layer retry (SSP) */
+	MCH_PROTECTION		= (1U << 8),	/* protection info rec (SSP) */
+	MCH_RESET		= (1U << 7),	/* Reset (STP/SATA) */
+	MCH_FPDMA		= (1U << 6),	/* First party DMA (STP/SATA) */
+	MCH_ATAPI		= (1U << 5),	/* ATAPI (STP/SATA) */
+	MCH_BIST		= (1U << 4),	/* BIST activate (STP/SATA) */
+	MCH_PMP_MASK		= 0xf,		/* PMP from cmd FIS (STP/SATA)*/
+
+	CCTL_RST		= (1U << 5),	/* port logic reset */
+
+						/* 0(LSB first), 1(MSB first) */
+	CCTL_ENDIAN_DATA	= (1U << 3),	/* PRD data */
+	CCTL_ENDIAN_RSP		= (1U << 2),	/* response frame */
+	CCTL_ENDIAN_OPEN	= (1U << 1),	/* open address frame */
+	CCTL_ENDIAN_CMD		= (1U << 0),	/* command table */
+
+	/* MVS_Px_SER_CTLSTAT (per-phy control) */
+	PHY_SSP_RST		= (1U << 3),	/* reset SSP link layer */
+	PHY_BCAST_CHG		= (1U << 2),	/* broadcast(change) notif */
+	PHY_RST_HARD		= (1U << 1),	/* hard reset + phy reset */
+	PHY_RST			= (1U << 0),	/* phy reset */
+	PHY_READY_MASK		= (1U << 20),
+
+	/* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
+	PHYEV_DEC_ERR		= (1U << 24),	/* Phy Decoding Error */
+	PHYEV_DCDR_ERR		= (1U << 23),	/* STP Decoder Error */
+	PHYEV_CRC_ERR		= (1U << 22),	/* STP CRC Error */
+	PHYEV_UNASSOC_FIS	= (1U << 19),	/* unassociated FIS rx'd */
+	PHYEV_AN		= (1U << 18),	/* SATA async notification */
+	PHYEV_BIST_ACT		= (1U << 17),	/* BIST activate FIS */
+	PHYEV_SIG_FIS		= (1U << 16),	/* signature FIS */
+	PHYEV_POOF		= (1U << 12),	/* phy ready from 1 -> 0 */
+	PHYEV_IU_BIG		= (1U << 11),	/* IU too long err */
+	PHYEV_IU_SMALL		= (1U << 10),	/* IU too short err */
+	PHYEV_UNK_TAG		= (1U << 9),	/* unknown tag */
+	PHYEV_BROAD_CH		= (1U << 8),	/* broadcast(CHANGE) */
+	PHYEV_COMWAKE		= (1U << 7),	/* COMWAKE rx'd */
+	PHYEV_PORT_SEL		= (1U << 6),	/* port selector present */
+	PHYEV_HARD_RST		= (1U << 5),	/* hard reset rx'd */
+	PHYEV_ID_TMOUT		= (1U << 4),	/* identify timeout */
+	PHYEV_ID_FAIL		= (1U << 3),	/* identify failed */
+	PHYEV_ID_DONE		= (1U << 2),	/* identify done */
+	PHYEV_HARD_RST_DONE	= (1U << 1),	/* hard reset done */
+	PHYEV_RDY_CH		= (1U << 0),	/* phy ready changed state */
+
+	/* MVS_PCS */
+	PCS_EN_SATA_REG_SHIFT	= (16),		/* Enable SATA Register Set */
+	PCS_EN_PORT_XMT_SHIFT	= (12),		/* Enable Port Transmit */
+	PCS_EN_PORT_XMT_SHIFT2	= (8),		/* For 6485 */
+	PCS_SATA_RETRY		= (1U << 8),	/* retry ctl FIS on R_ERR */
+	PCS_RSP_RX_EN		= (1U << 7),	/* raw response rx */
+	PCS_SATA_RETRY_2	= (1U << 6),	/* For 9180 */
+	PCS_SELF_CLEAR		= (1U << 5),	/* self-clearing int mode */
+	PCS_FIS_RX_EN		= (1U << 4),	/* FIS rx enable */
+	PCS_CMD_STOP_ERR	= (1U << 3),	/* cmd stop-on-err enable */
+	PCS_CMD_RST		= (1U << 1),	/* reset cmd issue */
+	PCS_CMD_EN		= (1U << 0),	/* enable cmd issue */
+
+	/* Port n Attached Device Info */
+	PORT_DEV_SSP_TRGT	= (1U << 19),
+	PORT_DEV_SMP_TRGT	= (1U << 18),
+	PORT_DEV_STP_TRGT	= (1U << 17),
+	PORT_DEV_SSP_INIT	= (1U << 11),
+	PORT_DEV_SMP_INIT	= (1U << 10),
+	PORT_DEV_STP_INIT	= (1U << 9),
+	PORT_PHY_ID_MASK	= (0xFFU << 24),
+	PORT_SSP_TRGT_MASK	= (0x1U << 19),
+	PORT_SSP_INIT_MASK	= (0x1U << 11),
+	PORT_DEV_TRGT_MASK	= (0x7U << 17),
+	PORT_DEV_INIT_MASK	= (0x7U << 9),
+	PORT_DEV_TYPE_MASK	= (0x7U << 0),
+
+	/* Port n PHY Status */
+	PHY_RDY			= (1U << 2),
+	PHY_DW_SYNC		= (1U << 1),
+	PHY_OOB_DTCTD		= (1U << 0),
+
+	/* VSR */
+	/* PHYMODE 6 (CDB) */
+	PHY_MODE6_LATECLK	= (1U << 29),	/* Lock Clock */
+	PHY_MODE6_DTL_SPEED	= (1U << 27),	/* Digital Loop Speed */
+	PHY_MODE6_FC_ORDER	= (1U << 26),	/* Fibre Channel Mode Order */
+	PHY_MODE6_MUCNT_EN	= (1U << 24),	/* u Count Enable */
+	PHY_MODE6_SEL_MUCNT_LEN	= (1U << 22),	/* Training Length Select */
+	PHY_MODE6_SELMUPI	= (1U << 20),	/* Phase Multi Select (init) */
+	PHY_MODE6_SELMUPF	= (1U << 18),	/* Phase Multi Select (final) */
+	PHY_MODE6_SELMUFF	= (1U << 16),	/* Freq Loop Multi Sel(final) */
+	PHY_MODE6_SELMUFI	= (1U << 14),	/* Freq Loop Multi Sel(init) */
+	PHY_MODE6_FREEZE_LOOP	= (1U << 12),	/* Freeze Rx CDR Loop */
+	PHY_MODE6_INT_RXFOFFS	= (1U << 3),	/* Rx CDR Freq Loop Enable */
+	PHY_MODE6_FRC_RXFOFFS	= (1U << 2),	/* Initial Rx CDR Offset */
+	PHY_MODE6_STAU_0D8	= (1U << 1),	/* Rx CDR Freq Loop Saturate */
+	PHY_MODE6_RXSAT_DIS	= (1U << 0),	/* Saturate Ctl */
+};
+
+/* SAS/SATA configuration port registers, aka phy registers */
+enum sas_sata_config_port_regs {
+	PHYR_IDENTIFY		= 0x00,	/* info for IDENTIFY frame */
+	PHYR_ADDR_LO		= 0x04,	/* my SAS address (low) */
+	PHYR_ADDR_HI		= 0x08,	/* my SAS address (high) */
+	PHYR_ATT_DEV_INFO	= 0x0C,	/* attached device info */
+	PHYR_ATT_ADDR_LO	= 0x10,	/* attached dev SAS addr (low) */
+	PHYR_ATT_ADDR_HI	= 0x14,	/* attached dev SAS addr (high) */
+	PHYR_SATA_CTL		= 0x18,	/* SATA control */
+	PHYR_PHY_STAT		= 0x1C,	/* PHY status */
+	PHYR_SATA_SIG0	= 0x20,	/* port SATA signature FIS (Byte 0-3) */
+	PHYR_SATA_SIG1	= 0x24,	/* port SATA signature FIS (Byte 4-7) */
+	PHYR_SATA_SIG2	= 0x28,	/* port SATA signature FIS (Byte 8-11) */
+	PHYR_SATA_SIG3	= 0x2c,	/* port SATA signature FIS (Byte 12-15) */
+	PHYR_R_ERR_COUNT	= 0x30, /* port R_ERR count register */
+	PHYR_CRC_ERR_COUNT	= 0x34, /* port CRC error count register */
+	PHYR_WIDE_PORT	= 0x38,	/* wide port participating */
+	PHYR_CURRENT0		= 0x80,	/* current connection info 0 */
+	PHYR_CURRENT1		= 0x84,	/* current connection info 1 */
+	PHYR_CURRENT2		= 0x88,	/* current connection info 2 */
+	CONFIG_ID_FRAME0       = 0x100, /* Port device ID frame register 0 */
+	CONFIG_ID_FRAME1       = 0x104, /* Port device ID frame register 1 */
+	CONFIG_ID_FRAME2       = 0x108, /* Port device ID frame register 2 */
+	CONFIG_ID_FRAME3       = 0x10c, /* Port device ID frame register 3 */
+	CONFIG_ID_FRAME4       = 0x110, /* Port device ID frame register 4 */
+	CONFIG_ID_FRAME5       = 0x114, /* Port device ID frame register 5 */
+	CONFIG_ID_FRAME6       = 0x118, /* Port device ID frame register 6 */
+	CONFIG_ATT_ID_FRAME0   = 0x11c, /* attached ID frame register 0 */
+	CONFIG_ATT_ID_FRAME1   = 0x120, /* attached ID frame register 1 */
+	CONFIG_ATT_ID_FRAME2   = 0x124, /* attached ID frame register 2 */
+	CONFIG_ATT_ID_FRAME3   = 0x128, /* attached ID frame register 3 */
+	CONFIG_ATT_ID_FRAME4   = 0x12c, /* attached ID frame register 4 */
+	CONFIG_ATT_ID_FRAME5   = 0x130, /* attached ID frame register 5 */
+	CONFIG_ATT_ID_FRAME6   = 0x134, /* attached ID frame register 6 */
+};
+
+enum sas_cmd_port_registers {
+	CMD_CMRST_OOB_DET	= 0x100, /* COMRESET OOB detect register */
+	CMD_CMWK_OOB_DET	= 0x104, /* COMWAKE OOB detect register */
+	CMD_CMSAS_OOB_DET	= 0x108, /* COMSAS OOB detect register */
+	CMD_BRST_OOB_DET	= 0x10c, /* burst OOB detect register */
+	CMD_OOB_SPACE	= 0x110, /* OOB space control register */
+	CMD_OOB_BURST	= 0x114, /* OOB burst control register */
+	CMD_PHY_TIMER		= 0x118, /* PHY timer control register */
+	CMD_PHY_CONFIG0	= 0x11c, /* PHY config register 0 */
+	CMD_PHY_CONFIG1	= 0x120, /* PHY config register 1 */
+	CMD_SAS_CTL0		= 0x124, /* SAS control register 0 */
+	CMD_SAS_CTL1		= 0x128, /* SAS control register 1 */
+	CMD_SAS_CTL2		= 0x12c, /* SAS control register 2 */
+	CMD_SAS_CTL3		= 0x130, /* SAS control register 3 */
+	CMD_ID_TEST		= 0x134, /* ID test register */
+	CMD_PL_TIMER		= 0x138, /* PL timer register */
+	CMD_WD_TIMER		= 0x13c, /* WD timer register */
+	CMD_PORT_SEL_COUNT	= 0x140, /* port selector count register */
+	CMD_APP_MEM_CTL	= 0x144, /* Application Memory Control */
+	CMD_XOR_MEM_CTL	= 0x148, /* XOR Block Memory Control */
+	CMD_DMA_MEM_CTL	= 0x14c, /* DMA Block Memory Control */
+	CMD_PORT_MEM_CTL0	= 0x150, /* Port Memory Control 0 */
+	CMD_PORT_MEM_CTL1	= 0x154, /* Port Memory Control 1 */
+	CMD_SATA_PORT_MEM_CTL0	= 0x158, /* SATA Port Memory Control 0 */
+	CMD_SATA_PORT_MEM_CTL1	= 0x15c, /* SATA Port Memory Control 1 */
+	CMD_XOR_MEM_BIST_CTL	= 0x160, /* XOR Memory BIST Control */
+	CMD_XOR_MEM_BIST_STAT	= 0x164, /* XOR Memory BIST Status */
+	CMD_DMA_MEM_BIST_CTL	= 0x168, /* DMA Memory BIST Control */
+	CMD_DMA_MEM_BIST_STAT	= 0x16c, /* DMA Memory BIST Status */
+	CMD_PORT_MEM_BIST_CTL	= 0x170, /* Port Memory BIST Control */
+	CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
+	CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
+	CMD_STP_MEM_BIST_CTL	= 0x17c, /* STP Memory BIST Control */
+	CMD_STP_MEM_BIST_STAT0	= 0x180, /* STP Memory BIST Status 0 */
+	CMD_STP_MEM_BIST_STAT1	= 0x184, /* STP Memory BIST Status 1 */
+	CMD_RESET_COUNT		= 0x188, /* Reset Count */
+	CMD_MONTR_DATA_SEL	= 0x18C, /* Monitor Data/Select */
+	CMD_PLL_PHY_CONFIG	= 0x190, /* PLL/PHY Configuration */
+	CMD_PHY_CTL		= 0x194, /* PHY Control and Status */
+	CMD_PHY_TEST_COUNT0	= 0x198, /* Phy Test Count 0 */
+	CMD_PHY_TEST_COUNT1	= 0x19C, /* Phy Test Count 1 */
+	CMD_PHY_TEST_COUNT2	= 0x1A0, /* Phy Test Count 2 */
+	CMD_APP_ERR_CONFIG	= 0x1A4, /* Application Error Configuration */
+	CMD_PND_FIFO_CTL0	= 0x1A8, /* Pending FIFO Control 0 */
+	CMD_HOST_CTL		= 0x1AC, /* Host Control Status */
+	CMD_HOST_WR_DATA	= 0x1B0, /* Host Write Data */
+	CMD_HOST_RD_DATA	= 0x1B4, /* Host Read Data */
+	CMD_PHY_MODE_21		= 0x1B8, /* Phy Mode 21 */
+	CMD_SL_MODE0		= 0x1BC, /* SL Mode 0 */
+	CMD_SL_MODE1		= 0x1C0, /* SL Mode 1 */
+	CMD_PND_FIFO_CTL1	= 0x1C4, /* Pending FIFO Control 1 */
+};
+
+enum mvs_info_flags {
+	MVF_MSI		= (1U << 0),	/* MSI is enabled */
+	MVF_PHY_PWR_FIX	= (1U << 1),	/* bug workaround */
+	MVF_FLAG_SOC		= (1U << 2),	/* SoC integrated controllers */
+};
+
+enum mvs_event_flags {
+	PHY_PLUG_EVENT	= (3U),		/* plug in | plug out */
+	PHY_PLUG_IN		= (1U << 0),	/* phy plug in */
+	PHY_PLUG_OUT		= (1U << 1),	/* phy plug out */
+};
+
+enum mvs_port_type {
+	PORT_TGT_MASK	=  (1U << 5),
+	PORT_INIT_PORT	=  (1U << 4),
+	PORT_TGT_PORT	=  (1U << 3),
+	PORT_INIT_TGT_PORT = (PORT_INIT_PORT | PORT_TGT_PORT),
+	PORT_TYPE_SAS	=  (1U << 1),
+	PORT_TYPE_SATA	=  (1U << 0),
+};
+
+/* Command Table Format */
+enum ct_format {
+	/* SSP */
+	SSP_F_H		=  0x00,
+	SSP_F_IU	=  0x18,
+	SSP_F_MAX	=  0x4D,
+	/* STP */
+	STP_CMD_FIS	=  0x00,
+	STP_ATAPI_CMD	=  0x40,
+	STP_F_MAX	=  0x10,
+	/* SMP */
+	SMP_F_T		=  0x00,
+	SMP_F_DEP	=  0x01,
+	SMP_F_MAX	=  0x101,
+};
+
+enum status_buffer {
+	SB_EIR_OFF	=  0x00,	/* Error Information Record */
+	SB_RFB_OFF	=  0x08,	/* Response Frame Buffer */
+	SB_RFB_MAX	=  0x400,	/* RFB size */
+};
+
+enum error_info_rec {
+	CMD_ISS_STPD	= (1U << 31),	/* Cmd Issue Stopped */
+	CMD_PI_ERR	= (1U << 30),	/* Protection info error.  see flags2 */
+	RSP_OVER	= (1U << 29),	/* rsp buffer overflow */
+	RETRY_LIM	= (1U << 28),	/* FIS/frame retry limit exceeded */
+	UNK_FIS 	= (1U << 27),	/* unknown FIS */
+	DMA_TERM	= (1U << 26),	/* DMA terminate primitive rx'd */
+	SYNC_ERR	= (1U << 25),	/* SYNC rx'd during frame xmit */
+	TFILE_ERR	= (1U << 24),	/* SATA taskfile Error bit set */
+	R_ERR		= (1U << 23),	/* SATA returned R_ERR prim */
+	RD_OFS		= (1U << 20),	/* Read DATA frame invalid offset */
+	XFER_RDY_OFS	= (1U << 19),	/* XFER_RDY offset error */
+	UNEXP_XFER_RDY	= (1U << 18),	/* unexpected XFER_RDY error */
+	DATA_OVER_UNDER = (1U << 16),	/* data overflow/underflow */
+	INTERLOCK	= (1U << 15),	/* interlock error */
+	NAK		= (1U << 14),	/* NAK rx'd */
+	ACK_NAK_TO	= (1U << 13),	/* ACK/NAK timeout */
+	CXN_CLOSED	= (1U << 12),	/* cxn closed w/out ack/nak */
+	OPEN_TO 	= (1U << 11),	/* I_T nexus lost, open cxn timeout */
+	PATH_BLOCKED	= (1U << 10),	/* I_T nexus lost, pathway blocked */
+	NO_DEST 	= (1U << 9),	/* I_T nexus lost, no destination */
+	STP_RES_BSY	= (1U << 8),	/* STP resources busy */
+	BREAK		= (1U << 7),	/* break received */
+	BAD_DEST	= (1U << 6),	/* bad destination */
+	BAD_PROTO	= (1U << 5),	/* protocol not supported */
+	BAD_RATE	= (1U << 4),	/* cxn rate not supported */
+	WRONG_DEST	= (1U << 3),	/* wrong destination error */
+	CREDIT_TO	= (1U << 2),	/* credit timeout */
+	WDOG_TO 	= (1U << 1),	/* watchdog timeout */
+	BUF_PAR 	= (1U << 0),	/* buffer parity error */
+};
+
+enum error_info_rec_2 {
+	SLOT_BSY_ERR	= (1U << 31),	/* Slot Busy Error */
+	GRD_CHK_ERR	= (1U << 14),	/* Guard Check Error */
+	APP_CHK_ERR	= (1U << 13),	/* Application Check error */
+	REF_CHK_ERR	= (1U << 12),	/* Reference Check Error */
+	USR_BLK_NM	= (1U << 0),	/* User Block Number */
+};
+
+enum pci_cfg_register_bits {
+	PCTL_PWR_OFF	= (0xFU << 24),
+	PCTL_COM_ON	= (0xFU << 20),
+	PCTL_LINK_RST	= (0xFU << 16),
+	PCTL_LINK_OFFS	= (16),
+	PCTL_PHY_DSBL	= (0xFU << 12),
+	PCTL_PHY_DSBL_OFFS	= (12),
+	PRD_REQ_SIZE	= (0x4000),
+	PRD_REQ_MASK	= (0x00007000),
+	PLS_NEG_LINK_WD		= (0x3FU << 4),
+	PLS_NEG_LINK_WD_OFFS	= 4,
+	PLS_LINK_SPD		= (0x0FU << 0),
+	PLS_LINK_SPD_OFFS	= 0,
+};
+
+enum open_frame_protocol {
+	PROTOCOL_SMP	= 0x0,
+	PROTOCOL_SSP	= 0x1,
+	PROTOCOL_STP	= 0x2,
+};
+
+/* define for response frame datapres field */
+enum datapres_field {
+	NO_DATA		= 0,
+	RESPONSE_DATA	= 1,
+	SENSE_DATA	= 2,
+};
+
+/* define task management IU */
+struct mvs_tmf_task {
+	u8 tmf;
+	u16 tag_of_task_to_be_managed;
+};
+#endif

+ 703 - 0
drivers/scsi/mvsas/mv_init.c

@@ -0,0 +1,703 @@
+/*
+ * Marvell 88SE64xx/88SE94xx pci init
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+
+#include "mv_sas.h"
+
+static struct scsi_transport_template *mvs_stt;
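+/* Per-chip parameters, in mvs_chip_info field order: host-controller
+ * count (n_host) and phys per controller (n_phy), both used throughout
+ * this file, followed by chip-specific sizing fields and the register
+ * dispatch table for the 64xx vs 94xx families.
+ */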
+static const struct mvs_chip_info mvs_chips[] = {
+	[chip_6320] =	{ 1, 2, 0x400, 17, 16,  9, &mvs_64xx_dispatch, },
+	[chip_6440] =	{ 1, 4, 0x400, 17, 16,  9, &mvs_64xx_dispatch, },
+	[chip_6485] =	{ 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, },
+	[chip_9180] =	{ 2, 4, 0x800, 17, 64,  9, &mvs_94xx_dispatch, },
+	[chip_9480] =	{ 2, 4, 0x800, 17, 64,  9, &mvs_94xx_dispatch, },
+};
+
+#define SOC_SAS_NUM 2
+
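+/* can_queue and cmd_per_lun below are placeholders; the real values
+ * are computed per-chip in mvs_post_sas_ha_init().
+ */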
+static struct scsi_host_template mvs_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.queuecommand		= sas_queuecommand,
+	.target_alloc		= sas_target_alloc,
+	.slave_configure	= mvs_slave_configure,
+	.slave_destroy		= sas_slave_destroy,
+	.scan_finished		= mvs_scan_finished,
+	.scan_start		= mvs_scan_start,
+	.change_queue_depth	= sas_change_queue_depth,
+	.change_queue_type	= sas_change_queue_type,
+	.bios_param		= sas_bios_param,
+	.can_queue		= 1,
+	.cmd_per_lun		= 1,
+	.this_id		= -1,
+	.sg_tablesize		= SG_ALL,
+	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.eh_device_reset_handler	= sas_eh_device_reset_handler,
+	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
+	.slave_alloc		= mvs_slave_alloc,
+	.target_destroy		= sas_target_destroy,
+	.ioctl			= sas_ioctl,
+};
+
+static struct sas_domain_function_template mvs_transport_ops = {
+	.lldd_dev_found 	= mvs_dev_found,
+	.lldd_dev_gone	= mvs_dev_gone,
+
+	.lldd_execute_task	= mvs_queue_command,
+	.lldd_control_phy	= mvs_phy_control,
+
+	.lldd_abort_task	= mvs_abort_task,
+	.lldd_abort_task_set    = mvs_abort_task_set,
+	.lldd_clear_aca         = mvs_clear_aca,
+	.lldd_clear_task_set	= mvs_clear_task_set,
+	.lldd_I_T_nexus_reset	= mvs_I_T_nexus_reset,
+	.lldd_lu_reset 		= mvs_lu_reset,
+	.lldd_query_task	= mvs_query_task,
+
+	.lldd_port_formed	= mvs_port_formed,
+	.lldd_port_deformed     = mvs_port_deformed,
+
+};
+
+static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
+{
+	struct mvs_phy *phy = &mvi->phy[phy_id];
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+	phy->mvi = mvi;
+	init_timer(&phy->timer);
+	sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
+	sas_phy->class = SAS;
+	sas_phy->iproto = SAS_PROTOCOL_ALL;
+	sas_phy->tproto = 0;
+	sas_phy->type = PHY_TYPE_PHYSICAL;
+	sas_phy->role = PHY_ROLE_INITIATOR;
+	sas_phy->oob_mode = OOB_NOT_CONNECTED;
+	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
+
+	sas_phy->id = phy_id;
+	sas_phy->sas_addr = &mvi->sas_addr[0];
+	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
+	sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
+	sas_phy->lldd_phy = phy;
+}
+
+static void mvs_free(struct mvs_info *mvi)
+{
+	int i;
+	struct mvs_wq *mwq;
+	int slot_nr;
+
+	if (!mvi)
+		return;
+
+	if (mvi->flags & MVF_FLAG_SOC)
+		slot_nr = MVS_SOC_SLOTS;
+	else
+		slot_nr = MVS_SLOTS;
+
+	for (i = 0; i < mvi->tags_num; i++) {
+		struct mvs_slot_info *slot = &mvi->slot_info[i];
+		if (slot->buf)
+			dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
+					  slot->buf, slot->buf_dma);
+	}
+
+	if (mvi->tx)
+		dma_free_coherent(mvi->dev,
+				  sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
+				  mvi->tx, mvi->tx_dma);
+	if (mvi->rx_fis)
+		dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
+				  mvi->rx_fis, mvi->rx_fis_dma);
+	if (mvi->rx)
+		dma_free_coherent(mvi->dev,
+				  sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
+				  mvi->rx, mvi->rx_dma);
+	if (mvi->slot)
+		dma_free_coherent(mvi->dev,
+				  sizeof(*mvi->slot) * slot_nr,
+				  mvi->slot, mvi->slot_dma);
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+	if (mvi->bulk_buffer)
+		dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
+				  mvi->bulk_buffer, mvi->bulk_buffer_dma);
+#endif
+
+	MVS_CHIP_DISP->chip_iounmap(mvi);
+	if (mvi->shost)
+		scsi_host_put(mvi->shost);
+	list_for_each_entry(mwq, &mvi->wq_list, entry)
+		cancel_delayed_work(&mwq->work_q);
+	kfree(mvi);
+}
+
+#ifdef MVS_USE_TASKLET
+struct tasklet_struct	mv_tasklet;
+static void mvs_tasklet(unsigned long opaque)
+{
+	unsigned long flags;
+	u32 stat;
+	u16 core_nr, i = 0;
+
+	struct mvs_info *mvi;
+	struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque;
+
+	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
+
+	BUG_ON(!mvi);
+
+	for (i = 0; i < core_nr; i++) {
+		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
+		stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq);
+		if (stat)
+			MVS_CHIP_DISP->isr(mvi, mvi->irq, stat);
+	}
+
+}
+#endif
+
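+/* One PCI IRQ line serves all cores of a multi-host chip (n_host > 1
+ * in mvs_chips): the handler checks the first core's status, then
+ * either dispatches to every core's ISR or defers the whole pass to
+ * the tasklet when MVS_USE_TASKLET is set.
+ */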
+static irqreturn_t mvs_interrupt(int irq, void *opaque)
+{
+	u32 core_nr, i = 0;
+	u32 stat;
+	struct mvs_info *mvi;
+	struct sas_ha_struct *sha = opaque;
+
+	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
+
+	if (unlikely(!mvi))
+		return IRQ_NONE;
+
+	stat = MVS_CHIP_DISP->isr_status(mvi, irq);
+	if (!stat)
+		return IRQ_NONE;
+
+#ifdef MVS_USE_TASKLET
+	tasklet_schedule(&mv_tasklet);
+#else
+	for (i = 0; i < core_nr; i++) {
+		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
+		MVS_CHIP_DISP->isr(mvi, irq, stat);
+	}
+#endif
+	return IRQ_HANDLED;
+}
+
+static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
+{
+	int i, slot_nr;
+
+	if (mvi->flags & MVF_FLAG_SOC)
+		slot_nr = MVS_SOC_SLOTS;
+	else
+		slot_nr = MVS_SLOTS;
+
+	spin_lock_init(&mvi->lock);
+	for (i = 0; i < mvi->chip->n_phy; i++) {
+		mvs_phy_init(mvi, i);
+		mvi->port[i].wide_port_phymap = 0;
+		mvi->port[i].port_attached = 0;
+		INIT_LIST_HEAD(&mvi->port[i].list);
+	}
+	for (i = 0; i < MVS_MAX_DEVICES; i++) {
+		mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
+		mvi->devices[i].dev_type = NO_DEVICE;
+		mvi->devices[i].device_id = i;
+		mvi->devices[i].dev_status = MVS_DEV_NORMAL;
+	}
+
+	/*
+	 * alloc and init our DMA areas
+	 */
+	mvi->tx = dma_alloc_coherent(mvi->dev,
+				     sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
+				     &mvi->tx_dma, GFP_KERNEL);
+	if (!mvi->tx)
+		goto err_out;
+	memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
+	mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
+					 &mvi->rx_fis_dma, GFP_KERNEL);
+	if (!mvi->rx_fis)
+		goto err_out;
+	memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
+
+	mvi->rx = dma_alloc_coherent(mvi->dev,
+				     sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
+				     &mvi->rx_dma, GFP_KERNEL);
+	if (!mvi->rx)
+		goto err_out;
+	memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
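+	/* rx[0] mirrors the hardware completion-queue write pointer
+	 * (mvs_hba_cq_dump() prints it as CQ_WP); producer and consumer
+	 * both start at 0xfff, so the ring is initially empty.
+	 */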
+	mvi->rx[0] = cpu_to_le32(0xfff);
+	mvi->rx_cons = 0xfff;
+
+	mvi->slot = dma_alloc_coherent(mvi->dev,
+				       sizeof(*mvi->slot) * slot_nr,
+				       &mvi->slot_dma, GFP_KERNEL);
+	if (!mvi->slot)
+		goto err_out;
+	memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
+
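+	/* Unless DISABLE_HOTPLUG_DMA_FIX is set, keep a "trash bucket"
+	 * buffer that inbound DMA can be redirected into; see the
+	 * dma_fix() call in mvs_task_prep_ata() for its use with
+	 * DMA_FROM_DEVICE requests.
+	 */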
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+	mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
+				       TRASH_BUCKET_SIZE,
+				       &mvi->bulk_buffer_dma, GFP_KERNEL);
+	if (!mvi->bulk_buffer)
+		goto err_out;
+#endif
+	for (i = 0; i < slot_nr; i++) {
+		struct mvs_slot_info *slot = &mvi->slot_info[i];
+
+		slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
+					       &slot->buf_dma, GFP_KERNEL);
+		if (!slot->buf) {
+			printk(KERN_DEBUG"failed to allocate slot->buf.\n");
+			goto err_out;
+		}
+		memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
+		++mvi->tags_num;
+	}
+	/* Initialize tags */
+	mvs_tag_init(mvi);
+	return 0;
+err_out:
+	return 1;
+}
+
+
+int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
+{
+	unsigned long res_start, res_len, res_flag, res_flag_ex = 0;
+	struct pci_dev *pdev = mvi->pdev;
+	if (bar_ex != -1) {
+		/*
+		 * ioremap main and peripheral registers
+		 */
+		res_start = pci_resource_start(pdev, bar_ex);
+		res_len = pci_resource_len(pdev, bar_ex);
+		if (!res_start || !res_len)
+			goto err_out;
+
+		res_flag_ex = pci_resource_flags(pdev, bar_ex);
+		if (res_flag_ex & IORESOURCE_MEM) {
+			if (res_flag_ex & IORESOURCE_CACHEABLE)
+				mvi->regs_ex = ioremap(res_start, res_len);
+			else
+				mvi->regs_ex = ioremap_nocache(res_start,
+						res_len);
+		} else
+			mvi->regs_ex = (void *)res_start;
+		if (!mvi->regs_ex)
+			goto err_out;
+	}
+
+	res_start = pci_resource_start(pdev, bar);
+	res_len = pci_resource_len(pdev, bar);
+	if (!res_start || !res_len)
+		goto err_out;
+
+	res_flag = pci_resource_flags(pdev, bar);
+	if (res_flag & IORESOURCE_CACHEABLE)
+		mvi->regs = ioremap(res_start, res_len);
+	else
+		mvi->regs = ioremap_nocache(res_start, res_len);
+
+	if (!mvi->regs) {
+		if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
+			iounmap(mvi->regs_ex);
+		mvi->regs_ex = NULL;
+		goto err_out;
+	}
+
+	return 0;
+err_out:
+	return -1;
+}
+
+void mvs_iounmap(void __iomem *regs)
+{
+	iounmap(regs);
+}
+
+static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
+				const struct pci_device_id *ent,
+				struct Scsi_Host *shost, unsigned int id)
+{
+	struct mvs_info *mvi;
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+	mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info),
+			GFP_KERNEL);
+	if (!mvi)
+		return NULL;
+
+	mvi->pdev = pdev;
+	mvi->dev = &pdev->dev;
+	mvi->chip_id = ent->driver_data;
+	mvi->chip = &mvs_chips[mvi->chip_id];
+	INIT_LIST_HEAD(&mvi->wq_list);
+	mvi->irq = pdev->irq;
+
+	((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
+	((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;
+
+	mvi->id = id;
+	mvi->sas = sha;
+	mvi->shost = shost;
+#ifdef MVS_USE_TASKLET
+	tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha);
+#endif
+
+	if (MVS_CHIP_DISP->chip_ioremap(mvi))
+		goto err_out;
+	if (!mvs_alloc(mvi, shost))
+		return mvi;
+err_out:
+	mvs_free(mvi);
+	return NULL;
+}
+
+/* move to PCI layer or libata core? */
+static int pci_go_64(struct pci_dev *pdev)
+{
+	int rc;
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (rc) {
+			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+			if (rc) {
+				dev_printk(KERN_ERR, &pdev->dev,
+					   "64-bit DMA enable failed\n");
+				return rc;
+			}
+		}
+	} else {
+		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "32-bit DMA enable failed\n");
+			return rc;
+		}
+		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "32-bit consistent DMA enable failed\n");
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
+				const struct mvs_chip_info *chip_info)
+{
+	int phy_nr, port_nr;
+	unsigned short core_nr;
+	struct asd_sas_phy **arr_phy;
+	struct asd_sas_port **arr_port;
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+	core_nr = chip_info->n_host;
+	phy_nr  = core_nr * chip_info->n_phy;
+	port_nr = phy_nr;
+
+	memset(sha, 0x00, sizeof(struct sas_ha_struct));
+	arr_phy  = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
+	arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
+	if (!arr_phy || !arr_port)
+		goto exit_free;
+
+	sha->sas_phy = arr_phy;
+	sha->sas_port = arr_port;
+
+	sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
+	if (!sha->lldd_ha)
+		goto exit_free;
+
+	((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;
+
+	shost->transportt = mvs_stt;
+	shost->max_id = 128;
+	shost->max_lun = ~0;
+	shost->max_channel = 1;
+	shost->max_cmd_len = 16;
+
+	return 0;
+exit_free:
+	kfree(arr_phy);
+	kfree(arr_port);
+	return -1;
+
+}
+
+static void  __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
+			const struct mvs_chip_info *chip_info)
+{
+	int can_queue, i = 0, j = 0;
+	struct mvs_info *mvi = NULL;
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+
+	for (j = 0; j < nr_core; j++) {
+		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
+		for (i = 0; i < chip_info->n_phy; i++) {
+			sha->sas_phy[j * chip_info->n_phy  + i] =
+				&mvi->phy[i].sas_phy;
+			sha->sas_port[j * chip_info->n_phy + i] =
+				&mvi->port[i].sas_port;
+		}
+	}
+
+	sha->sas_ha_name = DRV_NAME;
+	sha->dev = mvi->dev;
+	sha->lldd_module = THIS_MODULE;
+	sha->sas_addr = &mvi->sas_addr[0];
+
+	sha->num_phys = nr_core * chip_info->n_phy;
+
+	sha->lldd_max_execute_num = 1;
+
+	if (mvi->flags & MVF_FLAG_SOC)
+		can_queue = MVS_SOC_CAN_QUEUE;
+	else
+		can_queue = MVS_CAN_QUEUE;
+
+	sha->lldd_queue_size = can_queue;
+	shost->can_queue = can_queue;
+	mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys;
+	sha->core.shost = mvi->shost;
+}
+
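+/* The SAS address is currently hard-coded (mvs_get_sas_addr() in
+ * mv_sas.c is stubbed out); every phy on the host gets the same
+ * placeholder WWN, byte-swapped to wire order.
+ */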
+static void mvs_init_sas_add(struct mvs_info *mvi)
+{
+	u8 i;
+	for (i = 0; i < mvi->chip->n_phy; i++) {
+		mvi->phy[i].dev_sas_addr =
+			cpu_to_be64(0x5005043011ab0000ULL);
+	}
+
+	memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
+}
+
+static int __devinit mvs_pci_init(struct pci_dev *pdev,
+				  const struct pci_device_id *ent)
+{
+	unsigned int rc, nhost = 0;
+	struct mvs_info *mvi;
+	irq_handler_t irq_handler = mvs_interrupt;
+	struct Scsi_Host *shost = NULL;
+	const struct mvs_chip_info *chip;
+
+	dev_printk(KERN_INFO, &pdev->dev,
+		"mvsas: driver version %s\n", DRV_VERSION);
+	rc = pci_enable_device(pdev);
+	if (rc)
+		goto err_out_enable;
+
+	pci_set_master(pdev);
+
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc)
+		goto err_out_disable;
+
+	rc = pci_go_64(pdev);
+	if (rc)
+		goto err_out_regions;
+
+	shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
+	if (!shost) {
+		rc = -ENOMEM;
+		goto err_out_regions;
+	}
+
+	chip = &mvs_chips[ent->driver_data];
+	SHOST_TO_SAS_HA(shost) =
+		kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
+	if (!SHOST_TO_SAS_HA(shost)) {
+		scsi_host_put(shost);
+		rc = -ENOMEM;
+		goto err_out_regions;
+	}
+
+	rc = mvs_prep_sas_ha_init(shost, chip);
+	if (rc) {
+		scsi_host_put(shost);
+		rc = -ENOMEM;
+		goto err_out_regions;
+	}
+
+	pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
+
+	do {
+		mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
+		if (!mvi) {
+			rc = -ENOMEM;
+			goto err_out_regions;
+		}
+
+		mvs_init_sas_add(mvi);
+
+		mvi->instance = nhost;
+		rc = MVS_CHIP_DISP->chip_init(mvi);
+		if (rc) {
+			mvs_free(mvi);
+			goto err_out_regions;
+		}
+		nhost++;
+	} while (nhost < chip->n_host);
+
+	mvs_post_sas_ha_init(shost, chip);
+
+	rc = scsi_add_host(shost, &pdev->dev);
+	if (rc)
+		goto err_out_shost;
+
+	rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
+	if (rc)
+		goto err_out_shost;
+	rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED,
+		DRV_NAME, SHOST_TO_SAS_HA(shost));
+	if (rc)
+		goto err_not_sas;
+
+	MVS_CHIP_DISP->interrupt_enable(mvi);
+
+	scsi_scan_host(mvi->shost);
+
+	return 0;
+
+err_not_sas:
+	sas_unregister_ha(SHOST_TO_SAS_HA(shost));
+err_out_shost:
+	scsi_remove_host(mvi->shost);
+err_out_regions:
+	pci_release_regions(pdev);
+err_out_disable:
+	pci_disable_device(pdev);
+err_out_enable:
+	return rc;
+}
+
+static void __devexit mvs_pci_remove(struct pci_dev *pdev)
+{
+	unsigned short core_nr, i = 0;
+	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+	struct mvs_info *mvi = NULL;
+
+	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
+
+#ifdef MVS_USE_TASKLET
+	tasklet_kill(&mv_tasklet);
+#endif
+
+	pci_set_drvdata(pdev, NULL);
+	sas_unregister_ha(sha);
+	sas_remove_host(mvi->shost);
+	scsi_remove_host(mvi->shost);
+
+	MVS_CHIP_DISP->interrupt_disable(mvi);
+	free_irq(mvi->irq, sha);
+	for (i = 0; i < core_nr; i++) {
+		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
+		mvs_free(mvi);
+	}
+	kfree(sha->sas_phy);
+	kfree(sha->sas_port);
+	kfree(sha);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	return;
+}
+
+static struct pci_device_id __devinitdata mvs_pci_table[] = {
+	{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
+	{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
+	{
+		.vendor 	= PCI_VENDOR_ID_MARVELL,
+		.device 	= 0x6440,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= 0x6480,
+		.class		= 0,
+		.class_mask	= 0,
+		.driver_data	= chip_6485,
+	},
+	{ PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
+	{ PCI_VDEVICE(MARVELL, 0x6485), chip_6485 },
+	{ PCI_VDEVICE(MARVELL, 0x9480), chip_9480 },
+	{ PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver mvs_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= mvs_pci_table,
+	.probe		= mvs_pci_init,
+	.remove		= __devexit_p(mvs_pci_remove),
+};
+
+/* task handler */
+struct task_struct *mvs_th;
+static int __init mvs_init(void)
+{
+	int rc;
+	mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
+	if (!mvs_stt)
+		return -ENOMEM;
+
+	rc = pci_register_driver(&mvs_pci_driver);
+
+	if (rc)
+		goto err_out;
+
+	return 0;
+
+err_out:
+	sas_release_transport(mvs_stt);
+	return rc;
+}
+
+static void __exit mvs_exit(void)
+{
+	pci_unregister_driver(&mvs_pci_driver);
+	sas_release_transport(mvs_stt);
+}
+
+module_init(mvs_init);
+module_exit(mvs_exit);
+
+MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
+MODULE_DESCRIPTION("Marvell 88SE64xx/94xx SAS/SATA controller driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+#ifdef CONFIG_PCI
+MODULE_DEVICE_TABLE(pci, mvs_pci_table);
+#endif

+ 2154 - 0
drivers/scsi/mvsas/mv_sas.c

@@ -0,0 +1,2154 @@
+/*
+ * Marvell 88SE64xx/88SE94xx main function
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#include "mv_sas.h"
+
+static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
+{
+	if (task->lldd_task) {
+		struct mvs_slot_info *slot;
+		slot = task->lldd_task;
+		*tag = slot->slot_tag;
+		return 1;
+	}
+	return 0;
+}
+
+void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
+{
+	void *bitmap = &mvi->tags;
+	clear_bit(tag, bitmap);
+}
+
+void mvs_tag_free(struct mvs_info *mvi, u32 tag)
+{
+	mvs_tag_clear(mvi, tag);
+}
+
+void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
+{
+	void *bitmap = &mvi->tags;
+	set_bit(tag, bitmap);
+}
+
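+/* Tags are allocated by a linear scan of the per-host bitmap and
+ * double as command-slot indices (mvi->slot_info[tag]), so bitmap
+ * exhaustion maps directly onto SAS_QUEUE_FULL.
+ */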
+inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
+{
+	unsigned int index, tag;
+	void *bitmap = &mvi->tags;
+
+	index = find_first_zero_bit(bitmap, mvi->tags_num);
+	tag = index;
+	if (tag >= mvi->tags_num)
+		return -SAS_QUEUE_FULL;
+	mvs_tag_set(mvi, tag);
+	*tag_out = tag;
+	return 0;
+}
+
+void mvs_tag_init(struct mvs_info *mvi)
+{
+	int i;
+	for (i = 0; i < mvi->tags_num; ++i)
+		mvs_tag_clear(mvi, i);
+}
+
+void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
+{
+	u32 i;
+	u32 run;
+	u32 offset;
+
+	offset = 0;
+	while (size) {
+		printk(KERN_DEBUG"%08X : ", baseaddr + offset);
+		if (size >= 16)
+			run = 16;
+		else
+			run = size;
+		size -= run;
+		for (i = 0; i < 16; i++) {
+			if (i < run)
+				printk(KERN_DEBUG"%02X ", (u32)data[i]);
+			else
+				printk(KERN_DEBUG"   ");
+		}
+		printk(KERN_DEBUG": ");
+		for (i = 0; i < run; i++)
+			printk(KERN_DEBUG"%c",
+				isalnum(data[i]) ? data[i] : '.');
+		printk(KERN_DEBUG"\n");
+		data = &data[16];
+		offset += run;
+	}
+	printk(KERN_DEBUG"\n");
+}
+
+#if (_MV_DUMP > 1)
+static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
+				   enum sas_protocol proto)
+{
+	u32 offset;
+	struct mvs_slot_info *slot = &mvi->slot_info[tag];
+
+	offset = slot->cmd_size + MVS_OAF_SZ +
+	    MVS_CHIP_DISP->prd_size() * slot->n_elem;
+	dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
+			tag);
+	mvs_hexdump(32, (u8 *) slot->response,
+		    (u32) slot->buf_dma + offset);
+}
+#endif
+
+static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
+				enum sas_protocol proto)
+{
+#if (_MV_DUMP > 1)
+	u32 sz, w_ptr;
+	u64 addr;
+	struct mvs_slot_info *slot = &mvi->slot_info[tag];
+
+	/*Delivery Queue */
+	sz = MVS_CHIP_SLOT_SZ;
+	w_ptr = slot->tx;
+	addr = mvi->tx_dma;
+	dev_printk(KERN_DEBUG, mvi->dev,
+		"Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
+	dev_printk(KERN_DEBUG, mvi->dev,
+		"Delivery Queue Base Address=0x%llX (PA)"
+		"(tx_dma=0x%llX), Entry=%04d\n",
+		addr, (unsigned long long)mvi->tx_dma, w_ptr);
+	mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
+			(u32) mvi->tx_dma + sizeof(u32) * w_ptr);
+	/*Command List */
+	addr = mvi->slot_dma;
+	dev_printk(KERN_DEBUG, mvi->dev,
+		"Command List Base Address=0x%llX (PA)"
+		"(slot_dma=0x%llX), Header=%03d\n",
+		addr, (unsigned long long)slot->buf_dma, tag);
+	dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
+	/*mvs_cmd_hdr */
+	mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
+		(u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
+	/*1.command table area */
+	dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
+	mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
+	/*2.open address frame area */
+	dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
+	mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
+				(u32) slot->buf_dma + slot->cmd_size);
+	/*3.status buffer */
+	mvs_hba_sb_dump(mvi, tag, proto);
+	/*4.PRD table */
+	dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
+	mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
+		(u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
+		(u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
+#endif
+}
+
+static void mvs_hba_cq_dump(struct mvs_info *mvi)
+{
+#if (_MV_DUMP > 2)
+	u64 addr;
+	void __iomem *regs = mvi->regs;
+	u32 entry = mvi->rx_cons + 1;
+	u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
+
+	/*Completion Queue */
+	addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
+	dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
+		   mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
+	dev_printk(KERN_DEBUG, mvi->dev,
+		"Completion List Base Address=0x%llX (PA), "
+		"CQ_Entry=%04d, CQ_WP=0x%08X\n",
+		addr, entry - 1, mvi->rx[0]);
+	mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
+		    mvi->rx_dma + sizeof(u32) * entry);
+#endif
+}
+
+void mvs_get_sas_addr(void *buf, u32 buflen)
+{
+	/*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
+}
+
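+/* Map a domain_device back to its owning mvs_info: locate the
+ * device's port in sha->sas_port[], take the first phy on that port,
+ * find its index in sha->sas_phy[], and divide by phys-per-host to
+ * select the controller.
+ */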
+struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
+{
+	unsigned long i = 0, j = 0, hi = 0;
+	struct sas_ha_struct *sha = dev->port->ha;
+	struct mvs_info *mvi = NULL;
+	struct asd_sas_phy *phy;
+
+	while (sha->sas_port[i]) {
+		if (sha->sas_port[i] == dev->port) {
+			phy =  container_of(sha->sas_port[i]->phy_list.next,
+				struct asd_sas_phy, port_phy_el);
+			j = 0;
+			while (sha->sas_phy[j]) {
+				if (sha->sas_phy[j] == phy)
+					break;
+				j++;
+			}
+			break;
+		}
+		i++;
+	}
+	hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
+	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
+
+	return mvi;
+
+}
+
+/* FIXME */
+int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
+{
+	unsigned long i = 0, j = 0, n = 0, num = 0;
+	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+	struct mvs_info *mvi = mvi_dev->mvi_info;
+	struct sas_ha_struct *sha = dev->port->ha;
+
+	while (sha->sas_port[i]) {
+		if (sha->sas_port[i] == dev->port) {
+			struct asd_sas_phy *phy;
+			list_for_each_entry(phy,
+				&sha->sas_port[i]->phy_list, port_phy_el) {
+				j = 0;
+				while (sha->sas_phy[j]) {
+					if (sha->sas_phy[j] == phy)
+						break;
+					j++;
+				}
+				phyno[n] = (j >= mvi->chip->n_phy) ?
+					(j - mvi->chip->n_phy) : j;
+				num++;
+				n++;
+			}
+			break;
+		}
+		i++;
+	}
+	return num;
+}
+
+static inline void mvs_free_reg_set(struct mvs_info *mvi,
+				struct mvs_device *dev)
+{
+	if (!dev) {
+		mv_printk("device has been free.\n");
+		return;
+	}
+	if (dev->runing_req != 0)
+		return;
+	if (dev->taskfileset == MVS_ID_NOT_MAPPED)
+		return;
+	MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
+}
+
+static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
+				struct mvs_device *dev)
+{
+	if (dev->taskfileset != MVS_ID_NOT_MAPPED)
+		return 0;
+	return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
+}
+
+void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
+{
+	u32 no;
+	for_each_phy(phy_mask, phy_mask, no) {
+		if (!(phy_mask & 1))
+			continue;
+		MVS_CHIP_DISP->phy_reset(mvi, no, hard);
+	}
+}
+
+/* FIXME: locking? */
+int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+			void *funcdata)
+{
+	int rc = 0, phy_id = sas_phy->id;
+	u32 tmp, i = 0, hi;
+	struct sas_ha_struct *sha = sas_phy->ha;
+	struct mvs_info *mvi = NULL;
+
+	while (sha->sas_phy[i]) {
+		if (sha->sas_phy[i] == sas_phy)
+			break;
+		i++;
+	}
+	hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
+	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
+
+	switch (func) {
+	case PHY_FUNC_SET_LINK_RATE:
+		MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
+		break;
+
+	case PHY_FUNC_HARD_RESET:
+		tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
+		if (tmp & PHY_RST_HARD)
+			break;
+		MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1);
+		break;
+
+	case PHY_FUNC_LINK_RESET:
+		MVS_CHIP_DISP->phy_enable(mvi, phy_id);
+		MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0);
+		break;
+
+	case PHY_FUNC_DISABLE:
+		MVS_CHIP_DISP->phy_disable(mvi, phy_id);
+		break;
+	case PHY_FUNC_RELEASE_SPINUP_HOLD:
+	default:
+		rc = -EOPNOTSUPP;
+	}
+	msleep(200);
+	return rc;
+}
+
+void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
+				u32 off_lo, u32 off_hi, u64 sas_addr)
+{
+	u32 lo = (u32)sas_addr;
+	u32 hi = (u32)(sas_addr>>32);
+
+	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
+	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
+	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
+	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
+}
+
+static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
+{
+	struct mvs_phy *phy = &mvi->phy[i];
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+	struct sas_ha_struct *sas_ha;
+	if (!phy->phy_attached)
+		return;
+
+	if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
+		&& phy->phy_type & PORT_TYPE_SAS) {
+		return;
+	}
+
+	sas_ha = mvi->sas;
+	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
+
+	if (sas_phy->phy) {
+		struct sas_phy *sphy = sas_phy->phy;
+
+		sphy->negotiated_linkrate = sas_phy->linkrate;
+		sphy->minimum_linkrate = phy->minimum_linkrate;
+		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+		sphy->maximum_linkrate = phy->maximum_linkrate;
+		sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
+	}
+
+	if (phy->phy_type & PORT_TYPE_SAS) {
+		struct sas_identify_frame *id;
+
+		id = (struct sas_identify_frame *)phy->frame_rcvd;
+		id->dev_type = phy->identify.device_type;
+		id->initiator_bits = SAS_PROTOCOL_ALL;
+		id->target_bits = phy->identify.target_port_protocols;
+	} else if (phy->phy_type & PORT_TYPE_SATA) {
+		/*Nothing*/
+	}
+	mv_dprintk("phy %d byte dmaded.\n", i + mvi->id * mvi->chip->n_phy);
+
+	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
+
+	mvi->sas->notify_port_event(sas_phy,
+				   PORTE_BYTES_DMAED);
+}
+
+int mvs_slave_alloc(struct scsi_device *scsi_dev)
+{
+	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
+	if (dev_is_sata(dev)) {
+		/* We don't need to rescan targets
+		 * if the REPORT_LUNS request fails
+		 */
+		if (scsi_dev->lun > 0)
+			return -ENXIO;
+		scsi_dev->tagged_supported = 1;
+	}
+
+	return sas_slave_alloc(scsi_dev);
+}
+
+int mvs_slave_configure(struct scsi_device *sdev)
+{
+	struct domain_device *dev = sdev_to_domain_dev(sdev);
+	int ret = sas_slave_configure(sdev);
+
+	if (ret)
+		return ret;
+	if (dev_is_sata(dev)) {
+		/* may set PIO mode */
+	#if MV_DISABLE_NCQ
+		struct ata_port *ap = dev->sata_dev.ap;
+		struct ata_device *adev = ap->link.device;
+		adev->flags |= ATA_DFLAG_NCQ_OFF;
+		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
+	#endif
+	}
+	return 0;
+}
+
+void mvs_scan_start(struct Scsi_Host *shost)
+{
+	int i, j;
+	unsigned short core_nr;
+	struct mvs_info *mvi;
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+
+	for (j = 0; j < core_nr; j++) {
+		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
+		for (i = 0; i < mvi->chip->n_phy; ++i)
+			mvs_bytes_dmaed(mvi, i);
+	}
+}
+
+int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+	/* give the phy enabling interrupt event time to come in (1s
+	 * is empirically about all it takes) */
+	if (time < HZ)
+		return 0;
+	/* Wait for discovery to finish */
+	scsi_flush_work(shost);
+	return 1;
+}
+
+static int mvs_task_prep_smp(struct mvs_info *mvi,
+			     struct mvs_task_exec_info *tei)
+{
+	int elem, rc, i;
+	struct sas_task *task = tei->task;
+	struct mvs_cmd_hdr *hdr = tei->hdr;
+	struct domain_device *dev = task->dev;
+	struct asd_sas_port *sas_port = dev->port;
+	struct scatterlist *sg_req, *sg_resp;
+	u32 req_len, resp_len, tag = tei->tag;
+	void *buf_tmp;
+	u8 *buf_oaf;
+	dma_addr_t buf_tmp_dma;
+	void *buf_prd;
+	struct mvs_slot_info *slot = &mvi->slot_info[tag];
+	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
+#if _MV_DUMP
+	u8 *buf_cmd;
+	void *from;
+#endif
+	/*
+	 * DMA-map SMP request, response buffers
+	 */
+	sg_req = &task->smp_task.smp_req;
+	elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
+	if (!elem)
+		return -ENOMEM;
+	req_len = sg_dma_len(sg_req);
+
+	sg_resp = &task->smp_task.smp_resp;
+	elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+	if (!elem) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+	resp_len = SB_RFB_MAX;
+
+	/* must be in dwords */
+	if ((req_len & 0x3) || (resp_len & 0x3)) {
+		rc = -EINVAL;
+		goto err_out_2;
+	}
+
+	/*
+	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+	 */
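+	/* The single slot buffer is carved into four regions, laid out
+	 * below in order: command table, open address frame, PRD table,
+	 * status buffer.
+	 */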
+
+	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
+	buf_tmp = slot->buf;
+	buf_tmp_dma = slot->buf_dma;
+
+#if _MV_DUMP
+	buf_cmd = buf_tmp;
+	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+	buf_tmp += req_len;
+	buf_tmp_dma += req_len;
+	slot->cmd_size = req_len;
+#else
+	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
+#endif
+
+	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+	buf_oaf = buf_tmp;
+	hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+	buf_tmp += MVS_OAF_SZ;
+	buf_tmp_dma += MVS_OAF_SZ;
+
+	/* region 3: PRD table *********************************** */
+	buf_prd = buf_tmp;
+	if (tei->n_elem)
+		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+	else
+		hdr->prd_tbl = 0;
+
+	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
+	buf_tmp += i;
+	buf_tmp_dma += i;
+
+	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
+	slot->response = buf_tmp;
+	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+	if (mvi->flags & MVF_FLAG_SOC)
+		hdr->reserved[0] = 0;
+
+	/*
+	 * Fill in TX ring and command slot header
+	 */
+	slot->tx = mvi->tx_prod;
+	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
+					TXQ_MODE_I | tag |
+					(sas_port->phy_mask << TXQ_PHY_SHIFT));
+
+	hdr->flags |= flags;
+	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
+	hdr->tags = cpu_to_le32(tag);
+	hdr->data_len = 0;
+
+	/* generate open address frame hdr (first 12 bytes) */
+	/* initiator, SMP, ftype 1h */
+	buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
+	buf_oaf[1] = dev->linkrate & 0xf;
+	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
+	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
+
+	/* fill in PRD (scatter/gather) table, if any */
+	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
+
+#if _MV_DUMP
+	/* copy cmd table */
+	from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
+	memcpy(buf_cmd, from + sg_req->offset, req_len);
+	kunmap_atomic(from, KM_IRQ0);
+#endif
+	return 0;
+
+err_out_2:
+	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
+		     PCI_DMA_FROMDEVICE);
+err_out:
+	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
+		     PCI_DMA_TODEVICE);
+	return rc;
+}
+
+static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
+{
+	struct ata_queued_cmd *qc = task->uldd_task;
+
+	if (qc) {
+		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+			qc->tf.command == ATA_CMD_FPDMA_READ) {
+			*tag = qc->tag;
+			return 1;
+		}
+	}
+
+	return 0;
+}
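+/* For NCQ commands the hardware tag must match libata's FPDMA tag;
+ * mvs_task_prep_ata() shifts it into the FIS sector-count field and
+ * stores it in the command header's tag word.
+ */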
+
+static int mvs_task_prep_ata(struct mvs_info *mvi,
+			     struct mvs_task_exec_info *tei)
+{
+	struct sas_task *task = tei->task;
+	struct domain_device *dev = task->dev;
+	struct mvs_device *mvi_dev = dev->lldd_dev;
+	struct mvs_cmd_hdr *hdr = tei->hdr;
+	struct asd_sas_port *sas_port = dev->port;
+	struct mvs_slot_info *slot;
+	void *buf_prd;
+	u32 tag = tei->tag, hdr_tag;
+	u32 flags, del_q;
+	void *buf_tmp;
+	u8 *buf_cmd, *buf_oaf;
+	dma_addr_t buf_tmp_dma;
+	u32 i, req_len, resp_len;
+	const u32 max_resp_len = SB_RFB_MAX;
+
+	if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
+		mv_dprintk("Have not enough regiset for dev %d.\n",
+			mvi_dev->device_id);
+		return -EBUSY;
+	}
+	slot = &mvi->slot_info[tag];
+	slot->tx = mvi->tx_prod;
+	del_q = TXQ_MODE_I | tag |
+		(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
+		(sas_port->phy_mask << TXQ_PHY_SHIFT) |
+		(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
+	mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
+
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+	if (task->data_dir == DMA_FROM_DEVICE)
+		flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
+	else
+		flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
+#else
+	flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
+#endif
+	if (task->ata_task.use_ncq)
+		flags |= MCH_FPDMA;
+	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
+		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
+			flags |= MCH_ATAPI;
+	}
+
+	/* FIXME: fill in port multiplier number */
+
+	hdr->flags = cpu_to_le32(flags);
+
+	/* FIXME: the low-order 5 bits carry the TAG when NCQ is enabled */
+	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
+		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
+	else
+		hdr_tag = tag;
+
+	hdr->tags = cpu_to_le32(hdr_tag);
+
+	hdr->data_len = cpu_to_le32(task->total_xfer_len);
+
+	/*
+	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+	 */
+
+	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
+	buf_cmd = buf_tmp = slot->buf;
+	buf_tmp_dma = slot->buf_dma;
+
+	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+
+	buf_tmp += MVS_ATA_CMD_SZ;
+	buf_tmp_dma += MVS_ATA_CMD_SZ;
+#if _MV_DUMP
+	slot->cmd_size = MVS_ATA_CMD_SZ;
+#endif
+
+	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+	/* used for STP.  unused for SATA? */
+	buf_oaf = buf_tmp;
+	hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+	buf_tmp += MVS_OAF_SZ;
+	buf_tmp_dma += MVS_OAF_SZ;
+
+	/* region 3: PRD table ********************************************* */
+	buf_prd = buf_tmp;
+
+	if (tei->n_elem)
+		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+	else
+		hdr->prd_tbl = 0;
+	i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();
+
+	buf_tmp += i;
+	buf_tmp_dma += i;
+
+	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
+	/* FIXME: probably unused, for SATA.  kept here just in case
+	 * we get a STP/SATA error information record
+	 */
+	slot->response = buf_tmp;
+	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+	if (mvi->flags & MVF_FLAG_SOC)
+		hdr->reserved[0] = 0;
+
+	req_len = sizeof(struct host_to_dev_fis);
+	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
+	    sizeof(struct mvs_err_info) - i;
+
+	/* request, response lengths */
+	resp_len = min(resp_len, max_resp_len);
+	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
+
+	if (likely(!task->ata_task.device_control_reg_update))
+		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
+	/* fill in command FIS and ATAPI CDB */
+	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
+	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
+		memcpy(buf_cmd + STP_ATAPI_CMD,
+			task->ata_task.atapi_packet, 16);
+
+	/* generate open address frame hdr (first 12 bytes) */
+	/* initiator, STP, ftype 1h */
+	buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
+	buf_oaf[1] = dev->linkrate & 0xf;
+	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
+	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
+
+	/* fill in PRD (scatter/gather) table, if any */
+	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+	if (task->data_dir == DMA_FROM_DEVICE)
+		MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma,
+				TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
+#endif
+	return 0;
+}
+
+static int mvs_task_prep_ssp(struct mvs_info *mvi,
+			     struct mvs_task_exec_info *tei, int is_tmf,
+			     struct mvs_tmf_task *tmf)
+{
+	struct sas_task *task = tei->task;
+	struct mvs_cmd_hdr *hdr = tei->hdr;
+	struct mvs_port *port = tei->port;
+	struct domain_device *dev = task->dev;
+	struct mvs_device *mvi_dev = dev->lldd_dev;
+	struct asd_sas_port *sas_port = dev->port;
+	struct mvs_slot_info *slot;
+	void *buf_prd;
+	struct ssp_frame_hdr *ssp_hdr;
+	void *buf_tmp;
+	u8 *buf_cmd, *buf_oaf, fburst = 0;
+	dma_addr_t buf_tmp_dma;
+	u32 flags;
+	u32 resp_len, req_len, i, tag = tei->tag;
+	const u32 max_resp_len = SB_RFB_MAX;
+	u32 phy_mask;
+
+	slot = &mvi->slot_info[tag];
+
+	phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
+		sas_port->phy_mask) & TXQ_PHY_MASK;
+
+	slot->tx = mvi->tx_prod;
+	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
+				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
+				(phy_mask << TXQ_PHY_SHIFT));
+
+	flags = MCH_RETRY;
+	if (task->ssp_task.enable_first_burst) {
+		flags |= MCH_FBURST;
+		fburst = (1 << 7);
+	}
+	if (is_tmf)
+		flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
+	else
+		flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
+	hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
+	hdr->tags = cpu_to_le32(tag);
+	hdr->data_len = cpu_to_le32(task->total_xfer_len);
+
+	/*
+	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+	 */
+
+	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
+	buf_cmd = buf_tmp = slot->buf;
+	buf_tmp_dma = slot->buf_dma;
+
+	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+
+	buf_tmp += MVS_SSP_CMD_SZ;
+	buf_tmp_dma += MVS_SSP_CMD_SZ;
+#if _MV_DUMP
+	slot->cmd_size = MVS_SSP_CMD_SZ;
+#endif
+
+	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+	buf_oaf = buf_tmp;
+	hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+	buf_tmp += MVS_OAF_SZ;
+	buf_tmp_dma += MVS_OAF_SZ;
+
+	/* region 3: PRD table ********************************************* */
+	buf_prd = buf_tmp;
+	if (tei->n_elem)
+		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+	else
+		hdr->prd_tbl = 0;
+
+	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
+	buf_tmp += i;
+	buf_tmp_dma += i;
+
+	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
+	slot->response = buf_tmp;
+	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+	if (mvi->flags & MVF_FLAG_SOC)
+		hdr->reserved[0] = 0;
+
+	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
+	    sizeof(struct mvs_err_info) - i;
+	resp_len = min(resp_len, max_resp_len);
+
+	req_len = sizeof(struct ssp_frame_hdr) + 28;
+
+	/* request, response lengths */
+	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
+
+	/* generate open address frame hdr (first 12 bytes) */
+	/* initiator, SSP, ftype 1h */
+	buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
+	buf_oaf[1] = dev->linkrate & 0xf;
+	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
+	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
+
+	/* fill in SSP frame header (Command Table.SSP frame header) */
+	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
+
+	if (is_tmf)
+		ssp_hdr->frame_type = SSP_TASK;
+	else
+		ssp_hdr->frame_type = SSP_COMMAND;
+
+	memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
+	       HASHED_SAS_ADDR_SIZE);
+	memcpy(ssp_hdr->hashed_src_addr,
+	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
+	ssp_hdr->tag = cpu_to_be16(tag);
+
+	/* fill in IU for TASK and Command Frame */
+	buf_cmd += sizeof(*ssp_hdr);
+	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
+
+	if (ssp_hdr->frame_type != SSP_TASK) {
+		buf_cmd[9] = fburst | task->ssp_task.task_attr |
+				(task->ssp_task.task_prio << 3);
+		memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
+	} else {
+		buf_cmd[10] = tmf->tmf;
+		switch (tmf->tmf) {
+		case TMF_ABORT_TASK:
+		case TMF_QUERY_TASK:
+			buf_cmd[12] =
+				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
+			buf_cmd[13] =
+				tmf->tag_of_task_to_be_managed & 0xff;
+			break;
+		default:
+			break;
+		}
+	}
+	/* fill in PRD (scatter/gather) table, if any */
+	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
+	return 0;
+}
+
+#define	DEV_IS_GONE(mvi_dev)	((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
+static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
+				struct completion *completion, int is_tmf,
+				struct mvs_tmf_task *tmf)
+{
+	struct domain_device *dev = task->dev;
+	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+	struct mvs_info *mvi = mvi_dev->mvi_info;
+	struct mvs_task_exec_info tei;
+	struct sas_task *t = task;
+	struct mvs_slot_info *slot;
+	u32 tag = 0xdeadbeef, rc, n_elem = 0;
+	u32 n = num, pass = 0;
+	unsigned long flags = 0;
+
+	if (!dev->port) {
+		struct task_status_struct *tsm = &t->task_status;
+
+		tsm->resp = SAS_TASK_UNDELIVERED;
+		tsm->stat = SAS_PHY_DOWN;
+		t->task_done(t);
+		return 0;
+	}
+
+	spin_lock_irqsave(&mvi->lock, flags);
+	do {
+		dev = t->dev;
+		mvi_dev = dev->lldd_dev;
+		if (DEV_IS_GONE(mvi_dev)) {
+			if (mvi_dev)
+				mv_dprintk("device %d not ready.\n",
+					mvi_dev->device_id);
+			else
+				mv_dprintk("device %016llx not ready.\n",
+					SAS_ADDR(dev->sas_addr));
+
+			rc = SAS_PHY_DOWN;
+			goto out_done;
+		}
+
+		if (dev->port->id >= mvi->chip->n_phy)
+			tei.port = &mvi->port[dev->port->id - mvi->chip->n_phy];
+		else
+			tei.port = &mvi->port[dev->port->id];
+
+		if (!tei.port->port_attached) {
+			if (sas_protocol_ata(t->task_proto)) {
+				mv_dprintk("port %d does not"
+					"attached device.\n", dev->port->id);
+				rc = SAS_PHY_DOWN;
+				goto out_done;
+			} else {
+				struct task_status_struct *ts = &t->task_status;
+				ts->resp = SAS_TASK_UNDELIVERED;
+				ts->stat = SAS_PHY_DOWN;
+				t->task_done(t);
+				if (n > 1)
+					t = list_entry(t->list.next,
+							struct sas_task, list);
+				continue;
+			}
+		}
+
+		if (!sas_protocol_ata(t->task_proto)) {
+			if (t->num_scatter) {
+				n_elem = dma_map_sg(mvi->dev,
+						    t->scatter,
+						    t->num_scatter,
+						    t->data_dir);
+				if (!n_elem) {
+					rc = -ENOMEM;
+					goto err_out;
+				}
+			}
+		} else {
+			n_elem = t->num_scatter;
+		}
+
+		rc = mvs_tag_alloc(mvi, &tag);
+		if (rc)
+			goto err_out;
+
+		slot = &mvi->slot_info[tag];
+
+
+		t->lldd_task = NULL;
+		slot->n_elem = n_elem;
+		slot->slot_tag = tag;
+		memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
+
+		tei.task = t;
+		tei.hdr = &mvi->slot[tag];
+		tei.tag = tag;
+		tei.n_elem = n_elem;
+		switch (t->task_proto) {
+		case SAS_PROTOCOL_SMP:
+			rc = mvs_task_prep_smp(mvi, &tei);
+			break;
+		case SAS_PROTOCOL_SSP:
+			rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
+			break;
+		case SAS_PROTOCOL_SATA:
+		case SAS_PROTOCOL_STP:
+		case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+			rc = mvs_task_prep_ata(mvi, &tei);
+			break;
+		default:
+			dev_printk(KERN_ERR, mvi->dev,
+				"unknown sas_task proto: 0x%x\n",
+				t->task_proto);
+			rc = -EINVAL;
+			break;
+		}
+
+		if (rc) {
+			mv_dprintk("rc is %x\n", rc);
+			goto err_out_tag;
+		}
+		slot->task = t;
+		slot->port = tei.port;
+		t->lldd_task = slot;
+		list_add_tail(&slot->entry, &tei.port->list);
+		/* TODO: select normal or high priority */
+		spin_lock(&t->task_state_lock);
+		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
+		spin_unlock(&t->task_state_lock);
+
+		mvs_hba_memory_dump(mvi, tag, t->task_proto);
+		mvi_dev->runing_req++;
+		++pass;
+		mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
+		if (n > 1)
+			t = list_entry(t->list.next, struct sas_task, list);
+	} while (--n);
+	rc = 0;
+	goto out_done;
+
+err_out_tag:
+	mvs_tag_free(mvi, tag);
+err_out:
+
+	dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
+	if (!sas_protocol_ata(t->task_proto))
+		if (n_elem)
+			dma_unmap_sg(mvi->dev, t->scatter, n_elem,
+				     t->data_dir);
+out_done:
+	if (likely(pass)) {
+		MVS_CHIP_DISP->start_delivery(mvi,
+			(mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
+	}
+	spin_unlock_irqrestore(&mvi->lock, flags);
+	return rc;
+}
+
+int mvs_queue_command(struct sas_task *task, const int num,
+			gfp_t gfp_flags)
+{
+	return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
+}
+
+static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
+{
+	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
+	mvs_tag_clear(mvi, slot_idx);
+}
+
+static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
+			  struct mvs_slot_info *slot, u32 slot_idx)
+{
+	if (!slot->task)
+		return;
+	if (!sas_protocol_ata(task->task_proto))
+		if (slot->n_elem)
+			dma_unmap_sg(mvi->dev, task->scatter,
+				     slot->n_elem, task->data_dir);
+
+	switch (task->task_proto) {
+	case SAS_PROTOCOL_SMP:
+		dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
+			     PCI_DMA_FROMDEVICE);
+		dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
+			     PCI_DMA_TODEVICE);
+		break;
+
+	case SAS_PROTOCOL_SATA:
+	case SAS_PROTOCOL_STP:
+	case SAS_PROTOCOL_SSP:
+	default:
+		/* do nothing */
+		break;
+	}
+	list_del_init(&slot->entry);
+	task->lldd_task = NULL;
+	slot->task = NULL;
+	slot->port = NULL;
+	slot->slot_tag = 0xFFFFFFFF;
+	mvs_slot_free(mvi, slot_idx);
+}
+
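+/* Propagate wide-port membership to the hardware: each phy that is a
+ * member of the port (low bit of the iterator mask, as tested below)
+ * has the full phymap written to its PHYR_WIDE_PORT register;
+ * non-members get 0.
+ */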
+static void mvs_update_wideport(struct mvs_info *mvi, int i)
+{
+	struct mvs_phy *phy = &mvi->phy[i];
+	struct mvs_port *port = phy->port;
+	int j, no;
+
+	for_each_phy(port->wide_port_phymap, j, no) {
+		if (j & 1) {
+			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
+						PHYR_WIDE_PORT);
+			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
+						port->wide_port_phymap);
+		} else {
+			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
+						PHYR_WIDE_PORT);
+			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
+						0);
+		}
+	}
+}
+
+static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
+{
+	u32 tmp;
+	struct mvs_phy *phy = &mvi->phy[i];
+	struct mvs_port *port = phy->port;
+
+	tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
+	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
+		if (!port)
+			phy->phy_attached = 1;
+		return tmp;
+	}
+
+	if (port) {
+		if (phy->phy_type & PORT_TYPE_SAS) {
+			port->wide_port_phymap &= ~(1U << i);
+			if (!port->wide_port_phymap)
+				port->port_attached = 0;
+			mvs_update_wideport(mvi, i);
+		} else if (phy->phy_type & PORT_TYPE_SATA)
+			port->port_attached = 0;
+		phy->port = NULL;
+		phy->phy_attached = 0;
+		phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+	}
+	return 0;
+}
+
+static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
+{
+	u32 *s = (u32 *) buf;
+
+	if (!s)
+		return NULL;
+
+	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
+	s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+
+	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
+	s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+
+	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
+	s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+
+	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
+	s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+
+	/* Workaround: treat some ATAPI devices as ATA */
+	if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
+		s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
+
+	return s;
+}
+
+static u32 mvs_is_sig_fis_received(u32 irq_status)
+{
+	return irq_status & PHYEV_SIG_FIS;
+}
+
+void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
+{
+	struct mvs_phy *phy = &mvi->phy[i];
+	struct sas_identify_frame *id;
+
+	id = (struct sas_identify_frame *)phy->frame_rcvd;
+
+	if (get_st) {
+		phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
+		phy->phy_status = mvs_is_phy_ready(mvi, i);
+	}
+
+	if (phy->phy_status) {
+		int oob_done = 0;
+		struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;
+
+		oob_done = MVS_CHIP_DISP->oob_done(mvi, i);
+
+		MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
+		if (phy->phy_type & PORT_TYPE_SATA) {
+			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
+			if (mvs_is_sig_fis_received(phy->irq_status)) {
+				phy->phy_attached = 1;
+				phy->att_dev_sas_addr =
+					i + mvi->id * mvi->chip->n_phy;
+				if (oob_done)
+					sas_phy->oob_mode = SATA_OOB_MODE;
+				phy->frame_rcvd_size =
+				    sizeof(struct dev_to_host_fis);
+				mvs_get_d2h_reg(mvi, i, id);
+			} else {
+				u32 tmp;
+				dev_printk(KERN_DEBUG, mvi->dev,
+					"Phy%d : No sig fis\n", i);
+				tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
+				MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
+						tmp | PHYEV_SIG_FIS);
+				phy->phy_attached = 0;
+				phy->phy_type &= ~PORT_TYPE_SATA;
+				MVS_CHIP_DISP->phy_reset(mvi, i, 0);
+				goto out_done;
+			}
+		} else if (phy->phy_type & PORT_TYPE_SAS ||
+			phy->att_dev_info & PORT_SSP_INIT_MASK) {
+			phy->phy_attached = 1;
+			phy->identify.device_type =
+				phy->att_dev_info & PORT_DEV_TYPE_MASK;
+
+			if (phy->identify.device_type == SAS_END_DEV)
+				phy->identify.target_port_protocols =
+							SAS_PROTOCOL_SSP;
+			else if (phy->identify.device_type != NO_DEVICE)
+				phy->identify.target_port_protocols =
+							SAS_PROTOCOL_SMP;
+			if (oob_done)
+				sas_phy->oob_mode = SAS_OOB_MODE;
+			phy->frame_rcvd_size =
+			    sizeof(struct sas_identify_frame);
+		}
+		memcpy(sas_phy->attached_sas_addr,
+			&phy->att_dev_sas_addr, SAS_ADDR_SIZE);
+
+		if (MVS_CHIP_DISP->phy_work_around)
+			MVS_CHIP_DISP->phy_work_around(mvi, i);
+	}
+	mv_dprintk("port %d attach dev info is %x\n",
+		i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
+	mv_dprintk("port %d attach sas addr is %llx\n",
+		i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
+out_done:
+	if (get_st)
+		MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
+}
+
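+/*
+ * Map the libsas phy back to its owning mvs_info and port, mark the
+ * port attached and, for SAS phys, program the new wide-port phy map.
+ */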
+static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
+{
+	struct sas_ha_struct *sas_ha = sas_phy->ha;
+	struct mvs_info *mvi = NULL;
+	int i = 0, hi;
+	struct mvs_phy *phy = sas_phy->lldd_phy;
+	struct asd_sas_port *sas_port = sas_phy->port;
+	struct mvs_port *port;
+	unsigned long flags = 0;
+	if (!sas_port)
+		return;
+
+	while (sas_ha->sas_phy[i]) {
+		if (sas_ha->sas_phy[i] == sas_phy)
+			break;
+		i++;
+	}
+	hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
+	mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
+	if (sas_port->id >= mvi->chip->n_phy)
+		port = &mvi->port[sas_port->id - mvi->chip->n_phy];
+	else
+		port = &mvi->port[sas_port->id];
+	if (lock)
+		spin_lock_irqsave(&mvi->lock, flags);
+	port->port_attached = 1;
+	phy->port = port;
+	if (phy->phy_type & PORT_TYPE_SAS) {
+		port->wide_port_phymap = sas_port->phy_mask;
+		mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
+		mvs_update_wideport(mvi, sas_phy->id);
+	}
+	if (lock)
+		spin_unlock_irqrestore(&mvi->lock, flags);
+}
+
+static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
+{
+	/* nothing to do */
+}
+
+
+void mvs_port_formed(struct asd_sas_phy *sas_phy)
+{
+	mvs_port_notify_formed(sas_phy, 1);
+}
+
+void mvs_port_deformed(struct asd_sas_phy *sas_phy)
+{
+	mvs_port_notify_deformed(sas_phy, 1);
+}
+
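+/* linear scan of the device table for the first free entry */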
+struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
+{
+	u32 dev;
+	for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
+		if (mvi->devices[dev].dev_type == NO_DEVICE) {
+			mvi->devices[dev].device_id = dev;
+			return &mvi->devices[dev];
+		}
+	}
+
+	mv_printk("supports at most %d devices, ignoring.\n",
+		MVS_MAX_DEVICES);
+
+	return NULL;
+}
+
+void mvs_free_dev(struct mvs_device *mvi_dev)
+{
+	u32 id = mvi_dev->device_id;
+	memset(mvi_dev, 0, sizeof(*mvi_dev));
+	mvi_dev->device_id = id;
+	mvi_dev->dev_type = NO_DEVICE;
+	mvi_dev->dev_status = MVS_DEV_NORMAL;
+	mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
+}
+
+int mvs_dev_found_notify(struct domain_device *dev, int lock)
+{
+	unsigned long flags = 0;
+	int res = 0;
+	struct mvs_info *mvi = NULL;
+	struct domain_device *parent_dev = dev->parent;
+	struct mvs_device *mvi_device;
+
+	mvi = mvs_find_dev_mvi(dev);
+
+	if (lock)
+		spin_lock_irqsave(&mvi->lock, flags);
+
+	mvi_device = mvs_alloc_dev(mvi);
+	if (!mvi_device) {
+		res = -1;
+		goto found_out;
+	}
+	dev->lldd_dev = mvi_device;
+	mvi_device->dev_type = dev->dev_type;
+	mvi_device->mvi_info = mvi;
+	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
+		int phy_id;
+		u8 phy_num = parent_dev->ex_dev.num_phys;
+		struct ex_phy *phy;
+		for (phy_id = 0; phy_id < phy_num; phy_id++) {
+			phy = &parent_dev->ex_dev.ex_phy[phy_id];
+			if (SAS_ADDR(phy->attached_sas_addr) ==
+				SAS_ADDR(dev->sas_addr)) {
+				mvi_device->attached_phy = phy_id;
+				break;
+			}
+		}
+
+		if (phy_id == phy_num) {
+			mv_printk("Error: no attached dev %016llx "
+				"at ex %016llx.\n",
+				SAS_ADDR(dev->sas_addr),
+				SAS_ADDR(parent_dev->sas_addr));
+			res = -1;
+		}
+	}
+
+found_out:
+	if (lock)
+		spin_unlock_irqrestore(&mvi->lock, flags);
+	return res;
+}
+
+int mvs_dev_found(struct domain_device *dev)
+{
+	return mvs_dev_found_notify(dev, 1);
+}
+
+void mvs_dev_gone_notify(struct domain_device *dev, int lock)
+{
+	unsigned long flags = 0;
+	struct mvs_device *mvi_dev = dev->lldd_dev;
+	struct mvs_info *mvi;
+
+	/* don't dereference mvi_dev before checking it */
+	if (!mvi_dev) {
+		mv_dprintk("found dev has gone.\n");
+		dev->lldd_dev = NULL;
+		return;
+	}
+	mvi = mvi_dev->mvi_info;
+
+	if (lock)
+		spin_lock_irqsave(&mvi->lock, flags);
+
+	mv_dprintk("found dev[%d:%x] is gone.\n",
+		mvi_dev->device_id, mvi_dev->dev_type);
+	mvs_free_reg_set(mvi, mvi_dev);
+	mvs_free_dev(mvi_dev);
+	dev->lldd_dev = NULL;
+
+	if (lock)
+		spin_unlock_irqrestore(&mvi->lock, flags);
+}
+
+
+void mvs_dev_gone(struct domain_device *dev)
+{
+	mvs_dev_gone_notify(dev, 1);
+}
+
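+/* allocate and minimally initialize a sas_task for internal TMF use */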
+static struct sas_task *mvs_alloc_task(void)
+{
+	struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL);
+
+	if (task) {
+		INIT_LIST_HEAD(&task->list);
+		spin_lock_init(&task->task_state_lock);
+		task->task_state_flags = SAS_TASK_STATE_PENDING;
+		init_timer(&task->timer);
+		init_completion(&task->completion);
+	}
+	return task;
+}
+
+static void mvs_free_task(struct sas_task *task)
+{
+	if (task) {
+		BUG_ON(!list_empty(&task->list));
+		kfree(task);
+	}
+}
+
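+/*
+ * Completion callback for internal tasks.  If del_timer() returns 0 the
+ * timeout handler has already fired and owns the completion, so only
+ * complete() here when the timer was still pending.
+ */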
+static void mvs_task_done(struct sas_task *task)
+{
+	if (!del_timer(&task->timer))
+		return;
+	complete(&task->completion);
+}
+
+static void mvs_tmf_timedout(unsigned long data)
+{
+	struct sas_task *task = (struct sas_task *)data;
+
+	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+	complete(&task->completion);
+}
+
+/* XXX: timeout for internally generated TMF tasks, in seconds */
+#define MVS_TASK_TIMEOUT 20
+static int mvs_exec_internal_tmf_task(struct domain_device *dev,
+			void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
+{
+	int res, retry;
+	struct sas_task *task = NULL;
+
+	for (retry = 0; retry < 3; retry++) {
+		task = mvs_alloc_task();
+		if (!task)
+			return -ENOMEM;
+
+		task->dev = dev;
+		task->task_proto = dev->tproto;
+
+		memcpy(&task->ssp_task, parameter, para_len);
+		task->task_done = mvs_task_done;
+
+		task->timer.data = (unsigned long) task;
+		task->timer.function = mvs_tmf_timedout;
+		task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
+		add_timer(&task->timer);
+
+		res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);
+
+		if (res) {
+			del_timer(&task->timer);
+			mv_printk("executing internal task failed: %d\n", res);
+			goto ex_err;
+		}
+
+		wait_for_completion(&task->completion);
+		res = -TMF_RESP_FUNC_FAILED;
+		/* Even if the TMF timed out, return directly. */
+		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+				mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
+				goto ex_err;
+			}
+		}
+
+		if (task->task_status.resp == SAS_TASK_COMPLETE &&
+		    task->task_status.stat == SAM_GOOD) {
+			res = TMF_RESP_FUNC_COMPLETE;
+			break;
+		}
+
+		if (task->task_status.resp == SAS_TASK_COMPLETE &&
+		      task->task_status.stat == SAS_DATA_UNDERRUN) {
+			/* no error, but return the number of bytes of
+			 * underrun */
+			res = task->task_status.residual;
+			break;
+		}
+
+		if (task->task_status.resp == SAS_TASK_COMPLETE &&
+		      task->task_status.stat == SAS_DATA_OVERRUN) {
+			mv_dprintk("blocked task error.\n");
+			res = -EMSGSIZE;
+			break;
+		} else {
+			mv_dprintk(" task to dev %016llx response: 0x%x "
+				    "status 0x%x\n",
+				    SAS_ADDR(dev->sas_addr),
+				    task->task_status.resp,
+				    task->task_status.stat);
+			mvs_free_task(task);
+			task = NULL;
+
+		}
+	}
+ex_err:
+	BUG_ON(retry == 3 && task != NULL);
+	if (task != NULL)
+		mvs_free_task(task);
+	return res;
+}
+
+static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
+				u8 *lun, struct mvs_tmf_task *tmf)
+{
+	struct sas_ssp_task ssp_task;
+
+	if (!(dev->tproto & SAS_PROTOCOL_SSP))
+		return TMF_RESP_FUNC_ESUPP;
+
+	/* don't leak uninitialized stack data into the task */
+	memset(&ssp_task, 0, sizeof(ssp_task));
+	/* the LUN is an 8-byte binary field, not a C string */
+	memcpy(&ssp_task.LUN, lun, 8);
+
+	return mvs_exec_internal_tmf_task(dev, &ssp_task,
+				sizeof(ssp_task), tmf);
+}
+
+
+/*  The standard mandates a link reset for ATA (reset type 0)
+    and a hard reset for SSP (reset type 1), only for RECOVERY */
+static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
+{
+	int rc;
+	struct sas_phy *phy = sas_find_local_phy(dev);
+	int reset_type = (dev->dev_type == SATA_DEV ||
+			(dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
+	rc = sas_phy_reset(phy, reset_type);
+	msleep(2000);
+	return rc;
+}
+
+/* mandatory SAM-3 */
+int mvs_lu_reset(struct domain_device *dev, u8 *lun)
+{
+	unsigned long flags;
+	int i, phyno[WIDE_PORT_MAX_PHY], num, rc = TMF_RESP_FUNC_FAILED;
+	struct mvs_tmf_task tmf_task;
+	struct mvs_device *mvi_dev = dev->lldd_dev;
+	struct mvs_info *mvi = mvi_dev->mvi_info;
+
+	tmf_task.tmf = TMF_LU_RESET;
+	mvi_dev->dev_status = MVS_DEV_EH;
+	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+	if (rc == TMF_RESP_FUNC_COMPLETE) {
+		num = mvs_find_dev_phyno(dev, phyno);
+		spin_lock_irqsave(&mvi->lock, flags);
+		for (i = 0; i < num; i++)
+			mvs_release_task(mvi, phyno[i], dev);
+		spin_unlock_irqrestore(&mvi->lock, flags);
+	}
+	/* if this failed, fall through to I_T nexus reset */
+	mv_printk("%s for device[%x]:rc= %d\n", __func__,
+			mvi_dev->device_id, rc);
+	return rc;
+}
+
+int mvs_I_T_nexus_reset(struct domain_device *dev)
+{
+	unsigned long flags;
+	int i, phyno[WIDE_PORT_MAX_PHY], num, rc = TMF_RESP_FUNC_FAILED;
+	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+	struct mvs_info *mvi = mvi_dev->mvi_info;
+
+	if (mvi_dev->dev_status != MVS_DEV_EH)
+		return TMF_RESP_FUNC_COMPLETE;
+	rc = mvs_debug_I_T_nexus_reset(dev);
+	mv_printk("%s for device[%x]:rc= %d\n",
+		__func__, mvi_dev->device_id, rc);
+
+	/* housekeeper */
+	num = mvs_find_dev_phyno(dev, phyno);
+	spin_lock_irqsave(&mvi->lock, flags);
+	for (i = 0; i < num; i++)
+		mvs_release_task(mvi, phyno[i], dev);
+	spin_unlock_irqrestore(&mvi->lock, flags);
+
+	return rc;
+}
+/* optional SAM-3 */
+int mvs_query_task(struct sas_task *task)
+{
+	u32 tag;
+	struct scsi_lun lun;
+	struct mvs_tmf_task tmf_task;
+	int rc = TMF_RESP_FUNC_FAILED;
+
+	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
+		struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;
+		struct domain_device *dev = task->dev;
+		struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+		struct mvs_info *mvi = mvi_dev->mvi_info;
+
+		int_to_scsilun(cmnd->device->lun, &lun);
+		rc = mvs_find_tag(mvi, task, &tag);
+		if (rc == 0) {
+			rc = TMF_RESP_FUNC_FAILED;
+			return rc;
+		}
+
+		tmf_task.tmf = TMF_QUERY_TASK;
+		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+
+		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+		switch (rc) {
+		/* the task is still in the LUN; release it */
+		case TMF_RESP_FUNC_SUCC:
+		/* the task is not in the LUN or it failed; reset the phy */
+		case TMF_RESP_FUNC_FAILED:
+		case TMF_RESP_FUNC_COMPLETE:
+			break;
+		}
+	}
+	mv_printk("%s:rc= %d\n", __func__, rc);
+	return rc;
+}
+
+/* mandatory SAM-3; we still need to free the task/slot info */
+int mvs_abort_task(struct sas_task *task)
+{
+	struct scsi_lun lun;
+	struct mvs_tmf_task tmf_task;
+	struct domain_device *dev = task->dev;
+	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+	struct mvs_info *mvi = mvi_dev->mvi_info;
+	int rc = TMF_RESP_FUNC_FAILED;
+	unsigned long flags;
+	u32 tag;
+
+	if (mvi->exp_req)
+		mvi->exp_req--;
+	spin_lock_irqsave(&task->task_state_lock, flags);
+	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+		spin_unlock_irqrestore(&task->task_state_lock, flags);
+		rc = TMF_RESP_FUNC_COMPLETE;
+		goto out;
+	}
+	spin_unlock_irqrestore(&task->task_state_lock, flags);
+	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
+		struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;
+
+		int_to_scsilun(cmnd->device->lun, &lun);
+		rc = mvs_find_tag(mvi, task, &tag);
+		if (rc == 0) {
+			mv_printk("No such tag in %s\n", __func__);
+			rc = TMF_RESP_FUNC_FAILED;
+			return rc;
+		}
+
+		tmf_task.tmf = TMF_ABORT_TASK;
+		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+
+		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+
+		/* if successful, clear the task and complete it back to libsas */
+		if (rc == TMF_RESP_FUNC_COMPLETE) {
+			u32 slot_no;
+			struct mvs_slot_info *slot;
+
+			if (task->lldd_task) {
+				slot = task->lldd_task;
+				slot_no = (u32) (slot - mvi->slot_info);
+				mvs_slot_complete(mvi, slot_no, 1);
+			}
+		}
+	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
+		task->task_proto & SAS_PROTOCOL_STP) {
+		/* TODO: free the register set */
+	} else {
+		/* SMP: nothing to do */
+	}
+out:
+	if (rc != TMF_RESP_FUNC_COMPLETE)
+		mv_printk("%s:rc= %d\n", __func__, rc);
+	return rc;
+}
+
+int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
+{
+	int rc = TMF_RESP_FUNC_FAILED;
+	struct mvs_tmf_task tmf_task;
+
+	tmf_task.tmf = TMF_ABORT_TASK_SET;
+	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+
+	return rc;
+}
+
+int mvs_clear_aca(struct domain_device *dev, u8 *lun)
+{
+	int rc = TMF_RESP_FUNC_FAILED;
+	struct mvs_tmf_task tmf_task;
+
+	tmf_task.tmf = TMF_CLEAR_ACA;
+	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+
+	return rc;
+}
+
+int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
+{
+	int rc = TMF_RESP_FUNC_FAILED;
+	struct mvs_tmf_task tmf_task;
+
+	tmf_task.tmf = TMF_CLEAR_TASK_SET;
+	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+
+	return rc;
+}
+
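+/*
+ * Copy the D2H FIS received for this device's register set into the
+ * task's response buffer so libsas/libata can inspect it.
+ */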
+static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
+			u32 slot_idx, int err)
+{
+	struct mvs_device *mvi_dev = task->dev->lldd_dev;
+	struct task_status_struct *tstat = &task->task_status;
+	struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
+	int stat = SAM_GOOD;
+
+	resp->frame_len = sizeof(struct dev_to_host_fis);
+	memcpy(&resp->ending_fis[0],
+	       SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
+	       sizeof(struct dev_to_host_fis));
+	tstat->buf_valid_size = sizeof(*resp);
+	if (unlikely(err))
+		stat = SAS_PROTO_RESPONSE;
+	return stat;
+}
+
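+/*
+ * A slot finished with an error info record: stop the command if the
+ * hardware flagged it, reactivate the slot and map the error to a
+ * SAM/SAS status for the protocol in question.
+ */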
+static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
+			 u32 slot_idx)
+{
+	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
+	int stat;
+	u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
+	u32 tfs = 0;
+	enum mvs_port_type type = PORT_TYPE_SAS;
+
+	if (err_dw0 & CMD_ISS_STPD)
+		MVS_CHIP_DISP->issue_stop(mvi, type, tfs);
+
+	MVS_CHIP_DISP->command_active(mvi, slot_idx);
+
+	stat = SAM_CHECK_COND;
+	switch (task->task_proto) {
+	case SAS_PROTOCOL_SSP:
+		stat = SAS_ABORTED_TASK;
+		break;
+	case SAS_PROTOCOL_SMP:
+		stat = SAM_CHECK_COND;
+		break;
+
+	case SAS_PROTOCOL_SATA:
+	case SAS_PROTOCOL_STP:
+	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+		if (err_dw0 == 0x80400002)
+			mv_printk("found reserved error 0x80400002\n");
+
+		task->ata_task.use_ncq = 0;
+		stat = SAS_PROTO_RESPONSE;
+		mvs_sata_done(mvi, task, slot_idx, 1);
+		break;
+	default:
+		break;
+	}
+
+	return stat;
+}
+
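+/*
+ * Complete one RX descriptor: look up its slot and task, translate the
+ * hardware status into a task_status, free the slot and finally call
+ * the task's ->task_done() outside of mvi->lock.
+ */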
+int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
+{
+	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
+	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
+	struct sas_task *task = slot->task;
+	struct mvs_device *mvi_dev = NULL;
+	struct task_status_struct *tstat;
+
+	bool aborted;
+	void *to;
+	enum exec_status sts;
+
+	if (mvi->exp_req)
+		mvi->exp_req--;
+	if (unlikely(!task || !task->lldd_task))
+		return -1;
+
+	tstat = &task->task_status;
+	mvi_dev = task->dev->lldd_dev;
+
+	mvs_hba_cq_dump(mvi);
+
+	spin_lock(&task->task_state_lock);
+	task->task_state_flags &=
+		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+	task->task_state_flags |= SAS_TASK_STATE_DONE;
+	/* race condition*/
+	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
+	spin_unlock(&task->task_state_lock);
+
+	memset(tstat, 0, sizeof(*tstat));
+	tstat->resp = SAS_TASK_COMPLETE;
+
+	if (unlikely(aborted)) {
+		tstat->stat = SAS_ABORTED_TASK;
+		if (mvi_dev)
+			mvi_dev->runing_req--;
+		if (sas_protocol_ata(task->task_proto))
+			mvs_free_reg_set(mvi, mvi_dev);
+
+		mvs_slot_task_free(mvi, task, slot, slot_idx);
+		return -1;
+	}
+
+	if (unlikely(!mvi_dev || !slot->port->port_attached || flags)) {
+		mv_dprintk("port has no device attached.\n");
+		tstat->stat = SAS_PHY_DOWN;
+		goto out;
+	}
+
+	/*
+	 * if (unlikely((rx_desc & RXQ_ERR) || (*(u64 *) slot->response))) {
+	 *	mv_dprintk("Find device[%016llx] RXQ_ERR %X, err info:%016llx\n",
+	 *		SAS_ADDR(task->dev->sas_addr),
+	 *		rx_desc, (u64)(*(u64 *) slot->response));
+	 * }
+	 */
+
+	/* error info record present */
+	if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
+		tstat->stat = mvs_slot_err(mvi, task, slot_idx);
+		goto out;
+	}
+
+	switch (task->task_proto) {
+	case SAS_PROTOCOL_SSP:
+		/* hw says status == 0, datapres == 0 */
+		if (rx_desc & RXQ_GOOD) {
+			tstat->stat = SAM_GOOD;
+			tstat->resp = SAS_TASK_COMPLETE;
+		}
+		/* response frame present */
+		else if (rx_desc & RXQ_RSP) {
+			struct ssp_response_iu *iu = slot->response +
+						sizeof(struct mvs_err_info);
+			sas_ssp_task_response(mvi->dev, task, iu);
+		} else
+			tstat->stat = SAM_CHECK_COND;
+		break;
+
+	case SAS_PROTOCOL_SMP: {
+			struct scatterlist *sg_resp = &task->smp_task.smp_resp;
+			tstat->stat = SAM_GOOD;
+			to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
+			memcpy(to + sg_resp->offset,
+				slot->response + sizeof(struct mvs_err_info),
+				sg_dma_len(sg_resp));
+			kunmap_atomic(to, KM_IRQ0);
+			break;
+		}
+
+	case SAS_PROTOCOL_SATA:
+	case SAS_PROTOCOL_STP:
+	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
+			tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
+			break;
+		}
+
+	default:
+		tstat->stat = SAM_CHECK_COND;
+		break;
+	}
+
+out:
+	if (mvi_dev) {
+		mvi_dev->runing_req--;
+		if (sas_protocol_ata(task->task_proto))
+			mvs_free_reg_set(mvi, mvi_dev);
+	}
+	mvs_slot_task_free(mvi, task, slot, slot_idx);
+	sts = tstat->stat;
+
+	spin_unlock(&mvi->lock);
+	if (task->task_done)
+		task->task_done(task);
+	else
+		mv_dprintk("task has no task_done callback.\n");
+	spin_lock(&mvi->lock);
+
+	return sts;
+}
+
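+/*
+ * Force-complete every outstanding slot on the port behind phy_no,
+ * optionally restricted to a single domain device.
+ */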
+void mvs_release_task(struct mvs_info *mvi,
+		int phy_no, struct domain_device *dev)
+{
+	int i = 0;
+	u32 slot_idx;
+	struct mvs_phy *phy;
+	struct mvs_port *port;
+	struct mvs_slot_info *slot, *slot2;
+
+	phy = &mvi->phy[phy_no];
+	port = phy->port;
+	if (!port)
+		return;
+
+	list_for_each_entry_safe(slot, slot2, &port->list, entry) {
+		struct sas_task *task;
+		slot_idx = (u32) (slot - mvi->slot_info);
+		task = slot->task;
+
+		if (dev && task->dev != dev)
+			continue;
+
+		mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
+			slot_idx, slot->slot_tag, task);
+
+		if (task->task_proto & SAS_PROTOCOL_SSP) {
+			mv_printk("attached with SSP task CDB[");
+			for (i = 0; i < 16; i++)
+				mv_printk(" %02x", task->ssp_task.cdb[i]);
+			mv_printk(" ]\n");
+		}
+
+		mvs_slot_complete(mvi, slot_idx, 1);
+	}
+}
+
+static void mvs_phy_disconnected(struct mvs_phy *phy)
+{
+	phy->phy_attached = 0;
+	phy->att_dev_info = 0;
+	phy->att_dev_sas_addr = 0;
+}
+
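+/*
+ * Delayed worker for hot-plug events: re-check the phy under mvi->lock
+ * and notify libsas of either loss of signal or a (re)attached device.
+ */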
+static void mvs_work_queue(struct work_struct *work)
+{
+	struct delayed_work *dw = container_of(work, struct delayed_work, work);
+	struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
+	struct mvs_info *mvi = mwq->mvi;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mvi->lock, flags);
+	if (mwq->handler & PHY_PLUG_EVENT) {
+		u32 phy_no = (unsigned long) mwq->data;
+		struct sas_ha_struct *sas_ha = mvi->sas;
+		struct mvs_phy *phy = &mvi->phy[phy_no];
+		struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+		if (phy->phy_event & PHY_PLUG_OUT) {
+			u32 tmp;
+			struct sas_identify_frame *id;
+			id = (struct sas_identify_frame *)phy->frame_rcvd;
+			tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
+			phy->phy_event &= ~PHY_PLUG_OUT;
+			if (!(tmp & PHY_READY_MASK)) {
+				sas_phy_disconnected(sas_phy);
+				mvs_phy_disconnected(phy);
+				sas_ha->notify_phy_event(sas_phy,
+					PHYE_LOSS_OF_SIGNAL);
+				mv_dprintk("phy%d Removed Device\n", phy_no);
+			} else {
+				MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
+				mvs_update_phyinfo(mvi, phy_no, 1);
+				mvs_bytes_dmaed(mvi, phy_no);
+				mvs_port_notify_formed(sas_phy, 0);
+				mv_dprintk("phy%d Attached Device\n", phy_no);
+			}
+		}
+	}
+	list_del(&mwq->entry);
+	spin_unlock_irqrestore(&mvi->lock, flags);
+	kfree(mwq);
+}
+
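+/*
+ * Queue a delayed work item from interrupt context (hence GFP_ATOMIC);
+ * mvs_work_queue() will run it roughly two seconds later.
+ */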
+static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
+{
+	struct mvs_wq *mwq;
+	int ret = 0;
+
+	mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
+	if (mwq) {
+		mwq->mvi = mvi;
+		mwq->data = data;
+		mwq->handler = handler;
+		MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
+		list_add_tail(&mwq->entry, &mvi->wq_list);
+		schedule_delayed_work(&mwq->work_q, HZ * 2);
+	} else
+		ret = -ENOMEM;
+
+	return ret;
+}
+
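+/*
+ * Timer callback armed on COMWAKE: if no signature FIS arrived within
+ * 10 seconds, hard-reset the phy that owns this timer.
+ */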
+static void mvs_sig_time_out(unsigned long tphy)
+{
+	struct mvs_phy *phy = (struct mvs_phy *)tphy;
+	struct mvs_info *mvi = phy->mvi;
+	u8 phy_no;
+
+	for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
+		if (&mvi->phy[phy_no] == phy) {
+			mv_dprintk("Get signature time out, reset phy %d\n",
+				phy_no+mvi->id*mvi->chip->n_phy);
+			MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1);
+		}
+	}
+}
+
+static void mvs_sig_remove_timer(struct mvs_phy *phy)
+{
+	if (phy->timer.function)
+		del_timer(&phy->timer);
+	phy->timer.function = NULL;
+}
+
+void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
+{
+	u32 tmp;
+	struct sas_ha_struct *sas_ha = mvi->sas;
+	struct mvs_phy *phy = &mvi->phy[phy_no];
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+	phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
+	mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy,
+		MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
+	mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy,
+		phy->irq_status);
+
+	/*
+	 * "events" is now a port event; check the interrupt status that
+	 * belongs to each individual port.
+	 */
+
+	if (phy->irq_status & PHYEV_DCDR_ERR)
+		mv_dprintk("port %d STP decoding error.\n",
+		phy_no+mvi->id*mvi->chip->n_phy);
+
+	if (phy->irq_status & PHYEV_POOF) {
+		if (!(phy->phy_event & PHY_PLUG_OUT)) {
+			int dev_sata = phy->phy_type & PORT_TYPE_SATA;
+			int ready;
+			mvs_release_task(mvi, phy_no, NULL);
+			phy->phy_event |= PHY_PLUG_OUT;
+			mvs_handle_event(mvi,
+				(void *)(unsigned long)phy_no,
+				PHY_PLUG_EVENT);
+			ready = mvs_is_phy_ready(mvi, phy_no);
+			if (!ready)
+				mv_dprintk("phy%d Unplug Notice\n",
+					phy_no +
+					mvi->id * mvi->chip->n_phy);
+			if (ready || dev_sata) {
+				if (MVS_CHIP_DISP->stp_reset)
+					MVS_CHIP_DISP->stp_reset(mvi,
+							phy_no);
+				else
+					MVS_CHIP_DISP->phy_reset(mvi,
+							phy_no, 0);
+				return;
+			}
+		}
+	}
+
+	if (phy->irq_status & PHYEV_COMWAKE) {
+		tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
+		MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
+					tmp | PHYEV_SIG_FIS);
+		if (phy->timer.function == NULL) {
+			phy->timer.data = (unsigned long)phy;
+			phy->timer.function = mvs_sig_time_out;
+			phy->timer.expires = jiffies + 10*HZ;
+			add_timer(&phy->timer);
+		}
+	}
+	if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
+		phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
+		mvs_sig_remove_timer(phy);
+		mv_dprintk("notify plug in on phy[%d]\n", phy_no);
+		if (phy->phy_status) {
+			mdelay(10);
+			MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
+			if (phy->phy_type & PORT_TYPE_SATA) {
+				tmp = MVS_CHIP_DISP->read_port_irq_mask(
+						mvi, phy_no);
+				tmp &= ~PHYEV_SIG_FIS;
+				MVS_CHIP_DISP->write_port_irq_mask(mvi,
+							phy_no, tmp);
+			}
+			mvs_update_phyinfo(mvi, phy_no, 0);
+			mvs_bytes_dmaed(mvi, phy_no);
+			/* whether driver is going to handle hot plug */
+			if (phy->phy_event & PHY_PLUG_OUT) {
+				mvs_port_notify_formed(sas_phy, 0);
+				phy->phy_event &= ~PHY_PLUG_OUT;
+			}
+		} else {
+			mv_dprintk("plugin interrupt but phy%d is gone\n",
+				phy_no + mvi->id*mvi->chip->n_phy);
+		}
+	} else if (phy->irq_status & PHYEV_BROAD_CH) {
+		mv_dprintk("port %d broadcast change.\n",
+			phy_no + mvi->id*mvi->chip->n_phy);
+		/* exception for Samsung disk drives */
+		mdelay(1000);
+		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+	}
+	MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
+}
+
+int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
+{
+	u32 rx_prod_idx, rx_desc;
+	bool attn = false;
+
+	/* the first dword in the RX ring is special: it contains
+	 * a mirror of the hardware's RX producer index, so that
+	 * we don't have to stall the CPU reading that register.
+	 * The actual RX ring is offset by one dword, due to this.
+	 */
+	rx_prod_idx = mvi->rx_cons;
+	mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
+	if (mvi->rx_cons == 0xfff)	/* h/w hasn't touched RX ring yet */
+		return 0;
+
+	/* The completion queue entry may arrive late: read it from the
+	 * register and try again.
+	 * Note: if interrupt coalescing is enabled, we will have to read
+	 * from the register every time.
+	 */
+	if (unlikely(mvi->rx_cons == rx_prod_idx))
+		mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;
+
+	if (mvi->rx_cons == rx_prod_idx)
+		return 0;
+
+	while (mvi->rx_cons != rx_prod_idx) {
+		/* increment our internal RX consumer pointer */
+		rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
+		rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
+
+		if (likely(rx_desc & RXQ_DONE))
+			mvs_slot_complete(mvi, rx_desc, 0);
+		if (rx_desc & RXQ_ATTN) {
+			attn = true;
+		} else if (rx_desc & RXQ_ERR) {
+			if (!(rx_desc & RXQ_DONE))
+				mvs_slot_complete(mvi, rx_desc, 0);
+		} else if (rx_desc & RXQ_SLOT_RESET) {
+			mvs_slot_free(mvi, rx_desc);
+		}
+	}
+
+	if (attn && self_clear)
+		MVS_CHIP_DISP->int_full(mvi);
+	return 0;
+}
+

+ 406 - 0
drivers/scsi/mvsas/mv_sas.h

@@ -0,0 +1,406 @@
+/*
+ * Marvell 88SE64xx/88SE94xx main function header file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MV_SAS_H_
+#define _MV_SAS_H_
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/vmalloc.h>
+#include <scsi/libsas.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/sas_ata.h>
+#include <linux/version.h>
+#include "mv_defs.h"
+
+#define DRV_NAME		"mvsas"
+#define DRV_VERSION		"0.8.2"
+#define _MV_DUMP		0
+#define MVS_ID_NOT_MAPPED	0x7f
+/* #define DISABLE_HOTPLUG_DMA_FIX */
+#define MAX_EXP_RUNNING_REQ	2
+#define WIDE_PORT_MAX_PHY		4
+#define	MV_DISABLE_NCQ	0
+#define mv_printk(fmt, arg...)	\
+	printk(KERN_DEBUG "%s %d:" fmt, __FILE__, __LINE__, ## arg)
+#ifdef MV_DEBUG
+#define mv_dprintk(format, arg...)	\
+	printk(KERN_DEBUG "%s %d:" format, __FILE__, __LINE__, ## arg)
+#else
+#define mv_dprintk(format, arg...)
+#endif
+#define MV_MAX_U32			0xffffffff
+
+extern struct mvs_tgt_initiator mvs_tgt;
+extern struct mvs_info *tgt_mvi;
+extern const struct mvs_dispatch mvs_64xx_dispatch;
+extern const struct mvs_dispatch mvs_94xx_dispatch;
+
+#define DEV_IS_EXPANDER(type)	\
+	((type == EDGE_DEV) || (type == FANOUT_DEV))
+
+#define bit(n) ((u32)1 << (n))
+
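+/*
+ * Iterate __lseq over phy indices while __mc (a shifting copy of
+ * __lseq_mask) is non-zero; (__mc & 1) tests membership of phy __lseq.
+ */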
+#define for_each_phy(__lseq_mask, __mc, __lseq)			\
+	for ((__mc) = (__lseq_mask), (__lseq) = 0;		\
+					(__mc) != 0 ;		\
+					(++__lseq), (__mc) >>= 1)
+
+#define MV_INIT_DELAYED_WORK(w, f, d)	INIT_DELAYED_WORK(w, f)
+#define UNASSOC_D2H_FIS(id)		\
+	((void *) mvi->rx_fis + 0x100 * id)
+#define SATA_RECEIVED_FIS_LIST(reg_set)	\
+	((void *) mvi->rx_fis + mvi->chip->fis_offs + 0x100 * reg_set)
+#define SATA_RECEIVED_SDB_FIS(reg_set)	\
+	(SATA_RECEIVED_FIS_LIST(reg_set) + 0x58)
+#define SATA_RECEIVED_D2H_FIS(reg_set)	\
+	(SATA_RECEIVED_FIS_LIST(reg_set) + 0x40)
+#define SATA_RECEIVED_PIO_FIS(reg_set)	\
+	(SATA_RECEIVED_FIS_LIST(reg_set) + 0x20)
+#define SATA_RECEIVED_DMA_FIS(reg_set)	\
+	(SATA_RECEIVED_FIS_LIST(reg_set) + 0x00)
+
+enum dev_status {
+	MVS_DEV_NORMAL = 0x0,
+	MVS_DEV_EH	= 0x1,
+};
+
+
+struct mvs_info;
+
+struct mvs_dispatch {
+	char *name;
+	int (*chip_init)(struct mvs_info *mvi);
+	int (*spi_init)(struct mvs_info *mvi);
+	int (*chip_ioremap)(struct mvs_info *mvi);
+	void (*chip_iounmap)(struct mvs_info *mvi);
+	irqreturn_t (*isr)(struct mvs_info *mvi, int irq, u32 stat);
+	u32 (*isr_status)(struct mvs_info *mvi, int irq);
+	void (*interrupt_enable)(struct mvs_info *mvi);
+	void (*interrupt_disable)(struct mvs_info *mvi);
+
+	u32 (*read_phy_ctl)(struct mvs_info *mvi, u32 port);
+	void (*write_phy_ctl)(struct mvs_info *mvi, u32 port, u32 val);
+
+	u32 (*read_port_cfg_data)(struct mvs_info *mvi, u32 port);
+	void (*write_port_cfg_data)(struct mvs_info *mvi, u32 port, u32 val);
+	void (*write_port_cfg_addr)(struct mvs_info *mvi, u32 port, u32 addr);
+
+	u32 (*read_port_vsr_data)(struct mvs_info *mvi, u32 port);
+	void (*write_port_vsr_data)(struct mvs_info *mvi, u32 port, u32 val);
+	void (*write_port_vsr_addr)(struct mvs_info *mvi, u32 port, u32 addr);
+
+	u32 (*read_port_irq_stat)(struct mvs_info *mvi, u32 port);
+	void (*write_port_irq_stat)(struct mvs_info *mvi, u32 port, u32 val);
+
+	u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port);
+	void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val);
+
+	void (*get_sas_addr)(void *buf, u32 buflen);
+	void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
+	void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
+				u32 tfs);
+	void (*start_delivery)(struct mvs_info *mvi, u32 tx);
+	u32 (*rx_update)(struct mvs_info *mvi);
+	void (*int_full)(struct mvs_info *mvi);
+	u8 (*assign_reg_set)(struct mvs_info *mvi, u8 *tfs);
+	void (*free_reg_set)(struct mvs_info *mvi, u8 *tfs);
+	u32 (*prd_size)(void);
+	u32 (*prd_count)(void);
+	void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
+	void (*detect_porttype)(struct mvs_info *mvi, int i);
+	int (*oob_done)(struct mvs_info *mvi, int i);
+	void (*fix_phy_info)(struct mvs_info *mvi, int i,
+				struct sas_identify_frame *id);
+	void (*phy_work_around)(struct mvs_info *mvi, int i);
+	void (*phy_set_link_rate)(struct mvs_info *mvi, u32 phy_id,
+				struct sas_phy_linkrates *rates);
+	u32 (*phy_max_link_rate)(void);
+	void (*phy_disable)(struct mvs_info *mvi, u32 phy_id);
+	void (*phy_enable)(struct mvs_info *mvi, u32 phy_id);
+	void (*phy_reset)(struct mvs_info *mvi, u32 phy_id, int hard);
+	void (*stp_reset)(struct mvs_info *mvi, u32 phy_id);
+	void (*clear_active_cmds)(struct mvs_info *mvi);
+	u32 (*spi_read_data)(struct mvs_info *mvi);
+	void (*spi_write_data)(struct mvs_info *mvi, u32 data);
+	int (*spi_buildcmd)(struct mvs_info *mvi,
+						u32      *dwCmd,
+						u8       cmd,
+						u8       read,
+						u8       length,
+						u32      addr
+						);
+	int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd);
+	int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout);
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+	void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd);
+#endif
+
+};
+
+struct mvs_chip_info {
+	u32 		n_host;
+	u32 		n_phy;
+	u32 		fis_offs;
+	u32 		fis_count;
+	u32 		srs_sz;
+	u32 		slot_width;
+	const struct mvs_dispatch *dispatch;
+};
+#define MVS_CHIP_SLOT_SZ	(1U << mvi->chip->slot_width)
+#define MVS_RX_FISL_SZ		\
+	(mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100))
+#define MVS_CHIP_DISP		(mvi->chip->dispatch)
+
+struct mvs_err_info {
+	__le32			flags;
+	__le32			flags2;
+};
+
+struct mvs_cmd_hdr {
+	__le32			flags;	/* PRD tbl len; SAS, SATA ctl */
+	__le32			lens;	/* cmd, max resp frame len */
+	__le32			tags;	/* targ port xfer tag; tag */
+	__le32			data_len;	/* data xfer len */
+	__le64			cmd_tbl;  	/* command table address */
+	__le64			open_frame;	/* open addr frame address */
+	__le64			status_buf;	/* status buffer address */
+	__le64			prd_tbl;		/* PRD tbl address */
+	__le32			reserved[4];
+};
+
+struct mvs_port {
+	struct asd_sas_port	sas_port;
+	u8			port_attached;
+	u8			wide_port_phymap;
+	struct list_head	list;
+};
+
+struct mvs_phy {
+	struct mvs_info 		*mvi;
+	struct mvs_port		*port;
+	struct asd_sas_phy	sas_phy;
+	struct sas_identify	identify;
+	struct scsi_device	*sdev;
+	struct timer_list timer;
+	u64		dev_sas_addr;
+	u64		att_dev_sas_addr;
+	u32		att_dev_info;
+	u32		dev_info;
+	u32		phy_type;
+	u32		phy_status;
+	u32		irq_status;
+	u32		frame_rcvd_size;
+	u8		frame_rcvd[32];
+	u8		phy_attached;
+	u8		phy_mode;
+	u8		reserved[2];
+	u32		phy_event;
+	enum sas_linkrate	minimum_linkrate;
+	enum sas_linkrate	maximum_linkrate;
+};
+
+struct mvs_device {
+	struct list_head		dev_entry;
+	enum sas_dev_type dev_type;
+	struct mvs_info *mvi_info;
+	struct domain_device *sas_device;
+	u32 attached_phy;
+	u32 device_id;
+	u32 runing_req;
+	u8 taskfileset;
+	u8 dev_status;
+	u16 reserved;
+};
+
+struct mvs_slot_info {
+	struct list_head entry;
+	union {
+		struct sas_task *task;
+		void *tdata;
+	};
+	u32 n_elem;
+	u32 tx;
+	u32 slot_tag;
+
+	/* DMA buffer for storing cmd tbl, open addr frame, status buffer,
+	 * and PRD table
+	 */
+	void *buf;
+	dma_addr_t buf_dma;
+#if _MV_DUMP
+	u32 cmd_size;
+#endif
+	void *response;
+	struct mvs_port *port;
+	struct mvs_device	*device;
+	void *open_frame;
+};
+
+struct mvs_info {
+	unsigned long flags;
+
+	/* host-wide lock */
+	spinlock_t lock;
+
+	/* our device */
+	struct pci_dev *pdev;
+	struct device *dev;
+
+	/* enhanced mode registers */
+	void __iomem *regs;
+
+	/* peripheral or soc registers */
+	void __iomem *regs_ex;
+	u8 sas_addr[SAS_ADDR_SIZE];
+
+	/* SCSI/SAS glue */
+	struct sas_ha_struct *sas;
+	struct Scsi_Host *shost;
+
+	/* TX (delivery) DMA ring */
+	__le32 *tx;
+	dma_addr_t tx_dma;
+
+	/* cached next-producer idx */
+	u32 tx_prod;
+
+	/* RX (completion) DMA ring */
+	__le32	*rx;
+	dma_addr_t rx_dma;
+
+	/* RX consumer idx */
+	u32 rx_cons;
+
+	/* RX'd FIS area */
+	__le32 *rx_fis;
+	dma_addr_t rx_fis_dma;
+
+	/* DMA command header slots */
+	struct mvs_cmd_hdr *slot;
+	dma_addr_t slot_dma;
+
+	u32 chip_id;
+	const struct mvs_chip_info *chip;
+
+	int tags_num;
+	DECLARE_BITMAP(tags, MVS_SLOTS);
+	/* further per-slot information */
+	struct mvs_phy phy[MVS_MAX_PHYS];
+	struct mvs_port port[MVS_MAX_PHYS];
+	u32 irq;
+	u32 exp_req;
+	u32 id;
+	u64 sata_reg_set;
+	struct list_head *hba_list;
+	struct list_head soc_entry;
+	struct list_head wq_list;
+	unsigned long instance;
+	u16 flashid;
+	u32 flashsize;
+	u32 flashsectSize;
+
+	void *addon;
+	struct mvs_device	devices[MVS_MAX_DEVICES];
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+	void *bulk_buffer;
+	dma_addr_t bulk_buffer_dma;
+#define TRASH_BUCKET_SIZE    	0x20000
+#endif
+	struct mvs_slot_info slot_info[0];
+};
+
+struct mvs_prv_info{
+	u8 n_host;
+	u8 n_phy;
+	u16 reserve;
+	struct mvs_info *mvi[2];
+};
+
+struct mvs_wq {
+	struct delayed_work work_q;
+	struct mvs_info *mvi;
+	void *data;
+	int handler;
+	struct list_head entry;
+};
+
+struct mvs_task_exec_info {
+	struct sas_task *task;
+	struct mvs_cmd_hdr *hdr;
+	struct mvs_port *port;
+	u32 tag;
+	int n_elem;
+};
+
+
+/******************** function prototype *********************/
+void mvs_get_sas_addr(void *buf, u32 buflen);
+void mvs_tag_clear(struct mvs_info *mvi, u32 tag);
+void mvs_tag_free(struct mvs_info *mvi, u32 tag);
+void mvs_tag_set(struct mvs_info *mvi, unsigned int tag);
+int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out);
+void mvs_tag_init(struct mvs_info *mvi);
+void mvs_iounmap(void __iomem *regs);
+int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex);
+void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard);
+int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+			void *funcdata);
+void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
+				u32 off_lo, u32 off_hi, u64 sas_addr);
+int mvs_slave_alloc(struct scsi_device *scsi_dev);
+int mvs_slave_configure(struct scsi_device *sdev);
+void mvs_scan_start(struct Scsi_Host *shost);
+int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
+int mvs_queue_command(struct sas_task *task, const int num,
+			gfp_t gfp_flags);
+int mvs_abort_task(struct sas_task *task);
+int mvs_abort_task_set(struct domain_device *dev, u8 *lun);
+int mvs_clear_aca(struct domain_device *dev, u8 *lun);
+int mvs_clear_task_set(struct domain_device *dev, u8 *lun);
+void mvs_port_formed(struct asd_sas_phy *sas_phy);
+void mvs_port_deformed(struct asd_sas_phy *sas_phy);
+int mvs_dev_found(struct domain_device *dev);
+void mvs_dev_gone(struct domain_device *dev);
+int mvs_lu_reset(struct domain_device *dev, u8 *lun);
+int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags);
+int mvs_I_T_nexus_reset(struct domain_device *dev);
+int mvs_query_task(struct sas_task *task);
+void mvs_release_task(struct mvs_info *mvi, int phy_no,
+			struct domain_device *dev);
+void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
+void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
+int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
+void mvs_hexdump(u32 size, u8 *data, u32 baseaddr);
+#endif
+

+ 0 - 25
drivers/scsi/osd/Kbuild

@@ -11,31 +11,6 @@
 # it under the terms of the GNU General Public License version 2
 #
 
-ifneq ($(OSD_INC),)
-# we are built out-of-tree Kconfigure everything as on
-
-CONFIG_SCSI_OSD_INITIATOR=m
-ccflags-y += -DCONFIG_SCSI_OSD_INITIATOR -DCONFIG_SCSI_OSD_INITIATOR_MODULE
-
-CONFIG_SCSI_OSD_ULD=m
-ccflags-y += -DCONFIG_SCSI_OSD_ULD -DCONFIG_SCSI_OSD_ULD_MODULE
-
-# CONFIG_SCSI_OSD_DPRINT_SENSE =
-#	0 - no print of errors
-#	1 - print errors
-#	2 - errors + warrnings
-ccflags-y += -DCONFIG_SCSI_OSD_DPRINT_SENSE=1
-
-# Uncomment to turn debug on
-# ccflags-y += -DCONFIG_SCSI_OSD_DEBUG
-
-# if we are built out-of-tree and the hosting kernel has OSD headers
-# then "ccflags-y +=" will not pick the out-off-tree headers. Only by doing
-# this it will work. This might break in future kernels
-LINUXINCLUDE := -I$(OSD_INC) $(LINUXINCLUDE)
-
-endif
-
 # libosd.ko - osd-initiator library
 libosd-y := osd_initiator.o
 obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o

+ 0 - 37
drivers/scsi/osd/Makefile

@@ -1,37 +0,0 @@
-#
-# Makefile for the OSD modules (out of tree)
-#
-# Copyright (C) 2008 Panasas Inc.  All rights reserved.
-#
-# Authors:
-#   Boaz Harrosh <bharrosh@panasas.com>
-#   Benny Halevy <bhalevy@panasas.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2
-#
-# This Makefile is used to call the kernel Makefile in case of an out-of-tree
-# build.
-# $KSRC should point to a Kernel source tree otherwise host's default is
-# used. (eg. /lib/modules/`uname -r`/build)
-
-# include path for out-of-tree Headers
-OSD_INC ?= `pwd`/../../../include
-
-# allow users to override these
-# e.g. to compile for a kernel that you aren't currently running
-KSRC ?= /lib/modules/$(shell uname -r)/build
-KBUILD_OUTPUT ?=
-ARCH ?=
-V ?= 0
-
-# this is the basic Kbuild out-of-tree invocation, with the M= option
-KBUILD_BASE = +$(MAKE) -C $(KSRC) M=`pwd` KBUILD_OUTPUT=$(KBUILD_OUTPUT) ARCH=$(ARCH) V=$(V)
-
-all: libosd
-
-libosd: ;
-	$(KBUILD_BASE) OSD_INC=$(OSD_INC) modules
-
-clean:
-	$(KBUILD_BASE) clean

Too many files were changed in this diff, so some files are not shown.