
[SCSI] hptiop: add more adapter models and other fixes

Most of the code changes add support for adapters based on the Marvell IOP;
the rest are assorted fixes.

- add more PCI device IDs
- support for adapters based on Marvell IOP
- fix a result code translation error on big-endian systems (see the sketch after this list)
- fix a resource-release bug in hptiop_probe() when scsi_host_alloc() fails
- update scsi_cmnd.resid when finishing a request (also shown in the sketch below)
- correct some coding style issues
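
For reference, a minimal sketch of the big-endian and resid fixes as they land
in hptiop_finish_scsi_req() in the diff below. The IOP reports its result code
in little-endian byte order, so it must be tagged with cpu_to_le32() when
stored and le32_to_cpu() when read; the residual count is derived from the
request's dataxfer_length. Illustrative fragment paraphrased from the diff,
not a complete function:

	/* result codes are little-endian on the wire; storing the raw
	   enum value only worked on little-endian hosts */
	req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);

	/* on completion, report how many bytes were NOT transferred */
	scsi_set_resid(scp,
		scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));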

[akpm@linux-foundation.org: type fixes]
Signed-off-by: HighPoint Linux Team <linux@highpoint-tech.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
HighPoint Linux Team committed 17 years ago
Commit 00f5970193
4 changed files with 576 additions and 175 deletions
  1. Documentation/scsi/hptiop.txt (+21 -9)
  2. drivers/scsi/Kconfig (+2 -2)
  3. drivers/scsi/hptiop.c (+464 -129)
  4. drivers/scsi/hptiop.h (+89 -35)

+ 21 - 9
Documentation/scsi/hptiop.txt

@@ -1,9 +1,9 @@
-HIGHPOINT ROCKETRAID 3xxx RAID DRIVER (hptiop)
+HIGHPOINT ROCKETRAID 3xxx/4xxx ADAPTER DRIVER (hptiop)
 
 Controller Register Map
 -------------------------
 
-The controller IOP is accessed via PCI BAR0.
+For Intel IOP based adapters, the controller IOP is accessed via PCI BAR0:
 
      BAR0 offset    Register
             0x10    Inbound Message Register 0
@@ -18,6 +18,24 @@ The controller IOP is accessed via PCI BAR0.
             0x40    Inbound Queue Port
             0x44    Outbound Queue Port
 
+For Marvell IOP based adapters, the IOP is accessed via PCI BAR0 and BAR1:
+
+     BAR0 offset    Register
+         0x20400    Inbound Doorbell Register
+         0x20404    Inbound Interrupt Mask Register
+         0x20408    Outbound Doorbell Register
+         0x2040C    Outbound Interrupt Mask Register
+
+     BAR1 offset    Register
+             0x0    Inbound Queue Head Pointer
+             0x4    Inbound Queue Tail Pointer
+             0x8    Outbound Queue Head Pointer
+             0xC    Outbound Queue Tail Pointer
+            0x10    Inbound Message Register
+            0x14    Outbound Message Register
+     0x40-0x1040    Inbound Queue
+   0x1040-0x2040    Outbound Queue
+
 
 I/O Request Workflow
 ----------------------
@@ -73,15 +91,9 @@ The driver exposes following sysfs attributes:
      driver-version        R     driver version string
      firmware-version      R     firmware version string
 
-The driver registers char device "hptiop" to communicate with HighPoint RAID
-management software. Its ioctl routine acts as a general binary interface 
-between the IOP firmware and HighPoint RAID management software. New management
-functions can be implemented in application/firmware without modification
-in driver code.
-
 
 -----------------------------------------------------------------------------
-Copyright (C) 2006 HighPoint Technologies, Inc. All Rights Reserved.
+Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
 
   This file is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of

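As an aside, the BAR1 head/tail registers documented above drive a plain
circular queue: the host consumes 64-bit entries at outbound_tail until it
catches up with the IOP's outbound_head, then publishes the new tail. A
minimal consumer sketch matching mv_outbound_read() in the hptiop.c diff below
(names from the patch, simplified to a local tail variable):

	static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
	{
		u64 p;
		u32 tail = readl(&mu->outbound_tail);

		/* queue is empty when our tail has caught the IOP's head */
		if (tail == readl(&mu->outbound_head))
			return 0;

		/* consume one 64-bit entry, then publish the new tail */
		memcpy_fromio(&p, &mu->outbound_q[tail], 8);
		if (++tail == MVIOP_QUEUE_LEN)
			tail = 0;
		writel(tail, &mu->outbound_tail);
		return p;
	}
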
+ 2 - 2
drivers/scsi/Kconfig

@@ -573,10 +573,10 @@ config SCSI_ARCMSR_AER
 source "drivers/scsi/megaraid/Kconfig.megaraid"
 source "drivers/scsi/megaraid/Kconfig.megaraid"
 
 
 config SCSI_HPTIOP
 config SCSI_HPTIOP
-	tristate "HighPoint RocketRAID 3xxx Controller support"
+	tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
 	depends on SCSI && PCI
 	help
-	  This option enables support for HighPoint RocketRAID 3xxx
+	  This option enables support for HighPoint RocketRAID 3xxx/4xxx
 	  controllers.
 
 	  To compile this driver as a module, choose M here; the module

+ 464 - 129
drivers/scsi/hptiop.c

@@ -1,5 +1,5 @@
 /*
- * HighPoint RR3xxx controller driver for Linux
+ * HighPoint RR3xxx/4xxx controller driver for Linux
  * Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -38,80 +38,84 @@
 #include "hptiop.h"
 #include "hptiop.h"
 
 
 MODULE_AUTHOR("HighPoint Technologies, Inc.");
 MODULE_AUTHOR("HighPoint Technologies, Inc.");
-MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx SATA Controller Driver");
+MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");
 
 
 static char driver_name[] = "hptiop";
 static char driver_name[] = "hptiop";
-static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
-static const char driver_ver[] = "v1.2 (070830)";
-
-static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
-static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
+static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
+static const char driver_ver[] = "v1.3 (071203)";
+
+static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
+static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
+				struct hpt_iop_request_scsi_command *req);
+static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
+static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
 static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
 static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
 
 
-static inline void hptiop_pci_posting_flush(struct hpt_iopmu __iomem *iop)
-{
-	readl(&iop->outbound_intstatus);
-}
-
-static int iop_wait_ready(struct hpt_iopmu __iomem *iop, u32 millisec)
+static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
 {
 {
 	u32 req = 0;
 	u32 req = 0;
 	int i;
 	int i;
 
 
 	for (i = 0; i < millisec; i++) {
 	for (i = 0; i < millisec; i++) {
-		req = readl(&iop->inbound_queue);
+		req = readl(&hba->u.itl.iop->inbound_queue);
 		if (req != IOPMU_QUEUE_EMPTY)
 		if (req != IOPMU_QUEUE_EMPTY)
 			break;
 			break;
 		msleep(1);
 		msleep(1);
 	}
 	}
 
 
 	if (req != IOPMU_QUEUE_EMPTY) {
 	if (req != IOPMU_QUEUE_EMPTY) {
-		writel(req, &iop->outbound_queue);
-		hptiop_pci_posting_flush(iop);
+		writel(req, &hba->u.itl.iop->outbound_queue);
+		readl(&hba->u.itl.iop->outbound_intstatus);
 		return 0;
 		return 0;
 	}
 	}
 
 
 	return -1;
 	return -1;
 }
 }
 
 
-static void hptiop_request_callback(struct hptiop_hba *hba, u32 tag)
+static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
+{
+	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
+}
+
+static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
 {
 {
 	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
 	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
-		return hptiop_host_request_callback(hba,
+		hptiop_host_request_callback_itl(hba,
 				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
 				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
 	else
 	else
-		return hptiop_iop_request_callback(hba, tag);
+		hptiop_iop_request_callback_itl(hba, tag);
 }
 }
 
 
-static inline void hptiop_drain_outbound_queue(struct hptiop_hba *hba)
+static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
 {
 {
 	u32 req;
 	u32 req;
 
 
-	while ((req = readl(&hba->iop->outbound_queue)) != IOPMU_QUEUE_EMPTY) {
+	while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
+						IOPMU_QUEUE_EMPTY) {
 
 
 		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
 		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
-			hptiop_request_callback(hba, req);
+			hptiop_request_callback_itl(hba, req);
 		else {
 		else {
 			struct hpt_iop_request_header __iomem * p;
 			struct hpt_iop_request_header __iomem * p;
 
 
 			p = (struct hpt_iop_request_header __iomem *)
 			p = (struct hpt_iop_request_header __iomem *)
-				((char __iomem *)hba->iop + req);
+				((char __iomem *)hba->u.itl.iop + req);
 
 
 			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
 			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
 				if (readl(&p->context))
 				if (readl(&p->context))
-					hptiop_request_callback(hba, req);
+					hptiop_request_callback_itl(hba, req);
 				else
 				else
 					writel(1, &p->context);
 					writel(1, &p->context);
 			}
 			}
 			else
 			else
-				hptiop_request_callback(hba, req);
+				hptiop_request_callback_itl(hba, req);
 		}
 		}
 	}
 	}
 }
 }
 
 
-static int __iop_intr(struct hptiop_hba *hba)
+static int iop_intr_itl(struct hptiop_hba *hba)
 {
 {
-	struct hpt_iopmu __iomem *iop = hba->iop;
+	struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
 	u32 status;
 	u32 status;
 	int ret = 0;
 	int ret = 0;
 
 
@@ -119,6 +123,7 @@ static int __iop_intr(struct hptiop_hba *hba)
 
 	if (status & IOPMU_OUTBOUND_INT_MSG0) {
 		u32 msg = readl(&iop->outbound_msgaddr0);
+
 		dprintk("received outbound msg %x\n", msg);
 		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
 		hptiop_message_callback(hba, msg);
@@ -126,31 +131,115 @@ static int __iop_intr(struct hptiop_hba *hba)
 	}
 
 	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
-		hptiop_drain_outbound_queue(hba);
+		hptiop_drain_outbound_queue_itl(hba);
+		ret = 1;
+	}
+
+	return ret;
+}
+
+static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
+{
+	u32 outbound_tail = readl(&mu->outbound_tail);
+	u32 outbound_head = readl(&mu->outbound_head);
+
+	if (outbound_tail != outbound_head) {
+		u64 p;
+
+		memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
+		outbound_tail++;
+
+		if (outbound_tail == MVIOP_QUEUE_LEN)
+			outbound_tail = 0;
+		writel(outbound_tail, &mu->outbound_tail);
+		return p;
+	} else
+		return 0;
+}
+
+static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
+{
+	u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
+	u32 head = inbound_head + 1;
+
+	if (head == MVIOP_QUEUE_LEN)
+		head = 0;
+
+	memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
+	writel(head, &hba->u.mv.mu->inbound_head);
+	writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
+			&hba->u.mv.regs->inbound_doorbell);
+}
+
+static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
+{
+	u32 req_type = (tag >> 5) & 0x7;
+	struct hpt_iop_request_scsi_command *req;
+
+	dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);
+
+	BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);
+
+	switch (req_type) {
+	case IOP_REQUEST_TYPE_GET_CONFIG:
+	case IOP_REQUEST_TYPE_SET_CONFIG:
+		hba->msg_done = 1;
+		break;
+
+	case IOP_REQUEST_TYPE_SCSI_COMMAND:
+		req = hba->reqs[tag >> 8].req_virt;
+		if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
+			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
+
+		hptiop_finish_scsi_req(hba, tag>>8, req);
+		break;
+
+	default:
+		break;
+	}
+}
+
+static int iop_intr_mv(struct hptiop_hba *hba)
+{
+	u32 status;
+	int ret = 0;
+
+	status = readl(&hba->u.mv.regs->outbound_doorbell);
+	writel(~status, &hba->u.mv.regs->outbound_doorbell);
+
+	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
+		u32 msg;
+		msg = readl(&hba->u.mv.mu->outbound_msg);
+		dprintk("received outbound msg %x\n", msg);
+		hptiop_message_callback(hba, msg);
+		ret = 1;
+	}
+
+	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
+		u64 tag;
+
+		while ((tag = mv_outbound_read(hba->u.mv.mu)))
+			hptiop_request_callback_mv(hba, tag);
 		ret = 1;
 	}
 
 	return ret;
 }
 
-static int iop_send_sync_request(struct hptiop_hba *hba,
+static int iop_send_sync_request_itl(struct hptiop_hba *hba,
 					void __iomem *_req, u32 millisec)
 {
 	struct hpt_iop_request_header __iomem *req = _req;
 	u32 i;
 
-	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST,
-			&req->flags);
-
+	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
 	writel(0, &req->context);
-
-	writel((unsigned long)req - (unsigned long)hba->iop,
-			&hba->iop->inbound_queue);
-
-	hptiop_pci_posting_flush(hba->iop);
+	writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
+			&hba->u.itl.iop->inbound_queue);
+	readl(&hba->u.itl.iop->outbound_intstatus);
 
 	for (i = 0; i < millisec; i++) {
-		__iop_intr(hba);
+		iop_intr_itl(hba);
 		if (readl(&req->context))
 			return 0;
 		msleep(1);
@@ -159,19 +248,49 @@ static int iop_send_sync_request(struct hptiop_hba *hba,
 	return -1;
 }
 
-static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
+static int iop_send_sync_request_mv(struct hptiop_hba *hba,
+					u32 size_bits, u32 millisec)
 {
+	struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
 	u32 i;
 
 	hba->msg_done = 0;
+	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
+	mv_inbound_write(hba->u.mv.internal_req_phy |
+			MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);
+
+	for (i = 0; i < millisec; i++) {
+		iop_intr_mv(hba);
+		if (hba->msg_done)
+			return 0;
+		msleep(1);
+	}
+	return -1;
+}
+
+static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
+{
+	writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
+	readl(&hba->u.itl.iop->outbound_intstatus);
+}
+
+static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
+{
+	writel(msg, &hba->u.mv.mu->inbound_msg);
+	writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
+	readl(&hba->u.mv.regs->inbound_doorbell);
+}
 
-	writel(msg, &hba->iop->inbound_msgaddr0);
+static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
+{
+	u32 i;
 
-	hptiop_pci_posting_flush(hba->iop);
+	hba->msg_done = 0;
+	hba->ops->post_msg(hba, msg);
 
 	for (i = 0; i < millisec; i++) {
 		spin_lock_irq(hba->host->host_lock);
-		__iop_intr(hba);
+		hba->ops->iop_intr(hba);
 		spin_unlock_irq(hba->host->host_lock);
 		if (hba->msg_done)
 			break;
@@ -181,46 +300,67 @@ static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
 	return hba->msg_done? 0 : -1;
 }
 
-static int iop_get_config(struct hptiop_hba *hba,
+static int iop_get_config_itl(struct hptiop_hba *hba,
 				struct hpt_iop_request_get_config *config)
 {
 	u32 req32;
 	struct hpt_iop_request_get_config __iomem *req;
 
-	req32 = readl(&hba->iop->inbound_queue);
+	req32 = readl(&hba->u.itl.iop->inbound_queue);
 	if (req32 == IOPMU_QUEUE_EMPTY)
 		return -1;
 
 	req = (struct hpt_iop_request_get_config __iomem *)
-			((unsigned long)hba->iop + req32);
+			((unsigned long)hba->u.itl.iop + req32);
 
 	writel(0, &req->header.flags);
 	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
 	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
 	writel(IOP_RESULT_PENDING, &req->header.result);
 
-	if (iop_send_sync_request(hba, req, 20000)) {
+	if (iop_send_sync_request_itl(hba, req, 20000)) {
 		dprintk("Get config send cmd failed\n");
 		return -1;
 	}
 
 	memcpy_fromio(config, req, sizeof(*config));
-	writel(req32, &hba->iop->outbound_queue);
+	writel(req32, &hba->u.itl.iop->outbound_queue);
+	return 0;
+}
+
+static int iop_get_config_mv(struct hptiop_hba *hba,
+				struct hpt_iop_request_get_config *config)
+{
+	struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;
+
+	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
+	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
+	req->header.size =
+		cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
+	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
+	req->header.context = cpu_to_le64(IOP_REQUEST_TYPE_GET_CONFIG<<5);
+
+	if (iop_send_sync_request_mv(hba, 0, 20000)) {
+		dprintk("Get config send cmd failed\n");
+		return -1;
+	}
+
+	memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
 	return 0;
 }
 
-static int iop_set_config(struct hptiop_hba *hba,
+static int iop_set_config_itl(struct hptiop_hba *hba,
 				struct hpt_iop_request_set_config *config)
 {
 	u32 req32;
 	struct hpt_iop_request_set_config __iomem *req;
 
-	req32 = readl(&hba->iop->inbound_queue);
+	req32 = readl(&hba->u.itl.iop->inbound_queue);
 	if (req32 == IOPMU_QUEUE_EMPTY)
 		return -1;
 
 	req = (struct hpt_iop_request_set_config __iomem *)
-			((unsigned long)hba->iop + req32);
+			((unsigned long)hba->u.itl.iop + req32);
 
 	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
 		(u8 *)config + sizeof(struct hpt_iop_request_header),
@@ -232,22 +372,52 @@ static int iop_set_config(struct hptiop_hba *hba,
 	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
 	writel(IOP_RESULT_PENDING, &req->header.result);
 
-	if (iop_send_sync_request(hba, req, 20000)) {
+	if (iop_send_sync_request_itl(hba, req, 20000)) {
 		dprintk("Set config send cmd failed\n");
 		return -1;
 	}
 
-	writel(req32, &hba->iop->outbound_queue);
+	writel(req32, &hba->u.itl.iop->outbound_queue);
 	return 0;
 }
 
-static int hptiop_initialize_iop(struct hptiop_hba *hba)
+static int iop_set_config_mv(struct hptiop_hba *hba,
+				struct hpt_iop_request_set_config *config)
 {
-	struct hpt_iopmu __iomem *iop = hba->iop;
+	struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;
 
-	/* enable interrupts */
+	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
+	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
+	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
+	req->header.size =
+		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
+	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
+	req->header.context = cpu_to_le64(IOP_REQUEST_TYPE_SET_CONFIG<<5);
+
+	if (iop_send_sync_request_mv(hba, 0, 20000)) {
+		dprintk("Set config send cmd failed\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
+{
 	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
-			&iop->outbound_intmask);
+		&hba->u.itl.iop->outbound_intmask);
+}
+
+static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
+{
+	writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
+		&hba->u.mv.regs->outbound_intmask);
+}
+
+static int hptiop_initialize_iop(struct hptiop_hba *hba)
+{
+	/* enable interrupts */
+	hba->ops->enable_intr(hba);
 
 	hba->initialized = 1;
 
@@ -261,37 +431,74 @@ static int hptiop_initialize_iop(struct hptiop_hba *hba)
 	return 0;
 }
 
-static int hptiop_map_pci_bar(struct hptiop_hba *hba)
+static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
 {
 	u32 mem_base_phy, length;
 	void __iomem *mem_base_virt;
+
 	struct pci_dev *pcidev = hba->pcidev;
 
-	if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_MEM)) {
+
+	if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
 		printk(KERN_ERR "scsi%d: pci resource invalid\n",
 				hba->host->host_no);
-		return -1;
+		return 0;
 	}
 
-	mem_base_phy = pci_resource_start(pcidev, 0);
-	length = pci_resource_len(pcidev, 0);
+	mem_base_phy = pci_resource_start(pcidev, index);
+	length = pci_resource_len(pcidev, index);
 	mem_base_virt = ioremap(mem_base_phy, length);
 
 	if (!mem_base_virt) {
 		printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
 				hba->host->host_no);
+		return 0;
+	}
+	return mem_base_virt;
+}
+
+static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
+{
+	hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
+	if (hba->u.itl.iop)
+		return 0;
+	else
+		return -1;
+}
+
+static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
+{
+	iounmap(hba->u.itl.iop);
+}
+
+static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
+{
+	hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
+	if (hba->u.mv.regs == 0)
+		return -1;
+
+	hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
+	if (hba->u.mv.mu == 0) {
+		iounmap(hba->u.mv.regs);
 		return -1;
 	}
 
-	hba->iop = mem_base_virt;
-	dprintk("hptiop_map_pci_bar: iop=%p\n", hba->iop);
 	return 0;
 }
 
+static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
+{
+	iounmap(hba->u.mv.regs);
+	iounmap(hba->u.mv.mu);
+}
+
 static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
 {
 	dprintk("iop message 0x%x\n", msg);
 
+	if (msg == IOPMU_INBOUND_MSG0_NOP)
+		hba->msg_done = 1;
+
 	if (!hba->initialized)
 		return;
 
@@ -303,7 +510,7 @@ static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
 		hba->msg_done = 1;
 }
 
-static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
+static struct hptiop_request *get_req(struct hptiop_hba *hba)
 {
 	struct hptiop_request *ret;
 
@@ -316,30 +523,19 @@ static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
 	return ret;
 }
 
-static inline void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
+static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
 {
 	dprintk("free_req(%d, %p)\n", req->index, req);
 	req->next = hba->req_list;
 	hba->req_list = req;
 }
 
-static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
+static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
+				struct hpt_iop_request_scsi_command *req)
 {
-	struct hpt_iop_request_scsi_command *req;
 	struct scsi_cmnd *scp;
-	u32 tag;
-
-	if (hba->iopintf_v2) {
-		tag = _tag & ~ IOPMU_QUEUE_REQUEST_RESULT_BIT;
-		req = hba->reqs[tag].req_virt;
-		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
-			req->header.result = IOP_RESULT_SUCCESS;
-	} else {
-		tag = _tag;
-		req = hba->reqs[tag].req_virt;
-	}
 
-	dprintk("hptiop_host_request_callback: req=%p, type=%d, "
+	dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
 			"result=%d, context=0x%x tag=%d\n",
 			req, req->header.type, req->header.result,
 			req->header.context, tag);
@@ -354,6 +550,8 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
 
 	switch (le32_to_cpu(req->header.result)) {
 	case IOP_RESULT_SUCCESS:
+		scsi_set_resid(scp,
+			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
 		scp->result = (DID_OK<<16);
 		break;
 	case IOP_RESULT_BAD_TARGET:
@@ -371,12 +569,12 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
 	case IOP_RESULT_INVALID_REQUEST:
 		scp->result = (DID_ABORT<<16);
 		break;
-	case IOP_RESULT_MODE_SENSE_CHECK_CONDITION:
+	case IOP_RESULT_CHECK_CONDITION:
+		scsi_set_resid(scp,
+			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
 		scp->result = SAM_STAT_CHECK_CONDITION;
-		memset(&scp->sense_buffer,
-				0, sizeof(scp->sense_buffer));
 		memcpy(&scp->sense_buffer, &req->sg_list,
-				min(sizeof(scp->sense_buffer),
+				min_t(size_t, sizeof(scp->sense_buffer),
 					le32_to_cpu(req->dataxfer_length)));
 		break;
 
@@ -391,15 +589,33 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
 	free_req(hba, &hba->reqs[tag]);
 }
 
-void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
+static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
+{
+	struct hpt_iop_request_scsi_command *req;
+	u32 tag;
+
+	if (hba->iopintf_v2) {
+		tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
+		req = hba->reqs[tag].req_virt;
+		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
+			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
+	} else {
+		tag = _tag;
+		req = hba->reqs[tag].req_virt;
+	}
+
+	hptiop_finish_scsi_req(hba, tag, req);
+}
+
+void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
 {
 	struct hpt_iop_request_header __iomem *req;
 	struct hpt_iop_request_ioctl_command __iomem *p;
 	struct hpt_ioctl_k *arg;
 
 	req = (struct hpt_iop_request_header __iomem *)
-			((unsigned long)hba->iop + tag);
-	dprintk("hptiop_iop_request_callback: req=%p, type=%d, "
+			((unsigned long)hba->u.itl.iop + tag);
+	dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
 			"result=%d, context=0x%x tag=%d\n",
 			req, readl(&req->type), readl(&req->result),
 			readl(&req->context), tag);
@@ -427,7 +643,7 @@ void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
 		arg->result = HPT_IOCTL_RESULT_FAILED;
 
 	arg->done(arg);
-	writel(tag, &hba->iop->outbound_queue);
+	writel(tag, &hba->u.itl.iop->outbound_queue);
 }
 
 static irqreturn_t hptiop_intr(int irq, void *dev_id)
@@ -437,7 +653,7 @@ static irqreturn_t hptiop_intr(int irq, void *dev_id)
 	unsigned long flags;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	handled = __iop_intr(hba);
+	handled = hba->ops->iop_intr(hba);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	return handled;
@@ -469,6 +685,57 @@ static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
 	return HPT_SCP(scp)->sgcnt;
 }
 
+static void hptiop_post_req_itl(struct hptiop_hba *hba,
+					struct hptiop_request *_req)
+{
+	struct hpt_iop_request_header *reqhdr = _req->req_virt;
+
+	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
+							(u32)_req->index);
+	reqhdr->context_hi32 = 0;
+
+	if (hba->iopintf_v2) {
+		u32 size, size_bits;
+
+		size = le32_to_cpu(reqhdr->size);
+		if (size < 256)
+			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
+		else if (size < 512)
+			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
+		else
+			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
+						IOPMU_QUEUE_ADDR_HOST_BIT;
+		writel(_req->req_shifted_phy | size_bits,
+			&hba->u.itl.iop->inbound_queue);
+	} else
+		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
+					&hba->u.itl.iop->inbound_queue);
+}
+
+static void hptiop_post_req_mv(struct hptiop_hba *hba,
+					struct hptiop_request *_req)
+{
+	struct hpt_iop_request_header *reqhdr = _req->req_virt;
+	u32 size, size_bit;
+
+	reqhdr->context = cpu_to_le32(_req->index<<8 |
+					IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
+	reqhdr->context_hi32 = 0;
+	size = le32_to_cpu(reqhdr->size);
+
+	if (size <= 256)
+		size_bit = 0;
+	else if (size <= 256*2)
+		size_bit = 1;
+	else if (size <= 256*3)
+		size_bit = 2;
+	else
+		size_bit = 3;
+
+	mv_inbound_write((_req->req_shifted_phy << 5) |
+		MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
+}
+
 static int hptiop_queuecommand(struct scsi_cmnd *scp,
 				void (*done)(struct scsi_cmnd *))
 {
@@ -518,9 +785,6 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
 	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
 	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
 	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
-	req->header.context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
-							(u32)_req->index);
-	req->header.context_hi32 = 0;
 	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
 	req->channel = scp->device->channel;
 	req->target = scp->device->id;
@@ -531,21 +795,7 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
 				 + sg_count * sizeof(struct hpt_iopsg));
 
 	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
-
-	if (hba->iopintf_v2) {
-		u32 size_bits;
-		if (req->header.size < 256)
-			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
-		else if (req->header.size < 512)
-			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
-		else
-			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
-						IOPMU_QUEUE_ADDR_HOST_BIT;
-		writel(_req->req_shifted_phy | size_bits, &hba->iop->inbound_queue);
-	} else
-		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
-					&hba->iop->inbound_queue);
-
+	hba->ops->post_req(hba, _req);
 	return 0;
 
 cmd_done:
@@ -563,9 +813,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba)
 {
 	if (atomic_xchg(&hba->resetting, 1) == 0) {
 		atomic_inc(&hba->reset_count);
-		writel(IOPMU_INBOUND_MSG0_RESET,
-				&hba->iop->inbound_msgaddr0);
-		hptiop_pci_posting_flush(hba->iop);
+		hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
 	}
 
 	wait_event_timeout(hba->reset_wq,
@@ -601,8 +849,10 @@ static int hptiop_reset(struct scsi_cmnd *scp)
 static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
 						int queue_depth)
 {
-	if(queue_depth > 256)
-		queue_depth = 256;
+	struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;
+
+	if (queue_depth > hba->max_requests)
+		queue_depth = hba->max_requests;
 	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
 	return queue_depth;
 }
@@ -663,6 +913,26 @@ static struct scsi_host_template driver_template = {
 	.change_queue_depth         = hptiop_adjust_disk_queue_depth,
 };
 
+static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
+{
+	hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
+			0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
+	if (hba->u.mv.internal_req)
+		return 0;
+	else
+		return -1;
+}
+
+static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
+{
+	if (hba->u.mv.internal_req) {
+		dma_free_coherent(&hba->pcidev->dev, 0x800,
+			hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
+		return 0;
+	} else
+		return -1;
+}
+
 static int __devinit hptiop_probe(struct pci_dev *pcidev,
 					const struct pci_device_id *id)
 {
@@ -708,6 +978,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 
 	hba = (struct hptiop_hba *)host->hostdata;
 
+	hba->ops = (struct hptiop_adapter_ops *)id->driver_data;
 	hba->pcidev = pcidev;
 	hba->host = host;
 	hba->initialized = 0;
@@ -725,16 +996,24 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 	host->n_io_port = 0;
 	host->irq = pcidev->irq;
 
-	if (hptiop_map_pci_bar(hba))
+	if (hba->ops->map_pci_bar(hba))
 		goto free_scsi_host;
 
-	if (iop_wait_ready(hba->iop, 20000)) {
+	if (hba->ops->iop_wait_ready(hba, 20000)) {
 		printk(KERN_ERR "scsi%d: firmware not ready\n",
 				hba->host->host_no);
 		goto unmap_pci_bar;
 	}
 
-	if (iop_get_config(hba, &iop_config)) {
+	if (hba->ops->internal_memalloc) {
+		if (hba->ops->internal_memalloc(hba)) {
+			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
+				hba->host->host_no);
+			goto unmap_pci_bar;
+		}
+	}
+
+	if (hba->ops->get_config(hba, &iop_config)) {
 		printk(KERN_ERR "scsi%d: get config failed\n",
 				hba->host->host_no);
 		goto unmap_pci_bar;
@@ -770,7 +1049,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 	set_config.vbus_id = cpu_to_le16(host->host_no);
 	set_config.max_host_request_size = cpu_to_le16(req_size);
 
-	if (iop_set_config(hba, &set_config)) {
+	if (hba->ops->set_config(hba, &set_config)) {
 		printk(KERN_ERR "scsi%d: set config failed\n",
 				hba->host->host_no);
 		goto unmap_pci_bar;
@@ -839,21 +1118,24 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 
 free_request_mem:
 	dma_free_coherent(&hba->pcidev->dev,
-			hba->req_size*hba->max_requests + 0x20,
+			hba->req_size * hba->max_requests + 0x20,
 			hba->dma_coherent, hba->dma_coherent_handle);
 
 free_request_irq:
 	free_irq(hba->pcidev->irq, hba);
 
 unmap_pci_bar:
-	iounmap(hba->iop);
+	if (hba->ops->internal_memfree)
+		hba->ops->internal_memfree(hba);
 
-free_pci_regions:
-	pci_release_regions(pcidev) ;
+	hba->ops->unmap_pci_bar(hba);
 
 free_scsi_host:
 	scsi_host_put(host);
 
+free_pci_regions:
+	pci_release_regions(pcidev);
+
 disable_pci_device:
 	pci_disable_device(pcidev);
 
@@ -865,8 +1147,6 @@ static void hptiop_shutdown(struct pci_dev *pcidev)
 {
 	struct Scsi_Host *host = pci_get_drvdata(pcidev);
 	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
-	struct hpt_iopmu __iomem *iop = hba->iop;
-	u32    int_mask;
 
 	dprintk("hptiop_shutdown(%p)\n", hba);
 
@@ -876,11 +1156,24 @@ static void hptiop_shutdown(struct pci_dev *pcidev)
 					hba->host->host_no);
 
 	/* disable all outbound interrupts */
-	int_mask = readl(&iop->outbound_intmask);
+	hba->ops->disable_intr(hba);
+}
+
+static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
+{
+	u32 int_mask;
+
+	int_mask = readl(&hba->u.itl.iop->outbound_intmask);
 	writel(int_mask |
 		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
-		&iop->outbound_intmask);
-	hptiop_pci_posting_flush(iop);
+		&hba->u.itl.iop->outbound_intmask);
+	readl(&hba->u.itl.iop->outbound_intmask);
+}
+
+static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
+{
+	writel(0, &hba->u.mv.regs->outbound_intmask);
+	readl(&hba->u.mv.regs->outbound_intmask);
 }
 
 static void hptiop_remove(struct pci_dev *pcidev)
@@ -901,7 +1194,10 @@ static void hptiop_remove(struct pci_dev *pcidev)
 			hba->dma_coherent,
 			hba->dma_coherent_handle);
 
-	iounmap(hba->iop);
+	if (hba->ops->internal_memfree)
+		hba->ops->internal_memfree(hba);
+
+	hba->ops->unmap_pci_bar(hba);
 
 	pci_release_regions(hba->pcidev);
 	pci_set_drvdata(hba->pcidev, NULL);
@@ -910,11 +1206,50 @@ static void hptiop_remove(struct pci_dev *pcidev)
 	scsi_host_put(host);
 }
 
+static struct hptiop_adapter_ops hptiop_itl_ops = {
+	.iop_wait_ready    = iop_wait_ready_itl,
+	.internal_memalloc = 0,
+	.internal_memfree  = 0,
+	.map_pci_bar       = hptiop_map_pci_bar_itl,
+	.unmap_pci_bar     = hptiop_unmap_pci_bar_itl,
+	.enable_intr       = hptiop_enable_intr_itl,
+	.disable_intr      = hptiop_disable_intr_itl,
+	.get_config        = iop_get_config_itl,
+	.set_config        = iop_set_config_itl,
+	.iop_intr          = iop_intr_itl,
+	.post_msg          = hptiop_post_msg_itl,
+	.post_req          = hptiop_post_req_itl,
+};
+
+static struct hptiop_adapter_ops hptiop_mv_ops = {
+	.iop_wait_ready    = iop_wait_ready_mv,
+	.internal_memalloc = hptiop_internal_memalloc_mv,
+	.internal_memfree  = hptiop_internal_memfree_mv,
+	.map_pci_bar       = hptiop_map_pci_bar_mv,
+	.unmap_pci_bar     = hptiop_unmap_pci_bar_mv,
+	.enable_intr       = hptiop_enable_intr_mv,
+	.disable_intr      = hptiop_disable_intr_mv,
+	.get_config        = iop_get_config_mv,
+	.set_config        = iop_set_config_mv,
+	.iop_intr          = iop_intr_mv,
+	.post_msg          = hptiop_post_msg_mv,
+	.post_req          = hptiop_post_req_mv,
+};
+
 static struct pci_device_id hptiop_id_table[] = {
-	{ PCI_VDEVICE(TTI, 0x3220) },
-	{ PCI_VDEVICE(TTI, 0x3320) },
-	{ PCI_VDEVICE(TTI, 0x3520) },
-	{ PCI_VDEVICE(TTI, 0x4320) },
+	{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
+	{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
+	{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
 	{},
 };
 
+ 89 - 35
drivers/scsi/hptiop.h

@@ -1,5 +1,5 @@
 /*
- * HighPoint RR3xxx controller driver for Linux
+ * HighPoint RR3xxx/4xxx controller driver for Linux
  * Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -18,8 +18,7 @@
 #ifndef _HPTIOP_H_
 #define _HPTIOP_H_
 
-struct hpt_iopmu
-{
+struct hpt_iopmu_itl {
 	__le32 resrved0[4];
 	__le32 inbound_msgaddr0;
 	__le32 inbound_msgaddr1;
@@ -54,6 +53,40 @@ struct hpt_iopmu
 #define IOPMU_INBOUND_INT_ERROR      8
 #define IOPMU_INBOUND_INT_POSTQUEUE  0x10
 
+#define MVIOP_QUEUE_LEN  512
+
+struct hpt_iopmu_mv {
+	__le32 inbound_head;
+	__le32 inbound_tail;
+	__le32 outbound_head;
+	__le32 outbound_tail;
+	__le32 inbound_msg;
+	__le32 outbound_msg;
+	__le32 reserve[10];
+	__le64 inbound_q[MVIOP_QUEUE_LEN];
+	__le64 outbound_q[MVIOP_QUEUE_LEN];
+};
+
+struct hpt_iopmv_regs {
+	__le32 reserved[0x20400 / 4];
+	__le32 inbound_doorbell;
+	__le32 inbound_intmask;
+	__le32 outbound_doorbell;
+	__le32 outbound_intmask;
+};
+
+#define MVIOP_MU_QUEUE_ADDR_HOST_MASK   (~(0x1full))
+#define MVIOP_MU_QUEUE_ADDR_HOST_BIT    4
+
+#define MVIOP_MU_QUEUE_ADDR_IOP_HIGH32  0xffffffff
+#define MVIOP_MU_QUEUE_REQUEST_RESULT_BIT   1
+#define MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT 2
+
+#define MVIOP_MU_INBOUND_INT_MSG        1
+#define MVIOP_MU_INBOUND_INT_POSTQUEUE  2
+#define MVIOP_MU_OUTBOUND_INT_MSG       1
+#define MVIOP_MU_OUTBOUND_INT_POSTQUEUE 2
+
 enum hpt_iopmu_message {
 	/* host-to-iop messages */
 	IOPMU_INBOUND_MSG0_NOP = 0,
@@ -72,8 +105,7 @@ enum hpt_iopmu_message {
 	IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_MAX = 0x3ff,
 };
 
-struct hpt_iop_request_header
-{
+struct hpt_iop_request_header {
 	__le32 size;
 	__le32 type;
 	__le32 flags;
@@ -104,11 +136,10 @@ enum hpt_iop_result_type {
 	IOP_RESULT_RESET,
 	IOP_RESULT_INVALID_REQUEST,
 	IOP_RESULT_BAD_TARGET,
-	IOP_RESULT_MODE_SENSE_CHECK_CONDITION,
+	IOP_RESULT_CHECK_CONDITION,
 };
 
-struct hpt_iop_request_get_config
-{
+struct hpt_iop_request_get_config {
 	struct hpt_iop_request_header header;
 	__le32 interface_version;
 	__le32 firmware_version;
@@ -121,8 +152,7 @@ struct hpt_iop_request_get_config
 	__le32 sdram_size;
 };
 
-struct hpt_iop_request_set_config
-{
+struct hpt_iop_request_set_config {
 	struct hpt_iop_request_header header;
 	__le32 iop_id;
 	__le16 vbus_id;
@@ -130,15 +160,13 @@ struct hpt_iop_request_set_config
 	__le32 reserve[6];
 };
 
-struct hpt_iopsg
-{
+struct hpt_iopsg {
 	__le32 size;
 	__le32 eot; /* non-zero: end of table */
 	__le64 pci_address;
 };
 
-struct hpt_iop_request_block_command
-{
+struct hpt_iop_request_block_command {
 	struct hpt_iop_request_header header;
 	u8     channel;
 	u8     target;
@@ -156,8 +184,7 @@ struct hpt_iop_request_block_command
 #define IOP_BLOCK_COMMAND_FLUSH    4
 #define IOP_BLOCK_COMMAND_SHUTDOWN 5
 
-struct hpt_iop_request_scsi_command
-{
+struct hpt_iop_request_scsi_command {
 	struct hpt_iop_request_header header;
 	u8     channel;
 	u8     target;
@@ -168,8 +195,7 @@ struct hpt_iop_request_scsi_command
 	struct hpt_iopsg sg_list[1];
 };
 
-struct hpt_iop_request_ioctl_command
-{
+struct hpt_iop_request_ioctl_command {
 	struct hpt_iop_request_header header;
 	__le32 ioctl_code;
 	__le32 inbuf_size;
@@ -182,11 +208,11 @@ struct hpt_iop_request_ioctl_command
 #define HPTIOP_MAX_REQUESTS  256u
 
 struct hptiop_request {
-	struct hptiop_request * next;
-	void *                  req_virt;
-	u32                     req_shifted_phy;
-	struct scsi_cmnd *      scp;
-	int                     index;
+	struct hptiop_request *next;
+	void                  *req_virt;
+	u32                   req_shifted_phy;
+	struct scsi_cmnd      *scp;
+	int                   index;
 };
 
 struct hpt_scsi_pointer {
@@ -198,9 +224,21 @@ struct hpt_scsi_pointer {
 #define HPT_SCP(scp) ((struct hpt_scsi_pointer *)&(scp)->SCp)
 
 struct hptiop_hba {
-	struct hpt_iopmu __iomem * iop;
-	struct Scsi_Host * host;
-	struct pci_dev * pcidev;
+	struct hptiop_adapter_ops *ops;
+	union {
+		struct {
+			struct hpt_iopmu_itl __iomem *iop;
+		} itl;
+		struct {
+			struct hpt_iopmv_regs *regs;
+			struct hpt_iopmu_mv __iomem *mu;
+			void *internal_req;
+			dma_addr_t internal_req_phy;
+		} mv;
+	} u;
+
+	struct Scsi_Host *host;
+	struct pci_dev *pcidev;
 
 	/* IOP config info */
 	u32     interface_version;
@@ -213,15 +251,15 @@ struct hptiop_hba {
 
 	u32     req_size; /* host-allocated request buffer size */
 
-	int     iopintf_v2: 1;
-	int     initialized: 1;
-	int     msg_done: 1;
+	u32     iopintf_v2: 1;
+	u32     initialized: 1;
+	u32     msg_done: 1;
 
 	struct hptiop_request * req_list;
 	struct hptiop_request reqs[HPTIOP_MAX_REQUESTS];
 
 	/* used to free allocated dma area */
-	void *      dma_coherent;
+	void        *dma_coherent;
 	dma_addr_t  dma_coherent_handle;
 
 	atomic_t    reset_count;
@@ -231,19 +269,35 @@ struct hptiop_hba {
 	wait_queue_head_t ioctl_wq;
 };
 
-struct hpt_ioctl_k
-{
+struct hpt_ioctl_k {
 	struct hptiop_hba * hba;
 	u32    ioctl_code;
 	u32    inbuf_size;
 	u32    outbuf_size;
-	void * inbuf;
-	void * outbuf;
-	u32  * bytes_returned;
+	void   *inbuf;
+	void   *outbuf;
+	u32    *bytes_returned;
 	void (*done)(struct hpt_ioctl_k *);
 	int    result; /* HPT_IOCTL_RESULT_ */
 };
 
+struct hptiop_adapter_ops {
+	int  (*iop_wait_ready)(struct hptiop_hba *hba, u32 millisec);
+	int  (*internal_memalloc)(struct hptiop_hba *hba);
+	int  (*internal_memfree)(struct hptiop_hba *hba);
+	int  (*map_pci_bar)(struct hptiop_hba *hba);
+	void (*unmap_pci_bar)(struct hptiop_hba *hba);
+	void (*enable_intr)(struct hptiop_hba *hba);
+	void (*disable_intr)(struct hptiop_hba *hba);
+	int  (*get_config)(struct hptiop_hba *hba,
+				struct hpt_iop_request_get_config *config);
+	int  (*set_config)(struct hptiop_hba *hba,
+				struct hpt_iop_request_set_config *config);
+	int  (*iop_intr)(struct hptiop_hba *hba);
+	void (*post_msg)(struct hptiop_hba *hba, u32 msg);
+	void (*post_req)(struct hptiop_hba *hba, struct hptiop_request *_req);
+};
+
 #define HPT_IOCTL_RESULT_OK         0
 #define HPT_IOCTL_RESULT_FAILED     (-1)
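
A closing note on the shape of the patch: the hptiop_adapter_ops table above
is what lets one driver core serve both the Intel (ITL) and Marvell (MV)
register layouts. Each entry in hptiop_id_table carries a pointer to its
family's ops table in driver_data, and the probe path recovers it and
dispatches every family-specific step through it. A minimal sketch of the
pattern, assuming a hypothetical my_probe() and my_alloc_hba() helper (not
part of the patch):

	static int my_probe(struct pci_dev *pcidev,
				const struct pci_device_id *id)
	{
		/* recover the ops table chosen by the PCI ID match */
		struct hptiop_adapter_ops *ops =
			(struct hptiop_adapter_ops *)id->driver_data;
		struct hptiop_hba *hba = my_alloc_hba(pcidev); /* hypothetical */

		hba->ops = ops;

		/* from here on, every family-specific step is an indirect
		   call; ITL and MV never appear by name in the core logic */
		if (hba->ops->map_pci_bar(hba))
			return -ENODEV;
		hba->ops->enable_intr(hba);
		return 0;
	}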