@@ -421,7 +421,6 @@ static void mv_error_handler(struct ata_port *ap);
 static void mv_post_int_cmd(struct ata_queued_cmd *qc);
 static void mv_eh_freeze(struct ata_port *ap);
 static void mv_eh_thaw(struct ata_port *ap);
-static int mv_slave_config(struct scsi_device *sdev);
 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 
 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
@@ -459,7 +458,7 @@ static struct scsi_host_template mv5_sht = {
 	.use_clustering		= 1,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= MV_DMA_BOUNDARY,
-	.slave_configure	= mv_slave_config,
+	.slave_configure	= ata_scsi_slave_config,
 	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
 };
@@ -477,7 +476,7 @@ static struct scsi_host_template mv6_sht = {
 	.use_clustering		= 1,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= MV_DMA_BOUNDARY,
-	.slave_configure	= mv_slave_config,
+	.slave_configure	= ata_scsi_slave_config,
 	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
 };
@@ -756,17 +755,6 @@ static void mv_irq_clear(struct ata_port *ap)
 {
 }
 
-static int mv_slave_config(struct scsi_device *sdev)
-{
-	int rc = ata_scsi_slave_config(sdev);
-	if (rc)
-		return rc;
-
-	blk_queue_max_phys_segments(sdev->request_queue, MV_MAX_SG_CT / 2);
-
-	return 0;	/* scsi layer doesn't check return value, sigh */
-}
-
 static void mv_set_edma_ptrs(void __iomem *port_mmio,
 			     struct mv_host_priv *hpriv,
 			     struct mv_port_priv *pp)
@@ -1138,7 +1126,7 @@ static void mv_fill_sg(struct ata_queued_cmd *qc)
 {
 	struct mv_port_priv *pp = qc->ap->private_data;
 	struct scatterlist *sg;
-	struct mv_sg *mv_sg;
+	struct mv_sg *mv_sg, *last_sg = NULL;
 
 	mv_sg = pp->sg_tbl;
 	ata_for_each_sg(sg, qc) {
@@ -1159,13 +1147,13 @@ static void mv_fill_sg(struct ata_queued_cmd *qc)
 			sg_len -= len;
 			addr += len;
 
-			if (!sg_len && ata_sg_is_last(sg, qc))
-				mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
-
+			last_sg = mv_sg;
 			mv_sg++;
 		}
-
 	}
+
+	if (likely(last_sg))
+		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
 }
 
 static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)