@@ -510,7 +510,8 @@ static struct scsi_host_template mv6_sht = {
 	.name			= DRV_NAME,
 	.ioctl			= ata_scsi_ioctl,
 	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
+	.change_queue_depth	= ata_scsi_change_queue_depth,
+	.can_queue		= MV_MAX_Q_DEPTH - 1,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= MV_MAX_SG_CT / 2,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
@@ -572,6 +573,7 @@ static const struct ata_port_operations mv6_ops = {
 	.post_internal_cmd	= mv_post_int_cmd,
 	.freeze			= mv_eh_freeze,
 	.thaw			= mv_eh_thaw,
+	.qc_defer		= ata_std_qc_defer,
 
 	.scr_read		= mv_scr_read,
 	.scr_write		= mv_scr_write,
@@ -600,6 +602,7 @@ static const struct ata_port_operations mv_iie_ops = {
 	.post_internal_cmd	= mv_post_int_cmd,
 	.freeze			= mv_eh_freeze,
 	.thaw			= mv_eh_thaw,
+	.qc_defer		= ata_std_qc_defer,
 
 	.scr_read		= mv_scr_read,
 	.scr_write		= mv_scr_write,
@@ -628,26 +631,29 @@ static const struct ata_port_info mv_port_info[] = {
 		.port_ops	= &mv5_ops,
 	},
 	{  /* chip_604x */
-		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
+		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_NCQ,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv6_ops,
 	},
 	{  /* chip_608x */
 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
-				  MV_FLAG_DUAL_HC,
+				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv6_ops,
 	},
 	{  /* chip_6042 */
-		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
+		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_NCQ,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv_iie_ops,
 	},
 	{  /* chip_7042 */
-		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
+		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_NCQ,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv_iie_ops,
@@ -1295,7 +1301,8 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	u16 flags = 0;
 	unsigned in_index;
 
-	if (qc->tf.protocol != ATA_PROT_DMA)
+	if ((qc->tf.protocol != ATA_PROT_DMA) &&
+	    (qc->tf.protocol != ATA_PROT_NCQ))
 		return;
 
 	/* Fill in command request block
@@ -1331,13 +1338,11 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
|
|
|
case ATA_CMD_WRITE_FUA_EXT:
|
|
|
mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
|
|
|
break;
|
|
|
-#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
|
|
|
case ATA_CMD_FPDMA_READ:
|
|
|
case ATA_CMD_FPDMA_WRITE:
|
|
|
mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
|
|
|
mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
|
|
|
break;
|
|
|
-#endif /* FIXME: remove this line when NCQ added */
|
|
|
default:
|
|
|
/* The only other commands EDMA supports in non-queued and
|
|
|
* non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
|
|
@@ -1386,7 +1391,8 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
|
|
|
unsigned in_index;
|
|
|
u32 flags = 0;
|
|
|
|
|
|
- if (qc->tf.protocol != ATA_PROT_DMA)
|
|
|
+ if ((qc->tf.protocol != ATA_PROT_DMA) &&
|
|
|
+ (qc->tf.protocol != ATA_PROT_NCQ))
|
|
|
return;
|
|
|
|
|
|
/* Fill in Gen IIE command request block
|
|
@@ -1452,7 +1458,8 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 	struct mv_port_priv *pp = ap->private_data;
 	u32 in_index;
 
-	if (qc->tf.protocol != ATA_PROT_DMA) {
+	if ((qc->tf.protocol != ATA_PROT_DMA) &&
+	    (qc->tf.protocol != ATA_PROT_NCQ)) {
 		/* We're about to send a non-EDMA capable command to the
 		 * port.  Turn off EDMA so there won't be problems accessing
 		 * shadow block, etc registers.
@@ -1463,12 +1470,6 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 
 	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
 
-	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
-
-	/* until we do queuing, the queue should be empty at this point */
-	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
-		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
-
 	pp->req_idx++;
 
 	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;