@@ -278,7 +278,7 @@ static void ata_unpack_xfermask(unsigned int xfer_mask,
 }
 
 static const struct ata_xfer_ent {
-	unsigned int shift, bits;
+	int shift, bits;
 	u8 base;
 } ata_xfer_tbl[] = {
 	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
@@ -989,9 +989,7 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
 	qc->private_data = &wait;
 	qc->complete_fn = ata_qc_complete_internal;
 
-	qc->err_mask = ata_qc_issue(qc);
-	if (qc->err_mask)
-		ata_qc_complete(qc);
+	ata_qc_issue(qc);
 
 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
 
@@ -3997,15 +3995,14 @@ static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
  *
  *	LOCKING:
  *	spin_lock_irqsave(host_set lock)
- *
- *	RETURNS:
- *	Zero on success, AC_ERR_* mask on failure
  */
-
-unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
+void ata_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 
+	qc->ap->active_tag = qc->tag;
+	qc->flags |= ATA_QCFLAG_ACTIVE;
+
 	if (ata_should_dma_map(qc)) {
 		if (qc->flags & ATA_QCFLAG_SG) {
 			if (ata_sg_setup(qc))
@@ -4020,17 +4017,18 @@ unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
 
 	ap->ops->qc_prep(qc);
 
-	qc->ap->active_tag = qc->tag;
-	qc->flags |= ATA_QCFLAG_ACTIVE;
-
-	return ap->ops->qc_issue(qc);
+	qc->err_mask |= ap->ops->qc_issue(qc);
+	if (unlikely(qc->err_mask))
+		goto err;
+	return;
 
 sg_err:
 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
-	return AC_ERR_SYSTEM;
+	qc->err_mask |= AC_ERR_SYSTEM;
+err:
+	ata_qc_complete(qc);
 }
 
-
 /**
  *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
  *	@qc: command to issue to device