@@ -578,7 +578,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
 	}
 
 	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
-		WARN_ON(!ioaddr->ctl_addr);
+		WARN_ON_ONCE(!ioaddr->ctl_addr);
 		iowrite8(tf->hob_feature, ioaddr->feature_addr);
 		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
 		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
@@ -651,7 +651,7 @@ void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 			iowrite8(tf->ctl, ioaddr->ctl_addr);
 			ap->last_ctl = tf->ctl;
 		} else
-			WARN_ON(1);
+			WARN_ON_ONCE(1);
 	}
 }
 EXPORT_SYMBOL_GPL(ata_sff_tf_read);
@@ -891,7 +891,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc)
 		/* READ/WRITE MULTIPLE */
 		unsigned int nsect;
 
-		WARN_ON(qc->dev->multi_count == 0);
+		WARN_ON_ONCE(qc->dev->multi_count == 0);
 
 		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
 			    qc->dev->multi_count);
@@ -918,7 +918,7 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
 {
 	/* send SCSI cdb */
 	DPRINTK("send cdb\n");
-	WARN_ON(qc->dev->cdb_len < 12);
+	WARN_ON_ONCE(qc->dev->cdb_len < 12);
 
 	ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
 	ata_sff_sync(ap);
@@ -1014,7 +1014,7 @@ next_sg:
 	}
 
 	/* consumed can be larger than count only for the last transfer */
-	WARN_ON(qc->cursg && count != consumed);
+	WARN_ON_ONCE(qc->cursg && count != consumed);
 
 	if (bytes)
 		goto next_sg;
@@ -1172,13 +1172,13 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
 	unsigned long flags = 0;
 	int poll_next;
 
-	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
+	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
 
 	/* Make sure ata_sff_qc_issue() does not throw things
 	 * like DMA polling into the workqueue. Notice that
 	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
 	 */
-	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
+	WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
 
 fsm_start:
 	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
@@ -1387,7 +1387,7 @@ fsm_start:
 		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
 			ap->print_id, qc->dev->devno, status);
 
-		WARN_ON(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
+		WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
 
 		ap->hsm_task_state = HSM_ST_IDLE;
 
@@ -1423,7 +1423,7 @@ void ata_pio_task(struct work_struct *work)
 	int poll_next;
 
 fsm_start:
-	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
+	WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
 
 	/*
 	 * This is purely heuristic. This is a fast path.
@@ -1512,7 +1512,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 		break;
 
 	case ATA_PROT_DMA:
-		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
 
 		ap->ops->sff_tf_load(ap, &qc->tf);	/* load tf registers */
 		ap->ops->bmdma_setup(qc);		/* set up bmdma */
@@ -1564,7 +1564,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 		break;
 
 	case ATAPI_PROT_DMA:
-		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
 
 		ap->ops->sff_tf_load(ap, &qc->tf);	/* load tf registers */
 		ap->ops->bmdma_setup(qc);		/* set up bmdma */
@@ -1576,7 +1576,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 		break;
 
 	default:
-		WARN_ON(1);
+		WARN_ON_ONCE(1);
 		return AC_ERR_SYSTEM;
 	}
 
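Note: every hunk above is a straight WARN_ON() -> WARN_ON_ONCE() substitution. Both macros still evaluate the condition (and yield its value) on every call; the _ONCE variant just latches per call site after the first true evaluation, so an assertion that keeps triggering on these hot SFF paths prints a single backtrace instead of flooding the kernel log. A minimal userspace sketch of that latching behaviour follows; my_warn_on_once and __warned are illustrative names, not the real <asm-generic/bug.h> implementation, and it compiles with gcc/clang since it uses the same statement-expression extension the kernel macro relies on.

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative stand-in for WARN_ON_ONCE().  Each macro expansion
 * gets its own static latch, so every call site warns at most once,
 * and the condition's value is still the result of the expression.
 */
#define my_warn_on_once(cond) ({					\
	static bool __warned;						\
	bool __ret = !!(cond);						\
	if (__ret && !__warned) {					\
		__warned = true;					\
		fprintf(stderr, "WARNING at %s:%d: %s\n",		\
			__FILE__, __LINE__, #cond);			\
	}								\
	__ret;								\
})

int main(void)
{
	/* The condition is true on every iteration, but only the
	 * first hit prints; the remaining 999 stay silent. */
	for (int i = 0; i < 1000; i++)
		my_warn_on_once(i >= 0);
	return 0;
}

Because the macro remains a valid expression statement with unchanged control flow, the conversion is safe even in spots like the bare "} else WARN_ON_ONCE(1);" in ata_sff_tf_read() above; only the logging rate changes.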