@@ -2156,8 +2156,9 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
 static void ata_sg_clean(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg = qc->__sg;
 	int dir = qc->dma_dir;
+	void *pad_buf = NULL;
 
 	assert(qc->flags & ATA_QCFLAG_DMAMAP);
 	assert(sg != NULL);
@@ -2167,14 +2168,35 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
 
 	DPRINTK("unmapping %u sg elements\n", qc->n_elem);
 
-	if (qc->flags & ATA_QCFLAG_SG)
+	/* if we padded the buffer out to 32-bit bound, and data
+	 * xfer direction is from-device, we must copy from the
+	 * pad buffer back into the supplied buffer
+	 */
+	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
+		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+
+	if (qc->flags & ATA_QCFLAG_SG) {
 		dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
-	else
+		/* restore last sg */
+		sg[qc->orig_n_elem - 1].length += qc->pad_len;
+		if (pad_buf) {
+			struct scatterlist *psg = &qc->pad_sgent;
+			void *addr = kmap_atomic(psg->page, KM_IRQ0);
+			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
+			kunmap_atomic(addr, KM_IRQ0);
+		}
+	} else {
 		dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
 				 sg_dma_len(&sg[0]), dir);
+		/* restore sg */
+		sg->length += qc->pad_len;
+		if (pad_buf)
+			memcpy(qc->buf_virt + sg->length - qc->pad_len,
+			       pad_buf, qc->pad_len);
+	}
 
 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
-	qc->sg = NULL;
+	qc->__sg = NULL;
 }
 
 /**
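To make the cleanup above concrete: when an odd-length ATAPI transfer was padded, the final 1-3 bytes of a from-device transfer land in the command's private 4-byte pad slot, and ata_sg_clean() copies them back into the caller's buffer after unmapping. Below is a minimal userspace sketch of just that copy-back step; DMA_PAD_SZ and the variable names mirror the patch, but the demo itself is illustrative, not kernel code.

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define DMA_PAD_SZ 4			/* mirrors ATA_DMA_PAD_SZ */

int main(void)
{
	/* caller asked for 6 bytes; only 4 end on a 32-bit boundary */
	unsigned char user_buf[6] = { 0 };
	unsigned int pad_len = sizeof(user_buf) & 3;		/* 2 */
	unsigned int main_len = sizeof(user_buf) - pad_len;	/* 4 */

	/* pretend the device DMA'd into the two separate regions */
	unsigned char pad_buf[DMA_PAD_SZ] = { 0xAA, 0xBB, 0x00, 0x00 };
	memcpy(user_buf, "\x01\x02\x03\x04", main_len);

	/* the copy-back that ata_sg_clean() performs for reads */
	if (pad_len)
		memcpy(user_buf + main_len, pad_buf, pad_len);

	assert(user_buf[4] == 0xAA && user_buf[5] == 0xBB);
	printf("tail restored: %02x %02x\n", user_buf[4], user_buf[5]);
	return 0;
}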
@@ -2190,15 +2212,15 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
  */
 static void ata_fill_sg(struct ata_queued_cmd *qc)
 {
-	struct scatterlist *sg = qc->sg;
 	struct ata_port *ap = qc->ap;
-	unsigned int idx, nelem;
+	struct scatterlist *sg;
+	unsigned int idx;
 
-	assert(sg != NULL);
+	assert(qc->__sg != NULL);
 	assert(qc->n_elem > 0);
 
 	idx = 0;
-	for (nelem = qc->n_elem; nelem; nelem--,sg++) {
+	ata_for_each_sg(sg, qc) {
 		u32 addr, offset;
 		u32 sg_len, len;
 
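The new ata_for_each_sg() iterator is defined in the header rather than in this diff. Presumably it walks the caller's mapped entries and then, when qc->pad_len is non-zero, yields qc->pad_sgent as one extra trailing element, which is how ata_fill_sg() emits a PRD entry for the pad without special-casing it. The helpers below are a hypothetical reconstruction of that shape (dummy types so the sketch compiles standalone), not the verbatim libata.h code.

#include <stddef.h>

/* minimal stand-ins for the real kernel types */
struct scatterlist { int dummy; };

struct ata_queued_cmd {
	struct scatterlist *__sg;	/* caller's list */
	struct scatterlist pad_sgent;	/* the 4-byte pad entry */
	unsigned int n_elem;		/* mapped caller entries */
	unsigned int pad_len;		/* 0 if no padding needed */
};

static inline struct scatterlist *ata_qc_first_sg(struct ata_queued_cmd *qc)
{
	if (qc->n_elem)
		return qc->__sg;
	if (qc->pad_len)
		return &qc->pad_sgent;
	return NULL;
}

static inline struct scatterlist *ata_qc_next_sg(struct scatterlist *sg,
						 struct ata_queued_cmd *qc)
{
	if (sg == &qc->pad_sgent)		/* pad is always last */
		return NULL;
	if (++sg < qc->__sg + qc->n_elem)	/* more caller entries */
		return sg;
	if (qc->pad_len)			/* append the pad entry */
		return &qc->pad_sgent;
	return NULL;
}

#define ata_for_each_sg(sg, qc) \
	for (sg = ata_qc_first_sg(qc); sg; sg = ata_qc_next_sg(sg, qc))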
@@ -2289,11 +2311,12 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
 	qc->flags |= ATA_QCFLAG_SINGLE;
 
 	memset(&qc->sgent, 0, sizeof(qc->sgent));
-	qc->sg = &qc->sgent;
+	qc->__sg = &qc->sgent;
 	qc->n_elem = 1;
+	qc->orig_n_elem = 1;
 	qc->buf_virt = buf;
 
-	sg = qc->sg;
+	sg = qc->__sg;
 	sg->page = virt_to_page(buf);
 	sg->offset = (unsigned long) buf & ~PAGE_MASK;
 	sg->length = buflen;
@@ -2317,8 +2340,9 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
 		 unsigned int n_elem)
 {
 	qc->flags |= ATA_QCFLAG_SG;
-	qc->sg = sg;
+	qc->__sg = sg;
 	qc->n_elem = n_elem;
+	qc->orig_n_elem = n_elem;
 }
 
 /**
@@ -2338,9 +2362,32 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	int dir = qc->dma_dir;
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg = qc->__sg;
 	dma_addr_t dma_address;
 
+	/* we must lengthen transfers to end on a 32-bit boundary */
+	qc->pad_len = sg->length & 3;
+	if (qc->pad_len) {
+		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+		struct scatterlist *psg = &qc->pad_sgent;
+
+		assert(qc->dev->class == ATA_DEV_ATAPI);
+
+		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE)
+			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
+			       qc->pad_len);
+
+		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
+		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
+		/* trim sg */
+		sg->length -= qc->pad_len;
+
+		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
+			sg->length, qc->pad_len);
+	}
+
 	dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
 				     sg->length, dir);
 	if (dma_mapping_error(dma_address))
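A worked example of the length math above: pad_len = sg->length & 3 counts the stray tail bytes. For a write they are copied into the zero-filled 4-byte pad slot, the main element is trimmed by pad_len, and the pad entry always transfers a full ATA_DMA_PAD_SZ bytes, so the total DMA length is rounded up to the next 32-bit multiple. A standalone demo of just this arithmetic (the lengths are illustrative values, not from the patch):

#include <stdio.h>

#define ATA_DMA_PAD_SZ 4

int main(void)
{
	unsigned int lengths[] = { 510, 512, 1022, 17 };

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int len = lengths[i];
		unsigned int pad_len = len & 3;	/* bytes moved to pad sg */
		unsigned int trimmed = len - pad_len;
		unsigned int total = trimmed + (pad_len ? ATA_DMA_PAD_SZ : 0);

		printf("len=%4u -> trimmed=%4u pad_len=%u total_dma=%4u\n",
		       len, trimmed, pad_len, total);
	}
	return 0;
}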
@@ -2372,12 +2419,47 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
 static int ata_sg_setup(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg = qc->__sg;
+	struct scatterlist *lsg = &sg[qc->n_elem - 1];
 	int n_elem, dir;
 
 	VPRINTK("ENTER, ata%u\n", ap->id);
 	assert(qc->flags & ATA_QCFLAG_SG);
 
+	/* we must lengthen transfers to end on a 32-bit boundary */
+	qc->pad_len = lsg->length & 3;
+	if (qc->pad_len) {
+		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+		struct scatterlist *psg = &qc->pad_sgent;
+		unsigned int offset;
+
+		assert(qc->dev->class == ATA_DEV_ATAPI);
+
+		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
+
+		/*
+		 * psg->page/offset are used to copy to-be-written
+		 * data in this function or read data in ata_sg_clean.
+		 */
+		offset = lsg->offset + lsg->length - qc->pad_len;
+		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
+		psg->offset = offset_in_page(offset);
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE) {
+			void *addr = kmap_atomic(psg->page, KM_IRQ0);
+			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
+			kunmap_atomic(addr, KM_IRQ0);
+		}
+
+		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
+		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
+		/* trim last sg */
+		lsg->length -= qc->pad_len;
+
+		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
+			qc->n_elem - 1, lsg->length, qc->pad_len);
+	}
+
 	dir = qc->dma_dir;
 	n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
 	if (n_elem < 1)
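One subtlety in this hunk: the tail of the last scatterlist element need not lie in lsg->page itself, because an element can span several contiguous pages. The code therefore forms an absolute byte offset and resolves it with nth_page()/offset_in_page() before the kmap_atomic(). The standalone sketch below walks through that page arithmetic, assuming 4 KiB pages; the numbers are invented for illustration.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	/* last sg element: starts 512 bytes into its first page and
	 * spans 8190 bytes, so pad_len = 8190 & 3 = 2 */
	unsigned int sg_offset = 512, sg_length = 8190;
	unsigned int pad_len = sg_length & 3;
	unsigned int tail = sg_offset + sg_length - pad_len;	/* 8700 */

	printf("tail byte offset = %u\n", tail);
	printf("pages to skip    = %u\n", tail >> PAGE_SHIFT);	/* nth_page(): 2 */
	printf("offset in page   = %u\n", tail & ~PAGE_MASK);	/* offset_in_page(): 508 */
	return 0;
}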
@@ -2655,7 +2737,7 @@ static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
 static void ata_pio_sector(struct ata_queued_cmd *qc)
 {
 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg = qc->__sg;
 	struct ata_port *ap = qc->ap;
 	struct page *page;
 	unsigned int offset;
@@ -2705,7 +2787,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
 {
 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg = qc->__sg;
 	struct ata_port *ap = qc->ap;
 	struct page *page;
 	unsigned char *buf;
@@ -2738,7 +2820,7 @@ next_sg:
 		return;
 	}
 
-	sg = &qc->sg[qc->cursg];
+	sg = &qc->__sg[qc->cursg];
 
 	page = sg->page;
 	offset = sg->offset + qc->cursg_ofs;
@@ -3145,7 +3227,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
 
 	qc = ata_qc_new(ap);
 	if (qc) {
-		qc->sg = NULL;
+		qc->__sg = NULL;
 		qc->flags = 0;
 		qc->scsicmd = NULL;
 		qc->ap = ap;
@@ -3837,6 +3919,12 @@ int ata_port_start (struct ata_port *ap)
 	if (!ap->prd)
 		return -ENOMEM;
 
+	ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ, &ap->pad_dma, GFP_KERNEL);
+	if (!ap->pad) {
+		dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
+		return -ENOMEM;
+	}
+
 	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
 
 	return 0;
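ATA_DMA_PAD_BUF_SZ is not defined in this diff; presumably it is ATA_DMA_PAD_SZ multiplied by the maximum queue depth, so every command tag owns a disjoint 4-byte slot inside the single coherent allocation and queued commands never share pad space. The per-tag offset qc->tag * ATA_DMA_PAD_SZ used throughout the patch relies on exactly that layout. A toy illustration, where ATA_MAX_QUEUE = 32 is an assumption rather than a value taken from this patch:

#include <stdio.h>

#define ATA_DMA_PAD_SZ	4
#define ATA_MAX_QUEUE	32	/* assumed; the real constant lives in libata.h */
#define ATA_DMA_PAD_BUF_SZ (ATA_DMA_PAD_SZ * ATA_MAX_QUEUE)

int main(void)
{
	/* one coherent allocation, one 4-byte slot per command tag */
	for (unsigned int tag = 0; tag < 4; tag++)
		printf("tag %u -> pad slot at byte offset %u\n",
		       tag, tag * ATA_DMA_PAD_SZ);
	printf("total pad buffer: %u bytes\n", ATA_DMA_PAD_BUF_SZ);
	return 0;
}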
@@ -3859,6 +3947,7 @@ void ata_port_stop (struct ata_port *ap)
 	struct device *dev = ap->host_set->dev;
 
 	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
+	dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
 }
 
 void ata_host_stop (struct ata_host_set *host_set)