@@ -50,6 +50,7 @@
 enum {
 	PDC_MAX_PORTS		= 4,
 	PDC_MMIO_BAR		= 3,
+	PDC_MAX_PRD		= LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */
 
 	/* register offsets */
 	PDC_FEATURE		= 0x04, /* Feature/Error reg (per port) */
@@ -157,7 +158,7 @@ static struct scsi_host_template pdc_ata_sht = {
 	.queuecommand		= ata_scsi_queuecmd,
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
+	.sg_tablesize		= PDC_MAX_PRD,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
@@ -523,6 +524,84 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
 	memcpy(buf+31, cdb, cdb_len);
 }
 
+/**
+ *	pdc_fill_sg - Fill PCI IDE PRD table
+ *	@qc: Metadata associated with taskfile to be transferred
+ *
+ *	Fill PCI IDE PRD (scatter-gather) table with segments
+ *	associated with the current disk command.
+ *	Make sure hardware does not choke on it.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ */
+static void pdc_fill_sg(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scatterlist *sg;
+	unsigned int idx;
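+	/* ASIC PRD bug workaround: a final PRD entry larger than
+	 * 41 dwords (164 bytes) is split in two below.
+	 */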
+	const u32 SG_COUNT_ASIC_BUG = 41*4;
+
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
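+	/* a DMA-mapped command must carry an sg list and/or a pad buffer */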
+	WARN_ON(qc->__sg == NULL);
+	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
+
+	idx = 0;
+	ata_for_each_sg(sg, qc) {
+		u32 addr, offset;
+		u32 sg_len, len;
+
+		/* determine if physical DMA addr spans 64K boundary.
+		 * Note h/w doesn't support 64-bit, so we unconditionally
+		 * truncate dma_addr_t to u32.
+		 */
+		addr = (u32) sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
+
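+		/* walk the segment, starting a new PRD entry whenever
+		 * a 64K boundary would be crossed */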
+		while (sg_len) {
+			offset = addr & 0xffff;
+			len = sg_len;
+			if ((offset + sg_len) > 0x10000)
+				len = 0x10000 - offset;
+
+			ap->prd[idx].addr = cpu_to_le32(addr);
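+			/* in the PRD, a length field of zero encodes a full 64K */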
+			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+
+			idx++;
+			sg_len -= len;
+			addr += len;
+		}
+	}
+
+	if (idx) {
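+		/* fix up the final entry: split it if it is large
+		 * enough to trigger the ASIC bug */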
+		u32 len = le32_to_cpu(ap->prd[idx - 1].flags_len);
+
+		if (len > SG_COUNT_ASIC_BUG) {
+			u32 addr;
+
+			VPRINTK("Splitting last PRD.\n");
+
+			addr = le32_to_cpu(ap->prd[idx - 1].addr);
+			ap->prd[idx - 1].flags_len -= cpu_to_le32(SG_COUNT_ASIC_BUG);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
+
+			addr = addr + len - SG_COUNT_ASIC_BUG;
+			len = SG_COUNT_ASIC_BUG;
+			ap->prd[idx].addr = cpu_to_le32(addr);
+			ap->prd[idx].flags_len = cpu_to_le32(len);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+
+			idx++;
+		}
+
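+		/* mark the (possibly newly created) final entry as end-of-table */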
+		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+	}
+}
+
 static void pdc_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct pdc_port_priv *pp = qc->ap->private_data;
@@ -532,7 +611,7 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
 
 	switch (qc->tf.protocol) {
 	case ATA_PROT_DMA:
-		ata_qc_prep(qc);
+		pdc_fill_sg(qc);
 		/* fall through */
 
 	case ATA_PROT_NODATA:
@@ -548,11 +627,11 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
 		break;
 
 	case ATA_PROT_ATAPI:
-		ata_qc_prep(qc);
+		pdc_fill_sg(qc);
 		break;
 
 	case ATA_PROT_ATAPI_DMA:
-		ata_qc_prep(qc);
+		pdc_fill_sg(qc);
 		/*FALLTHROUGH*/
 	case ATA_PROT_ATAPI_NODATA:
 		pdc_atapi_pkt(qc);