@@ -71,8 +71,6 @@ struct icside_state {
 	void __iomem *irq_port;
 	void __iomem *ioc_base;
 	unsigned int type;
-	/* parent device... until the IDE core gets one of its own */
-	struct device *dev;
 	ide_hwif_t *hwif[2];
 };
 
@@ -209,7 +207,6 @@ static void icside_maskproc(ide_drive_t *drive, int mask)
 static void icside_build_sglist(ide_drive_t *drive, struct request *rq)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	struct icside_state *state = hwif->hwif_data;
 	struct scatterlist *sg = hwif->sg_table;
 
 	ide_map_sg(drive, rq);
@@ -219,7 +216,7 @@ static void icside_build_sglist(ide_drive_t *drive, struct request *rq)
 	else
 		hwif->sg_dma_direction = DMA_TO_DEVICE;
 
-	hwif->sg_nents = dma_map_sg(state->dev, sg, hwif->sg_nents,
+	hwif->sg_nents = dma_map_sg(hwif->dev, sg, hwif->sg_nents,
				    hwif->sg_dma_direction);
 }
 
@@ -294,33 +291,33 @@ static void icside_dma_host_set(ide_drive_t *drive, int on)
 static int icside_dma_end(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = HWIF(drive);
-	struct icside_state *state = hwif->hwif_data;
+	struct expansion_card *ec = ECARD_DEV(hwif->dev);
 
 	drive->waiting_for_dma = 0;
 
-	disable_dma(ECARD_DEV(state->dev)->dma);
+	disable_dma(ec->dma);
 
 	/* Teardown mappings after DMA has completed. */
-	dma_unmap_sg(state->dev, hwif->sg_table, hwif->sg_nents,
+	dma_unmap_sg(hwif->dev, hwif->sg_table, hwif->sg_nents,
		     hwif->sg_dma_direction);
 
-	return get_dma_residue(ECARD_DEV(state->dev)->dma) != 0;
+	return get_dma_residue(ec->dma) != 0;
 }
 
 static void icside_dma_start(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = HWIF(drive);
-	struct icside_state *state = hwif->hwif_data;
+	struct expansion_card *ec = ECARD_DEV(hwif->dev);
 
 	/* We can not enable DMA on both channels simultaneously. */
-	BUG_ON(dma_channel_active(ECARD_DEV(state->dev)->dma));
-	enable_dma(ECARD_DEV(state->dev)->dma);
+	BUG_ON(dma_channel_active(ec->dma));
+	enable_dma(ec->dma);
 }
 
 static int icside_dma_setup(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = HWIF(drive);
-	struct icside_state *state = hwif->hwif_data;
+	struct expansion_card *ec = ECARD_DEV(hwif->dev);
 	struct request *rq = hwif->hwgroup->rq;
 	unsigned int dma_mode;
 
@@ -332,7 +329,7 @@ static int icside_dma_setup(ide_drive_t *drive)
 	/*
 	 * We can not enable DMA on both channels.
 	 */
-	BUG_ON(dma_channel_active(ECARD_DEV(state->dev)->dma));
+	BUG_ON(dma_channel_active(ec->dma));
 
 	icside_build_sglist(drive, rq);
 
@@ -349,14 +346,14 @@ static int icside_dma_setup(ide_drive_t *drive)
 	/*
 	 * Select the correct timing for this drive.
 	 */
-	set_dma_speed(ECARD_DEV(state->dev)->dma, drive->drive_data);
+	set_dma_speed(ec->dma, drive->drive_data);
 
 	/*
 	 * Tell the DMA engine about the SG table and
 	 * data direction.
 	 */
-	set_dma_sg(ECARD_DEV(state->dev)->dma, hwif->sg_table, hwif->sg_nents);
-	set_dma_mode(ECARD_DEV(state->dev)->dma, dma_mode);
+	set_dma_sg(ec->dma, hwif->sg_table, hwif->sg_nents);
+	set_dma_mode(ec->dma, dma_mode);
 
 	drive->waiting_for_dma = 1;
 
@@ -444,6 +441,7 @@ icside_setup(void __iomem *base, struct cardinfo *info, struct expansion_card *e
 		hwif->noprobe = 0;
 		hwif->chipset = ide_acorn;
 		hwif->gendev.parent = &ec->dev;
+		hwif->dev = &ec->dev;
 	}
 
 	return hwif;
@@ -591,7 +589,6 @@ icside_probe(struct expansion_card *ec, const struct ecard_id *id)
 	}
 
 	state->type = ICS_TYPE_NOTYPE;
-	state->dev = &ec->dev;
 
 	idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
 	if (idmem) {