@@ -26,6 +26,13 @@
 #include "dma.h"
 #include "soc.h"
 
+/*
+ * dma register field offset calculation
+ */
+#define DMA64REGOFFS(field) offsetof(struct dma64regs, field)
+#define DMA64TXREGOFFS(di, field) (di->d64txregbase + DMA64REGOFFS(field))
+#define DMA64RXREGOFFS(di, field) (di->d64rxregbase + DMA64REGOFFS(field))
+
 /*
  * DMA hardware requires each descriptor ring to be 8kB aligned, and fit within
  * a contiguous 8kB physical address.
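
The macros above turn struct-relative register fields into plain offsets for the
bcma accessors. A minimal standalone sketch of the offsetof() idiom, using a
hypothetical register layout and helper (not the driver's own):

#include <stddef.h> /* offsetof */
#include <stdint.h>

/* hypothetical register block; field order fixes each register's offset */
struct regs {
        uint32_t control;       /* 0x0 */
        uint32_t ptr;           /* 0x4 */
        uint32_t addrlow;       /* 0x8 */
        uint32_t addrhigh;      /* 0xc */
};

#define REGOFFS(field) offsetof(struct regs, field)

/* base + REGOFFS(field) names the same register the old code reached via
 * &regs->field, but as an integer offset a bus accessor can consume,
 * e.g. read32(core, reg_offset(txbase, REGOFFS(control)))
 */
static inline uint32_t reg_offset(uint32_t base, uint32_t field_offs)
{
        return base + field_offs;
}
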
@@ -220,15 +227,16 @@ struct dma_info {
         uint *msg_level;        /* message level pointer */
         char name[MAXNAMEL];    /* callers name for diag msgs */
 
-        struct pci_dev *pbus;   /* bus handle */
+        struct bcma_device *core;
+        struct device *dmadev;
 
         bool dma64;     /* this dma engine is operating in 64-bit mode */
         bool addrext;   /* this dma engine supports DmaExtendedAddrChanges */
 
         /* 64-bit dma tx engine registers */
-        struct dma64regs __iomem *d64txregs;
+        uint d64txregbase;
         /* 64-bit dma rx engine registers */
-        struct dma64regs __iomem *d64rxregs;
+        uint d64rxregbase;
         /* pointer to dma64 tx descriptor ring */
         struct dma64desc *txd64;
         /* pointer to dma64 rx descriptor ring */
@@ -375,15 +383,16 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
         if (dmactrlflags & DMA_CTRL_PEN) {
                 u32 control;
 
-                control = R_REG(&di->d64txregs->control);
-                W_REG(&di->d64txregs->control,
+                control = bcma_read32(di->core, DMA64TXREGOFFS(di, control));
+                bcma_write32(di->core, DMA64TXREGOFFS(di, control),
                       control | D64_XC_PD);
-                if (R_REG(&di->d64txregs->control) & D64_XC_PD)
+                if (bcma_read32(di->core, DMA64TXREGOFFS(di, control)) &
+                    D64_XC_PD)
                         /* We *can* disable it so it is supported,
                          * restore control register
                          */
-                        W_REG(&di->d64txregs->control,
-                              control);
+                        bcma_write32(di->core, DMA64TXREGOFFS(di, control),
+                                     control);
                 else
                         /* Not supported, don't allow it to be enabled */
                         dmactrlflags &= ~DMA_CTRL_PEN;
@@ -394,12 +403,12 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
         return dmactrlflags;
 }
 
-static bool _dma64_addrext(struct dma64regs __iomem *dma64regs)
+static bool _dma64_addrext(struct dma_info *di, uint ctrl_offset)
 {
         u32 w;
-        OR_REG(&dma64regs->control, D64_XC_AE);
-        w = R_REG(&dma64regs->control);
-        AND_REG(&dma64regs->control, ~D64_XC_AE);
+        bcma_set32(di->core, ctrl_offset, D64_XC_AE);
+        w = bcma_read32(di->core, ctrl_offset);
+        bcma_mask32(di->core, ctrl_offset, ~D64_XC_AE);
         return (w & D64_XC_AE) == D64_XC_AE;
 }
 
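
_dma64_addrext() is a write/read-back/restore probe: if the AE bit sticks, the
hardware implements address extension. The same pattern in isolation, assuming
hypothetical mmio_read32()/mmio_write32() accessors in place of the bcma ones:

#include <stdbool.h>
#include <stdint.h>

extern uint32_t mmio_read32(uint32_t offs);
extern void mmio_write32(uint32_t offs, uint32_t val);

/* probe whether 'bit' is implemented in the register at 'offs':
 * unimplemented (read-only-zero) bits drop the write, so the
 * read-back reveals the capability; the bit is cleared afterwards
 */
static bool reg_bit_implemented(uint32_t offs, uint32_t bit)
{
        uint32_t w;

        mmio_write32(offs, mmio_read32(offs) | bit);    /* try to set */
        w = mmio_read32(offs);                          /* read back */
        mmio_write32(offs, w & ~bit);                   /* restore */

        return (w & bit) == bit;
}
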
@@ -412,13 +421,13 @@ static bool _dma_isaddrext(struct dma_info *di)
         /* DMA64 supports full 32- or 64-bit operation. AE is always valid */
 
         /* not all tx or rx channel are available */
-        if (di->d64txregs != NULL) {
-                if (!_dma64_addrext(di->d64txregs))
+        if (di->d64txregbase != 0) {
+                if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control)))
                         DMA_ERROR("%s: DMA64 tx doesn't have AE set\n",
                                   di->name);
                 return true;
-        } else if (di->d64rxregs != NULL) {
-                if (!_dma64_addrext(di->d64rxregs))
+        } else if (di->d64rxregbase != 0) {
+                if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control)))
                         DMA_ERROR("%s: DMA64 rx doesn't have AE set\n",
                                   di->name);
                 return true;
@@ -432,14 +441,14 @@ static bool _dma_descriptor_align(struct dma_info *di)
         u32 addrl;
 
         /* Check to see if the descriptors need to be aligned on 4K/8K or not */
-        if (di->d64txregs != NULL) {
-                W_REG(&di->d64txregs->addrlow, 0xff0);
-                addrl = R_REG(&di->d64txregs->addrlow);
+        if (di->d64txregbase != 0) {
+                bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), 0xff0);
+                addrl = bcma_read32(di->core, DMA64TXREGOFFS(di, addrlow));
                 if (addrl != 0)
                         return false;
-        } else if (di->d64rxregs != NULL) {
-                W_REG(&di->d64rxregs->addrlow, 0xff0);
-                addrl = R_REG(&di->d64rxregs->addrlow);
+        } else if (di->d64rxregbase != 0) {
+                bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), 0xff0);
+                addrl = bcma_read32(di->core, DMA64RXREGOFFS(di, addrlow));
                 if (addrl != 0)
                         return false;
         }
@@ -450,7 +459,7 @@ static bool _dma_descriptor_align(struct dma_info *di)
  * Descriptor table must start at the DMA hardware dictated alignment, so
  * allocated memory must be large enough to support this requirement.
  */
-static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
+static void *dma_alloc_consistent(struct dma_info *di, uint size,
                                   u16 align_bits, uint *alloced,
                                   dma_addr_t *pap)
 {
@@ -460,7 +469,7 @@ static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
                 size += align;
                 *alloced = size;
         }
-        return pci_alloc_consistent(pdev, size, pap);
+        return dma_alloc_coherent(di->dmadev, size, pap, GFP_ATOMIC);
 }
 
 static
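
dma_alloc_consistent() pads the request by the alignment amount so a misaligned
allocation can still be rounded up inside the buffer. A user-space sketch of the
same round-up arithmetic (malloc() standing in for dma_alloc_coherent(), and
alloc_aligned() a hypothetical helper):

#include <stdint.h>
#include <stdlib.h>

/* allocate at least 'size' bytes that can start on a (1 << align_bits)
 * boundary; '*raw' keeps the pointer needed for free()
 */
static void *alloc_aligned(size_t size, unsigned int align_bits, void **raw)
{
        uintptr_t align = (uintptr_t)1 << align_bits;
        uintptr_t addr;

        *raw = malloc(size + align);    /* worst-case padding, as in size += align */
        if (*raw == NULL)
                return NULL;

        /* round up: (addr + align - 1) & ~(align - 1) */
        addr = ((uintptr_t)*raw + align - 1) & ~(align - 1);
        return (void *)addr;
}
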
@@ -486,7 +495,7 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
         u32 desc_strtaddr;
         u32 alignbytes = 1 << *alignbits;
 
-        va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
+        va = dma_alloc_consistent(di, size, *alignbits, alloced, descpa);
 
         if (NULL == va)
                 return NULL;
@@ -495,8 +504,8 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
         if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
                                                         & boundary)) {
                 *alignbits = dma_align_sizetobits(size);
-                pci_free_consistent(di->pbus, size, va, *descpa);
-                va = dma_alloc_consistent(di->pbus, size, *alignbits,
+                dma_free_coherent(di->dmadev, size, va, *descpa);
+                va = dma_alloc_consistent(di, size, *alignbits,
                                           alloced, descpa);
         }
         return va;
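
The retry above fires when the ring straddles the hardware window. Assuming an
8 KiB window (per the alignment comment near the top of the file) and a
boundary mask of 0x2000, the masked compare looks only at address bit 13, which
flips every 8 KiB; for a ring no larger than 8 KiB, start and last byte sharing
the same bit-13 value means no boundary was crossed. As a standalone predicate:

#include <stdbool.h>
#include <stdint.h>

/* true if [start, start + size) crosses the boundary selected by the mask,
 * e.g. boundary_mask == 0x2000 for 8 KiB windows and size <= 8 KiB
 */
static bool crosses_boundary(uint32_t start, uint32_t size,
                             uint32_t boundary_mask)
{
        return ((start + size - 1) & boundary_mask) !=
               (start & boundary_mask);
}
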
@@ -556,12 +565,13 @@ static bool _dma_alloc(struct dma_info *di, uint direction)
 }
 
 struct dma_pub *dma_attach(char *name, struct si_pub *sih,
-                           void __iomem *dmaregstx, void __iomem *dmaregsrx,
-                           uint ntxd, uint nrxd,
-                           uint rxbufsize, int rxextheadroom,
-                           uint nrxpost, uint rxoffset, uint *msg_level)
+                           struct bcma_device *core,
+                           uint txregbase, uint rxregbase, uint ntxd, uint nrxd,
+                           uint rxbufsize, int rxextheadroom,
+                           uint nrxpost, uint rxoffset, uint *msg_level)
 {
         struct dma_info *di;
+        u8 rev = core->id.rev;
         uint size;
 
         /* allocate private info structure */
@@ -572,11 +582,13 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
         di->msg_level = msg_level ? msg_level : &dma_msg_level;
 
 
-        di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
+        di->dma64 =
+                ((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64);
 
-        /* init dma reg pointer */
-        di->d64txregs = (struct dma64regs __iomem *) dmaregstx;
-        di->d64rxregs = (struct dma64regs __iomem *) dmaregsrx;
+        /* init dma reg info */
+        di->core = core;
+        di->d64txregbase = txregbase;
+        di->d64rxregbase = rxregbase;
 
         /*
          * Default flags (which can be changed by the driver calling
@@ -585,16 +597,17 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
          */
         _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
 
-        DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n",
-                  name, "DMA64",
+        DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d "
+                  "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
+                  "txregbase %u rxregbase %u\n", name, "DMA64",
                   di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
-                  rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx);
+                  rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);
 
         /* make a private copy of our callers name */
         strncpy(di->name, name, MAXNAMEL);
         di->name[MAXNAMEL - 1] = '\0';
 
-        di->pbus = ((struct si_info *)sih)->pbus;
+        di->dmadev = core->dma_dev;
 
         /* save tunables */
         di->ntxd = (u16) ntxd;
@@ -626,11 +639,11 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
         di->dataoffsetlow = di->ddoffsetlow;
         di->dataoffsethigh = di->ddoffsethigh;
         /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
-        if ((ai_coreid(sih) == SDIOD_CORE_ID)
-            && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
+        if ((core->id.id == SDIOD_CORE_ID)
+            && ((rev > 0) && (rev <= 2)))
                 di->addrext = 0;
-        else if ((ai_coreid(sih) == I2S_CORE_ID) &&
-                 ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
+        else if ((core->id.id == I2S_CORE_ID) &&
+                 ((rev == 0) || (rev == 1)))
                 di->addrext = 0;
         else
                 di->addrext = _dma_isaddrext(di);
@@ -749,13 +762,13 @@ void dma_detach(struct dma_pub *pub)
 
         /* free dma descriptor rings */
         if (di->txd64)
-                pci_free_consistent(di->pbus, di->txdalloc,
-                                    ((s8 *)di->txd64 - di->txdalign),
-                                    (di->txdpaorig));
+                dma_free_coherent(di->dmadev, di->txdalloc,
+                                  ((s8 *)di->txd64 - di->txdalign),
+                                  (di->txdpaorig));
         if (di->rxd64)
-                pci_free_consistent(di->pbus, di->rxdalloc,
-                                    ((s8 *)di->rxd64 - di->rxdalign),
-                                    (di->rxdpaorig));
+                dma_free_coherent(di->dmadev, di->rxdalloc,
+                                  ((s8 *)di->rxd64 - di->rxdalign),
+                                  (di->rxdpaorig));
 
         /* free packet pointer vectors */
         kfree(di->txp);
@@ -780,11 +793,15 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
         if ((di->ddoffsetlow == 0)
             || !(pa & PCI32ADDR_HIGH)) {
                 if (direction == DMA_TX) {
-                        W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
-                        W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
+                        bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
+                                     pa + di->ddoffsetlow);
+                        bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
+                                     di->ddoffsethigh);
                 } else {
-                        W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
-                        W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
+                        bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
+                                     pa + di->ddoffsetlow);
+                        bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
+                                     di->ddoffsethigh);
                 }
         } else {
                 /* DMA64 32bits address extension */
@@ -795,15 +812,19 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
                 pa &= ~PCI32ADDR_HIGH;
 
                 if (direction == DMA_TX) {
-                        W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
-                        W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
-                        SET_REG(&di->d64txregs->control,
-                                D64_XC_AE, (ae << D64_XC_AE_SHIFT));
+                        bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
+                                     pa + di->ddoffsetlow);
+                        bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
+                                     di->ddoffsethigh);
+                        bcma_maskset32(di->core, DMA64TXREGOFFS(di, control),
+                                       D64_XC_AE, (ae << D64_XC_AE_SHIFT));
                 } else {
-                        W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
-                        W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
-                        SET_REG(&di->d64rxregs->control,
-                                D64_RC_AE, (ae << D64_RC_AE_SHIFT));
+                        bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
+                                     pa + di->ddoffsetlow);
+                        bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
+                                     di->ddoffsethigh);
+                        bcma_maskset32(di->core, DMA64RXREGOFFS(di, control),
+                                       D64_RC_AE, (ae << D64_RC_AE_SHIFT));
                 }
         }
 }
@@ -815,9 +836,9 @@ static void _dma_rxenable(struct dma_info *di)
 
         DMA_TRACE("%s:\n", di->name);
 
-        control =
-            (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
-            D64_RC_RE;
+        control = D64_RC_RE | (bcma_read32(di->core,
+                                           DMA64RXREGOFFS(di, control)) &
+                               D64_RC_AE);
 
         if ((dmactrlflags & DMA_CTRL_PEN) == 0)
                 control |= D64_RC_PD;
@@ -825,7 +846,7 @@ static void _dma_rxenable(struct dma_info *di)
         if (dmactrlflags & DMA_CTRL_ROC)
                 control |= D64_RC_OC;
 
-        W_REG(&di->d64rxregs->control,
+        bcma_write32(di->core, DMA64RXREGOFFS(di, control),
                 ((di->rxoffset << D64_RC_RO_SHIFT) | control));
 }
 
@@ -868,7 +889,8 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
                 return NULL;
 
         curr =
-            B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
+            B2I(((bcma_read32(di->core,
+                              DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) -
                  di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);
 
         /* ignore curr if forceall */
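
status0 reports the current descriptor as a byte offset into the ring, so B2I()
has to subtract the ring base and divide by the descriptor size. A sketch of
that conversion, assuming 16-byte dma64 descriptors and a 0x1fff
current-descriptor mask (both assumptions about the hardware layout):

#include <stdint.h>

#define CD_MASK         0x1fffu /* assumed current-descriptor byte mask */
#define DESC_SIZE       16u     /* assumed sizeof(struct dma64desc) */

/* status-register byte offset -> descriptor index, mirroring
 * B2I(((status0 & CD_MASK) - rcvptrbase) & CD_MASK, struct dma64desc)
 */
static uint16_t curr_desc_index(uint32_t status0, uint32_t ringbase)
{
        uint32_t byte_offs = ((status0 & CD_MASK) - ringbase) & CD_MASK;

        return (uint16_t)(byte_offs / DESC_SIZE);
}
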
@@ -882,7 +904,7 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
         pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;
 
         /* clear this packet from the descriptor ring */
-        pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);
+        dma_unmap_single(di->dmadev, pa, di->rxbufsize, DMA_FROM_DEVICE);
 
         di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef);
         di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef);
@@ -950,12 +972,12 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
         if (resid > 0) {
                 uint cur;
                 cur =
-                    B2I(((R_REG(&di->d64rxregs->status0) &
-                          D64_RS0_CD_MASK) -
-                         di->rcvptrbase) & D64_RS0_CD_MASK,
-                        struct dma64desc);
+                    B2I(((bcma_read32(di->core,
+                                      DMA64RXREGOFFS(di, status0)) &
+                          D64_RS0_CD_MASK) - di->rcvptrbase) &
+                         D64_RS0_CD_MASK, struct dma64desc);
                 DMA_ERROR("rxin %d rxout %d, hw_curr %d\n",
-                        di->rxin, di->rxout, cur);
+                          di->rxin, di->rxout, cur);
         }
 #endif /* BCMDBG */
 
@@ -983,8 +1005,10 @@ static bool dma64_rxidle(struct dma_info *di)
         if (di->nrxd == 0)
                 return true;
 
-        return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
-                (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
+        return ((bcma_read32(di->core,
+                             DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) ==
+                (bcma_read32(di->core, DMA64RXREGOFFS(di, ptr)) &
+                 D64_RS0_CD_MASK));
 }
 
 /*
@@ -1048,8 +1072,8 @@ bool dma_rxfill(struct dma_pub *pub)
                  */
                 *(u32 *) (p->data) = 0;
 
-                pa = pci_map_single(di->pbus, p->data,
-                                    di->rxbufsize, PCI_DMA_FROMDEVICE);
+                pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
+                                    DMA_FROM_DEVICE);
 
                 /* save the free packet pointer */
                 di->rxp[rxout] = p;
@@ -1067,7 +1091,7 @@ bool dma_rxfill(struct dma_pub *pub)
         di->rxout = rxout;
 
         /* update the chip lastdscr pointer */
-        W_REG(&di->d64rxregs->ptr,
+        bcma_write32(di->core, DMA64RXREGOFFS(di, ptr),
               di->rcvptrbase + I2B(rxout, struct dma64desc));
 
         return ring_empty;
@@ -1128,7 +1152,7 @@ void dma_txinit(struct dma_pub *pub)
 
         if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
                 control |= D64_XC_PD;
-        OR_REG(&di->d64txregs->control, control);
+        bcma_set32(di->core, DMA64TXREGOFFS(di, control), control);
 
         /* DMA engine with alignment requirement requires table to be inited
          * before enabling the engine
@@ -1146,7 +1170,7 @@ void dma_txsuspend(struct dma_pub *pub)
         if (di->ntxd == 0)
                 return;
 
-        OR_REG(&di->d64txregs->control, D64_XC_SE);
+        bcma_set32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
 }
 
 void dma_txresume(struct dma_pub *pub)
@@ -1158,7 +1182,7 @@ void dma_txresume(struct dma_pub *pub)
         if (di->ntxd == 0)
                 return;
 
-        AND_REG(&di->d64txregs->control, ~D64_XC_SE);
+        bcma_mask32(di->core, DMA64TXREGOFFS(di, control), ~D64_XC_SE);
 }
 
 bool dma_txsuspended(struct dma_pub *pub)
@@ -1166,8 +1190,9 @@ bool dma_txsuspended(struct dma_pub *pub)
         struct dma_info *di = (struct dma_info *)pub;
 
         return (di->ntxd == 0) ||
-               ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
-                D64_XC_SE);
+               ((bcma_read32(di->core,
+                             DMA64TXREGOFFS(di, control)) & D64_XC_SE) ==
+                D64_XC_SE);
 }
 
 void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
@@ -1200,16 +1225,17 @@ bool dma_txreset(struct dma_pub *pub)
                 return true;
 
         /* suspend tx DMA first */
-        W_REG(&di->d64txregs->control, D64_XC_SE);
+        bcma_write32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
         SPINWAIT(((status =
-                   (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
-                  != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
-                 && (status != D64_XS0_XS_STOPPED), 10000);
+                   (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
+                    D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED) &&
+                 (status != D64_XS0_XS_IDLE) && (status != D64_XS0_XS_STOPPED),
+                 10000);
 
-        W_REG(&di->d64txregs->control, 0);
+        bcma_write32(di->core, DMA64TXREGOFFS(di, control), 0);
         SPINWAIT(((status =
-                   (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
-                  != D64_XS0_XS_DISABLED), 10000);
+                   (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
+                    D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED), 10000);
 
         /* wait for the last transaction to complete */
         udelay(300);
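
SPINWAIT() keeps evaluating its condition until it goes false or the
microsecond budget runs out; dma_txreset() then checks the final status. A
plausible shape for such a macro, assuming a udelay()-style helper (a sketch of
the polling idiom, not the driver's exact definition):

/* poll 'exp' until it becomes false or roughly 'us' microseconds pass */
#define SPINWAIT_SKETCH(exp, us)                                \
        do {                                                    \
                unsigned int countdown = (us);                  \
                while ((exp) && countdown >= 10) {              \
                        udelay(10);     /* busy-wait 10 us */   \
                        countdown -= 10;                        \
                }                                               \
        } while (0)
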
@@ -1225,10 +1251,10 @@ bool dma_rxreset(struct dma_pub *pub)
         if (di->nrxd == 0)
                 return true;
 
-        W_REG(&di->d64rxregs->control, 0);
+        bcma_write32(di->core, DMA64RXREGOFFS(di, control), 0);
         SPINWAIT(((status =
-                   (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
-                  != D64_RS0_RS_DISABLED), 10000);
+                   (bcma_read32(di->core, DMA64RXREGOFFS(di, status0)) &
+                    D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED), 10000);
 
         return status == D64_RS0_RS_DISABLED;
 }
@@ -1267,7 +1293,7 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
                 goto outoftxd;
 
         /* get physical address of buffer start */
-        pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
+        pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
 
         /* With a DMA segment list, Descriptor table is filled
          * using the segment list instead of looping over
@@ -1290,7 +1316,7 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
 
         /* kick the chip */
         if (commit)
-                W_REG(&di->d64txregs->ptr,
+                bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
                       di->xmtptrbase + I2B(txout, struct dma64desc));
 
         /* tx flow control */
@@ -1338,16 +1364,15 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
         if (range == DMA_RANGE_ALL)
                 end = di->txout;
         else {
-                struct dma64regs __iomem *dregs = di->d64txregs;
-
-                end = (u16) (B2I(((R_REG(&dregs->status0) &
-                                   D64_XS0_CD_MASK) -
-                                  di->xmtptrbase) & D64_XS0_CD_MASK,
-                                 struct dma64desc));
+                end = (u16) (B2I(((bcma_read32(di->core,
+                                               DMA64TXREGOFFS(di, status0)) &
+                                   D64_XS0_CD_MASK) - di->xmtptrbase) &
+                                 D64_XS0_CD_MASK, struct dma64desc));
 
                 if (range == DMA_RANGE_TRANSFERED) {
                         active_desc =
-                                (u16) (R_REG(&dregs->status1) &
+                                (u16)(bcma_read32(di->core,
+                                                  DMA64TXREGOFFS(di, status1)) &
                                        D64_XS1_AD_MASK);
                         active_desc =
                                 (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
@@ -1376,7 +1401,7 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
                 txp = di->txp[i];
                 di->txp[i] = NULL;
 
-                pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
+                dma_unmap_single(di->dmadev, pa, size, DMA_TO_DEVICE);
         }
 
         di->txin = i;