@@ -703,7 +703,7 @@ static inline int post_send(struct ipoib_dev_priv *priv,
 void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	struct ipoib_tx_buf *tx_req;
+	struct ipoib_cm_tx_buf *tx_req;
 	u64 addr;
 
 	if (unlikely(skb->len > tx->mtu)) {
@@ -734,7 +734,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 		return;
 	}
 
-	tx_req->mapping[0] = addr;
+	tx_req->mapping = addr;
 
 	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
 			       addr, skb->len))) {
@@ -759,7 +759,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_cm_tx *tx = wc->qp->qp_context;
 	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
-	struct ipoib_tx_buf *tx_req;
+	struct ipoib_cm_tx_buf *tx_req;
 	unsigned long flags;
 
 	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
@@ -773,7 +773,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
 	tx_req = &tx->tx_ring[wr_id];
 
-	ib_dma_unmap_single(priv->ca, tx_req->mapping[0], tx_req->skb->len, DMA_TO_DEVICE);
+	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
 
 	/* FIXME: is this right? Shouldn't we only increment on success? */
 	++dev->stats.tx_packets;
@@ -1143,7 +1143,7 @@ err_tx:
 static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
-	struct ipoib_tx_buf *tx_req;
+	struct ipoib_cm_tx_buf *tx_req;
 	unsigned long flags;
 	unsigned long begin;
 
@@ -1171,7 +1171,7 @@ timeout:
 
 	while ((int) p->tx_tail - (int) p->tx_head < 0) {
 		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
-		ib_dma_unmap_single(priv->ca, tx_req->mapping[0], tx_req->skb->len,
+		ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
 				    DMA_TO_DEVICE);
 		dev_kfree_skb_any(tx_req->skb);
 		++p->tx_tail;
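
For context on the type change above: these hunks assume the connected-mode TX ring is declared as an array of struct ipoib_cm_tx_buf, which carries a single DMA address, instead of struct ipoib_tx_buf, whose mapping[] array covers the head plus each skb fragment. The actual definitions live in ipoib.h and are not part of these hunks; a minimal sketch of what the patch assumes:

	/* Sketch only -- assumed shape of the descriptors in ipoib.h,
	 * not taken from the hunks above.
	 */
	struct ipoib_cm_tx_buf {
		struct sk_buff *skb;
		u64		mapping;			/* single linear buffer */
	};

	struct ipoib_tx_buf {
		struct sk_buff *skb;
		u64		mapping[MAX_SKB_FRAGS + 1];	/* head + fragments */
	};

With a single mapping per descriptor, the send and completion paths can pass tx_req->mapping directly to ib_dma_unmap_single() rather than indexing mapping[0], which is what the -/+ lines in the hunks do.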