Эх сурвалжийг харах

Merge master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

Linus Torvalds 19 жил өмнө
parent
commit
d90d4392b3

+ 3 - 2
drivers/infiniband/core/user_mad.c

@@ -334,10 +334,11 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 			ret = -EINVAL;
 			goto err_ah;
 		}
-		/* Validate that management class can support RMPP */
+
+		/* Validate that the management class can support RMPP */
 		if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
 			hdr_len = offsetof(struct ib_sa_mad, data);
-			data_len = length;
+			data_len = length - hdr_len;
 		} else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
 			    (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) {
 				hdr_len = offsetof(struct ib_vendor_mad, data);

+ 5 - 11
drivers/infiniband/hw/mthca/mthca_eq.c

@@ -476,12 +476,8 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 	int i;
 	u8 status;
 
-	/* Make sure EQ size is aligned to a power of 2 size. */
-	for (i = 1; i < nent; i <<= 1)
-		; /* nothing */
-	nent = i;
-
-	eq->dev = dev;
+	eq->dev  = dev;
+	eq->nent = roundup_pow_of_two(max(nent, 2));
 
 	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
 				GFP_KERNEL);
@@ -512,7 +508,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
 	}
 
-	for (i = 0; i < nent; ++i)
+	for (i = 0; i < eq->nent; ++i)
 		set_eqe_hw(get_eqe(eq, i));
 
 	eq->eqn = mthca_alloc(&dev->eq_table.alloc);
@@ -528,8 +524,6 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 	if (err)
 		goto err_out_free_eq;
 
-	eq->nent = nent;
-
 	memset(eq_context, 0, sizeof *eq_context);
 	eq_context->flags           = cpu_to_be32(MTHCA_EQ_STATUS_OK   |
 						  MTHCA_EQ_OWNER_HW    |
@@ -538,7 +532,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 	if (mthca_is_memfree(dev))
 		eq_context->flags  |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);
 
-	eq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
+	eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
 	if (mthca_is_memfree(dev)) {
 		eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
 	} else {
@@ -569,7 +563,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 	dev->eq_table.arm_mask |= eq->eqn_mask;
 
 	mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
-		  eq->eqn, nent);
+		  eq->eqn, eq->nent);
 
 	return err;
 

+ 24 - 27
drivers/infiniband/hw/mthca/mthca_qp.c

@@ -227,7 +227,6 @@ static void mthca_wq_init(struct mthca_wq *wq)
 	wq->last_comp = wq->max - 1;
 	wq->head      = 0;
 	wq->tail      = 0;
-	wq->last      = NULL;
 }
 
 void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
@@ -687,7 +686,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	}
 
 	if (attr_mask & IB_QP_TIMEOUT) {
-		qp_context->pri_path.ackto = attr->timeout;
+		qp_context->pri_path.ackto = attr->timeout << 3;
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
 	}
 
@@ -1103,6 +1102,9 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 		}
 	}
 
+	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
+	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
+
 	return 0;
 }
 
@@ -1583,15 +1585,13 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			goto out;
 		}
 
-		if (prev_wqe) {
-			((struct mthca_next_seg *) prev_wqe)->nda_op =
-				cpu_to_be32(((ind << qp->sq.wqe_shift) +
-					     qp->send_wqe_offset) |
-					    mthca_opcode[wr->opcode]);
-			wmb();
-			((struct mthca_next_seg *) prev_wqe)->ee_nds =
-				cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
-		}
+		((struct mthca_next_seg *) prev_wqe)->nda_op =
+			cpu_to_be32(((ind << qp->sq.wqe_shift) +
+				     qp->send_wqe_offset) |
+				    mthca_opcode[wr->opcode]);
+		wmb();
+		((struct mthca_next_seg *) prev_wqe)->ee_nds =
+			cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
 
 		if (!size0) {
 			size0 = size;
@@ -1688,13 +1688,11 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
 		qp->wrid[ind] = wr->wr_id;
 
-		if (likely(prev_wqe)) {
-			((struct mthca_next_seg *) prev_wqe)->nda_op =
-				cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
-			wmb();
-			((struct mthca_next_seg *) prev_wqe)->ee_nds =
-				cpu_to_be32(MTHCA_NEXT_DBD | size);
-		}
+		((struct mthca_next_seg *) prev_wqe)->nda_op =
+			cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
+		wmb();
+		((struct mthca_next_seg *) prev_wqe)->ee_nds =
+			cpu_to_be32(MTHCA_NEXT_DBD | size);
 
 		if (!size0)
 			size0 = size;
@@ -1905,15 +1903,13 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			goto out;
 		}
 
-		if (likely(prev_wqe)) {
-			((struct mthca_next_seg *) prev_wqe)->nda_op =
-				cpu_to_be32(((ind << qp->sq.wqe_shift) +
-					     qp->send_wqe_offset) |
-					    mthca_opcode[wr->opcode]);
-			wmb();
-			((struct mthca_next_seg *) prev_wqe)->ee_nds =
-				cpu_to_be32(MTHCA_NEXT_DBD | size);
-		}
+		((struct mthca_next_seg *) prev_wqe)->nda_op =
+			cpu_to_be32(((ind << qp->sq.wqe_shift) +
+				     qp->send_wqe_offset) |
+				    mthca_opcode[wr->opcode]);
+		wmb();
+		((struct mthca_next_seg *) prev_wqe)->ee_nds =
+			cpu_to_be32(MTHCA_NEXT_DBD | size);
 
 		if (!size0) {
 			size0 = size;
@@ -2127,5 +2123,6 @@ void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
 	for (i = 0; i < 2; ++i)
 		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);
 
+	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
 	mthca_alloc_cleanup(&dev->qp_table.alloc);
 }

+ 11 - 14
drivers/infiniband/hw/mthca/mthca_srq.c

@@ -172,6 +172,8 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
 			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
 	}
 
+	srq->last = get_wqe(srq, srq->max - 1);
+
 	return 0;
 }
 
@@ -189,7 +191,6 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 
 	srq->max      = attr->max_wr;
 	srq->max_gs   = attr->max_sge;
-	srq->last     = NULL;
 	srq->counter  = 0;
 
 	if (mthca_is_memfree(dev))
@@ -409,7 +410,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
 			err = -ENOMEM;
 			*bad_wr = wr;
-			return nreq;
+			break;
 		}
 
 		wqe       = get_wqe(srq, ind);
@@ -427,7 +428,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			err = -EINVAL;
 			*bad_wr = wr;
 			srq->last = prev_wqe;
-			return nreq;
+			break;
 		}
 
 		for (i = 0; i < wr->num_sge; ++i) {
@@ -446,20 +447,16 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			((struct mthca_data_seg *) wqe)->addr = 0;
 		}
 
-		if (likely(prev_wqe)) {
-			((struct mthca_next_seg *) prev_wqe)->nda_op =
-				cpu_to_be32((ind << srq->wqe_shift) | 1);
-			wmb();
-			((struct mthca_next_seg *) prev_wqe)->ee_nds =
-				cpu_to_be32(MTHCA_NEXT_DBD);
-		}
+		((struct mthca_next_seg *) prev_wqe)->nda_op =
+			cpu_to_be32((ind << srq->wqe_shift) | 1);
+		wmb();
+		((struct mthca_next_seg *) prev_wqe)->ee_nds =
+			cpu_to_be32(MTHCA_NEXT_DBD);
 
 		srq->wrid[ind]  = wr->wr_id;
 		srq->first_free = next_ind;
 	}
 
-	return nreq;
-
 	if (likely(nreq)) {
 		__be32 doorbell[2];
 
@@ -503,7 +500,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
 			err = -ENOMEM;
 			*bad_wr = wr;
-			return nreq;
+			break;
 		}
 
 		wqe       = get_wqe(srq, ind);
@@ -519,7 +516,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 		if (unlikely(wr->num_sge > srq->max_gs)) {
 			err = -EINVAL;
 			*bad_wr = wr;
-			return nreq;
+			break;
 		}
 
 		for (i = 0; i < wr->num_sge; ++i) {

+ 1 - 1
drivers/infiniband/ulp/ipoib/ipoib.h

@@ -257,7 +257,7 @@ void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
 
 
 void ipoib_mcast_restart_task(void *dev_ptr);
 int ipoib_mcast_start_thread(struct net_device *dev);
-int ipoib_mcast_stop_thread(struct net_device *dev);
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
 
 void ipoib_mcast_dev_down(struct net_device *dev);
 void ipoib_mcast_dev_flush(struct net_device *dev);

+ 2 - 2
drivers/infiniband/ulp/ipoib/ipoib_ib.c

@@ -432,7 +432,7 @@ int ipoib_ib_dev_down(struct net_device *dev)
 		flush_workqueue(ipoib_workqueue);
 	}
 
-	ipoib_mcast_stop_thread(dev);
+	ipoib_mcast_stop_thread(dev, 1);
 
 	/*
 	 * Flush the multicast groups first so we stop any multicast joins. The
@@ -599,7 +599,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
 
 	ipoib_dbg(priv, "cleaning up ib_dev\n");
 
-	ipoib_mcast_stop_thread(dev);
+	ipoib_mcast_stop_thread(dev, 1);
 
 	/* Delete the broadcast address and the local address */
 	ipoib_mcast_dev_down(dev);

+ 2 - 0
drivers/infiniband/ulp/ipoib/ipoib_main.c

@@ -1005,6 +1005,7 @@ debug_failed:
 
 
 register_failed:
 	ib_unregister_event_handler(&priv->event_handler);
+	flush_scheduled_work();
 
 event_failed:
 	ipoib_dev_cleanup(priv->dev);
@@ -1057,6 +1058,7 @@ static void ipoib_remove_one(struct ib_device *device)
 
 	list_for_each_entry_safe(priv, tmp, dev_list, list) {
 		ib_unregister_event_handler(&priv->event_handler);
+		flush_scheduled_work();
 
 		unregister_netdev(priv->dev);
 		ipoib_dev_cleanup(priv->dev);

+ 7 - 6
drivers/infiniband/ulp/ipoib/ipoib_multicast.c

@@ -145,7 +145,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
 
 
 	mcast->dev = dev;
 	mcast->created = jiffies;
-	mcast->backoff = HZ;
+	mcast->backoff = 1;
 	mcast->logcount = 0;
 
 	INIT_LIST_HEAD(&mcast->list);
@@ -396,7 +396,7 @@ static void ipoib_mcast_join_complete(int status,
 			IPOIB_GID_ARG(mcast->mcmember.mgid), status);
 
 	if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) {
-		mcast->backoff = HZ;
+		mcast->backoff = 1;
 		down(&mcast_mutex);
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
 			queue_work(ipoib_workqueue, &priv->mcast_task);
@@ -496,7 +496,7 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
 			queue_delayed_work(ipoib_workqueue,
 					   &priv->mcast_task,
-					   mcast->backoff);
+					   mcast->backoff * HZ);
 		up(&mcast_mutex);
 	} else
 		mcast->query_id = ret;
@@ -598,7 +598,7 @@ int ipoib_mcast_start_thread(struct net_device *dev)
 	return 0;
 }
 
-int ipoib_mcast_stop_thread(struct net_device *dev)
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_mcast *mcast;
@@ -610,7 +610,8 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
 	cancel_delayed_work(&priv->mcast_task);
 	up(&mcast_mutex);
 
-	flush_workqueue(ipoib_workqueue);
+	if (flush)
+		flush_workqueue(ipoib_workqueue);
 
 	if (priv->broadcast && priv->broadcast->query) {
 		ib_sa_cancel_query(priv->broadcast->query_id, priv->broadcast->query);
@@ -832,7 +833,7 @@ void ipoib_mcast_restart_task(void *dev_ptr)
 
 	ipoib_dbg_mcast(priv, "restarting multicast task\n");
 
-	ipoib_mcast_stop_thread(dev);
+	ipoib_mcast_stop_thread(dev, 0);
 
 	spin_lock_irqsave(&priv->lock, flags);