@@ -274,7 +274,7 @@ void ipath_free_all_qps(struct ipath_qp_table *qpt)
 			free_qpn(qpt, qp->ibqp.qp_num);
 			if (!atomic_dec_and_test(&qp->refcount) ||
 			    !ipath_destroy_qp(&qp->ibqp))
-				ipath_dbg(KERN_INFO "QP memory leak!\n");
+				ipath_dbg("QP memory leak!\n");
 			qp = nqp;
 		}
 	}
@@ -369,7 +369,7 @@ void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
 	struct ib_wc wc;
 
-	ipath_dbg(KERN_INFO "QP%d/%d in error state\n",
+	ipath_dbg("QP%d/%d in error state\n",
 		  qp->ibqp.qp_num, qp->remote_qpn);
 
 	spin_lock(&dev->pending_lock);
@@ -980,7 +980,7 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
 	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
 
-	ipath_dbg(KERN_INFO "Send queue error on QP%d/%d: err: %d\n",
+	ipath_dbg("Send queue error on QP%d/%d: err: %d\n",
 		  qp->ibqp.qp_num, qp->remote_qpn, wc->status);
 
 	spin_lock(&dev->pending_lock);