
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (42 commits)
  IB/qib: Fix refcount leak in lkey/rkey validation
  IB/qib: Improve SERDES tuning on QMH boards
  IB/qib: Unnecessary delayed completions on RC connection
  IB/qib: Issue pre-emptive NAKs on eager buffer overflow
  IB/qib: RDMA lkey/rkey validation is inefficient for large MRs
  IB/qib: Change QPN increment
  IB/qib: Add fix missing from earlier patch
  IB/qib: Change receive queue/QPN selection
  IB/qib: Fix interrupt mitigation
  IB/qib: Avoid duplicate writes to the rcv head register
  IB/qib: Add a few new SERDES tunings
  IB/qib: Reset packet list after freeing
  IB/qib: New SERDES init routine and improvements to SI quality
  IB/qib: Clear WAIT_SEND flags when setting QP to error state
  IB/qib: Fix context allocation with multiple HCAs
  IB/qib: Fix multi-Florida HCA host panic on reboot
  IB/qib: Handle transitions from ACTIVE_DEFERRED to ACTIVE better
  IB/qib: UD send with immediate receive completion has wrong size
  IB/qib: Set port physical state even if other fields are invalid
  IB/qib: Generate completion callback on errors
  ...
Linus Torvalds, 14 years ago · commit f1d6d6cd90
37 changed files with 889 additions and 622 deletions
  1. drivers/infiniband/hw/cxgb3/cxio_hal.c (+2 -0)
  2. drivers/infiniband/hw/cxgb3/iwch_provider.h (+0 -2)
  3. drivers/infiniband/hw/cxgb3/iwch_qp.c (+0 -56)
  4. drivers/infiniband/hw/cxgb4/iw_cxgb4.h (+0 -1)
  5. drivers/infiniband/hw/cxgb4/qp.c (+0 -32)
  6. drivers/infiniband/hw/ipath/ipath_driver.c (+2 -3)
  7. drivers/infiniband/hw/mlx4/cq.c (+8 -1)
  8. drivers/infiniband/hw/mlx4/mad.c (+2 -0)
  9. drivers/infiniband/hw/mthca/mthca_mad.c (+2 -0)
  10. drivers/infiniband/hw/nes/nes_nic.c (+2 -2)
  11. drivers/infiniband/hw/qib/qib.h (+1 -1)
  12. drivers/infiniband/hw/qib/qib_cq.c (+2 -1)
  13. drivers/infiniband/hw/qib/qib_driver.c (+148 -7)
  14. drivers/infiniband/hw/qib/qib_file_ops.c (+5 -5)
  15. drivers/infiniband/hw/qib/qib_iba6120.c (+1 -1)
  16. drivers/infiniband/hw/qib/qib_iba7220.c (+2 -2)
  17. drivers/infiniband/hw/qib/qib_iba7322.c (+339 -34)
  18. drivers/infiniband/hw/qib/qib_init.c (+4 -2)
  19. drivers/infiniband/hw/qib/qib_intr.c (+2 -1)
  20. drivers/infiniband/hw/qib/qib_keys.c (+56 -24)
  21. drivers/infiniband/hw/qib/qib_mad.c (+25 -20)
  22. drivers/infiniband/hw/qib/qib_mr.c (+5 -3)
  23. drivers/infiniband/hw/qib/qib_qp.c (+15 -17)
  24. drivers/infiniband/hw/qib/qib_rc.c (+24 -0)
  25. drivers/infiniband/hw/qib/qib_ud.c (+25 -32)
  26. drivers/infiniband/hw/qib/qib_user_sdma.c (+1 -0)
  27. drivers/infiniband/hw/qib/qib_verbs.h (+3 -8)
  28. drivers/infiniband/ulp/ipoib/Kconfig (+0 -1)
  29. drivers/infiniband/ulp/ipoib/ipoib.h (+1 -11)
  30. drivers/infiniband/ulp/ipoib/ipoib_cm.c (+1 -0)
  31. drivers/infiniband/ulp/ipoib/ipoib_ethtool.c (+0 -51)
  32. drivers/infiniband/ulp/ipoib/ipoib_ib.c (+1 -7)
  33. drivers/infiniband/ulp/ipoib/ipoib_main.c (+1 -61)
  34. drivers/infiniband/ulp/srp/ib_srp.c (+181 -211)
  35. drivers/infiniband/ulp/srp/ib_srp.h (+25 -21)
  36. drivers/net/mlx4/alloc.c (+2 -1)
  37. drivers/net/mlx4/fw.c (+1 -3)

+ 2 - 0
drivers/infiniband/hw/cxgb3/cxio_hal.c

@@ -189,6 +189,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
 	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
 }
 
+#ifdef notyet
 int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
 {
 	struct rdma_cq_setup setup;
@@ -200,6 +201,7 @@ int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
 	setup.ovfl_mode = 1;
 	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
 }
+#endif
 
 static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
 {
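The #ifdef notyet guard above compiles cxio_resize_cq() out of the build while keeping the source in the tree, since nothing calls it yet. A minimal standalone sketch of the same idiom (names below are hypothetical, not from the cxgb3 driver):

#include <stdio.h>

/* "notyet" is never defined, so the function below is compiled out,
 * exactly like cxio_resize_cq above; defining it re-enables the code. */
#ifdef notyet
static int resize_backing_store(int new_entries)
{
	return new_entries > 0 ? 0 : -1;
}
#endif

int main(void)
{
#ifdef notyet
	printf("resize: %d\n", resize_backing_store(128));
#else
	printf("resize support compiled out\n");
#endif
	return 0;
}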

+ 0 - 2
drivers/infiniband/hw/cxgb3/iwch_provider.h

@@ -335,8 +335,6 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
 int iwch_post_zb_read(struct iwch_qp *qhp);
 int iwch_register_device(struct iwch_dev *dev);
 void iwch_unregister_device(struct iwch_dev *dev);
-int iwch_quiesce_qps(struct iwch_cq *chp);
-int iwch_resume_qps(struct iwch_cq *chp);
 void stop_read_rep_timer(struct iwch_qp *qhp);
 int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
 		      struct iwch_mr *mhp, int shift);

+ 0 - 56
drivers/infiniband/hw/cxgb3/iwch_qp.c

@@ -1149,59 +1149,3 @@ out:
 	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
 	return ret;
 }
-
-static int quiesce_qp(struct iwch_qp *qhp)
-{
-	spin_lock_irq(&qhp->lock);
-	iwch_quiesce_tid(qhp->ep);
-	qhp->flags |= QP_QUIESCED;
-	spin_unlock_irq(&qhp->lock);
-	return 0;
-}
-
-static int resume_qp(struct iwch_qp *qhp)
-{
-	spin_lock_irq(&qhp->lock);
-	iwch_resume_tid(qhp->ep);
-	qhp->flags &= ~QP_QUIESCED;
-	spin_unlock_irq(&qhp->lock);
-	return 0;
-}
-
-int iwch_quiesce_qps(struct iwch_cq *chp)
-{
-	int i;
-	struct iwch_qp *qhp;
-
-	for (i=0; i < T3_MAX_NUM_QP; i++) {
-		qhp = get_qhp(chp->rhp, i);
-		if (!qhp)
-			continue;
-		if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
-			quiesce_qp(qhp);
-			continue;
-		}
-		if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
-			quiesce_qp(qhp);
-	}
-	return 0;
-}
-
-int iwch_resume_qps(struct iwch_cq *chp)
-{
-	int i;
-	struct iwch_qp *qhp;
-
-	for (i=0; i < T3_MAX_NUM_QP; i++) {
-		qhp = get_qhp(chp->rhp, i);
-		if (!qhp)
-			continue;
-		if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
-			resume_qp(qhp);
-			continue;
-		}
-		if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
-			resume_qp(qhp);
-	}
-	return 0;
-}

+ 0 - 1
drivers/infiniband/hw/cxgb4/iw_cxgb4.h

@@ -760,7 +760,6 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
 int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
 int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
 u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
-int c4iw_post_zb_read(struct c4iw_qp *qhp);
 int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
 u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
 void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,

+ 0 - 32
drivers/infiniband/hw/cxgb4/qp.c

@@ -892,36 +892,6 @@ static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
 	}
 }
 
-int c4iw_post_zb_read(struct c4iw_qp *qhp)
-{
-	union t4_wr *wqe;
-	struct sk_buff *skb;
-	u8 len16;
-
-	PDBG("%s enter\n", __func__);
-	skb = alloc_skb(40, GFP_KERNEL);
-	if (!skb) {
-		printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
-		return -ENOMEM;
-	}
-	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
-
-	wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
-	memset(wqe, 0, sizeof wqe->read);
-	wqe->read.r2 = cpu_to_be64(0);
-	wqe->read.stag_sink = cpu_to_be32(1);
-	wqe->read.to_sink_hi = cpu_to_be32(0);
-	wqe->read.to_sink_lo = cpu_to_be32(1);
-	wqe->read.stag_src = cpu_to_be32(1);
-	wqe->read.plen = cpu_to_be32(0);
-	wqe->read.to_src_hi = cpu_to_be32(0);
-	wqe->read.to_src_lo = cpu_to_be32(1);
-	len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
-	init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);
-
-	return c4iw_ofld_send(&qhp->rhp->rdev, skb);
-}
-
 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
 			   gfp_t gfp)
 {
@@ -1029,7 +999,6 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	wqe->cookie = (unsigned long) &ep->com.wr_wait;
 
 	wqe->u.fini.type = FW_RI_TYPE_FINI;
-	c4iw_init_wr_wait(&ep->com.wr_wait);
 	ret = c4iw_ofld_send(&rhp->rdev, skb);
 	if (ret)
 		goto out;
@@ -1125,7 +1094,6 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	if (qhp->attr.mpa_attr.initiator)
 		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
 
-	c4iw_init_wr_wait(&qhp->ep->com.wr_wait);
 	ret = c4iw_ofld_send(&rhp->rdev, skb);
 	if (ret)
 		goto out;

+ 2 - 3
drivers/infiniband/hw/ipath/ipath_driver.c

@@ -530,9 +530,8 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 	for (j = 0; j < 6; j++) {
 		if (!pdev->resource[j].start)
 			continue;
-		ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n",
-			   j, (unsigned long long)pdev->resource[j].start,
-			   (unsigned long long)pdev->resource[j].end,
+		ipath_cdbg(VERBOSE, "BAR %d %pR, len %llx\n",
+			   j, &pdev->resource[j],
 			   (unsigned long long)pci_resource_len(pdev, j));
 	}
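The replacement relies on the kernel's %pR printk extension, which formats a pointer to a struct resource as a single range argument instead of printing start and end by hand. A runnable userspace approximation of what %pR produces for a memory BAR (the struct and helper below are stand-ins, not the kernel's):

#include <stdio.h>

/* Userspace stand-in for the kernel's struct resource; printk's %pR
 * formats a pointer to the real thing roughly as "[mem 0xSTART-0xEND]". */
struct resource {
	unsigned long long start;
	unsigned long long end;
};

static void print_bar(int idx, const struct resource *r)
{
	printf("BAR %d [mem %#llx-%#llx], len %#llx\n",
	       idx, r->start, r->end, r->end - r->start + 1);
}

int main(void)
{
	struct resource bar0 = { 0xf0000000ULL, 0xf0003fffULL };
	print_bar(0, &bar0);
	return 0;
}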
 
 

+ 8 - 1
drivers/infiniband/hw/mlx4/cq.c

@@ -397,10 +397,14 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 		cq->resize_buf = NULL;
 		cq->resize_umem = NULL;
 	} else {
+		struct mlx4_ib_cq_buf tmp_buf;
+		int tmp_cqe = 0;
+
 		spin_lock_irq(&cq->lock);
 		if (cq->resize_buf) {
 			mlx4_ib_cq_resize_copy_cqes(cq);
-			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+			tmp_buf = cq->buf;
+			tmp_cqe = cq->ibcq.cqe;
 			cq->buf      = cq->resize_buf->buf;
 			cq->ibcq.cqe = cq->resize_buf->cqe;
 
@@ -408,6 +412,9 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 			cq->resize_buf = NULL;
 		}
 		spin_unlock_irq(&cq->lock);
+
+		if (tmp_cqe)
+			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
 	}
 
 	goto out;
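The tmp_buf/tmp_cqe dance stashes the old CQ buffer while the spinlock is held and frees it only after the unlock, since freeing is too heavy to do under a spinlock taken with IRQs disabled. A hedged userspace sketch of the same swap-then-free pattern (all names hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *cur_buf;

/* Swap the buffer under the lock, free the old one after unlocking,
 * mirroring the tmp_buf/tmp_cqe handling in mlx4_ib_resize_cq(). */
static void resize(int *new_buf)
{
	int *old;

	pthread_mutex_lock(&lock);
	old = cur_buf;		/* stash; do not free while locked */
	cur_buf = new_buf;
	pthread_mutex_unlock(&lock);

	free(old);		/* heavyweight work outside the lock */
}

int main(void)
{
	cur_buf = malloc(64 * sizeof(int));
	resize(malloc(128 * sizeof(int)));
	printf("resized\n");
	free(cur_buf);
	return 0;
}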

+ 2 - 0
drivers/infiniband/hw/mlx4/mad.c

@@ -211,6 +211,8 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
 	if (agent) {
 		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
 					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
+		if (IS_ERR(send_buf))
+			return;
 		/*
 		 * We rely here on the fact that MLX QPs don't use the
 		 * address handle after the send is posted (this is
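ib_create_send_mad() reports failure through an ERR_PTR-encoded pointer rather than NULL, so without the added IS_ERR() check the error pointer would be dereferenced later. A self-contained sketch of the kernel's pointer-encoded-error convention (a simplified re-implementation, not the kernel headers):

#include <stdio.h>

/* Simplified userspace rendition of the kernel's ERR_PTR helpers. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create_send_buf(int fail)	/* hypothetical stand-in */
{
	static char buf[64];
	return fail ? ERR_PTR(-12 /* ENOMEM */) : buf;
}

int main(void)
{
	void *send_buf = create_send_buf(1);

	if (IS_ERR(send_buf)) {		/* the check the patch adds */
		printf("alloc failed: %ld\n", PTR_ERR(send_buf));
		return 1;
	}
	return 0;
}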

+ 2 - 0
drivers/infiniband/hw/mthca/mthca_mad.c

@@ -171,6 +171,8 @@ static void forward_trap(struct mthca_dev *dev,
 	if (agent) {
 		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
 					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
+		if (IS_ERR(send_buf))
+			return;
 		/*
 		 * We rely here on the fact that MLX QPs don't use the
 		 * address handle after the send is posted (this is

+ 2 - 2
drivers/infiniband/hw/nes/nes_nic.c

@@ -908,8 +908,8 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
 					nesvnic->nic_index &&
 					mc_index < max_pft_entries_avaiable) {
 						nes_debug(NES_DBG_NIC_RX,
-					"mc_index=%d skipping nic_index=%d,\
-					used for=%d \n", mc_index,
+					"mc_index=%d skipping nic_index=%d, "
+					"used for=%d \n", mc_index,
 					nesvnic->nic_index,
 					nesadapter->pft_mcast_map[mc_index]);
 				mc_index++;
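The bug being fixed: a backslash-newline inside a string literal keeps the next line's leading tabs inside the format string, while two adjacent literals concatenate at compile time with no stray whitespace. A small demonstration:

#include <stdio.h>

int main(void)
{
	/* backslash continuation drags the indentation into the string */
	const char *bad = "mc_index=%d skipping nic_index=%d,\
		used for=%d\n";
	/* adjacent literals concatenate with no stray whitespace */
	const char *good = "mc_index=%d skipping nic_index=%d, "
			   "used for=%d\n";

	printf(bad, 1, 2, 3);
	printf(good, 1, 2, 3);
	return 0;
}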

+ 1 - 1
drivers/infiniband/hw/qib/qib.h

@@ -766,7 +766,7 @@ struct qib_devdata {
 	void (*f_sdma_hw_start_up)(struct qib_pportdata *);
 	void (*f_sdma_init_early)(struct qib_pportdata *);
 	void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
-	void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32);
+	void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32);
 	u32 (*f_hdrqempty)(struct qib_ctxtdata *);
 	u64 (*f_portcntr)(struct qib_pportdata *, u32);
 	u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
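Because f_update_usrhead is a function pointer in qib_devdata, adding the packet-count argument forces every chip backend (6120, 7220, and 7322, patched below) to change in the same commit as the caller. A toy version of the pattern (hypothetical names, not the driver's types):

#include <stdio.h>

/* Toy ops table: widening the pointer type forces every chip
 * implementation to be updated in lockstep with the caller. */
struct dev_ops {
	void (*update_usrhead)(unsigned long hd, unsigned updegr,
			       unsigned egrhd, unsigned npkts);
};

static void update_usrhead_chip_a(unsigned long hd, unsigned updegr,
				  unsigned egrhd, unsigned npkts)
{
	printf("chip A: hd=%lu updegr=%u egrhd=%u npkts=%u\n",
	       hd, updegr, egrhd, npkts);
}

int main(void)
{
	struct dev_ops ops = { .update_usrhead = update_usrhead_chip_a };

	ops.update_usrhead(42, 1, 7, 16);
	return 0;
}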

+ 2 - 1
drivers/infiniband/hw/qib/qib_cq.c

@@ -100,7 +100,8 @@ void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
 	wc->head = next;
 
 	if (cq->notify == IB_CQ_NEXT_COMP ||
-	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
+	    (cq->notify == IB_CQ_SOLICITED &&
+	     (solicited || entry->status != IB_WC_SUCCESS))) {
 		cq->notify = IB_CQ_NONE;
 		cq->triggered++;
 		/*
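With this change an errored completion wakes the consumer even when the CQ was armed for solicited completions only, so errors are not silently queued. The patched condition, restated as a standalone predicate (the enum names here are stand-ins for the ib_verbs ones):

#include <stdio.h>

enum notify_mode { CQ_NONE, CQ_SOLICITED, CQ_NEXT_COMP };
enum wc_status { WC_SUCCESS, WC_ERROR };

/* Restatement of the patched test in qib_cq_enter(): an error
 * completion now qualifies even in solicited-only mode. */
static int should_notify(enum notify_mode notify, int solicited,
			 enum wc_status status)
{
	return notify == CQ_NEXT_COMP ||
	       (notify == CQ_SOLICITED &&
		(solicited || status != WC_SUCCESS));
}

int main(void)
{
	/* unsolicited error: old logic said 0, new logic says 1 */
	printf("%d\n", should_notify(CQ_SOLICITED, 0, WC_ERROR));
	return 0;
}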

+ 148 - 7
drivers/infiniband/hw/qib/qib_driver.c

@@ -71,6 +71,11 @@ MODULE_DESCRIPTION("QLogic IB driver");
  */
 #define QIB_PIO_MAXIBHDR 128
 
+/*
+ * QIB_MAX_PKT_RCV is the max # if packets processed per receive interrupt.
+ */
+#define QIB_MAX_PKT_RECV 64
+
 struct qlogic_ib_stats qib_stats;
 
 const char *qib_get_unit_name(int unit)
@@ -284,14 +289,147 @@ static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
  * Returns 1 if error was a CRC, else 0.
  * Needed for some chip's synthesized error counters.
  */
-static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt,
-			  u32 eflags, u32 l, u32 etail, __le32 *rhf_addr,
-			  struct qib_message_header *hdr)
+static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
+			  u32 ctxt, u32 eflags, u32 l, u32 etail,
+			  __le32 *rhf_addr, struct qib_message_header *rhdr)
 {
 	u32 ret = 0;
 
 	if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
 		ret = 1;
+	else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
+		/* For TIDERR and RC QPs premptively schedule a NAK */
+		struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
+		struct qib_other_headers *ohdr = NULL;
+		struct qib_ibport *ibp = &ppd->ibport_data;
+		struct qib_qp *qp = NULL;
+		u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
+		u16 lid  = be16_to_cpu(hdr->lrh[1]);
+		int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+		u32 qp_num;
+		u32 opcode;
+		u32 psn;
+		int diff;
+		unsigned long flags;
+
+		/* Sanity check packet */
+		if (tlen < 24)
+			goto drop;
+
+		if (lid < QIB_MULTICAST_LID_BASE) {
+			lid &= ~((1 << ppd->lmc) - 1);
+			if (unlikely(lid != ppd->lid))
+				goto drop;
+		}
+
+		/* Check for GRH */
+		if (lnh == QIB_LRH_BTH)
+			ohdr = &hdr->u.oth;
+		else if (lnh == QIB_LRH_GRH) {
+			u32 vtf;
+
+			ohdr = &hdr->u.l.oth;
+			if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
+				goto drop;
+			vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
+			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+				goto drop;
+		} else
+			goto drop;
+
+		/* Get opcode and PSN from packet */
+		opcode = be32_to_cpu(ohdr->bth[0]);
+		opcode >>= 24;
+		psn = be32_to_cpu(ohdr->bth[2]);
+
+		/* Get the destination QP number. */
+		qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
+		if (qp_num != QIB_MULTICAST_QPN) {
+			int ruc_res;
+			qp = qib_lookup_qpn(ibp, qp_num);
+			if (!qp)
+				goto drop;
+
+			/*
+			 * Handle only RC QPs - for other QP types drop error
+			 * packet.
+			 */
+			spin_lock(&qp->r_lock);
+
+			/* Check for valid receive state. */
+			if (!(ib_qib_state_ops[qp->state] &
+			      QIB_PROCESS_RECV_OK)) {
+				ibp->n_pkt_drops++;
+				goto unlock;
+			}
+
+			switch (qp->ibqp.qp_type) {
+			case IB_QPT_RC:
+				spin_lock_irqsave(&qp->s_lock, flags);
+				ruc_res =
+					qib_ruc_check_hdr(
+						ibp, hdr,
+						lnh == QIB_LRH_GRH,
+						qp,
+						be32_to_cpu(ohdr->bth[0]));
+				if (ruc_res) {
+					spin_unlock_irqrestore(&qp->s_lock,
+							       flags);
+					goto unlock;
+				}
+				spin_unlock_irqrestore(&qp->s_lock, flags);
+
+				/* Only deal with RDMA Writes for now */
+				if (opcode <
+				    IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
+					diff = qib_cmp24(psn, qp->r_psn);
+					if (!qp->r_nak_state && diff >= 0) {
+						ibp->n_rc_seqnak++;
+						qp->r_nak_state =
+							IB_NAK_PSN_ERROR;
+						/* Use the expected PSN. */
+						qp->r_ack_psn = qp->r_psn;
+						/*
+						 * Wait to send the sequence
+						 * NAK until all packets
+						 * in the receive queue have
+						 * been processed.
+						 * Otherwise, we end up
+						 * propagating congestion.
+						 */
+						if (list_empty(&qp->rspwait)) {
+							qp->r_flags |=
+								QIB_R_RSP_NAK;
+							atomic_inc(
+								&qp->refcount);
+							list_add_tail(
+							 &qp->rspwait,
+							 &rcd->qp_wait_list);
+						}
+					} /* Out of sequence NAK */
+				} /* QP Request NAKs */
+				break;
+			case IB_QPT_SMI:
+			case IB_QPT_GSI:
+			case IB_QPT_UD:
+			case IB_QPT_UC:
+			default:
+				/* For now don't handle any other QP types */
+				break;
+			}
+
+unlock:
+			spin_unlock(&qp->r_lock);
+			/*
+			 * Notify qib_destroy_qp() if it is waiting
+			 * for us to finish.
+			 */
+			if (atomic_dec_and_test(&qp->refcount))
+				wake_up(&qp->wait);
+		} /* Unicast QP */
+	} /* Valid packet with TIDErr */
+
+drop:
 	return ret;
 }
 
@@ -335,7 +473,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
 		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */
 	}
 
-	for (last = 0, i = 1; !last && i <= 64; i += !last) {
+	for (last = 0, i = 1; !last; i += !last) {
 		hdr = dd->f_get_msgheader(dd, rhf_addr);
 		eflags = qib_hdrget_err_flags(rhf_addr);
 		etype = qib_hdrget_rcv_type(rhf_addr);
@@ -371,7 +509,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
 		 * packets; only qibhdrerr should be set.
 		 */
 		if (unlikely(eflags))
-			crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l,
+			crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
 					       etail, rhf_addr, hdr);
 		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
 			qib_ib_rcv(rcd, hdr, ebuf, tlen);
@@ -384,6 +522,9 @@ move_along:
 		l += rsize;
 		if (l >= maxcnt)
 			l = 0;
+		if (i == QIB_MAX_PKT_RECV)
+			last = 1;
+
 		rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
 		if (dd->flags & QIB_NODMA_RTAIL) {
 			u32 seq = qib_hdrget_seq(rhf_addr);
@@ -402,7 +543,7 @@ move_along:
 		 */
 		lval = l;
 		if (!last && !(i & 0xf)) {
-			dd->f_update_usrhead(rcd, lval, updegr, etail);
+			dd->f_update_usrhead(rcd, lval, updegr, etail, i);
 			updegr = 0;
 		}
 	}
@@ -444,7 +585,7 @@ bail:
 	 * if no packets were processed.
 	 */
 	lval = (u64)rcd->head | dd->rhdrhead_intr_off;
-	dd->f_update_usrhead(rcd, lval, updegr, etail);
+	dd->f_update_usrhead(rcd, lval, updegr, etail, i);
 	return crcs;
 }
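The receive loop's cap moves out of the for-condition into an explicit QIB_MAX_PKT_RECV check so that i, now also passed to f_update_usrhead(), counts packets actually processed. A hedged sketch of the same budgeted-drain shape (ring_has_packet() is a stub, not driver code):

#include <stdio.h>

#define MAX_PKT_RECV 64			/* role of QIB_MAX_PKT_RECV */

static int ring_has_packet(int seq)	/* stub for the real ring test */
{
	return seq < 100;
}

int main(void)
{
	int i, last = 0;

	/* budgeted drain: stop when the ring empties or the budget is
	 * spent; on exit i is the number of packets handled this pass */
	for (i = 1; !last; i += !last) {
		/* ... process packet i here ... */
		if (!ring_has_packet(i + 1))
			last = 1;
		if (i == MAX_PKT_RECV)
			last = 1;
	}
	printf("processed %d packets this pass\n", i);
	return 0;
}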
 
 

+ 5 - 5
drivers/infiniband/hw/qib/qib_file_ops.c

@@ -1379,17 +1379,17 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
 		/* find device (with ACTIVE ports) with fewest ctxts in use */
 		for (ndev = 0; ndev < devmax; ndev++) {
 			struct qib_devdata *dd = qib_lookup(ndev);
-			unsigned cused = 0, cfree = 0;
+			unsigned cused = 0, cfree = 0, pusable = 0;
 			if (!dd)
 				continue;
 			if (port && port <= dd->num_pports &&
 			    usable(dd->pport + port - 1))
-				dusable = 1;
+				pusable = 1;
 			else
 				for (i = 0; i < dd->num_pports; i++)
 					if (usable(dd->pport + i))
-						dusable++;
-			if (!dusable)
+						pusable++;
+			if (!pusable)
 				continue;
 			for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
 			     ctxt++)
@@ -1397,7 +1397,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
 					cused++;
 				else
 					cfree++;
-			if (cfree && cused < inuse) {
+			if (pusable && cfree && cused < inuse) {
 				udd = dd;
 				inuse = cused;
 			}

+ 1 - 1
drivers/infiniband/hw/qib/qib_iba6120.c

@@ -2074,7 +2074,7 @@ static void qib_6120_config_ctxts(struct qib_devdata *dd)
 }
 
 static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
-				    u32 updegr, u32 egrhd)
+				    u32 updegr, u32 egrhd, u32 npkts)
 {
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)

+ 2 - 2
drivers/infiniband/hw/qib/qib_iba7220.c

@@ -2297,7 +2297,7 @@ static void qib_7220_config_ctxts(struct qib_devdata *dd)
 	nchipctxts = qib_read_kreg32(dd, kr_portcnt);
 	dd->cspec->numctxts = nchipctxts;
 	if (qib_n_krcv_queues > 1) {
-		dd->qpn_mask = 0x3f;
+		dd->qpn_mask = 0x3e;
 		dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
 		if (dd->first_user_ctxt > nchipctxts)
 			dd->first_user_ctxt = nchipctxts;
@@ -2703,7 +2703,7 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
 }
 
 static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
-				    u32 updegr, u32 egrhd)
+				    u32 updegr, u32 egrhd, u32 npkts)
 {
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)

+ 339 - 34
drivers/infiniband/hw/qib/qib_iba7322.c

@@ -71,6 +71,9 @@ static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
 
 static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
 static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
+static void serdes_7322_los_enable(struct qib_pportdata *, int);
+static int serdes_7322_init_old(struct qib_pportdata *);
+static int serdes_7322_init_new(struct qib_pportdata *);
 
 #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
 
@@ -111,6 +114,21 @@ static ushort qib_singleport;
 module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
 MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
 
+/*
+ * Receive header queue sizes
+ */
+static unsigned qib_rcvhdrcnt;
+module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
+
+static unsigned qib_rcvhdrsize;
+module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
+
+static unsigned qib_rcvhdrentsize;
+module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
+
 #define MAX_ATTEN_LEN 64 /* plenty for any real system */
 /* for read back, default index is ~5m copper cable */
 static char txselect_list[MAX_ATTEN_LEN] = "10";
@@ -544,6 +562,7 @@ static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
 
 #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
 #define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
+#define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
 #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
 
 #define H1_FORCE_VAL 8
@@ -604,6 +623,7 @@ struct qib_chippport_specific {
 	u8 ibmalfusesnap;
 	struct qib_qsfp_data qsfp_data;
 	char epmsgbuf[192]; /* for port error interrupt msg buffer */
+	u8 bounced;
 };
 
 static struct {
@@ -1677,6 +1697,8 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
 	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
 		force_h1(ppd);
 		ppd->cpspec->qdr_reforce = 1;
+		if (!ppd->dd->cspec->r1)
+			serdes_7322_los_enable(ppd, 0);
 	} else if (ppd->cpspec->qdr_reforce &&
 		(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
 		 (ibclt == IB_7322_LT_STATE_CFGENH ||
@@ -1692,18 +1714,37 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
 	      ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
 		adj_tx_serdes(ppd);
 
-	if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP &&
-	    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
-		ppd->cpspec->qdr_dfe_on = 1;
-		ppd->cpspec->qdr_dfe_time = 0;
-		/* On link down, reenable QDR adaptation */
-		qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
-			ppd->dd->cspec->r1 ?
-				    QDR_STATIC_ADAPT_DOWN_R1 :
-				    QDR_STATIC_ADAPT_DOWN);
+	if (ibclt != IB_7322_LT_STATE_LINKUP) {
+		u8 ltstate = qib_7322_phys_portstate(ibcst);
+		u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
+					  LinkTrainingState);
+		if (!ppd->dd->cspec->r1 &&
+		    pibclt == IB_7322_LT_STATE_LINKUP &&
+		    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
+		    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
+		    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
+		    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
+			/* If the link went down (but no into recovery,
+			 * turn LOS back on */
+			serdes_7322_los_enable(ppd, 1);
+		if (!ppd->cpspec->qdr_dfe_on &&
+		    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
+			ppd->cpspec->qdr_dfe_on = 1;
+			ppd->cpspec->qdr_dfe_time = 0;
+			/* On link down, reenable QDR adaptation */
+			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+					    ppd->dd->cspec->r1 ?
+					    QDR_STATIC_ADAPT_DOWN_R1 :
+					    QDR_STATIC_ADAPT_DOWN);
+			printk(KERN_INFO QIB_DRV_NAME
+				" IB%u:%u re-enabled QDR adaptation "
+				"ibclt %x\n", ppd->dd->unit, ppd->port, ibclt);
+		}
 	}
 }
 
+static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
+
 /*
  * This is per-pport error handling.
  * will likely get it's own MSIx interrupt (one for each port,
@@ -1840,7 +1881,23 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
 		    IB_PHYSPORTSTATE_DISABLED)
 			qib_set_ib_7322_lstate(ppd, 0,
 			       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
-		else
+		else {
+			u32 lstate;
+			/*
+			 * We need the current logical link state before
+			 * lflags are set in handle_e_ibstatuschanged.
+			 */
+			lstate = qib_7322_iblink_state(ibcs);
+
+			if (IS_QMH(dd) && !ppd->cpspec->bounced &&
+			    ltstate == IB_PHYSPORTSTATE_LINKUP &&
+			    (lstate >= IB_PORT_INIT &&
+				lstate <= IB_PORT_ACTIVE)) {
+				ppd->cpspec->bounced = 1;
+				qib_7322_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+					IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
+			}
+
 			/*
 			 * Since going into a recovery state causes the link
 			 * state to go down and since recovery is transitory,
@@ -1854,6 +1911,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
 				qib_handle_e_ibstatuschanged(ppd, ibcs);
+		}
 	}
 	if (*msg && iserr)
 		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
@@ -2785,7 +2843,6 @@ static irqreturn_t qib_7322intr(int irq, void *data)
 				ctxtrbits &= ~rmask;
 				if (dd->rcd[i]) {
 					qib_kreceive(dd->rcd[i], NULL, &npkts);
-					adjust_rcv_timeout(dd->rcd[i], npkts);
 				}
 			}
 			rmask <<= 1;
@@ -2835,7 +2892,6 @@ static irqreturn_t qib_7322pintr(int irq, void *data)
 		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
 
 	qib_kreceive(rcd, NULL, &npkts);
-	adjust_rcv_timeout(rcd, npkts);
 
 	return IRQ_HANDLED;
 }
@@ -3157,6 +3213,10 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
 	case BOARD_QME7342:
 		n = "InfiniPath_QME7342";
 		break;
+	case 8:
+		n = "InfiniPath_QME7362";
+		dd->flags |= QIB_HAS_QSFP;
+		break;
 	case 15:
 		n = "InfiniPath_QLE7342_TEST";
 		dd->flags |= QIB_HAS_QSFP;
@@ -3475,11 +3535,6 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd)
 	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
 	dd->cspec->numctxts = nchipctxts;
 	if (qib_n_krcv_queues > 1 && dd->num_pports) {
-		/*
-		 * Set the mask for which bits from the QPN are used
-		 * to select a context number.
-		 */
-		dd->qpn_mask = 0x3f;
 		dd->first_user_ctxt = NUM_IB_PORTS +
 			(qib_n_krcv_queues - 1) * dd->num_pports;
 		if (dd->first_user_ctxt > nchipctxts)
@@ -3530,8 +3585,11 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd)
 
 	/* kr_rcvegrcnt changes based on the number of contexts enabled */
 	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
-	dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
-				dd->num_pports > 1 ? 1024U : 2048U);
+	if (qib_rcvhdrcnt)
+		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
+	else
+		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
+				    dd->num_pports > 1 ? 1024U : 2048U);
 }
 
 static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
@@ -4002,8 +4060,14 @@ static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
 }
 
 static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
-				    u32 updegr, u32 egrhd)
+				    u32 updegr, u32 egrhd, u32 npkts)
 {
+	/*
+	 * Need to write timeout register before updating rcvhdrhead to ensure
+	 * that the timer is enabled on reception of a packet.
+	 */
+	if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
+		adjust_rcv_timeout(rcd, npkts);
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)
@@ -5522,7 +5586,7 @@ static void qsfp_7322_event(struct work_struct *work)
 		u64 now = get_jiffies_64();
 		if (time_after64(now, pwrup))
 			break;
-		msleep(1);
+		msleep(20);
 	}
 	ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
 	/*
@@ -5579,6 +5643,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
 	u32 pidx, unit, port, deflt, h1;
 	unsigned long val;
 	int any = 0, seth1;
+	int txdds_size;
 
 	str = txselect_list;
 
@@ -5587,6 +5652,10 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
 		dd->pport[pidx].cpspec->no_eep = deflt;
 
+	txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
+	if (IS_QME(dd) || IS_QMH(dd))
+		txdds_size += TXDDS_MFG_SZ;
+
 	while (*nxt && nxt[1]) {
 		str = ++nxt;
 		unit = simple_strtoul(str, &nxt, 0);
@@ -5609,7 +5678,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
 				;
 			continue;
 		}
-		if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)
+		if (val >= txdds_size)
 			continue;
 		seth1 = 0;
 		h1 = 0; /* gcc thinks it might be used uninitted */
@@ -5661,10 +5730,11 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
 		return -ENOSPC;
 	}
 	val = simple_strtoul(str, &n, 0);
-	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
+	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
+				TXDDS_MFG_SZ)) {
 		printk(KERN_INFO QIB_DRV_NAME
 		       "txselect_values must start with a number < %d\n",
-			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
+			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
 		return -EINVAL;
 	}
 	strcpy(txselect_list, str);
@@ -5810,7 +5880,8 @@ static void write_7322_initregs(struct qib_devdata *dd)
 		unsigned n, regno;
 		unsigned long flags;
 
-		if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported)
+		if (dd->n_krcv_queues < 2 ||
+			!dd->pport[pidx].link_speed_supported)
 			continue;
 
 		ppd = &dd->pport[pidx];
@@ -6097,8 +6168,10 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
 		ppd++;
 	}
 
-	dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
-	dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
+	dd->rcvhdrentsize = qib_rcvhdrentsize ?
+		qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
+	dd->rcvhdrsize = qib_rcvhdrsize ?
+		qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
 	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
 
 	/* we always allocate at least 2048 bytes for eager buffers */
@@ -6495,7 +6568,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
 		/* make sure we see an updated copy next time around */
 		sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
 		sleeps++;
-		msleep(1);
+		msleep(20);
 	}
 
 	switch (which) {
@@ -6993,6 +7066,12 @@ static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
 	{  0, 1,  0, 12 },	/* QMH7342 backplane settings */
 };
 
+static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
+	/* amp, pre, main, post */
+	{ 0, 0, 0, 0 },         /* QME7342 mfg settings */
+	{ 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
+};
+
 static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
 					       unsigned atten)
 {
@@ -7066,6 +7145,16 @@ static void find_best_ent(struct qib_pportdata *ppd,
 		*sdr_dds = &txdds_extra_sdr[idx];
 		*ddr_dds = &txdds_extra_ddr[idx];
 		*qdr_dds = &txdds_extra_qdr[idx];
+	} else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
+		   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
+					  TXDDS_MFG_SZ)) {
+		idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
+		printk(KERN_INFO QIB_DRV_NAME
+			" IB%u:%u use idx %u into txdds_mfg\n",
+			ppd->dd->unit, ppd->port, idx);
+		*sdr_dds = &txdds_extra_mfg[idx];
+		*ddr_dds = &txdds_extra_mfg[idx];
+		*qdr_dds = &txdds_extra_mfg[idx];
 	} else {
 		/* this shouldn't happen, it's range checked */
 		*sdr_dds = txdds_sdr + qib_long_atten;
@@ -7210,9 +7299,30 @@ static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
 	}
 }
 
+static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
+{
+	u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
+	printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n",
+		ppd->dd->unit, ppd->port, (enable ? "on" : "off"));
+	if (enable)
+		data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+	else
+		data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+	qib_write_kreg_port(ppd, krp_serdesctrl, data);
+}
+
 static int serdes_7322_init(struct qib_pportdata *ppd)
 {
-	u64 data;
+	int ret = 0;
+	if (ppd->dd->cspec->r1)
+		ret = serdes_7322_init_old(ppd);
+	else
+		ret = serdes_7322_init_new(ppd);
+	return ret;
+}
+
+static int serdes_7322_init_old(struct qib_pportdata *ppd)
+{
 	u32 le_val;
 
 	/*
@@ -7270,11 +7380,7 @@ static int serdes_7322_init(struct qib_pportdata *ppd)
 	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
 	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
 
-	data = qib_read_kreg_port(ppd, krp_serdesctrl);
-	/* Turn off IB latency mode */
-	data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE);
-	qib_write_kreg_port(ppd, krp_serdesctrl, data |
-		SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
+	serdes_7322_los_enable(ppd, 1);
 
 	/* rxbistena; set 0 to avoid effects of it switch later */
 	ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
@@ -7314,6 +7420,205 @@ static int serdes_7322_init(struct qib_pportdata *ppd)
 	return 0;
 }
 
+static int serdes_7322_init_new(struct qib_pportdata *ppd)
+{
+	u64 tstart;
+	u32 le_val, rxcaldone;
+	int chan, chan_done = (1 << SERDES_CHANS) - 1;
+
+	/*
+	 * Initialize the Tx DDS tables.  Also done every QSFP event,
+	 * for adapters with QSFP
+	 */
+	init_txdds_table(ppd, 0);
+
+	/* Clear cmode-override, may be set from older driver */
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
+
+	/* ensure no tx overrides from earlier driver loads */
+	qib_write_kreg_port(ppd, krp_tx_deemph_override,
+		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+		reset_tx_deemphasis_override));
+
+	/* START OF LSI SUGGESTED SERDES BRINGUP */
+	/* Reset - Calibration Setup */
+	/*       Stop DFE adaptaion */
+	ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
+	/*       Disable LE1 */
+	ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
+	/*       Disable autoadapt for LE1 */
+	ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
+	/*       Disable LE2 */
+	ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
+	/*       Disable VGA */
+	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
+	/*       Disable AFE Offset Cancel */
+	ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
+	/*       Disable Timing Loop */
+	ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
+	/*       Disable Frequency Loop */
+	ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
+	/*       Disable Baseline Wander Correction */
+	ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
+	/*       Disable RX Calibration */
+	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
+	/*       Disable RX Offset Calibration */
+	ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
+	/*       Select BB CDR */
+	ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
+	/*       CDR Step Size */
+	ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
+	/*       Enable phase Calibration */
+	ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
+	/*       DFE Bandwidth [2:14-12] */
+	ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
+	/*       DFE Config (4 taps only) */
+	ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
+	/*       Gain Loop Bandwidth */
+	if (!ppd->dd->cspec->r1) {
+		ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
+		ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
+	} else {
+		ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
+	}
+	/*       Baseline Wander Correction Gain [13:4-0] (leave as default) */
+	/*       Baseline Wander Correction Gain [3:7-5] (leave as default) */
+	/*       Data Rate Select [5:7-6] (leave as default) */
+	/*       RX Parralel Word Width [3:10-8] (leave as default) */
+
+	/* RX REST */
+	/*       Single- or Multi-channel reset */
+	/*       RX Analog reset */
+	/*       RX Digital reset */
+	ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
+	msleep(20);
+	/*       RX Analog reset */
+	ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
+	msleep(20);
+	/*       RX Digital reset */
+	ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
+	msleep(20);
+
+	/* setup LoS params; these are subsystem, so chan == 5 */
+	/* LoS filter threshold_count on, ch 0-3, set to 8 */
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
+
+	/* LoS filter threshold_count off, ch 0-3, set to 4 */
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
+
+	/* LoS filter select enabled */
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
+
+	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
+	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
+	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
+	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
+
+	/* Turn on LOS on initial SERDES init */
+	serdes_7322_los_enable(ppd, 1);
+	/* FLoop LOS gate: PPM filter  enabled */
+	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
+
+	/* RX LATCH CALIBRATION */
+	/*       Enable Eyefinder Phase Calibration latch */
+	ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
+	/*       Enable RX Offset Calibration latch */
+	ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
+	msleep(20);
+	/*       Start Calibration */
+	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
+	tstart = get_jiffies_64();
+	while (chan_done &&
+	       !time_after64(tstart, tstart + msecs_to_jiffies(500))) {
+		msleep(20);
+		for (chan = 0; chan < SERDES_CHANS; ++chan) {
+			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
+					    (chan + (chan >> 1)),
+					    25, 0, 0);
+			if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
+			    (~chan_done & (1 << chan)) == 0)
+				chan_done &= ~(1 << chan);
+		}
+	}
+	if (chan_done) {
+		printk(KERN_INFO QIB_DRV_NAME
+			 " Serdes %d calibration not done after .5 sec: 0x%x\n",
+			 IBSD(ppd->hw_pidx), chan_done);
+	} else {
+		for (chan = 0; chan < SERDES_CHANS; ++chan) {
+			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
+					    (chan + (chan >> 1)),
+					    25, 0, 0);
+			if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
+				printk(KERN_INFO QIB_DRV_NAME
+					 " Serdes %d chan %d calibration "
+					 "failed\n", IBSD(ppd->hw_pidx), chan);
+		}
+	}
+
+	/*       Turn off Calibration */
+	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
+	msleep(20);
+
+	/* BRING RX UP */
+	/*       Set LE2 value (May be overridden in qsfp_7322_event) */
+	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
+	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
+	/*       Set LE2 Loop bandwidth */
+	ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
+	/*       Enable LE2 */
+	ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
+	msleep(20);
+	/*       Enable H0 only */
+	ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
+	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
+	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
+	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
+	/*       Enable VGA */
+	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
+	msleep(20);
+	/*       Set Frequency Loop Bandwidth */
+	ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5));
+	/*       Enable Frequency Loop */
+	ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
+	/*       Set Timing Loop Bandwidth */
+	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
+	/*       Enable Timing Loop */
+	ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
+	msleep(50);
+	/*       Enable DFE
+	 *       Set receive adaptation mode.  SDR and DDR adaptation are
+	 *       always on, and QDR is initially enabled; later disabled.
+	 */
+	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
+	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
+	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+			    ppd->dd->cspec->r1 ?
+			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
+	ppd->cpspec->qdr_dfe_on = 1;
+	/*       Disable LE1  */
+	ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
+	/*       Disable auto adapt for LE1 */
+	ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
+	msleep(20);
+	/*       Enable AFE Offset Cancel */
+	ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
+	/*       Enable Baseline Wander Correction */
+	ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
+	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
+	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
+	/* VGA output common mode */
+	ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
+
+	return 0;
+}
+
 /* start adjust QMH serdes parameters */
 
 static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
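serdes_7322_init_new() above polls per-channel calibration-done bits with msleep(20) between reads and reports any channels still pending after roughly half a second. A generic userspace rendition of the poll-until-done-or-deadline shape (cal_done() is a stub for the per-channel AHB read, not driver code):

#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int cal_done(int chan)	/* stub for the per-channel status read */
{
	static int polls;
	return ++polls > 10;	/* pretend it finishes after a while */
}

int main(void)
{
	int chan, chan_done = (1 << 4) - 1;	/* 4 channels pending */
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (chan_done) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec - start.tv_sec >= 1)	/* deadline */
			break;
		usleep(20000);				/* ~msleep(20) */
		for (chan = 0; chan < 4; chan++)
			if (cal_done(chan))
				chan_done &= ~(1 << chan);
	}
	if (chan_done)
		printf("calibration not done, mask 0x%x\n", chan_done);
	else
		printf("all channels calibrated\n");
	return 0;
}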

+ 4 - 2
drivers/infiniband/hw/qib/qib_init.c

@@ -92,9 +92,11 @@ unsigned long *qib_cpulist;
 /* set number of contexts we'll actually use */
 void qib_set_ctxtcnt(struct qib_devdata *dd)
 {
-	if (!qib_cfgctxts)
+	if (!qib_cfgctxts) {
 		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
-	else if (qib_cfgctxts < dd->num_pports)
+		if (dd->cfgctxts > dd->ctxtcnt)
+			dd->cfgctxts = dd->ctxtcnt;
+	} else if (qib_cfgctxts < dd->num_pports)
 		dd->cfgctxts = dd->ctxtcnt;
 	else if (qib_cfgctxts <= dd->ctxtcnt)
 		dd->cfgctxts = qib_cfgctxts;
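The added branch bounds the auto-sized context count: first_user_ctxt + num_online_cpus() can exceed what the chip actually exposes, so it is clamped to ctxtcnt. The arithmetic in isolation (values below are illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned first_user_ctxt = 2, online_cpus = 32, ctxtcnt = 18;
	unsigned cfgctxts = first_user_ctxt + online_cpus;

	if (cfgctxts > ctxtcnt)		/* the added bound */
		cfgctxts = ctxtcnt;

	printf("using %u of %u contexts\n", cfgctxts, ctxtcnt);
	return 0;
}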

+ 2 - 1
drivers/infiniband/hw/qib/qib_intr.c

@@ -131,7 +131,8 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
 			/* start a 75msec timer to clear symbol errors */
 			mod_timer(&ppd->symerr_clear_timer,
 				  msecs_to_jiffies(75));
-		} else if (ltstate == IB_PHYSPORTSTATE_LINKUP) {
+		} else if (ltstate == IB_PHYSPORTSTATE_LINKUP &&
+			   !(ppd->lflags & QIBL_LINKACTIVE)) {
 			/* active, but not active defered */
 			qib_hol_up(ppd); /* useful only for 6120 now */
 			*ppd->statusp |=

+ 56 - 24
drivers/infiniband/hw/qib/qib_keys.c

@@ -136,7 +136,6 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
 	struct qib_mregion *mr;
 	unsigned n, m;
 	size_t off;
-	int ret = 0;
 	unsigned long flags;
 
 	/*
@@ -152,6 +151,8 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
 		if (!dev->dma_mr)
 			goto bail;
 		atomic_inc(&dev->dma_mr->refcount);
+		spin_unlock_irqrestore(&rkt->lock, flags);
+
 		isge->mr = dev->dma_mr;
 		isge->vaddr = (void *) sge->addr;
 		isge->length = sge->length;
@@ -170,19 +171,34 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
 		     off + sge->length > mr->length ||
 		     (mr->access_flags & acc) != acc))
 		goto bail;
+	atomic_inc(&mr->refcount);
+	spin_unlock_irqrestore(&rkt->lock, flags);
 
 	off += mr->offset;
-	m = 0;
-	n = 0;
-	while (off >= mr->map[m]->segs[n].length) {
-		off -= mr->map[m]->segs[n].length;
-		n++;
-		if (n >= QIB_SEGSZ) {
-			m++;
-			n = 0;
+	if (mr->page_shift) {
+		/*
+		page sizes are uniform power of 2 so no loop is necessary
+		entries_spanned_by_off is the number of times the loop below
+		would have executed.
+		*/
+		size_t entries_spanned_by_off;
+
+		entries_spanned_by_off = off >> mr->page_shift;
+		off -= (entries_spanned_by_off << mr->page_shift);
+		m = entries_spanned_by_off/QIB_SEGSZ;
+		n = entries_spanned_by_off%QIB_SEGSZ;
+	} else {
+		m = 0;
+		n = 0;
+		while (off >= mr->map[m]->segs[n].length) {
+			off -= mr->map[m]->segs[n].length;
+			n++;
+			if (n >= QIB_SEGSZ) {
+				m++;
+				n = 0;
+			}
 		}
 	}
-	atomic_inc(&mr->refcount);
 	isge->mr = mr;
 	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
 	isge->length = mr->map[m]->segs[n].length - off;
@@ -190,10 +206,10 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
 	isge->m = m;
 	isge->n = n;
 ok:
-	ret = 1;
+	return 1;
 bail:
 	spin_unlock_irqrestore(&rkt->lock, flags);
-	return ret;
+	return 0;
 }
 
 /**
@@ -214,7 +230,6 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 	struct qib_mregion *mr;
 	unsigned n, m;
 	size_t off;
-	int ret = 0;
 	unsigned long flags;
 
 	/*
@@ -231,6 +246,8 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 		if (!dev->dma_mr)
 			goto bail;
 		atomic_inc(&dev->dma_mr->refcount);
+		spin_unlock_irqrestore(&rkt->lock, flags);
+
 		sge->mr = dev->dma_mr;
 		sge->vaddr = (void *) vaddr;
 		sge->length = len;
@@ -248,19 +265,34 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
 		     (mr->access_flags & acc) == 0))
 		goto bail;
+	atomic_inc(&mr->refcount);
+	spin_unlock_irqrestore(&rkt->lock, flags);
 
 	off += mr->offset;
-	m = 0;
-	n = 0;
-	while (off >= mr->map[m]->segs[n].length) {
-		off -= mr->map[m]->segs[n].length;
-		n++;
-		if (n >= QIB_SEGSZ) {
-			m++;
-			n = 0;
+	if (mr->page_shift) {
+		/*
+		page sizes are uniform power of 2 so no loop is necessary
+		entries_spanned_by_off is the number of times the loop below
+		would have executed.
+		*/
+		size_t entries_spanned_by_off;
+
+		entries_spanned_by_off = off >> mr->page_shift;
+		off -= (entries_spanned_by_off << mr->page_shift);
+		m = entries_spanned_by_off/QIB_SEGSZ;
+		n = entries_spanned_by_off%QIB_SEGSZ;
+	} else {
+		m = 0;
+		n = 0;
+		while (off >= mr->map[m]->segs[n].length) {
+			off -= mr->map[m]->segs[n].length;
+			n++;
+			if (n >= QIB_SEGSZ) {
+				m++;
+				n = 0;
+			}
 		}
 	}
-	atomic_inc(&mr->refcount);
 	sge->mr = mr;
 	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
 	sge->length = mr->map[m]->segs[n].length - off;
@@ -268,10 +300,10 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 	sge->m = m;
 	sge->m = m;
 	sge->n = n;
 	sge->n = n;
 ok:
 ok:
-	ret = 1;
+	return 1;
 bail:
 bail:
 	spin_unlock_irqrestore(&rkt->lock, flags);
 	spin_unlock_irqrestore(&rkt->lock, flags);
-	return ret;
+	return 0;
 }
 }
 
 
 /*
 /*

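The qib_keys.c hunks above replace a linear walk over variable-length segments with constant-time arithmetic whenever the region was registered with a uniform power-of-2 page size. A minimal standalone sketch of that index computation, with an assumed QIB_SEGSZ value (the driver derives the real one from its segment-array layout):

#include <stdio.h>
#include <stddef.h>

#define QIB_SEGSZ 32			/* assumed segments per map entry */

int main(void)
{
	size_t off = 0x123456;		/* offset into the MR, after mr->offset */
	unsigned page_shift = 12;	/* uniform 4 KiB pages */
	size_t entries_spanned_by_off, m, n;

	/* Every segment covers exactly (1 << page_shift) bytes, so the
	 * number of whole segments spanned by off is a shift, not a loop. */
	entries_spanned_by_off = off >> page_shift;
	off -= entries_spanned_by_off << page_shift;
	m = entries_spanned_by_off / QIB_SEGSZ;	/* index into mr->map[] */
	n = entries_spanned_by_off % QIB_SEGSZ;	/* index into map[m]->segs[] */

	printf("m=%zu n=%zu residual off=%zu\n", m, n, off);
	return 0;
}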
+ 25 - 20
drivers/infiniband/hw/qib/qib_mad.c

@@ -668,8 +668,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	lid = be16_to_cpu(pip->lid);
 	/* Must be a valid unicast LID address. */
 	if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
-		goto err;
-	if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
+		smp->status |= IB_SMP_INVALID_FIELD;
+	else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
 		if (ppd->lid != lid)
 			qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
 		if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
@@ -683,8 +683,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	msl = pip->neighbormtu_mastersmsl & 0xF;
 	/* Must be a valid unicast LID address. */
 	if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
-		goto err;
-	if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
+		smp->status |= IB_SMP_INVALID_FIELD;
+	else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
 		spin_lock_irqsave(&ibp->lock, flags);
 		if (ibp->sm_ah) {
 			if (smlid != ibp->sm_lid)
@@ -707,8 +707,9 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 		if (lwe == 0xFF)
 			lwe = ppd->link_width_supported;
 		else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
-			goto err;
-		set_link_width_enabled(ppd, lwe);
+			smp->status |= IB_SMP_INVALID_FIELD;
+		else if (lwe != ppd->link_width_enabled)
+			set_link_width_enabled(ppd, lwe);
 	}

 	lse = pip->linkspeedactive_enabled & 0xF;
@@ -721,8 +722,9 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 		if (lse == 15)
 			lse = ppd->link_speed_supported;
 		else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
-			goto err;
-		set_link_speed_enabled(ppd, lse);
+			smp->status |= IB_SMP_INVALID_FIELD;
+		else if (lse != ppd->link_speed_enabled)
+			set_link_speed_enabled(ppd, lse);
 	}

 	/* Set link down default state. */
@@ -738,7 +740,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 					IB_LINKINITCMD_POLL);
 		break;
 	default:
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 	}

 	ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
@@ -748,15 +750,17 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,

 	mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
 	if (mtu == -1)
-		goto err;
-	qib_set_mtu(ppd, mtu);
+		smp->status |= IB_SMP_INVALID_FIELD;
+	else
+		qib_set_mtu(ppd, mtu);

 	/* Set operational VLs */
 	vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
 	if (vls) {
 		if (vls > ppd->vls_supported)
-			goto err;
-		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
+			smp->status |= IB_SMP_INVALID_FIELD;
+		else
+			(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
 	}

 	if (pip->mkey_violations == 0)
@@ -770,10 +774,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,

 	ore = pip->localphyerrors_overrunerrors;
 	if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;

 	if (set_overrunthreshold(ppd, (ore & 0xF)))
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;

 	ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

@@ -792,7 +796,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	state = pip->linkspeed_portstate & 0xF;
 	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
 	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;

 	/*
 	 * Only state changes of DOWN, ARM, and ACTIVE are valid
@@ -812,8 +816,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 			lstate = QIB_IB_LINKDOWN;
 		else if (lstate == 3)
 			lstate = QIB_IB_LINKDOWN_DISABLE;
-		else
-			goto err;
+		else {
+			smp->status |= IB_SMP_INVALID_FIELD;
+			break;
+		}
 		spin_lock_irqsave(&ppd->lflags_lock, flags);
 		ppd->lflags &= ~QIBL_LINKV;
 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
@@ -835,8 +841,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 		qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
 		break;
 	default:
-		/* XXX We have already partially updated our state! */
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 	}

 	ret = subn_get_portinfo(smp, ibdev, port);

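Every validation in subn_set_portinfo() now follows the same pattern: a bad field ORs IB_SMP_INVALID_FIELD into the MAD status and processing continues, so the remaining valid fields are still applied instead of leaving the port half-updated. A hedged sketch of the pattern (the status constant is a stand-in, not the value from the IB headers):

#include <stdio.h>

#define SMP_INVALID_FIELD 0x1c		/* stand-in for IB_SMP_INVALID_FIELD */

struct port { int mtu; int vls; };

int main(void)
{
	struct port p = { 2048, 1 };
	int status = 0;
	int req_mtu = -1;		/* invalid request */
	int req_vls = 4, vls_supported = 8;

	/* Each field is checked independently; a bad one flags the status
	 * but does not abort, so later valid fields still take effect. */
	if (req_mtu == -1)
		status |= SMP_INVALID_FIELD;
	else
		p.mtu = req_mtu;

	if (req_vls > vls_supported)
		status |= SMP_INVALID_FIELD;
	else
		p.vls = req_vls;

	printf("status=%#x mtu=%d vls=%d\n", status, p.mtu, p.vls);
	return 0;
}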
+ 5 - 3
drivers/infiniband/hw/qib/qib_mr.c

@@ -39,7 +39,6 @@
 /* Fast memory region */
 struct qib_fmr {
 	struct ib_fmr ibfmr;
-	u8 page_shift;
 	struct qib_mregion mr;        /* must be last */
 };

@@ -107,6 +106,7 @@ static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
 			goto bail;
 	}
 	mr->mr.mapsz = m;
+	mr->mr.page_shift = 0;
 	mr->mr.max_segs = count;

 	/*
@@ -231,6 +231,8 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	mr->mr.access_flags = mr_access_flags;
 	mr->umem = umem;

+	if (is_power_of_2(umem->page_size))
+		mr->mr.page_shift = ilog2(umem->page_size);
 	m = 0;
 	n = 0;
 	list_for_each_entry(chunk, &umem->chunk_list, list) {
@@ -390,7 +392,7 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
 	fmr->mr.offset = 0;
 	fmr->mr.access_flags = mr_access_flags;
 	fmr->mr.max_segs = fmr_attr->max_pages;
-	fmr->page_shift = fmr_attr->page_shift;
+	fmr->mr.page_shift = fmr_attr->page_shift;

 	atomic_set(&fmr->mr.refcount, 0);
 	ret = &fmr->ibfmr;
@@ -437,7 +439,7 @@ int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
 	spin_lock_irqsave(&rkt->lock, flags);
 	fmr->mr.user_base = iova;
 	fmr->mr.iova = iova;
-	ps = 1 << fmr->page_shift;
+	ps = 1 << fmr->mr.page_shift;
 	fmr->mr.length = list_len * ps;
 	m = 0;
 	n = 0;

+ 15 - 17
drivers/infiniband/hw/qib/qib_qp.c

@@ -48,13 +48,12 @@ static inline unsigned mk_qpn(struct qib_qpn_table *qpt,

 static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
 					struct qpn_map *map, unsigned off,
-					unsigned r)
+					unsigned n)
 {
 	if (qpt->mask) {
 		off++;
-		if ((off & qpt->mask) >> 1 != r)
-			off = ((off & qpt->mask) ?
-				(off | qpt->mask) + 1 : off) | (r << 1);
+		if (((off & qpt->mask) >> 1) >= n)
+			off = (off | qpt->mask) + 2;
 	} else
 		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
 	return off;
@@ -123,7 +122,6 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
 	u32 i, offset, max_scan, qpn;
 	struct qpn_map *map;
 	u32 ret;
-	int r;

 	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
 		unsigned n;
@@ -139,15 +137,11 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
 		goto bail;
 	}

-	r = smp_processor_id();
-	if (r >= dd->n_krcv_queues)
-		r %= dd->n_krcv_queues;
-	qpn = qpt->last + 1;
+	qpn = qpt->last + 2;
 	if (qpn >= QPN_MAX)
 		qpn = 2;
-	if (qpt->mask && ((qpn & qpt->mask) >> 1) != r)
-		qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) |
-			(r << 1);
+	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
+		qpn = (qpn | qpt->mask) + 2;
 	offset = qpn & BITS_PER_PAGE_MASK;
 	map = &qpt->map[qpn / BITS_PER_PAGE];
 	max_scan = qpt->nmaps - !offset;
@@ -163,7 +157,8 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
 				ret = qpn;
 				goto bail;
 			}
-			offset = find_next_offset(qpt, map, offset, r);
+			offset = find_next_offset(qpt, map, offset,
+				dd->n_krcv_queues);
 			qpn = mk_qpn(qpt, map, offset);
 			/*
 			 * This test differs from alloc_pidmap().
@@ -183,13 +178,13 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
 			if (qpt->nmaps == QPNMAP_ENTRIES)
 				break;
 			map = &qpt->map[qpt->nmaps++];
-			offset = qpt->mask ? (r << 1) : 0;
+			offset = 0;
 		} else if (map < &qpt->map[qpt->nmaps]) {
 			++map;
-			offset = qpt->mask ? (r << 1) : 0;
+			offset = 0;
 		} else {
 			map = &qpt->map[0];
-			offset = qpt->mask ? (r << 1) : 2;
+			offset = 2;
 		}
 		qpn = mk_qpn(qpt, map, offset);
 	}
@@ -468,6 +463,10 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
 		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
 		del_timer(&qp->s_timer);
 	}
+
+	if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
+		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;
+
 	spin_lock(&dev->pending_lock);
 	if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
 		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
@@ -1061,7 +1060,6 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
 		}
 		qp->ibqp.qp_num = err;
 		qp->port_num = init_attr->port_num;
-		qp->processor_id = smp_processor_id();
 		qib_reset_qp(qp, init_attr->qp_type);
 		break;


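Under the new scheme a QPN candidate keeps its low bit, (qpn & mask) >> 1 selects the receive queue, and any candidate whose queue index reaches n_krcv_queues is rounded up past the current mask period, exactly as find_next_offset() above does. A toy enumeration under assumed values (mask width and queue count are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned mask = 0xf;		/* assumed qpt->mask */
	unsigned n_krcv_queues = 4;	/* assumed kernel receive queue count */
	unsigned off = 0;
	int i;

	for (i = 0; i < 12; i++) {
		/* mirror of find_next_offset(): bump, then skip queue
		 * indices that have no kernel receive queue behind them */
		off++;
		if (((off & mask) >> 1) >= n_krcv_queues)
			off = (off | mask) + 2;
		printf("candidate %2u -> queue %u\n", off, (off & mask) >> 1);
	}
	return 0;
}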
+ 24 - 0
drivers/infiniband/hw/qib/qib_rc.c

@@ -1407,6 +1407,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
 			    struct qib_ctxtdata *rcd)
 {
 	struct qib_swqe *wqe;
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 	enum ib_wc_status status;
 	unsigned long flags;
 	int diff;
@@ -1414,6 +1415,29 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
 	u32 aeth;
 	u64 val;

+	if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
+		/*
+		 * If the ACK'd PSN is on the SDMA busy list, try to make
+		 * progress to reclaim SDMA credits.
+		 */
+		if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
+		    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
+
+			/*
+			 * If the send tasklet is not running, attempt to
+			 * progress the SDMA queue.
+			 */
+			if (!(qp->s_flags & QIB_S_BUSY)) {
+				/* Acquire SDMA Lock */
+				spin_lock_irqsave(&ppd->sdma_lock, flags);
+				/* Invoke sdma make progress */
+				qib_sdma_make_progress(ppd);
+				/* Release SDMA Lock */
+				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+			}
+		}
+	}
+
 	spin_lock_irqsave(&qp->s_lock, flags);

 	/* Ignore invalid responses. */

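The new hunk gates the SDMA-progress attempt on qib_cmp24() comparisons of 24-bit PSNs, which wrap modulo 2^24 and therefore need a circular compare. One common way to implement such a compare, sketched standalone (not necessarily the driver's exact definition of qib_cmp24()):

#include <stdio.h>
#include <stdint.h>

/* < 0 if a precedes b, 0 if equal, > 0 if a follows b, modulo 2^24:
 * shifting the 24-bit difference up by 8 puts its sign in bit 31. */
static int cmp24(uint32_t a, uint32_t b)
{
	return (int32_t)((a - b) << 8) >> 8;
}

int main(void)
{
	printf("%d\n", cmp24(5, 3));		/* 2: 5 follows 3 */
	printf("%d\n", cmp24(2, 0xfffffe));	/* 4: wrapped past 2^24 */
	printf("%d\n", cmp24(0xfffffe, 2));	/* -4 */
	return 0;
}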
+ 25 - 32
drivers/infiniband/hw/qib/qib_ud.c

@@ -445,13 +445,14 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 	qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
 	src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;

-	/* Get the number of bytes the message was padded by. */
+	/*
+	 * Get the number of bytes the message was padded by
+	 * and drop incomplete packets.
+	 */
 	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-	if (unlikely(tlen < (hdrsize + pad + 4))) {
-		/* Drop incomplete packets. */
-		ibp->n_pkt_drops++;
-		goto bail;
-	}
+	if (unlikely(tlen < (hdrsize + pad + 4)))
+		goto drop;
+
 	tlen -= hdrsize + pad + 4;

 	/*
@@ -460,10 +461,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 	 */
 	if (qp->ibqp.qp_num) {
 		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
-			     hdr->lrh[3] == IB_LID_PERMISSIVE)) {
-			ibp->n_pkt_drops++;
-			goto bail;
-		}
+			     hdr->lrh[3] == IB_LID_PERMISSIVE))
+			goto drop;
 		if (qp->ibqp.qp_num > 1) {
 			u16 pkey1, pkey2;

@@ -476,7 +475,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 						0xF,
 					      src_qp, qp->ibqp.qp_num,
 					      hdr->lrh[3], hdr->lrh[1]);
-				goto bail;
+				return;
 			}
 		}
 		if (unlikely(qkey != qp->qkey)) {
@@ -484,30 +483,24 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
 				      src_qp, qp->ibqp.qp_num,
 				      hdr->lrh[3], hdr->lrh[1]);
-			goto bail;
+			return;
 		}
 		/* Drop invalid MAD packets (see 13.5.3.1). */
 		if (unlikely(qp->ibqp.qp_num == 1 &&
 			     (tlen != 256 ||
-			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) {
-			ibp->n_pkt_drops++;
-			goto bail;
-		}
+			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
+			goto drop;
 	} else {
 		struct ib_smp *smp;

 		/* Drop invalid MAD packets (see 13.5.3.1). */
-		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) {
-			ibp->n_pkt_drops++;
-			goto bail;
-		}
+		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
+			goto drop;
 		smp = (struct ib_smp *) data;
 		if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
 		     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
-		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
-			ibp->n_pkt_drops++;
-			goto bail;
-		}
+		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+			goto drop;
 	}

 	/*
@@ -519,14 +512,12 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
 		wc.ex.imm_data = ohdr->u.ud.imm_data;
 		wc.wc_flags = IB_WC_WITH_IMM;
-		hdrsize += sizeof(u32);
+		tlen -= sizeof(u32);
 	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
 		wc.ex.imm_data = 0;
 		wc.wc_flags = 0;
-	} else {
-		ibp->n_pkt_drops++;
-		goto bail;
-	}
+	} else
+		goto drop;

 	/*
 	 * A GRH is expected to precede the data even if not
@@ -556,8 +547,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 	/* Silently drop packets which are too big. */
 	if (unlikely(wc.byte_len > qp->r_len)) {
 		qp->r_flags |= QIB_R_REUSE_SGE;
-		ibp->n_pkt_drops++;
-		return;
+		goto drop;
 	}
 	if (has_grh) {
 		qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
@@ -594,5 +584,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 		     (ohdr->bth[0] &
			cpu_to_be32(IB_BTH_SOLICITED)) != 0);
-bail:;
+	return;
+
+drop:
+	ibp->n_pkt_drops++;
 }

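All of the scattered ibp->n_pkt_drops++/goto bail pairs above collapse into one drop: label, so the counter is bumped on a single shared exit path. The shape of that transformation in miniature:

#include <stdio.h>

static int n_pkt_drops;			/* stand-in for ibp->n_pkt_drops */

static void rcv(int tlen, int min_len)
{
	if (tlen < min_len)
		goto drop;		/* incomplete packet */
	printf("delivered %d bytes\n", tlen);
	return;

drop:
	n_pkt_drops++;			/* the one place the counter moves */
}

int main(void)
{
	rcv(2, 8);
	rcv(16, 8);
	printf("drops=%d\n", n_pkt_drops);
	return 0;
}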
+ 1 - 0
drivers/infiniband/hw/qib/qib_user_sdma.c

@@ -382,6 +382,7 @@ static void qib_user_sdma_free_pkt_list(struct device *dev,

 		kmem_cache_free(pq->pkt_slab, pkt);
 	}
+	INIT_LIST_HEAD(list);
 }

 /*

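The one-liner above matters because qib_user_sdma_free_pkt_list() frees every node but previously left the caller's list head pointing into freed memory; re-initializing the head makes the list safely reusable. The same hazard in miniature, with a hand-rolled singly-linked list standing in for the kernel's struct list_head:

#include <stdio.h>
#include <stdlib.h>

struct pkt { struct pkt *next; };

static void free_pkt_list(struct pkt **head)
{
	struct pkt *p = *head;

	while (p) {
		struct pkt *next = p->next;
		free(p);
		p = next;
	}
	*head = NULL;	/* analogue of INIT_LIST_HEAD(list): without this,
			 * *head still points at freed memory and a second
			 * walk would be a use-after-free */
}

int main(void)
{
	struct pkt *head = calloc(1, sizeof(*head));

	free_pkt_list(&head);
	free_pkt_list(&head);	/* safe only because head was reset */
	printf("head=%p\n", (void *)head);
	return 0;
}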
+ 3 - 8
drivers/infiniband/hw/qib/qib_verbs.h

@@ -301,6 +301,7 @@ struct qib_mregion {
 	int access_flags;
 	u32 max_segs;           /* number of qib_segs in all the arrays */
 	u32 mapsz;              /* size of the map array */
+	u8  page_shift;         /* 0 - non-uniform or non-power-of-2 page sizes */
 	atomic_t refcount;
 	struct qib_segarray *map[0];    /* the segments */
 };
@@ -435,7 +436,6 @@ struct qib_qp {
 	spinlock_t r_lock;      /* used for APM */
 	spinlock_t s_lock;
 	atomic_t s_dma_busy;
-	unsigned processor_id;	/* Processor ID QP is bound to */
 	u32 s_flags;
 	u32 s_cur_size;         /* size of send packet in bytes */
 	u32 s_len;              /* total length of s_sge */
@@ -813,13 +813,8 @@ extern struct workqueue_struct *qib_cq_wq;
 	 */
 static inline void qib_schedule_send(struct qib_qp *qp)
 {
-	if (qib_send_ok(qp)) {
-		if (qp->processor_id == smp_processor_id())
-			queue_work(qib_wq, &qp->s_work);
-		else
-			queue_work_on(qp->processor_id,
-				      qib_wq, &qp->s_work);
-	}
+	if (qib_send_ok(qp))
+		queue_work(qib_wq, &qp->s_work);
 }

 static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)

+ 0 - 1
drivers/infiniband/ulp/ipoib/Kconfig

@@ -1,7 +1,6 @@
 config INFINIBAND_IPOIB
 	tristate "IP-over-InfiniBand"
 	depends on NETDEVICES && INET && (IPV6 || IPV6=n)
-	select INET_LRO
 	---help---
 	  Support for the IP-over-InfiniBand protocol (IPoIB). This
 	  transports IP packets over InfiniBand so you can use your IB

+ 1 - 11
drivers/infiniband/ulp/ipoib/ipoib.h

@@ -50,7 +50,7 @@
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_pack.h>
 #include <rdma/ib_sa.h>
-#include <linux/inet_lro.h>
+#include <linux/sched.h>

 /* constants */

@@ -100,9 +100,6 @@ enum {
 	IPOIB_MCAST_FLAG_BUSY	  = 2,	/* joining or already joined */
 	IPOIB_MCAST_FLAG_ATTACHED = 3,

-	IPOIB_MAX_LRO_DESCRIPTORS = 8,
-	IPOIB_LRO_MAX_AGGR 	  = 64,
-
 	MAX_SEND_CQE		  = 16,
 	IPOIB_CM_COPYBREAK	  = 256,
 };
@@ -262,11 +259,6 @@ struct ipoib_ethtool_st {
 	u16     max_coalesced_frames;
 };

-struct ipoib_lro {
-	struct net_lro_mgr lro_mgr;
-	struct net_lro_desc lro_desc[IPOIB_MAX_LRO_DESCRIPTORS];
-};
-
 /*
  * Device private locking: network stack tx_lock protects members used
  * in TX fast path, lock protects everything else.  lock nests inside
@@ -352,8 +344,6 @@ struct ipoib_dev_priv {
 	int	hca_caps;
 	struct ipoib_ethtool_st ethtool;
 	struct timer_list poll_timer;
-
-	struct ipoib_lro lro;
 };

 struct ipoib_ah {

+ 1 - 0
drivers/infiniband/ulp/ipoib/ipoib_cm.c

@@ -1480,6 +1480,7 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,

 		if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
 			dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+			priv->dev->features |= NETIF_F_GRO;
 			if (priv->hca_caps & IB_DEVICE_UD_TSO)
 				dev->features |= NETIF_F_TSO;
 		}

+ 0 - 51
drivers/infiniband/ulp/ipoib/ipoib_ethtool.c

@@ -106,63 +106,12 @@ static int ipoib_set_coalesce(struct net_device *dev,
 	return 0;
 }

-static const char ipoib_stats_keys[][ETH_GSTRING_LEN] = {
-	"LRO aggregated", "LRO flushed",
-	"LRO avg aggr", "LRO no desc"
-};
-
-static void ipoib_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
-{
-	switch (stringset) {
-	case ETH_SS_STATS:
-		memcpy(data, *ipoib_stats_keys,	sizeof(ipoib_stats_keys));
-		break;
-	}
-}
-
-static int ipoib_get_sset_count(struct net_device *dev, int sset)
-{
-	switch (sset) {
-	case ETH_SS_STATS:
-		return ARRAY_SIZE(ipoib_stats_keys);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-static void ipoib_get_ethtool_stats(struct net_device *dev,
-				struct ethtool_stats *stats, uint64_t *data)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	int index = 0;
-
-	/* Get LRO statistics */
-	data[index++] = priv->lro.lro_mgr.stats.aggregated;
-	data[index++] = priv->lro.lro_mgr.stats.flushed;
-	if (priv->lro.lro_mgr.stats.flushed)
-		data[index++] = priv->lro.lro_mgr.stats.aggregated /
-				priv->lro.lro_mgr.stats.flushed;
-	else
-		data[index++] = 0;
-	data[index++] = priv->lro.lro_mgr.stats.no_desc;
-}
-
-static int ipoib_set_flags(struct net_device *dev, u32 flags)
-{
-	return ethtool_op_set_flags(dev, flags, ETH_FLAG_LRO);
-}
-
 static const struct ethtool_ops ipoib_ethtool_ops = {
 	.get_drvinfo		= ipoib_get_drvinfo,
 	.get_rx_csum		= ipoib_get_rx_csum,
 	.set_tso		= ipoib_set_tso,
 	.get_coalesce		= ipoib_get_coalesce,
 	.set_coalesce		= ipoib_set_coalesce,
-	.get_flags		= ethtool_op_get_flags,
-	.set_flags		= ipoib_set_flags,
-	.get_strings		= ipoib_get_strings,
-	.get_sset_count		= ipoib_get_sset_count,
-	.get_ethtool_stats	= ipoib_get_ethtool_stats,
 };

 void ipoib_set_ethtool_ops(struct net_device *dev)

+ 1 - 7
drivers/infiniband/ulp/ipoib/ipoib_ib.c

@@ -295,10 +295,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;

-	if (dev->features & NETIF_F_LRO)
-		lro_receive_skb(&priv->lro.lro_mgr, skb, NULL);
-	else
-		netif_receive_skb(skb);
+	napi_gro_receive(&priv->napi, skb);

 repost:
 	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
@@ -450,9 +447,6 @@ poll_more:
 	}

 	if (done < budget) {
-		if (dev->features & NETIF_F_LRO)
-			lro_flush_all(&priv->lro.lro_mgr);
-
 		napi_complete(napi);
 		if (unlikely(ib_req_notify_cq(priv->recv_cq,
 					      IB_CQ_NEXT_COMP |

+ 1 - 61
drivers/infiniband/ulp/ipoib/ipoib_main.c

@@ -60,15 +60,6 @@ MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
 module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
 MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

-static int lro;
-module_param(lro, bool, 0444);
-MODULE_PARM_DESC(lro,  "Enable LRO (Large Receive Offload)");
-
-static int lro_max_aggr = IPOIB_LRO_MAX_AGGR;
-module_param(lro_max_aggr, int, 0644);
-MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated "
-		"(default = 64)");
-
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 int ipoib_debug_level;

@@ -976,54 +967,6 @@ static const struct header_ops ipoib_header_ops = {
 	.create	= ipoib_hard_header,
 };

-static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
-		       void **tcph, u64 *hdr_flags, void *priv)
-{
-	unsigned int ip_len;
-	struct iphdr *iph;
-
-	if (unlikely(skb->protocol != htons(ETH_P_IP)))
-		return -1;
-
-	/*
-	 * In the future we may add an else clause that verifies the
-	 * checksum and allows devices which do not calculate checksum
-	 * to use LRO.
-	 */
-	if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY))
-		return -1;
-
-	/* Check for non-TCP packet */
-	skb_reset_network_header(skb);
-	iph = ip_hdr(skb);
-	if (iph->protocol != IPPROTO_TCP)
-		return -1;
-
-	ip_len = ip_hdrlen(skb);
-	skb_set_transport_header(skb, ip_len);
-	*tcph = tcp_hdr(skb);
-
-	/* check if IP header and TCP header are complete */
-	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
-		return -1;
-
-	*hdr_flags = LRO_IPV4 | LRO_TCP;
-	*iphdr = iph;
-
-	return 0;
-}
-
-static void ipoib_lro_setup(struct ipoib_dev_priv *priv)
-{
-	priv->lro.lro_mgr.max_aggr	 = lro_max_aggr;
-	priv->lro.lro_mgr.max_desc	 = IPOIB_MAX_LRO_DESCRIPTORS;
-	priv->lro.lro_mgr.lro_arr	 = priv->lro.lro_desc;
-	priv->lro.lro_mgr.get_skb_header = get_skb_hdr;
-	priv->lro.lro_mgr.features	 = LRO_F_NAPI;
-	priv->lro.lro_mgr.dev		 = priv->dev;
-	priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-}
-
 static const struct net_device_ops ipoib_netdev_ops = {
 	.ndo_open		 = ipoib_open,
 	.ndo_stop		 = ipoib_stop,
@@ -1067,8 +1010,6 @@ static void ipoib_setup(struct net_device *dev)

 	priv->dev = dev;

-	ipoib_lro_setup(priv);
-
 	spin_lock_init(&priv->lock);

 	mutex_init(&priv->vlan_mutex);
@@ -1218,8 +1159,7 @@ int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
 		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
 	}

-	if (lro)
-		priv->dev->features |= NETIF_F_LRO;
+	priv->dev->features |= NETIF_F_GRO;

 	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
 		priv->dev->features |= NETIF_F_TSO;

+ 181 - 211
drivers/infiniband/ulp/srp/ib_srp.c

@@ -441,18 +441,28 @@ static void srp_disconnect_target(struct srp_target_port *target)
 	wait_for_completion(&target->done);
 }

+static bool srp_change_state(struct srp_target_port *target,
+			    enum srp_target_state old,
+			    enum srp_target_state new)
+{
+	bool changed = false;
+
+	spin_lock_irq(&target->lock);
+	if (target->state == old) {
+		target->state = new;
+		changed = true;
+	}
+	spin_unlock_irq(&target->lock);
+	return changed;
+}
+
 static void srp_remove_work(struct work_struct *work)
 {
 	struct srp_target_port *target =
 		container_of(work, struct srp_target_port, work);

-	spin_lock_irq(target->scsi_host->host_lock);
-	if (target->state != SRP_TARGET_DEAD) {
-		spin_unlock_irq(target->scsi_host->host_lock);
+	if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
 		return;
-	}
-	target->state = SRP_TARGET_REMOVED;
-	spin_unlock_irq(target->scsi_host->host_lock);

 	spin_lock(&target->srp_host->target_lock);
 	list_del(&target->list);
@@ -539,33 +549,34 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 			scsi_sg_count(scmnd), scmnd->sc_data_direction);
 }

-static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
+static void srp_remove_req(struct srp_target_port *target,
+			   struct srp_request *req, s32 req_lim_delta)
 {
+	unsigned long flags;
+
 	srp_unmap_data(req->scmnd, target, req);
-	list_move_tail(&req->list, &target->free_reqs);
+	spin_lock_irqsave(&target->lock, flags);
+	target->req_lim += req_lim_delta;
+	req->scmnd = NULL;
+	list_add_tail(&req->list, &target->free_reqs);
+	spin_unlock_irqrestore(&target->lock, flags);
 }

 static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
 {
 	req->scmnd->result = DID_RESET << 16;
 	req->scmnd->scsi_done(req->scmnd);
-	srp_remove_req(target, req);
+	srp_remove_req(target, req, 0);
 }

 static int srp_reconnect_target(struct srp_target_port *target)
 {
 	struct ib_qp_attr qp_attr;
-	struct srp_request *req, *tmp;
 	struct ib_wc wc;
-	int ret;
+	int i, ret;

-	spin_lock_irq(target->scsi_host->host_lock);
-	if (target->state != SRP_TARGET_LIVE) {
-		spin_unlock_irq(target->scsi_host->host_lock);
+	if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
 		return -EAGAIN;
-	}
-	target->state = SRP_TARGET_CONNECTING;
-	spin_unlock_irq(target->scsi_host->host_lock);

 	srp_disconnect_target(target);
 	/*
@@ -590,27 +601,23 @@ static int srp_reconnect_target(struct srp_target_port *target)
 	while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
 		; /* nothing */

-	spin_lock_irq(target->scsi_host->host_lock);
-	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
-		srp_reset_req(target, req);
-	spin_unlock_irq(target->scsi_host->host_lock);
+	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+		struct srp_request *req = &target->req_ring[i];
+		if (req->scmnd)
+			srp_reset_req(target, req);
+	}

-	target->rx_head	 = 0;
-	target->tx_head	 = 0;
-	target->tx_tail  = 0;
+	INIT_LIST_HEAD(&target->free_tx);
+	for (i = 0; i < SRP_SQ_SIZE; ++i)
+		list_add(&target->tx_ring[i]->list, &target->free_tx);

 	target->qp_in_error = 0;
 	ret = srp_connect_target(target);
 	if (ret)
 		goto err;

-	spin_lock_irq(target->scsi_host->host_lock);
-	if (target->state == SRP_TARGET_CONNECTING) {
-		ret = 0;
-		target->state = SRP_TARGET_LIVE;
-	} else
+	if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
 		ret = -EAGAIN;
-	spin_unlock_irq(target->scsi_host->host_lock);

 	return ret;

@@ -620,17 +627,20 @@ err:

 	/*
 	 * We couldn't reconnect, so kill our target port off.
-	 * However, we have to defer the real removal because we might
-	 * be in the context of the SCSI error handler now, which
-	 * would deadlock if we call scsi_remove_host().
+	 * However, we have to defer the real removal because we
+	 * are in the context of the SCSI error handler now, which
+	 * will deadlock if we call scsi_remove_host().
+	 *
+	 * Schedule our work inside the lock to avoid a race with
+	 * the flush_scheduled_work() in srp_remove_one().
 	 */
-	spin_lock_irq(target->scsi_host->host_lock);
+	spin_lock_irq(&target->lock);
 	if (target->state == SRP_TARGET_CONNECTING) {
 		target->state = SRP_TARGET_DEAD;
 		INIT_WORK(&target->work, srp_remove_work);
 		schedule_work(&target->work);
 	}
-	spin_unlock_irq(target->scsi_host->host_lock);
+	spin_unlock_irq(&target->lock);

 	return ret;
 }
@@ -758,7 +768,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		struct srp_direct_buf *buf = (void *) cmd->add_data;

 		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
-		buf->key = cpu_to_be32(dev->mr->rkey);
+		buf->key = cpu_to_be32(target->rkey);
 		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
 	} else if (srp_map_fmr(target, scat, count, req,
 			       (void *) cmd->add_data)) {
@@ -783,7 +793,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 			buf->desc_list[i].va  =
 				cpu_to_be64(ib_sg_dma_address(ibdev, sg));
 			buf->desc_list[i].key =
-				cpu_to_be32(dev->mr->rkey);
+				cpu_to_be32(target->rkey);
 			buf->desc_list[i].len = cpu_to_be32(dma_len);
 			datalen += dma_len;
 		}
@@ -796,7 +806,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		buf->table_desc.va  =
 			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
 		buf->table_desc.key =
-			cpu_to_be32(target->srp_host->srp_dev->mr->rkey);
+			cpu_to_be32(target->rkey);
 		buf->table_desc.len =
 			cpu_to_be32(count * sizeof (struct srp_direct_buf));

@@ -812,9 +822,23 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 }

 /*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.  Lock cannot be dropped between call here and
- * call to __srp_post_send().
+ * Return an IU and possible credit to the free pool
+ */
+static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
+			  enum srp_iu_type iu_type)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&target->lock, flags);
+	list_add(&iu->list, &target->free_tx);
+	if (iu_type != SRP_IU_RSP)
+		++target->req_lim;
+	spin_unlock_irqrestore(&target->lock, flags);
+}
+
+/*
+ * Must be called with target->lock held to protect req_lim and free_tx.
+ * If IU is not sent, it must be returned using srp_put_tx_iu().
  *
 * Note:
 * An upper limit for the number of allocated information units for each
@@ -833,83 +857,59 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,

 	srp_send_completion(target->send_cq, target);

-	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
+	if (list_empty(&target->free_tx))
 		return NULL;

 	/* Initiator responses to target requests do not consume credits */
-	if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
-		++target->zero_req_lim;
-		return NULL;
+	if (iu_type != SRP_IU_RSP) {
+		if (target->req_lim <= rsv) {
+			++target->zero_req_lim;
+			return NULL;
+		}
+
+		--target->req_lim;
 	}

-	iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
-	iu->type = iu_type;
+	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
+	list_del(&iu->list);
 	return iu;
 }

-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.
- */
-static int __srp_post_send(struct srp_target_port *target,
-			   struct srp_iu *iu, int len)
+static int srp_post_send(struct srp_target_port *target,
+			 struct srp_iu *iu, int len)
 {
 	struct ib_sge list;
 	struct ib_send_wr wr, *bad_wr;
-	int ret = 0;

 	list.addr   = iu->dma;
 	list.length = len;
-	list.lkey   = target->srp_host->srp_dev->mr->lkey;
+	list.lkey   = target->lkey;

 	wr.next       = NULL;
-	wr.wr_id      = target->tx_head & SRP_SQ_MASK;
+	wr.wr_id      = (uintptr_t) iu;
 	wr.sg_list    = &list;
 	wr.num_sge    = 1;
 	wr.opcode     = IB_WR_SEND;
 	wr.send_flags = IB_SEND_SIGNALED;

-	ret = ib_post_send(target->qp, &wr, &bad_wr);
-
-	if (!ret) {
-		++target->tx_head;
-		if (iu->type != SRP_IU_RSP)
-			--target->req_lim;
-	}
-
-	return ret;
+	return ib_post_send(target->qp, &wr, &bad_wr);
 }

-static int srp_post_recv(struct srp_target_port *target)
+static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
 {
-	unsigned long flags;
-	struct srp_iu *iu;
-	struct ib_sge list;
 	struct ib_recv_wr wr, *bad_wr;
-	unsigned int next;
-	int ret;
-
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
-
-	next	 = target->rx_head & SRP_RQ_MASK;
-	wr.wr_id = next;
-	iu	 = target->rx_ring[next];
+	struct ib_sge list;

 	list.addr   = iu->dma;
 	list.length = iu->size;
-	list.lkey   = target->srp_host->srp_dev->mr->lkey;
+	list.lkey   = target->lkey;

 	wr.next     = NULL;
+	wr.wr_id    = (uintptr_t) iu;
 	wr.sg_list  = &list;
 	wr.num_sge  = 1;

-	ret = ib_post_recv(target->qp, &wr, &bad_wr);
-	if (!ret)
-		++target->rx_head;
-
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
-
-	return ret;
+	return ib_post_recv(target->qp, &wr, &bad_wr);
 }

 static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
@@ -917,23 +917,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 	struct srp_request *req;
 	struct scsi_cmnd *scmnd;
 	unsigned long flags;
-	s32 delta;
-
-	delta = (s32) be32_to_cpu(rsp->req_lim_delta);
-
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
-
-	target->req_lim += delta;
-
-	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];

 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
-		if (be32_to_cpu(rsp->resp_data_len) < 4)
-			req->tsk_status = -1;
-		else
-			req->tsk_status = rsp->data[3];
-		complete(&req->done);
+		spin_lock_irqsave(&target->lock, flags);
+		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
+		spin_unlock_irqrestore(&target->lock, flags);
+
+		target->tsk_mgmt_status = -1;
+		if (be32_to_cpu(rsp->resp_data_len) >= 4)
+			target->tsk_mgmt_status = rsp->data[3];
+		complete(&target->tsk_mgmt_done);
 	} else {
+		req = &target->req_ring[rsp->tag];
 		scmnd = req->scmnd;
 		if (!scmnd)
 			shost_printk(KERN_ERR, target->scsi_host,
@@ -953,49 +948,42 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

-		if (!req->tsk_mgmt) {
-			scmnd->host_scribble = (void *) -1L;
-			scmnd->scsi_done(scmnd);
-
-			srp_remove_req(target, req);
-		} else
-			req->cmd_done = 1;
+		srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
+		scmnd->host_scribble = NULL;
+		scmnd->scsi_done(scmnd);
 	}
-
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 }

 static int srp_response_common(struct srp_target_port *target, s32 req_delta,
 			       void *rsp, int len)
 {
-	struct ib_device *dev;
+	struct ib_device *dev = target->srp_host->srp_dev->dev;
 	unsigned long flags;
 	struct srp_iu *iu;
-	int err = 1;
+	int err;

-	dev = target->srp_host->srp_dev->dev;
-
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
+	spin_lock_irqsave(&target->lock, flags);
 	target->req_lim += req_delta;
-
 	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
+	spin_unlock_irqrestore(&target->lock, flags);
+
 	if (!iu) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX
 			     "no IU available to send response\n");
-		goto out;
+		return 1;
 	}

 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
 	memcpy(iu->buf, rsp, len);
 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

-	err = __srp_post_send(target, iu, len);
-	if (err)
+	err = srp_post_send(target, iu, len);
+	if (err) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX
 			     "unable to post response: %d\n", err);
+		srp_put_tx_iu(target, iu, SRP_IU_RSP);
+	}

-out:
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 	return err;
 }

@@ -1032,14 +1020,11 @@ static void srp_process_aer_req(struct srp_target_port *target,

 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 {
-	struct ib_device *dev;
-	struct srp_iu *iu;
+	struct ib_device *dev = target->srp_host->srp_dev->dev;
+	struct srp_iu *iu = (struct srp_iu *) wc->wr_id;
 	int res;
 	u8 opcode;

-	iu = target->rx_ring[wc->wr_id];
-
-	dev = target->srp_host->srp_dev->dev;
 	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
 				   DMA_FROM_DEVICE);

@@ -1080,7 +1065,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
 				      DMA_FROM_DEVICE);

-	res = srp_post_recv(target);
+	res = srp_post_recv(target, iu);
 	if (res != 0)
 		shost_printk(KERN_ERR, target->scsi_host,
 			     PFX "Recv failed with error code %d\n", res);
@@ -1109,6 +1094,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
 {
 	struct srp_target_port *target = target_ptr;
 	struct ib_wc wc;
+	struct srp_iu *iu;

 	while (ib_poll_cq(cq, 1, &wc) > 0) {
 		if (wc.status) {
@@ -1119,18 +1105,19 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
 			break;
 		}

-		++target->tx_tail;
+		iu = (struct srp_iu *) wc.wr_id;
+		list_add(&iu->list, &target->free_tx);
 	}
 }

-static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
-			    void (*done)(struct scsi_cmnd *))
+static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 {
-	struct srp_target_port *target = host_to_target(scmnd->device->host);
+	struct srp_target_port *target = host_to_target(shost);
 	struct srp_request *req;
 	struct srp_iu *iu;
 	struct srp_cmd *cmd;
 	struct ib_device *dev;
+	unsigned long flags;
 	int len;

 	if (target->state == SRP_TARGET_CONNECTING)
@@ -1139,11 +1126,19 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
 	if (target->state == SRP_TARGET_DEAD ||
 	    target->state == SRP_TARGET_REMOVED) {
 		scmnd->result = DID_BAD_TARGET << 16;
-		done(scmnd);
+		scmnd->scsi_done(scmnd);
 		return 0;
 	}

+	spin_lock_irqsave(&target->lock, flags);
 	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
+	if (iu) {
+		req = list_first_entry(&target->free_reqs, struct srp_request,
+				      list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&target->lock, flags);
+
 	if (!iu)
 		goto err;

@@ -1151,11 +1146,8 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
 	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
 				   DMA_TO_DEVICE);

-	req = list_first_entry(&target->free_reqs, struct srp_request, list);
-
-	scmnd->scsi_done     = done;
 	scmnd->result        = 0;
-	scmnd->host_scribble = (void *) (long) req->index;
+	scmnd->host_scribble = (void *) req;

 	cmd = iu->buf;
 	memset(cmd, 0, sizeof *cmd);
@@ -1167,37 +1159,38 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,

 	req->scmnd    = scmnd;
 	req->cmd      = iu;
-	req->cmd_done = 0;
-	req->tsk_mgmt = NULL;

 	len = srp_map_data(scmnd, target, req);
 	if (len < 0) {
 		shost_printk(KERN_ERR, target->scsi_host,
 			     PFX "Failed to map data\n");
-		goto err;
+		goto err_iu;
 	}

 	ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
 				      DMA_TO_DEVICE);

-	if (__srp_post_send(target, iu, len)) {
+	if (srp_post_send(target, iu, len)) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
 		goto err_unmap;
 	}

-	list_move_tail(&req->list, &target->req_queue);
-
 	return 0;

 err_unmap:
 	srp_unmap_data(scmnd, target, req);

+err_iu:
+	srp_put_tx_iu(target, iu, SRP_IU_CMD);
+
+	spin_lock_irqsave(&target->lock, flags);
+	list_add(&req->list, &target->free_reqs);
+	spin_unlock_irqrestore(&target->lock, flags);
+
 err:
 	return SCSI_MLQUEUE_HOST_BUSY;
 }

-static DEF_SCSI_QCMD(srp_queuecommand)
-
 static int srp_alloc_iu_bufs(struct srp_target_port *target)
 {
 	int i;
@@ -1216,6 +1209,8 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
 						  GFP_KERNEL, DMA_TO_DEVICE);
 		if (!target->tx_ring[i])
 			goto err;
+
+		list_add(&target->tx_ring[i]->list, &target->free_tx);
 	}

 	return 0;
@@ -1377,7 +1372,8 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 			break;

 		for (i = 0; i < SRP_RQ_SIZE; i++) {
-			target->status = srp_post_recv(target);
+			struct srp_iu *iu = target->rx_ring[i];
+			target->status = srp_post_recv(target, iu);
 			if (target->status)
 				break;
 		}
@@ -1442,25 +1438,24 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 }

 static int srp_send_tsk_mgmt(struct srp_target_port *target,
-			     struct srp_request *req, u8 func)
+			     u64 req_tag, unsigned int lun, u8 func)
 {
 	struct ib_device *dev = target->srp_host->srp_dev->dev;
 	struct srp_iu *iu;
 	struct srp_tsk_mgmt *tsk_mgmt;

-	spin_lock_irq(target->scsi_host->host_lock);
-
 	if (target->state == SRP_TARGET_DEAD ||
-	    target->state == SRP_TARGET_REMOVED) {
-		req->scmnd->result = DID_BAD_TARGET << 16;
-		goto out;
-	}
+	    target->state == SRP_TARGET_REMOVED)
+		return -1;

-	init_completion(&req->done);
+	init_completion(&target->tsk_mgmt_done);

+	spin_lock_irq(&target->lock);
 	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
+	spin_unlock_irq(&target->lock);
+
 	if (!iu)
-		goto out;
+		return -1;

 	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
 				   DMA_TO_DEVICE);
@@ -1468,70 +1463,46 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
 	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

 	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
-	tsk_mgmt->lun 		= cpu_to_be64((u64) req->scmnd->device->lun << 48);
-	tsk_mgmt->tag 		= req->index | SRP_TAG_TSK_MGMT;
+	tsk_mgmt->lun		= cpu_to_be64((u64) lun << 48);
+	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
 	tsk_mgmt->tsk_mgmt_func = func;
-	tsk_mgmt->task_tag 	= req->index;
+	tsk_mgmt->task_tag	= req_tag;

 	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
 				      DMA_TO_DEVICE);
-	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
-		goto out;
-
-	req->tsk_mgmt = iu;
-
-	spin_unlock_irq(target->scsi_host->host_lock);
-
-	if (!wait_for_completion_timeout(&req->done,
-					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
+	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
+		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
 		return -1;
+	}

-	return 0;
-
-out:
-	spin_unlock_irq(target->scsi_host->host_lock);
-	return -1;
-}
-
-static int srp_find_req(struct srp_target_port *target,
-			struct scsi_cmnd *scmnd,
-			struct srp_request **req)
-{
-	if (scmnd->host_scribble == (void *) -1L)
+	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
+					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
 		return -1;

-	*req = &target->req_ring[(long) scmnd->host_scribble];
-
 	return 0;
 }

 static int srp_abort(struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
-	struct srp_request *req;
+	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
 	int ret = SUCCESS;

 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

-	if (target->qp_in_error)
+	if (!req || target->qp_in_error)
 		return FAILED;
-	if (srp_find_req(target, scmnd, &req))
+	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+			      SRP_TSK_ABORT_TASK))
 		return FAILED;
-	if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
-		return FAILED;
-
-	spin_lock_irq(target->scsi_host->host_lock);

-	if (req->cmd_done) {
-		srp_remove_req(target, req);
-		scmnd->scsi_done(scmnd);
-	} else if (!req->tsk_status) {
-		srp_remove_req(target, req);
-		scmnd->result = DID_ABORT << 16;
-	} else
-		ret = FAILED;
-
-	spin_unlock_irq(target->scsi_host->host_lock);
+	if (req->scmnd) {
+		if (!target->tsk_mgmt_status) {
+			srp_remove_req(target, req, 0);
+			scmnd->result = DID_ABORT << 16;
+		} else
+			ret = FAILED;
+	}

 	return ret;
 }
@@ -1539,26 +1510,23 @@ static int srp_abort(struct scsi_cmnd *scmnd)
 static int srp_reset_device(struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
-	struct srp_request *req, *tmp;
+	int i;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
 
 	if (target->qp_in_error)
 		return FAILED;
-	if (srp_find_req(target, scmnd, &req))
+	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
+			      SRP_TSK_LUN_RESET))
 		return FAILED;
-	if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
+	if (target->tsk_mgmt_status)
 		return FAILED;
-	if (req->tsk_status)
-		return FAILED;
-
-	spin_lock_irq(target->scsi_host->host_lock);
 
-	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
-		if (req->scmnd->device == scmnd->device)
+	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+		struct srp_request *req = &target->req_ring[i];
+		if (req->scmnd && req->scmnd->device == scmnd->device)
 			srp_reset_req(target, req);
-
-	spin_unlock_irq(target->scsi_host->host_lock);
+	}
 
 	return SUCCESS;
 }
@@ -1987,9 +1955,12 @@ static ssize_t srp_create_target(struct device *dev,
 	target->io_class   = SRP_REV16A_IB_IO_CLASS;
 	target->scsi_host  = target_host;
 	target->srp_host   = host;
+	target->lkey	   = host->srp_dev->mr->lkey;
+	target->rkey	   = host->srp_dev->mr->rkey;
 
+	spin_lock_init(&target->lock);
+	INIT_LIST_HEAD(&target->free_tx);
 	INIT_LIST_HEAD(&target->free_reqs);
-	INIT_LIST_HEAD(&target->req_queue);
 	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
 		target->req_ring[i].index = i;
 		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
@@ -2217,9 +2188,9 @@ static void srp_remove_one(struct ib_device *device)
 		 */
 		spin_lock(&host->target_lock);
 		list_for_each_entry(target, &host->target_list, list) {
-			spin_lock_irq(target->scsi_host->host_lock);
+			spin_lock_irq(&target->lock);
 			target->state = SRP_TARGET_REMOVED;
-			spin_unlock_irq(target->scsi_host->host_lock);
+			spin_unlock_irq(&target->lock);
 		}
 		spin_unlock(&host->target_lock);
 
@@ -2258,8 +2229,7 @@ static int __init srp_init_module(void)
 {
 	int ret;
 
-	BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE);
-	BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE);
+	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
 
 	if (srp_sg_tablesize > 255) {
 		printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
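
A note on the srp_init_module() change above: the reworked driver stores pointers in the 64-bit wr_id field of struct ib_wc rather than ring indices, so the old power-of-two size assertions give way to a build-time check that a pointer actually fits in wr_id. Below is a minimal userspace re-creation of that check; struct fake_ib_wc and the two macros are stand-ins re-derived for illustration, not the kernel's definitions.

/* Sketch: reproduces the BUILD_BUG_ON above via the negative-array-size
 * trick; compiles to nothing when the condition is false, and fails to
 * compile at all wherever a pointer cannot round-trip through wr_id. */
#include <stdint.h>

struct fake_ib_wc {
	uint64_t wr_id;			/* stand-in for ib_wc.wr_id */
};

#define FIELD_SIZEOF(t, f)	(sizeof(((t *)0)->f))
#define BUILD_BUG_ON(cond)	((void)sizeof(char[1 - 2 * !!(cond)]))

int main(void)
{
	BUILD_BUG_ON(FIELD_SIZEOF(struct fake_ib_wc, wr_id) < sizeof(void *));
	return 0;
}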

+ 25 - 21
drivers/infiniband/ulp/srp/ib_srp.h

@@ -59,16 +59,15 @@ enum {
 
 	SRP_RQ_SHIFT    	= 6,
 	SRP_RQ_SIZE		= 1 << SRP_RQ_SHIFT,
-	SRP_RQ_MASK		= SRP_RQ_SIZE - 1,
 
 	SRP_SQ_SIZE		= SRP_RQ_SIZE,
-	SRP_SQ_MASK		= SRP_SQ_SIZE - 1,
 	SRP_RSP_SQ_SIZE		= 1,
 	SRP_REQ_SQ_SIZE		= SRP_SQ_SIZE - SRP_RSP_SQ_SIZE,
 	SRP_TSK_MGMT_SQ_SIZE	= 1,
 	SRP_CMD_SQ_SIZE		= SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE,
 
-	SRP_TAG_TSK_MGMT	= 1 << (SRP_RQ_SHIFT + 1),
+	SRP_TAG_NO_REQ		= ~0U,
+	SRP_TAG_TSK_MGMT	= 1U << 31,
 
 	SRP_FMR_SIZE		= 256,
 	SRP_FMR_POOL_SIZE	= 1024,
@@ -113,15 +112,29 @@ struct srp_request {
 	struct list_head	list;
 	struct scsi_cmnd       *scmnd;
 	struct srp_iu	       *cmd;
-	struct srp_iu	       *tsk_mgmt;
 	struct ib_pool_fmr     *fmr;
-	struct completion	done;
 	short			index;
-	u8			cmd_done;
-	u8			tsk_status;
 };
 
 struct srp_target_port {
+	/* These are RW in the hot path, and commonly used together */
+	struct list_head	free_tx;
+	struct list_head	free_reqs;
+	spinlock_t		lock;
+	s32			req_lim;
+
+	/* These are read-only in the hot path */
+	struct ib_cq	       *send_cq ____cacheline_aligned_in_smp;
+	struct ib_cq	       *recv_cq;
+	struct ib_qp	       *qp;
+	u32			lkey;
+	u32			rkey;
+	enum srp_target_state	state;
+
+	/* Everything above this point is used in the hot path of
+	 * command processing. Try to keep them packed into cachelines.
+	 */
+
 	__be64			id_ext;
 	__be64			ioc_guid;
 	__be64			service_id;
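
The comment block in the hunk above carries the design rationale: fields written on every command (the free lists, lock, and req_lim) are packed together, while fields that are only read in the hot path start on a fresh cacheline via ____cacheline_aligned_in_smp, so hot-path stores on one CPU do not keep invalidating read-mostly data on the others. A rough userspace illustration of that split, assuming 64-byte cachelines; the struct below is invented for the sketch, and alignas(64) merely plays the role of the kernel annotation.

/* Sketch: hot read-write fields share cachelines; read-mostly fields
 * get a line of their own so hot-path stores do not bounce them. */
#include <stdalign.h>
#include <stdint.h>

struct hot_cold_split {
	uint32_t req_lim;		/* RW on every I/O: pack together */
	uint32_t free_count;

	alignas(64) uint32_t lkey;	/* read-only in the hot path */
	uint32_t rkey;
};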
@@ -138,24 +151,13 @@ struct srp_target_port {
 	int			path_query_id;
 
 	struct ib_cm_id	       *cm_id;
-	struct ib_cq	       *recv_cq;
-	struct ib_cq	       *send_cq;
-	struct ib_qp	       *qp;
 
 	int			max_ti_iu_len;
-	s32			req_lim;
 
 	int			zero_req_lim;
 
-	unsigned		rx_head;
-	struct srp_iu	       *rx_ring[SRP_RQ_SIZE];
-
-	unsigned		tx_head;
-	unsigned		tx_tail;
 	struct srp_iu	       *tx_ring[SRP_SQ_SIZE];
-
-	struct list_head	free_reqs;
-	struct list_head	req_queue;
+	struct srp_iu	       *rx_ring[SRP_RQ_SIZE];
 	struct srp_request	req_ring[SRP_CMD_SQ_SIZE];
 
 	struct work_struct	work;
@@ -163,16 +165,18 @@ struct srp_target_port {
 	struct list_head	list;
 	struct completion	done;
 	int			status;
-	enum srp_target_state	state;
 	int			qp_in_error;
+
+	struct completion	tsk_mgmt_done;
+	u8			tsk_mgmt_status;
 };
 
 struct srp_iu {
+	struct list_head	list;
 	u64			dma;
 	void		       *buf;
 	size_t			size;
 	enum dma_data_direction	direction;
-	enum srp_iu_type	type;
 };
 
 #endif /* IB_SRP_H */
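
The retagged constants above fit the new tag-based request lookup: a command's tag is simply its req_ring index, task-management requests set bit 31 (SRP_TAG_TSK_MGMT, moved up from bit SRP_RQ_SHIFT + 1 so it can never collide with a ring index), and SRP_TAG_NO_REQ (~0U) marks task management aimed at no particular request, as with the LUN reset in srp_reset_device(). A standalone sketch of decoding such tags; tag_is_tsk_mgmt() and tag_to_req_index() are invented helpers, not driver functions.

/* Sketch of the tag scheme; the helper names are hypothetical. */
#include <assert.h>
#include <stdint.h>

#define SRP_TAG_NO_REQ		(~0U)		/* no associated request */
#define SRP_TAG_TSK_MGMT	(1U << 31)	/* high bit: task mgmt */

static int tag_is_tsk_mgmt(uint32_t tag)
{
	return (tag & SRP_TAG_TSK_MGMT) != 0;
}

static uint32_t tag_to_req_index(uint32_t tag)
{
	return tag & ~SRP_TAG_TSK_MGMT;		/* low bits: ring index */
}

int main(void)
{
	uint32_t cmd = 17;				/* plain command */
	uint32_t abort_tag = 17 | SRP_TAG_TSK_MGMT;	/* abort of req 17 */

	assert(!tag_is_tsk_mgmt(cmd));
	assert(tag_is_tsk_mgmt(abort_tag));
	assert(tag_to_req_index(abort_tag) == 17);
	assert(tag_is_tsk_mgmt(SRP_TAG_NO_REQ));	/* LUN reset: no req */
	return 0;
}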

+ 2 - 1
drivers/net/mlx4/alloc.c

@@ -178,6 +178,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 	} else {
 		int i;
 
+		buf->direct.buf  = NULL;
 		buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
 		buf->npages      = buf->nbufs;
 		buf->page_shift  = PAGE_SHIFT;
@@ -229,7 +230,7 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
 		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
 				  buf->direct.map);
 	else {
-		if (BITS_PER_LONG == 64)
+		if (BITS_PER_LONG == 64 && buf->direct.buf)
 			vunmap(buf->direct.buf);
 
 		for (i = 0; i < buf->nbufs; ++i)
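
The two alloc.c hunks are one fix: the multi-buffer path now zeroes buf->direct.buf up front, and mlx4_buf_free() only vunmap()s a non-NULL pointer, so tearing down a buffer whose contiguous mapping was never established cannot pass a stale pointer to vunmap(). A generic userspace sketch of the same init-to-NULL plus guarded-teardown pattern; malloc/free stand in for vmap/vunmap, and struct buf is not the driver's mlx4_buf.

/* Pattern sketch only. */
#include <stdlib.h>

struct buf {
	void *direct;		/* optional contiguous view; may stay NULL */
	int nbufs;
};

static int buf_alloc(struct buf *b, int want_direct)
{
	b->direct = NULL;		/* the fix: never leave this stale */
	b->nbufs = 4;
	if (want_direct) {
		b->direct = malloc(4096);
		if (!b->direct)
			return -1;	/* caller may still free safely */
	}
	return 0;
}

static void buf_free(struct buf *b)
{
	if (b->direct)			/* the fix: guard the unmap */
		free(b->direct);
	b->direct = NULL;
}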

+ 1 - 3
drivers/net/mlx4/fw.c

@@ -289,10 +289,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
 		dev_cap->bf_reg_size = 1 << (field & 0x1f);
 		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
-		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) {
-			mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n", field & 0x1f);
+		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
 			field = 3;
-		}
 		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
 		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
 			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
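
Dropping the mlx4_warn() leaves the clamp itself intact: if firmware claims 2^field BlueFlame registers per page while only PAGE_SIZE / bf_reg_size fit, field is forced to 3, i.e. eight registers per page. A self-contained rerun of that arithmetic with example values; PAGE_SIZE is pinned to 4096 here purely for illustration.

/* Example values only -- replays the clamp the driver applies. */
#include <stdio.h>

#define PAGE_SIZE 4096

int main(void)
{
	int bf_reg_size = 1 << 9;	/* as if field & 0x1f were 9: 512 B */
	int field = 6;			/* firmware claims 2^6 = 64 regs/page */

	if ((1 << (field & 0x3f)) > (PAGE_SIZE / bf_reg_size))
		field = 3;		/* 64 > 4096/512 = 8: clamp */

	printf("bf_regs_per_page = %d\n", 1 << (field & 0x3f));	/* prints 8 */
	return 0;
}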