|
@@ -96,7 +96,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
|
|
|
attr->max_qp_rd_atom =
|
|
|
min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
|
|
|
attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
|
|
|
- attr->max_srq = (dev->attr.max_qp - 1);
|
|
|
+ attr->max_srq = dev->attr.max_srq;
|
|
|
attr->max_srq_sge = dev->attr.max_srq_sge;
|
|
|
attr->max_srq_wr = dev->attr.max_rqe;
|
|
|
attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
|
|
@@ -1304,7 +1304,6 @@ static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
|
|
|
|
|
|
static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
|
|
|
{
|
|
|
- int free_cnt;
|
|
|
return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
|
|
|
}
|
|
|
|
|
@@ -1746,6 +1745,96 @@ static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
|
|
|
ext_rw->len = hdr->total_len;
|
|
|
}
|
|
|
|
|
|
+static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
|
|
|
+ struct ocrdma_hw_mr *hwmr)
|
|
|
+{
|
|
|
+ int i;
|
|
|
+ u64 buf_addr = 0;
|
|
|
+ int num_pbes;
|
|
|
+ struct ocrdma_pbe *pbe;
|
|
|
+
|
|
|
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
|
|
|
+ num_pbes = 0;
|
|
|
+
|
|
|
+ /* go through the OS phy regions & fill hw pbe entries into pbls. */
|
|
|
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
|
|
|
+ /* number of pbes can be more for one OS buf, when
|
|
|
+ * buffers are of different sizes.
|
|
|
+ * split the ib_buf to one or more pbes.
|
|
|
+ */
|
|
|
+ buf_addr = wr->wr.fast_reg.page_list->page_list[i];
|
|
|
+ pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
|
|
|
+ pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
|
|
|
+ num_pbes += 1;
|
|
|
+ pbe++;
|
|
|
+
|
|
|
+ /* if the pbl is full storing the pbes,
|
|
|
+ * move to next pbl.
|
|
|
+ */
|
|
|
+ if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
|
|
|
+ pbl_tbl++;
|
|
|
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ return;
|
|
|
+}
|
|
|
+
|
|
|
+static int get_encoded_page_size(int pg_sz)
|
|
|
+{
|
|
|
+ /* Max size is 256M 4096 << 16 */
|
|
|
+ int i = 0;
|
|
|
+ for (; i < 17; i++)
|
|
|
+ if (pg_sz == (4096 << i))
|
|
|
+ break;
|
|
|
+ return i;
|
|
|
+}
|
|
|
+
|
|
|
+
|
|
|
+static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
|
|
|
+ struct ib_send_wr *wr)
|
|
|
+{
|
|
|
+ u64 fbo;
|
|
|
+ struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
|
|
|
+ struct ocrdma_mr *mr;
|
|
|
+ u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
|
|
|
+
|
|
|
+ wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
|
|
|
+
|
|
|
+ if ((wr->wr.fast_reg.page_list_len >
|
|
|
+ qp->dev->attr.max_pages_per_frmr) ||
|
|
|
+ (wr->wr.fast_reg.length > 0xffffffffULL))
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
|
|
|
+ hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
|
|
|
+
|
|
|
+ if (wr->wr.fast_reg.page_list_len == 0)
|
|
|
+ BUG();
|
|
|
+ if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE)
|
|
|
+ hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
|
|
|
+ if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE)
|
|
|
+ hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
|
|
|
+ if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ)
|
|
|
+ hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
|
|
|
+ hdr->lkey = wr->wr.fast_reg.rkey;
|
|
|
+ hdr->total_len = wr->wr.fast_reg.length;
|
|
|
+
|
|
|
+ fbo = wr->wr.fast_reg.iova_start -
|
|
|
+ (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
|
|
|
+
|
|
|
+ fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start);
|
|
|
+ fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff);
|
|
|
+ fast_reg->fbo_hi = upper_32_bits(fbo);
|
|
|
+ fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
|
|
|
+ fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
|
|
|
+ fast_reg->size_sge =
|
|
|
+ get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
|
|
|
+ mr = (struct ocrdma_mr *)qp->dev->stag_arr[(hdr->lkey >> 8) &
|
|
|
+ (OCRDMA_MAX_STAG - 1)];
|
|
|
+ build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
|
|
|
{
|
|
|
u32 val = qp->sq.dbid | (1 << 16);
|
|
@@ -1815,10 +1904,14 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|
|
case IB_WR_LOCAL_INV:
|
|
|
hdr->cw |=
|
|
|
(OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
|
|
|
- hdr->cw |= (sizeof(struct ocrdma_hdr_wqe) /
|
|
|
+ hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
|
|
|
+ sizeof(struct ocrdma_sge)) /
|
|
|
OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
|
|
|
hdr->lkey = wr->ex.invalidate_rkey;
|
|
|
break;
|
|
|
+ case IB_WR_FAST_REG_MR:
|
|
|
+ status = ocrdma_build_fr(qp, hdr, wr);
|
|
|
+ break;
|
|
|
default:
|
|
|
status = -EINVAL;
|
|
|
break;
|
|
@@ -2085,6 +2178,9 @@ static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
|
|
|
case OCRDMA_SEND:
|
|
|
ibwc->opcode = IB_WC_SEND;
|
|
|
break;
|
|
|
+ case OCRDMA_FR_MR:
|
|
|
+ ibwc->opcode = IB_WC_FAST_REG_MR;
|
|
|
+ break;
|
|
|
case OCRDMA_LKEY_INV:
|
|
|
ibwc->opcode = IB_WC_LOCAL_INV;
|
|
|
break;
|
|
@@ -2530,3 +2626,63 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
|
|
|
spin_unlock_irqrestore(&cq->cq_lock, flags);
|
|
|
return 0;
|
|
|
}
|
|
|
+
|
|
|
+struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
|
|
|
+{
|
|
|
+ int status;
|
|
|
+ struct ocrdma_mr *mr;
|
|
|
+ struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
|
|
|
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
|
|
|
+
|
|
|
+ if (max_page_list_len > dev->attr.max_pages_per_frmr)
|
|
|
+ return ERR_PTR(-EINVAL);
|
|
|
+
|
|
|
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
|
|
|
+ if (!mr)
|
|
|
+ return ERR_PTR(-ENOMEM);
|
|
|
+
|
|
|
+ status = ocrdma_get_pbl_info(dev, mr, max_page_list_len);
|
|
|
+ if (status)
|
|
|
+ goto pbl_err;
|
|
|
+ mr->hwmr.fr_mr = 1;
|
|
|
+ mr->hwmr.remote_rd = 0;
|
|
|
+ mr->hwmr.remote_wr = 0;
|
|
|
+ mr->hwmr.local_rd = 0;
|
|
|
+ mr->hwmr.local_wr = 0;
|
|
|
+ mr->hwmr.mw_bind = 0;
|
|
|
+ status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
|
|
|
+ if (status)
|
|
|
+ goto pbl_err;
|
|
|
+ status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
|
|
|
+ if (status)
|
|
|
+ goto mbx_err;
|
|
|
+ mr->ibmr.rkey = mr->hwmr.lkey;
|
|
|
+ mr->ibmr.lkey = mr->hwmr.lkey;
|
|
|
+ dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = (u64) mr;
|
|
|
+ return &mr->ibmr;
|
|
|
+mbx_err:
|
|
|
+ ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
|
|
|
+pbl_err:
|
|
|
+ kfree(mr);
|
|
|
+ return ERR_PTR(status);
|
|
|
+}
|
|
|
+
|
|
|
+struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
|
|
|
+ *ibdev,
|
|
|
+ int page_list_len)
|
|
|
+{
|
|
|
+ struct ib_fast_reg_page_list *frmr_list;
|
|
|
+ int size;
|
|
|
+
|
|
|
+ size = sizeof(*frmr_list) + (page_list_len * sizeof(u64));
|
|
|
+ frmr_list = kzalloc(size, GFP_KERNEL);
|
|
|
+ if (!frmr_list)
|
|
|
+ return ERR_PTR(-ENOMEM);
|
|
|
+ frmr_list->page_list = (u64 *)(frmr_list + 1);
|
|
|
+ return frmr_list;
|
|
|
+}
|
|
|
+
|
|
|
+void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
|
|
|
+{
|
|
|
+ kfree(page_list);
|
|
|
+}
|