@@ -349,7 +349,8 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
 	pr->rq1_skba.os_skbs = 0;
 
 	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
-		pr->rq1_skba.index = index;
+		if (nr_of_wqes > 0)
+			pr->rq1_skba.index = index;
 		pr->rq1_skba.os_skbs = fill_wqes;
 		return;
 	}
@@ -1464,7 +1465,9 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 		  init_attr->act_nr_rwqes_rq2,
 		  init_attr->act_nr_rwqes_rq3);
 
-	ret = ehea_init_q_skba(&pr->sq_skba, init_attr->act_nr_send_wqes + 1);
+	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
+
+	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
 	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
 	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
 	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
@@ -2621,6 +2624,22 @@ void ehea_purge_sq(struct ehea_qp *orig_qp)
 	}
 }
 
+void ehea_flush_sq(struct ehea_port *port)
+{
+	int i;
+
+	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+		struct ehea_port_res *pr = &port->port_res[i];
+		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
+		int k = 0;
+		while (atomic_read(&pr->swqe_avail) < swqe_max) {
+			msleep(5);
+			if (++k == 20)
+				break;
+		}
+	}
+}
+
 int ehea_stop_qps(struct net_device *dev)
 {
 	struct ehea_port *port = netdev_priv(dev);
@@ -2845,6 +2864,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
 				if (dev->flags & IFF_UP) {
 					down(&port->port_lock);
 					netif_stop_queue(dev);
+					ehea_flush_sq(port);
 					ret = ehea_stop_qps(dev);
 					if (ret) {
 						up(&port->port_lock);