@@ -1027,7 +1027,6 @@ static void __svc_rdma_free(struct work_struct *work)
 	 * cm_id because the device ptr is needed to unmap the dma in
 	 * svc_rdma_put_context.
 	 */
-	spin_lock_bh(&rdma->sc_read_complete_lock);
 	while (!list_empty(&rdma->sc_read_complete_q)) {
 		struct svc_rdma_op_ctxt *ctxt;
 		ctxt = list_entry(rdma->sc_read_complete_q.next,
@@ -1036,10 +1035,8 @@ static void __svc_rdma_free(struct work_struct *work)
 		list_del_init(&ctxt->dto_q);
 		svc_rdma_put_context(ctxt, 1);
 	}
-	spin_unlock_bh(&rdma->sc_read_complete_lock);
 
 	/* Destroy queued, but not processed recv completions */
-	spin_lock_bh(&rdma->sc_rq_dto_lock);
 	while (!list_empty(&rdma->sc_rq_dto_q)) {
 		struct svc_rdma_op_ctxt *ctxt;
 		ctxt = list_entry(rdma->sc_rq_dto_q.next,
@@ -1048,7 +1045,6 @@ static void __svc_rdma_free(struct work_struct *work)
 		list_del_init(&ctxt->dto_q);
 		svc_rdma_put_context(ctxt, 1);
 	}
-	spin_unlock_bh(&rdma->sc_rq_dto_lock);
 
 	/* Warn if we leaked a resource or under-referenced */
 	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
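
The hunks above only remove locking; the surrounding pattern is unchanged: while the queue is non-empty, take the first entry, unlink it with list_del_init(), and drop the context with svc_rdma_put_context(). For illustration only (not part of the patch), here is a minimal, self-contained userspace sketch of that drain-and-release pattern. Every name in it (demo_ctxt, demo_put_context, the local list helpers) is a hypothetical stand-in for the kernel's svc_rdma_op_ctxt, svc_rdma_put_context(), and <linux/list.h> primitives, and like the post-patch loops it drains without taking a lock.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Minimal stand-ins for the kernel's intrusive list primitives. */
struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry;
	entry->prev = entry;
}

/* Hypothetical per-request context, analogous to svc_rdma_op_ctxt. */
struct demo_ctxt {
	int id;
	struct list_head dto_q;
};

/* Hypothetical release helper, analogous to svc_rdma_put_context(ctxt, 1). */
static void demo_put_context(struct demo_ctxt *ctxt)
{
	printf("releasing ctxt %d\n", ctxt->id);
	free(ctxt);
}

int main(void)
{
	struct list_head dto_q = LIST_HEAD_INIT(dto_q);
	int i;

	/* Queue a few contexts, as the receive path would. */
	for (i = 0; i < 3; i++) {
		struct demo_ctxt *ctxt = malloc(sizeof(*ctxt));

		ctxt->id = i;
		list_add_tail(&ctxt->dto_q, &dto_q);
	}

	/* Drain-and-release loop mirroring the hunks above: pop the
	 * first entry, unlink it, release the context.
	 */
	while (!list_empty(&dto_q)) {
		struct demo_ctxt *ctxt;

		ctxt = list_entry(dto_q.next, struct demo_ctxt, dto_q);
		list_del_init(&ctxt->dto_q);
		demo_put_context(ctxt);
	}
	return 0;
}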