@@ -245,6 +245,22 @@ static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
 	kmem_cache_free(rsxx_dma_pool, dma);
 }
 
+int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
+			   struct list_head *q)
+{
+	struct rsxx_dma *dma;
+	struct rsxx_dma *tmp;
+	int cnt = 0;
+
+	list_for_each_entry_safe(dma, tmp, q, list) {
+		list_del(&dma->list);
+		rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+		cnt++;
+	}
+
+	return cnt;
+}
+
 static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
 			     struct rsxx_dma *dma)
 {
@@ -252,9 +268,9 @@ static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
 	 * Requeued DMAs go to the front of the queue so they are issued
 	 * first.
 	 */
-	spin_lock(&ctrl->queue_lock);
+	spin_lock_bh(&ctrl->queue_lock);
 	list_add(&dma->list, &ctrl->queue);
-	spin_unlock(&ctrl->queue_lock);
+	spin_unlock_bh(&ctrl->queue_lock);
 }
 
 static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
@@ -329,6 +345,7 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
 static void dma_engine_stalled(unsigned long data)
 {
 	struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
+	int cnt;
 
 	if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
 	    unlikely(ctrl->card->eeh_state))
@@ -349,6 +366,18 @@ static void dma_engine_stalled(unsigned long data)
 			"DMA channel %d has stalled, faulting interface.\n",
 			ctrl->id);
 		ctrl->card->dma_fault = 1;
+
+		/* Clean up the DMA queue */
+		spin_lock(&ctrl->queue_lock);
+		cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+		spin_unlock(&ctrl->queue_lock);
+
+		cnt += rsxx_dma_cancel(ctrl);
+
+		if (cnt)
+			dev_info(CARD_TO_DEV(ctrl->card),
+				"Freed %d queued DMAs on channel %d\n",
+				cnt, ctrl->id);
 	}
 }
 
@@ -368,22 +397,22 @@ static void rsxx_issue_dmas(struct work_struct *work)
 		return;
 
 	while (1) {
-		spin_lock(&ctrl->queue_lock);
+		spin_lock_bh(&ctrl->queue_lock);
 		if (list_empty(&ctrl->queue)) {
-			spin_unlock(&ctrl->queue_lock);
+			spin_unlock_bh(&ctrl->queue_lock);
 			break;
 		}
-		spin_unlock(&ctrl->queue_lock);
+		spin_unlock_bh(&ctrl->queue_lock);
 
 		tag = pop_tracker(ctrl->trackers);
 		if (tag == -1)
 			break;
 
-		spin_lock(&ctrl->queue_lock);
+		spin_lock_bh(&ctrl->queue_lock);
 		dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
 		list_del(&dma->list);
 		ctrl->stats.sw_q_depth--;
-		spin_unlock(&ctrl->queue_lock);
+		spin_unlock_bh(&ctrl->queue_lock);
 
 		/*
 		 * This will catch any DMAs that slipped in right before the
@@ -520,33 +549,10 @@ static void rsxx_dma_done(struct work_struct *work)
 	rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
 	spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
 
-	spin_lock(&ctrl->queue_lock);
+	spin_lock_bh(&ctrl->queue_lock);
 	if (ctrl->stats.sw_q_depth)
 		queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
-	spin_unlock(&ctrl->queue_lock);
-}
-
-static int rsxx_cleanup_dma_queue(struct rsxx_cardinfo *card,
-				  struct list_head *q)
-{
-	struct rsxx_dma *dma;
-	struct rsxx_dma *tmp;
-	int cnt = 0;
-
-	list_for_each_entry_safe(dma, tmp, q, list) {
-		list_del(&dma->list);
-
-		if (dma->dma_addr)
-			pci_unmap_page(card->dev, dma->dma_addr,
-				       get_dma_size(dma),
-				       (dma->cmd == HW_CMD_BLK_WRITE) ?
-				       PCI_DMA_TODEVICE :
-				       PCI_DMA_FROMDEVICE);
-		kmem_cache_free(rsxx_dma_pool, dma);
-		cnt++;
-	}
-
-	return cnt;
+	spin_unlock_bh(&ctrl->queue_lock);
 }
 
 static int rsxx_queue_discard(struct rsxx_cardinfo *card,
@@ -698,10 +704,10 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 
 	for (i = 0; i < card->n_targets; i++) {
 		if (!list_empty(&dma_list[i])) {
-			spin_lock(&card->ctrl[i].queue_lock);
+			spin_lock_bh(&card->ctrl[i].queue_lock);
 			card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
 			list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
-			spin_unlock(&card->ctrl[i].queue_lock);
+			spin_unlock_bh(&card->ctrl[i].queue_lock);
 
 			queue_work(card->ctrl[i].issue_wq,
 				   &card->ctrl[i].issue_dma_work);
@@ -711,8 +717,11 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 	return 0;
 
 bvec_err:
-	for (i = 0; i < card->n_targets; i++)
-		rsxx_cleanup_dma_queue(card, &dma_list[i]);
+	for (i = 0; i < card->n_targets; i++) {
+		spin_lock_bh(&card->ctrl[i].queue_lock);
+		rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i]);
+		spin_unlock_bh(&card->ctrl[i].queue_lock);
+	}
 
 	return st;
 }
@@ -918,13 +927,30 @@ failed_dma_setup:
 	return st;
 }
 
+int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl)
+{
+	struct rsxx_dma *dma;
+	int i;
+	int cnt = 0;
+
+	/* Clean up issued DMAs */
+	for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
+		dma = get_tracker_dma(ctrl->trackers, i);
+		if (dma) {
+			atomic_dec(&ctrl->stats.hw_q_depth);
+			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+			push_tracker(ctrl->trackers, i);
+			cnt++;
+		}
+	}
+
+	return cnt;
+}
 
 void rsxx_dma_destroy(struct rsxx_cardinfo *card)
 {
 	struct rsxx_dma_ctrl *ctrl;
-	struct rsxx_dma *dma;
-	int i, j;
-	int cnt = 0;
+	int i;
 
 	for (i = 0; i < card->n_targets; i++) {
 		ctrl = &card->ctrl[i];
@@ -943,33 +969,11 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
 		del_timer_sync(&ctrl->activity_timer);
 
 		/* Clean up the DMA queue */
-		spin_lock(&ctrl->queue_lock);
-		cnt = rsxx_cleanup_dma_queue(card, &ctrl->queue);
-		spin_unlock(&ctrl->queue_lock);
+		spin_lock_bh(&ctrl->queue_lock);
+		rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+		spin_unlock_bh(&ctrl->queue_lock);
 
-		if (cnt)
-			dev_info(CARD_TO_DEV(card),
-				"Freed %d queued DMAs on channel %d\n",
-				cnt, i);
-
-		/* Clean up issued DMAs */
-		for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
-			dma = get_tracker_dma(ctrl->trackers, j);
-			if (dma) {
-				pci_unmap_page(card->dev, dma->dma_addr,
-					       get_dma_size(dma),
-					       (dma->cmd == HW_CMD_BLK_WRITE) ?
-					       PCI_DMA_TODEVICE :
-					       PCI_DMA_FROMDEVICE);
-				kmem_cache_free(rsxx_dma_pool, dma);
-				cnt++;
-			}
-		}
-
-		if (cnt)
-			dev_info(CARD_TO_DEV(card),
-				"Freed %d pending DMAs on channel %d\n",
-				cnt, i);
+		rsxx_dma_cancel(ctrl);
 
 		vfree(ctrl->trackers);
 
|
|
|
cnt++;
|
|
|
}
|
|
|
|
|
|
- spin_lock(&card->ctrl[i].queue_lock);
|
|
|
+ spin_lock_bh(&card->ctrl[i].queue_lock);
|
|
|
list_splice(&issued_dmas[i], &card->ctrl[i].queue);
|
|
|
|
|
|
atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
|
|
@@ -1028,7 +1032,7 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
 					       PCI_DMA_TODEVICE :
 					       PCI_DMA_FROMDEVICE);
 		}
-		spin_unlock(&card->ctrl[i].queue_lock);
+		spin_unlock_bh(&card->ctrl[i].queue_lock);
 	}
 
 	kfree(issued_dmas);
@@ -1036,30 +1040,13 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
 	return 0;
 }
 
-void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card)
-{
-	struct rsxx_dma *dma;
-	struct rsxx_dma *tmp;
-	int i;
-
-	for (i = 0; i < card->n_targets; i++) {
-		spin_lock(&card->ctrl[i].queue_lock);
-		list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) {
-			list_del(&dma->list);
-
-			rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED);
-		}
-		spin_unlock(&card->ctrl[i].queue_lock);
-	}
-}
-
 int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
 {
 	struct rsxx_dma *dma;
 	int i;
 
 	for (i = 0; i < card->n_targets; i++) {
-		spin_lock(&card->ctrl[i].queue_lock);
+		spin_lock_bh(&card->ctrl[i].queue_lock);
 		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
 			dma->dma_addr = pci_map_page(card->dev, dma->page,
 					dma->pg_off, get_dma_size(dma),
@@ -1067,12 +1054,12 @@ int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
 					PCI_DMA_TODEVICE :
 					PCI_DMA_FROMDEVICE);
 			if (!dma->dma_addr) {
-				spin_unlock(&card->ctrl[i].queue_lock);
+				spin_unlock_bh(&card->ctrl[i].queue_lock);
 				kmem_cache_free(rsxx_dma_pool, dma);
 				return -ENOMEM;
 			}
 		}
-		spin_unlock(&card->ctrl[i].queue_lock);
+		spin_unlock_bh(&card->ctrl[i].queue_lock);
 	}
 
 	return 0;