@@ -53,6 +53,7 @@ struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
 					  enum queue_index index)
 {
 	struct queue_entry *entry;
+	unsigned long irqflags;
 
 	if (unlikely(index >= Q_INDEX_MAX)) {
 		ERROR(queue->rt2x00dev,
@@ -60,11 +61,11 @@ struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
 		return NULL;
 	}
 
-	spin_lock(&queue->lock);
+	spin_lock_irqsave(&queue->lock, irqflags);
 
 	entry = &queue->entries[queue->index[index]];
 
-	spin_unlock(&queue->lock);
+	spin_unlock_irqrestore(&queue->lock, irqflags);
 
 	return entry;
 }
@@ -72,13 +73,15 @@ EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
 
 void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
 {
+	unsigned long irqflags;
+
 	if (unlikely(index >= Q_INDEX_MAX)) {
 		ERROR(queue->rt2x00dev,
 		      "Index change on invalid index type (%d)\n", index);
 		return;
 	}
 
-	spin_lock(&queue->lock);
+	spin_lock_irqsave(&queue->lock, irqflags);
 
 	queue->index[index]++;
 	if (queue->index[index] >= queue->limit)
@@ -91,19 +94,21 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
 		queue->count ++;
 	}
 
-	spin_unlock(&queue->lock);
+	spin_unlock_irqrestore(&queue->lock, irqflags);
 }
 EXPORT_SYMBOL_GPL(rt2x00queue_index_inc);
 
 static void rt2x00queue_reset(struct data_queue *queue)
 {
-	spin_lock(&queue->lock);
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&queue->lock, irqflags);
 
 	queue->count = 0;
 	queue->length = 0;
 	memset(queue->index, 0, sizeof(queue->index));
 
-	spin_unlock(&queue->lock);
+	spin_unlock_irqrestore(&queue->lock, irqflags);
 }
 
 void rt2x00queue_init_rx(struct rt2x00_dev *rt2x00dev)
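
For context, a minimal sketch of the pattern this conversion follows (illustrative only, not part of the patch; the names demo_lock, demo_update and demo_isr are made up): when a spinlock can also be taken from interrupt context, a process-context holder must disable local interrupts with spin_lock_irqsave(), otherwise an interrupt arriving on the same CPU could spin on the already-held lock and deadlock.

/* Illustrative sketch only -- not rt2x00 driver code. */
#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned int demo_count;

/* Process context: block local IRQs while holding the lock so the
 * ISR below cannot preempt us on this CPU and spin forever. */
static void demo_update(void)
{
	unsigned long irqflags;

	spin_lock_irqsave(&demo_lock, irqflags);
	demo_count++;
	spin_unlock_irqrestore(&demo_lock, irqflags);
}

/* Interrupt context: takes the same lock; a plain spin_lock() is
 * sufficient here because interrupts are already disabled. */
static irqreturn_t demo_isr(int irq, void *dev_id)
{
	spin_lock(&demo_lock);
	demo_count++;
	spin_unlock(&demo_lock);
	return IRQ_HANDLED;
}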