@@ -16,11 +16,11 @@ static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
 
 static struct {
 	struct list_head	queue;
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 } call_function __cacheline_aligned_in_smp =
 	{
 		.queue		= LIST_HEAD_INIT(call_function.queue),
-		.lock		= __SPIN_LOCK_UNLOCKED(call_function.lock),
+		.lock		= __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
 	};
 
 enum {
@@ -35,7 +35,7 @@ struct call_function_data {
 
 struct call_single_queue {
 	struct list_head	list;
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 };
 
 static DEFINE_PER_CPU(struct call_function_data, cfd_data);
@@ -80,7 +80,7 @@ static int __cpuinit init_call_single_data(void)
 	for_each_possible_cpu(i) {
 		struct call_single_queue *q = &per_cpu(call_single_queue, i);
 
-		spin_lock_init(&q->lock);
+		raw_spin_lock_init(&q->lock);
 		INIT_LIST_HEAD(&q->list);
 	}
 
@@ -141,10 +141,10 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 	unsigned long flags;
 	int ipi;
 
-	spin_lock_irqsave(&dst->lock, flags);
+	raw_spin_lock_irqsave(&dst->lock, flags);
 	ipi = list_empty(&dst->list);
 	list_add_tail(&data->list, &dst->list);
-	spin_unlock_irqrestore(&dst->lock, flags);
+	raw_spin_unlock_irqrestore(&dst->lock, flags);
 
 	/*
 	 * The list addition should be visible before sending the IPI
@@ -201,9 +201,9 @@ void generic_smp_call_function_interrupt(void)
 		refs = atomic_dec_return(&data->refs);
 		WARN_ON(refs < 0);
 		if (!refs) {
-			spin_lock(&call_function.lock);
+			raw_spin_lock(&call_function.lock);
 			list_del_rcu(&data->csd.list);
-			spin_unlock(&call_function.lock);
+			raw_spin_unlock(&call_function.lock);
 		}
 
 		if (refs)
@@ -230,9 +230,9 @@ void generic_smp_call_function_single_interrupt(void)
 	 */
 	WARN_ON_ONCE(!cpu_online(smp_processor_id()));
 
-	spin_lock(&q->lock);
+	raw_spin_lock(&q->lock);
 	list_replace_init(&q->list, &list);
-	spin_unlock(&q->lock);
+	raw_spin_unlock(&q->lock);
 
 	while (!list_empty(&list)) {
 		struct call_single_data *data;
@@ -449,14 +449,14 @@ void smp_call_function_many(const struct cpumask *mask,
 	cpumask_clear_cpu(this_cpu, data->cpumask);
 	atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
-	spin_lock_irqsave(&call_function.lock, flags);
+	raw_spin_lock_irqsave(&call_function.lock, flags);
 	/*
 	 * Place entry at the _HEAD_ of the list, so that any cpu still
 	 * observing the entry in generic_smp_call_function_interrupt()
 	 * will not miss any other list entries:
 	 */
 	list_add_rcu(&data->csd.list, &call_function.queue);
-	spin_unlock_irqrestore(&call_function.lock, flags);
+	raw_spin_unlock_irqrestore(&call_function.lock, flags);
 
 	/*
 	 * Make the list addition visible before sending the ipi.
@@ -501,20 +501,20 @@ EXPORT_SYMBOL(smp_call_function);
 
 void ipi_call_lock(void)
 {
-	spin_lock(&call_function.lock);
+	raw_spin_lock(&call_function.lock);
 }
 
 void ipi_call_unlock(void)
 {
-	spin_unlock(&call_function.lock);
+	raw_spin_unlock(&call_function.lock);
 }
 
 void ipi_call_lock_irq(void)
 {
-	spin_lock_irq(&call_function.lock);
+	raw_spin_lock_irq(&call_function.lock);
 }
 
 void ipi_call_unlock_irq(void)
 {
-	spin_unlock_irq(&call_function.lock);
+	raw_spin_unlock_irq(&call_function.lock);
 }