@@ -164,7 +164,8 @@ static char *rcu_try_flip_state_names[] =
 	{ "idle", "waitack", "waitzero", "waitmb" };
 #endif /* #ifdef CONFIG_RCU_TRACE */
 
-static cpumask_t rcu_cpu_online_map __read_mostly = CPU_MASK_NONE;
+static DECLARE_BITMAP(rcu_cpu_online_map, NR_CPUS) __read_mostly
+	= CPU_BITS_NONE;
 
 /*
  * Enum and per-CPU flag to determine when each CPU has seen
@@ -758,7 +759,7 @@ rcu_try_flip_idle(void)
 
 	/* Now ask each CPU for acknowledgement of the flip. */
 
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
+	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) {
 		per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
 		dyntick_save_progress_counter(cpu);
 	}
@@ -776,7 +777,7 @@ rcu_try_flip_waitack(void)
 	int cpu;
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
+	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
 		if (rcu_try_flip_waitack_needed(cpu) &&
 		    per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
 			RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
@@ -808,7 +809,7 @@ rcu_try_flip_waitzero(void)
 	/* Check to see if the sum of the "last" counters is zero. */
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
+	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
 		sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
 	if (sum != 0) {
 		RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -823,7 +824,7 @@ rcu_try_flip_waitzero(void)
 	smp_mb();  /*  ^^^^^^^^^^^^ */
 
 	/* Call for a memory barrier from each CPU. */
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
+	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) {
 		per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
 		dyntick_save_progress_counter(cpu);
 	}
@@ -843,7 +844,7 @@ rcu_try_flip_waitmb(void)
 	int cpu;
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
+	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
 		if (rcu_try_flip_waitmb_needed(cpu) &&
 		    per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
 			RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
@@ -1032,7 +1033,7 @@ void rcu_offline_cpu(int cpu)
 	RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0;
 	RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0;
 
-	cpu_clear(cpu, rcu_cpu_online_map);
+	cpumask_clear_cpu(cpu, to_cpumask(rcu_cpu_online_map));
 
 	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
 
@@ -1072,7 +1073,7 @@ void __cpuinit rcu_online_cpu(int cpu)
 	struct rcu_data *rdp;
 
 	spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
-	cpu_set(cpu, rcu_cpu_online_map);
+	cpumask_set_cpu(cpu, to_cpumask(rcu_cpu_online_map));
 	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
 
 	/*
@@ -1430,7 +1431,7 @@ void __init __rcu_init(void)
 	 * We don't need protection against CPU-Hotplug here
 	 * since
 	 * a) If a CPU comes online while we are iterating over the
-	 *    cpu_online_map below, we would only end up making a
+	 *    cpu_online_mask below, we would only end up making a
 	 *    duplicate call to rcu_online_cpu() which sets the corresponding
 	 *    CPU's mask in the rcu_cpu_online_map.
 	 *
|