@@ -67,7 +67,43 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
 
 /* Fake initialization required by compiler */
 static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
-static int maxbatch = 10000;
+static int blimit = 10;
+static int qhimark = 10000;
+static int qlowmark = 100;
+#ifdef CONFIG_SMP
+static int rsinterval = 1000;
+#endif
+
+static atomic_t rcu_barrier_cpu_count;
+static struct semaphore rcu_barrier_sema;
+static struct completion rcu_barrier_completion;
+
+#ifdef CONFIG_SMP
+static void force_quiescent_state(struct rcu_data *rdp,
+			struct rcu_ctrlblk *rcp)
+{
+	int cpu;
+	cpumask_t cpumask;
+	set_need_resched();
+	if (unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval)) {
+		rdp->last_rs_qlen = rdp->qlen;
+		/*
+		 * Don't send IPI to itself. With irqs disabled,
+		 * rdp->cpu is the current cpu.
+		 */
+		cpumask = rcp->cpumask;
+		cpu_clear(rdp->cpu, cpumask);
+		for_each_cpu_mask(cpu, cpumask)
+			smp_send_reschedule(cpu);
+	}
+}
+#else
+static inline void force_quiescent_state(struct rcu_data *rdp,
+			struct rcu_ctrlblk *rcp)
+{
+	set_need_resched();
+}
+#endif
 
 /**
  * call_rcu - Queue an RCU callback for invocation after a grace period.
@@ -92,17 +128,13 @@ void fastcall call_rcu(struct rcu_head *head,
 	rdp = &__get_cpu_var(rcu_data);
 	*rdp->nxttail = head;
 	rdp->nxttail = &head->next;
-
-	if (unlikely(++rdp->count > 10000))
-		set_need_resched();
-
+	if (unlikely(++rdp->qlen > qhimark)) {
+		rdp->blimit = INT_MAX;
+		force_quiescent_state(rdp, &rcu_ctrlblk);
+	}
 	local_irq_restore(flags);
 }
 
-static atomic_t rcu_barrier_cpu_count;
-static struct semaphore rcu_barrier_sema;
-static struct completion rcu_barrier_completion;
-
 /**
  * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
  * @head: structure to be used for queueing the RCU updates.
@@ -131,12 +163,12 @@ void fastcall call_rcu_bh(struct rcu_head *head,
 	rdp = &__get_cpu_var(rcu_bh_data);
 	*rdp->nxttail = head;
 	rdp->nxttail = &head->next;
-	rdp->count++;
-/*
- *  Should we directly call rcu_do_batch() here ?
- *  if (unlikely(rdp->count > 10000))
- *      rcu_do_batch(rdp);
- */
+
+	if (unlikely(++rdp->qlen > qhimark)) {
+		rdp->blimit = INT_MAX;
+		force_quiescent_state(rdp, &rcu_bh_ctrlblk);
+	}
+
 	local_irq_restore(flags);
 }
 
@@ -199,10 +231,12 @@ static void rcu_do_batch(struct rcu_data *rdp)
 		next = rdp->donelist = list->next;
 		list->func(list);
 		list = next;
-		rdp->count--;
-		if (++count >= maxbatch)
+		rdp->qlen--;
+		if (++count >= rdp->blimit)
 			break;
 	}
+	if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
+		rdp->blimit = blimit;
 	if (!rdp->donelist)
 		rdp->donetail = &rdp->donelist;
 	else
@@ -473,6 +507,7 @@ static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
 	rdp->quiescbatch = rcp->completed;
 	rdp->qs_pending = 0;
 	rdp->cpu = cpu;
+	rdp->blimit = blimit;
 }
 
 static void __devinit rcu_online_cpu(int cpu)
@@ -567,7 +602,12 @@ void synchronize_kernel(void)
 	synchronize_rcu();
 }
 
-module_param(maxbatch, int, 0);
+module_param(blimit, int, 0);
+module_param(qhimark, int, 0);
+module_param(qlowmark, int, 0);
+#ifdef CONFIG_SMP
+module_param(rsinterval, int, 0);
+#endif
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 EXPORT_SYMBOL(call_rcu);  /* WARNING: GPL-only in April 2006. */
 EXPORT_SYMBOL(call_rcu_bh);  /* WARNING: GPL-only in April 2006. */