@@ -246,7 +246,10 @@ module_param(jiffies_till_next_fqs, ulong, 0644);
 
 static void rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
 				  struct rcu_data *rdp);
-static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *));
+static void force_qs_rnp(struct rcu_state *rsp,
+			 int (*f)(struct rcu_data *rsp, bool *isidle,
+				  unsigned long *maxj),
+			 bool *isidle, unsigned long *maxj);
 static void force_quiescent_state(struct rcu_state *rsp);
 static int rcu_pending(int cpu);
 
@@ -727,7 +730,8 @@ static int rcu_is_cpu_rrupt_from_idle(void)
  * credit them with an implicit quiescent state. Return 1 if this CPU
  * is in dynticks idle mode, which is an extended quiescent state.
  */
-static int dyntick_save_progress_counter(struct rcu_data *rdp)
+static int dyntick_save_progress_counter(struct rcu_data *rdp,
+					 bool *isidle, unsigned long *maxj)
 {
 	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
 	return (rdp->dynticks_snap & 0x1) == 0;
@@ -739,7 +743,8 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
  * idle state since the last call to dyntick_save_progress_counter()
  * for this same CPU, or by virtue of having been offline.
  */
-static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
+static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
+				    bool *isidle, unsigned long *maxj)
 {
 	unsigned int curr;
 	unsigned int snap;
@@ -1361,16 +1366,19 @@ static int rcu_gp_init(struct rcu_state *rsp)
 static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
 {
 	int fqs_state = fqs_state_in;
+	bool isidle = false;
+	unsigned long maxj;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
 	rsp->n_force_qs++;
 	if (fqs_state == RCU_SAVE_DYNTICK) {
 		/* Collect dyntick-idle snapshots. */
-		force_qs_rnp(rsp, dyntick_save_progress_counter);
+		force_qs_rnp(rsp, dyntick_save_progress_counter,
+			     &isidle, &maxj);
 		fqs_state = RCU_FORCE_QS;
 	} else {
 		/* Handle dyntick-idle and offline CPUs. */
-		force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
+		force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
 	}
 	/* Clear flag to prevent immediate re-entry. */
 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
@@ -2069,7 +2077,10 @@ void rcu_check_callbacks(int cpu, int user)
  *
  * The caller must have suppressed start of new grace periods.
  */
-static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
+static void force_qs_rnp(struct rcu_state *rsp,
+			 int (*f)(struct rcu_data *rsp, bool *isidle,
+				  unsigned long *maxj),
+			 bool *isidle, unsigned long *maxj)
 {
 	unsigned long bit;
 	int cpu;
@@ -2093,7 +2104,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
 		bit = 1;
 		for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
 			if ((rnp->qsmask & bit) != 0 &&
-			    f(per_cpu_ptr(rsp->rda, cpu)))
+			    f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
 				mask |= bit;
 		}
 		if (mask != 0) {
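
Note on the shape of this change: the new isidle and maxj out-parameters are
threaded from rcu_gp_fqs() through force_qs_rnp() into the two per-CPU
callbacks, but neither callback body consumes them yet, so this patch is
plumbing, evidently in preparation for later full-system-idle detection work.
The fragment below is a minimal userspace sketch of the
callback-with-out-parameters pattern being set up here; it is not kernel
code. The names cpu_state, check_cpu(), and scan_cpus() are hypothetical, and
the start-true convention for isidle is an assumption made for illustration
(the patch itself initializes isidle to false and leaves both parameters
unused for now).

/* Illustrative sketch only; all names hypothetical.
 * Builds with any C99 compiler, e.g. gcc -std=c99 sketch.c. */
#include <stdbool.h>
#include <stdio.h>

struct cpu_state {
	bool idle;			/* is this CPU idle right now? */
	unsigned long last_busy;	/* jiffies-like stamp of last activity */
};

/* Same shape as the patched callbacks: returns nonzero if this CPU counts
 * as quiesced, and may update *isidle and *maxj as side effects. */
static int check_cpu(struct cpu_state *cs, bool *isidle, unsigned long *maxj)
{
	if (!cs->idle) {
		*isidle = false;		/* one busy CPU spoils all-idle */
		if (cs->last_busy > *maxj)
			*maxj = cs->last_busy;	/* track latest activity */
	}
	return cs->idle;
}

/* Analogue of force_qs_rnp(): applies the callback to every CPU and merely
 * forwards isidle/maxj without interpreting them. */
static void scan_cpus(struct cpu_state *cpus, int n,
		      int (*f)(struct cpu_state *, bool *, unsigned long *),
		      bool *isidle, unsigned long *maxj)
{
	for (int cpu = 0; cpu < n; cpu++)
		f(&cpus[cpu], isidle, maxj);
}

int main(void)
{
	struct cpu_state cpus[3] = {
		{ .idle = true,  .last_busy = 100 },
		{ .idle = false, .last_busy = 250 },
		{ .idle = true,  .last_busy = 180 },
	};
	bool isidle = true;	/* assume all-idle until a busy CPU is seen */
	unsigned long maxj = 0;

	scan_cpus(cpus, 3, check_cpu, &isidle, &maxj);
	printf("system idle: %d, latest activity: %lu\n", (int)isidle, maxj);
	return 0;
}

The design point to notice is that force_qs_rnp() stays generic: it never
looks at isidle or maxj itself, it only forwards them to the callback, so
each callback can define its own accumulation semantics once later patches
start using the parameters.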