12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262 |
- /*
- * Read-Copy Update mechanism for mutual exclusion (tree-based version)
- * Internal non-public definitions that provide either classic
- * or preemptable semantics.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright Red Hat, 2009
- * Copyright IBM Corporation, 2009
- *
- * Author: Ingo Molnar <mingo@elte.hu>
- * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
- */
- #include <linux/delay.h>
- #include <linux/stop_machine.h>
- /*
- * Check the RCU kernel configuration parameters and print informative
- * messages about anything out of the ordinary. If you like #ifdef, you
- * will love this function.
- */
- static void __init rcu_bootup_announce_oddness(void)
- {
- #ifdef CONFIG_RCU_TRACE
- printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
- #endif
- #if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
- printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
- CONFIG_RCU_FANOUT);
- #endif
- #ifdef CONFIG_RCU_FANOUT_EXACT
- printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
- #endif
- #ifdef CONFIG_RCU_FAST_NO_HZ
- printk(KERN_INFO
- "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
- #endif
- #ifdef CONFIG_PROVE_RCU
- printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
- #endif
- #ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
- printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
- #endif
- #ifndef CONFIG_RCU_CPU_STALL_DETECTOR
- printk(KERN_INFO
- "\tRCU-based detection of stalled CPUs is disabled.\n");
- #endif
- #if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
- printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
- #endif
- #if NUM_RCU_LVL_4 != 0
- printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
- #endif
- }
- #ifdef CONFIG_TREE_PREEMPT_RCU
- struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
- DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
- static int rcu_preempted_readers_exp(struct rcu_node *rnp);
- /*
- * Tell them what RCU they are running.
- */
- static void __init rcu_bootup_announce(void)
- {
- printk(KERN_INFO "Preemptable hierarchical RCU implementation.\n");
- rcu_bootup_announce_oddness();
- }
- /*
- * Return the number of RCU-preempt batches processed thus far
- * for debug and statistics.
- */
- long rcu_batches_completed_preempt(void)
- {
- return rcu_preempt_state.completed;
- }
- EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
- /*
- * Return the number of RCU batches processed thus far for debug & stats.
- */
- long rcu_batches_completed(void)
- {
- return rcu_batches_completed_preempt();
- }
- EXPORT_SYMBOL_GPL(rcu_batches_completed);
- /*
- * Force a quiescent state for preemptible RCU.
- */
- void rcu_force_quiescent_state(void)
- {
- force_quiescent_state(&rcu_preempt_state, 0);
- }
- EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
- /*
- * Record a preemptable-RCU quiescent state for the specified CPU. Note
- * that this just means that the task currently running on the CPU is
- * not in a quiescent state. There might be any number of tasks blocked
- * while in an RCU read-side critical section.
- *
- * Unlike the other rcu_*_qs() functions, callers to this function
- * must disable irqs in order to protect the assignment to
- * ->rcu_read_unlock_special.
- */
- static void rcu_preempt_qs(int cpu)
- {
- struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
- rdp->passed_quiesc_completed = rdp->gpnum - 1;
- barrier();
- rdp->passed_quiesc = 1;
- current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
- }
- /*
- * We have entered the scheduler, and the current task might soon be
- * context-switched away from. If this task is in an RCU read-side
- * critical section, we will no longer be able to rely on the CPU to
- * record that fact, so we enqueue the task on the appropriate entry
- * of the blocked_tasks[] array. The task will dequeue itself when
- * it exits the outermost enclosing RCU read-side critical section.
- * Therefore, the current grace period cannot be permitted to complete
- * until the blocked_tasks[] entry indexed by the low-order bit of
- * rnp->gpnum empties.
- *
- * Caller must disable preemption.
- */
- static void rcu_preempt_note_context_switch(int cpu)
- {
- struct task_struct *t = current;
- unsigned long flags;
- int phase;
- struct rcu_data *rdp;
- struct rcu_node *rnp;
- if (t->rcu_read_lock_nesting &&
- (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
- /* Possibly blocking in an RCU read-side critical section. */
- rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
- rnp = rdp->mynode;
- raw_spin_lock_irqsave(&rnp->lock, flags);
- t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
- t->rcu_blocked_node = rnp;
- /*
- * If this CPU has already checked in, then this task
- * will hold up the next grace period rather than the
- * current grace period. Queue the task accordingly.
- * If the task is queued for the current grace period
- * (i.e., this CPU has not yet passed through a quiescent
- * state for the current grace period), then as long
- * as that task remains queued, the current grace period
- * cannot end.
- *
- * But first, note that the current CPU must still be
- * on line!
- */
- WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
- WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
- phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1;
- list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
- }
- /*
- * Either we were not in an RCU read-side critical section to
- * begin with, or we have now recorded that critical section
- * globally. Either way, we can now note a quiescent state
- * for this CPU. Again, if we were in an RCU read-side critical
- * section, and if that critical section was blocking the current
- * grace period, then the fact that the task has been enqueued
- * means that we continue to block the current grace period.
- */
- local_irq_save(flags);
- rcu_preempt_qs(cpu);
- local_irq_restore(flags);
- }
- /*
- * Tree-preemptable RCU implementation for rcu_read_lock().
- * Just increment ->rcu_read_lock_nesting, shared state will be updated
- * if we block.
- */
- void __rcu_read_lock(void)
- {
- current->rcu_read_lock_nesting++;
- barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
- }
- EXPORT_SYMBOL_GPL(__rcu_read_lock);
- /*
- * Check for preempted RCU readers blocking the current grace period
- * for the specified rcu_node structure. If the caller needs a reliable
- * answer, it must hold the rcu_node's ->lock.
- */
- static int rcu_preempted_readers(struct rcu_node *rnp)
- {
- int phase = rnp->gpnum & 0x1;
- return !list_empty(&rnp->blocked_tasks[phase]) ||
- !list_empty(&rnp->blocked_tasks[phase + 2]);
- }
- /*
- * Record a quiescent state for all tasks that were previously queued
- * on the specified rcu_node structure and that were blocking the current
- * RCU grace period. The caller must hold the specified rnp->lock with
- * irqs disabled, and this lock is released upon return, but irqs remain
- * disabled.
- */
- static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
- __releases(rnp->lock)
- {
- unsigned long mask;
- struct rcu_node *rnp_p;
- if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
- return; /* Still need more quiescent states! */
- }
- rnp_p = rnp->parent;
- if (rnp_p == NULL) {
- /*
- * Either there is only one rcu_node in the tree,
- * or tasks were kicked up to root rcu_node due to
- * CPUs going offline.
- */
- rcu_report_qs_rsp(&rcu_preempt_state, flags);
- return;
- }
- /* Report up the rest of the hierarchy. */
- mask = rnp->grpmask;
- raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
- raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */
- rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
- }
- /*
- * Handle special cases during rcu_read_unlock(), such as needing to
- * notify RCU core processing or task having blocked during the RCU
- * read-side critical section.
- */
- static void rcu_read_unlock_special(struct task_struct *t)
- {
- int empty;
- int empty_exp;
- unsigned long flags;
- struct rcu_node *rnp;
- int special;
- /* NMI handlers cannot block and cannot safely manipulate state. */
- if (in_nmi())
- return;
- local_irq_save(flags);
- /*
- * If RCU core is waiting for this CPU to exit critical section,
- * let it know that we have done so.
- */
- special = t->rcu_read_unlock_special;
- if (special & RCU_READ_UNLOCK_NEED_QS) {
- rcu_preempt_qs(smp_processor_id());
- }
- /* Hardware IRQ handlers cannot block. */
- if (in_irq()) {
- local_irq_restore(flags);
- return;
- }
- /* Clean up if blocked during RCU read-side critical section. */
- if (special & RCU_READ_UNLOCK_BLOCKED) {
- t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
- /*
- * Remove this task from the list it blocked on. The
- * task can migrate while we acquire the lock, but at
- * most one time. So at most two passes through loop.
- */
- for (;;) {
- rnp = t->rcu_blocked_node;
- raw_spin_lock(&rnp->lock); /* irqs already disabled. */
- if (rnp == t->rcu_blocked_node)
- break;
- raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
- }
- empty = !rcu_preempted_readers(rnp);
- empty_exp = !rcu_preempted_readers_exp(rnp);
- smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
- list_del_init(&t->rcu_node_entry);
- t->rcu_blocked_node = NULL;
- /*
- * If this was the last task on the current list, and if
- * we aren't waiting on any CPUs, report the quiescent state.
- * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
- */
- if (empty)
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
- else
- rcu_report_unblock_qs_rnp(rnp, flags);
- /*
- * If this was the last task on the expedited lists,
- * then we need to report up the rcu_node hierarchy.
- */
- if (!empty_exp && !rcu_preempted_readers_exp(rnp))
- rcu_report_exp_rnp(&rcu_preempt_state, rnp);
- } else {
- local_irq_restore(flags);
- }
- }
- /*
- * Tree-preemptable RCU implementation for rcu_read_unlock().
- * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
- * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
- * invoke rcu_read_unlock_special() to clean up after a context switch
- * in an RCU read-side critical section and other special cases.
- */
- void __rcu_read_unlock(void)
- {
- struct task_struct *t = current;
- barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
- --t->rcu_read_lock_nesting;
- barrier(); /* decrement before load of ->rcu_read_unlock_special */
- if (t->rcu_read_lock_nesting == 0 &&
- unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
- rcu_read_unlock_special(t);
- #ifdef CONFIG_PROVE_LOCKING
- WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
- #endif /* #ifdef CONFIG_PROVE_LOCKING */
- }
- EXPORT_SYMBOL_GPL(__rcu_read_unlock);
- #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
- #ifdef CONFIG_RCU_CPU_STALL_VERBOSE
- /*
- * Dump detailed information for all tasks blocking the current RCU
- * grace period on the specified rcu_node structure.
- */
- static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
- {
- unsigned long flags;
- struct list_head *lp;
- int phase;
- struct task_struct *t;
- if (rcu_preempted_readers(rnp)) {
- raw_spin_lock_irqsave(&rnp->lock, flags);
- phase = rnp->gpnum & 0x1;
- lp = &rnp->blocked_tasks[phase];
- list_for_each_entry(t, lp, rcu_node_entry)
- sched_show_task(t);
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
- }
- }
- /*
- * Dump detailed information for all tasks blocking the current RCU
- * grace period.
- */
- static void rcu_print_detail_task_stall(struct rcu_state *rsp)
- {
- struct rcu_node *rnp = rcu_get_root(rsp);
- rcu_print_detail_task_stall_rnp(rnp);
- rcu_for_each_leaf_node(rsp, rnp)
- rcu_print_detail_task_stall_rnp(rnp);
- }
- #else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
- static void rcu_print_detail_task_stall(struct rcu_state *rsp)
- {
- }
- #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
- /*
- * Scan the current list of tasks blocked within RCU read-side critical
- * sections, printing out the tid of each.
- */
- static void rcu_print_task_stall(struct rcu_node *rnp)
- {
- struct list_head *lp;
- int phase;
- struct task_struct *t;
- if (rcu_preempted_readers(rnp)) {
- phase = rnp->gpnum & 0x1;
- lp = &rnp->blocked_tasks[phase];
- list_for_each_entry(t, lp, rcu_node_entry)
- printk(" P%d", t->pid);
- }
- }
- /*
- * Suppress preemptible RCU's CPU stall warnings by pushing the
- * time of the next stall-warning message comfortably far into the
- * future.
- */
- static void rcu_preempt_stall_reset(void)
- {
- rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
- }
- #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
- /*
- * Check that the list of blocked tasks for the newly completed grace
- * period is in fact empty. It is a serious bug to complete a grace
- * period that still has RCU readers blocked! This function must be
- * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
- * must be held by the caller.
- */
- static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
- {
- WARN_ON_ONCE(rcu_preempted_readers(rnp));
- WARN_ON_ONCE(rnp->qsmask);
- }
- #ifdef CONFIG_HOTPLUG_CPU
- /*
- * Handle tasklist migration for case in which all CPUs covered by the
- * specified rcu_node have gone offline. Move them up to the root
- * rcu_node. The reason for not just moving them to the immediate
- * parent is to remove the need for rcu_read_unlock_special() to
- * make more than two attempts to acquire the target rcu_node's lock.
- * Returns true if there were tasks blocking the current RCU grace
- * period.
- *
- * Returns 1 if there was previously a task blocking the current grace
- * period on the specified rcu_node structure.
- *
- * The caller must hold rnp->lock with irqs disabled.
- */
- static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
- struct rcu_node *rnp,
- struct rcu_data *rdp)
- {
- int i;
- struct list_head *lp;
- struct list_head *lp_root;
- int retval = 0;
- struct rcu_node *rnp_root = rcu_get_root(rsp);
- struct task_struct *tp;
- if (rnp == rnp_root) {
- WARN_ONCE(1, "Last CPU thought to be offlined?");
- return 0; /* Shouldn't happen: at least one CPU online. */
- }
- WARN_ON_ONCE(rnp != rdp->mynode &&
- (!list_empty(&rnp->blocked_tasks[0]) ||
- !list_empty(&rnp->blocked_tasks[1]) ||
- !list_empty(&rnp->blocked_tasks[2]) ||
- !list_empty(&rnp->blocked_tasks[3])));
- /*
- * Move tasks up to root rcu_node. Rely on the fact that the
- * root rcu_node can be at most one ahead of the rest of the
- * rcu_nodes in terms of gp_num value. This fact allows us to
- * move the blocked_tasks[] array directly, element by element.
- */
- if (rcu_preempted_readers(rnp))
- retval |= RCU_OFL_TASKS_NORM_GP;
- if (rcu_preempted_readers_exp(rnp))
- retval |= RCU_OFL_TASKS_EXP_GP;
- for (i = 0; i < 4; i++) {
- lp = &rnp->blocked_tasks[i];
- lp_root = &rnp_root->blocked_tasks[i];
- while (!list_empty(lp)) {
- tp = list_entry(lp->next, typeof(*tp), rcu_node_entry);
- raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
- list_del(&tp->rcu_node_entry);
- tp->rcu_blocked_node = rnp_root;
- list_add(&tp->rcu_node_entry, lp_root);
- raw_spin_unlock(&rnp_root->lock); /* irqs remain disabled */
- }
- }
- return retval;
- }
- /*
- * Do CPU-offline processing for preemptable RCU.
- */
- static void rcu_preempt_offline_cpu(int cpu)
- {
- __rcu_offline_cpu(cpu, &rcu_preempt_state);
- }
- #endif /* #ifdef CONFIG_HOTPLUG_CPU */
- /*
- * Check for a quiescent state from the current CPU. When a task blocks,
- * the task is recorded in the corresponding CPU's rcu_node structure,
- * which is checked elsewhere.
- *
- * Caller must disable hard irqs.
- */
- static void rcu_preempt_check_callbacks(int cpu)
- {
- struct task_struct *t = current;
- if (t->rcu_read_lock_nesting == 0) {
- rcu_preempt_qs(cpu);
- return;
- }
- if (per_cpu(rcu_preempt_data, cpu).qs_pending)
- t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
- }
- /*
- * Process callbacks for preemptable RCU.
- */
- static void rcu_preempt_process_callbacks(void)
- {
- __rcu_process_callbacks(&rcu_preempt_state,
- &__get_cpu_var(rcu_preempt_data));
- }
- /*
- * Queue a preemptable-RCU callback for invocation after a grace period.
- */
- void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
- {
- __call_rcu(head, func, &rcu_preempt_state);
- }
- EXPORT_SYMBOL_GPL(call_rcu);
- /**
- * synchronize_rcu - wait until a grace period has elapsed.
- *
- * Control will return to the caller some time after a full grace
- * period has elapsed, in other words after all currently executing RCU
- * read-side critical sections have completed. Note, however, that
- * upon return from synchronize_rcu(), the caller might well be executing
- * concurrently with new RCU read-side critical sections that began while
- * synchronize_rcu() was waiting. RCU read-side critical sections are
- * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
- */
- void synchronize_rcu(void)
- {
- struct rcu_synchronize rcu;
- if (!rcu_scheduler_active)
- return;
- init_rcu_head_on_stack(&rcu.head);
- init_completion(&rcu.completion);
- /* Will wake me after RCU finished. */
- call_rcu(&rcu.head, wakeme_after_rcu);
- /* Wait for it. */
- wait_for_completion(&rcu.completion);
- destroy_rcu_head_on_stack(&rcu.head);
- }
- EXPORT_SYMBOL_GPL(synchronize_rcu);
- static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
- static long sync_rcu_preempt_exp_count;
- static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
- /*
- * Return non-zero if there are any tasks in RCU read-side critical
- * sections blocking the current preemptible-RCU expedited grace period.
- * If there is no preemptible-RCU expedited grace period currently in
- * progress, returns zero unconditionally.
- */
- static int rcu_preempted_readers_exp(struct rcu_node *rnp)
- {
- return !list_empty(&rnp->blocked_tasks[2]) ||
- !list_empty(&rnp->blocked_tasks[3]);
- }
- /*
- * return non-zero if there is no RCU expedited grace period in progress
- * for the specified rcu_node structure, in other words, if all CPUs and
- * tasks covered by the specified rcu_node structure have done their bit
- * for the current expedited grace period. Works only for preemptible
- * RCU -- other RCU implementation use other means.
- *
- * Caller must hold sync_rcu_preempt_exp_mutex.
- */
- static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
- {
- return !rcu_preempted_readers_exp(rnp) &&
- ACCESS_ONCE(rnp->expmask) == 0;
- }
- /*
- * Report the exit from RCU read-side critical section for the last task
- * that queued itself during or before the current expedited preemptible-RCU
- * grace period. This event is reported either to the rcu_node structure on
- * which the task was queued or to one of that rcu_node structure's ancestors,
- * recursively up the tree. (Calm down, calm down, we do the recursion
- * iteratively!)
- *
- * Caller must hold sync_rcu_preempt_exp_mutex.
- */
- static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
- {
- unsigned long flags;
- unsigned long mask;
- raw_spin_lock_irqsave(&rnp->lock, flags);
- for (;;) {
- if (!sync_rcu_preempt_exp_done(rnp))
- break;
- if (rnp->parent == NULL) {
- wake_up(&sync_rcu_preempt_exp_wq);
- break;
- }
- mask = rnp->grpmask;
- raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
- rnp = rnp->parent;
- raw_spin_lock(&rnp->lock); /* irqs already disabled */
- rnp->expmask &= ~mask;
- }
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
- }
- /*
- * Snapshot the tasks blocking the newly started preemptible-RCU expedited
- * grace period for the specified rcu_node structure. If there are no such
- * tasks, report it up the rcu_node hierarchy.
- *
- * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
- */
- static void
- sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
- {
- int must_wait;
- raw_spin_lock(&rnp->lock); /* irqs already disabled */
- list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]);
- list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]);
- must_wait = rcu_preempted_readers_exp(rnp);
- raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
- if (!must_wait)
- rcu_report_exp_rnp(rsp, rnp);
- }
- /*
- * Wait for an rcu-preempt grace period, but expedite it. The basic idea
- * is to invoke synchronize_sched_expedited() to push all the tasks to
- * the ->blocked_tasks[] lists, move all entries from the first set of
- * ->blocked_tasks[] lists to the second set, and finally wait for this
- * second set to drain.
- */
- void synchronize_rcu_expedited(void)
- {
- unsigned long flags;
- struct rcu_node *rnp;
- struct rcu_state *rsp = &rcu_preempt_state;
- long snap;
- int trycount = 0;
- smp_mb(); /* Caller's modifications seen first by other CPUs. */
- snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
- smp_mb(); /* Above access cannot bleed into critical section. */
- /*
- * Acquire lock, falling back to synchronize_rcu() if too many
- * lock-acquisition failures. Of course, if someone does the
- * expedited grace period for us, just leave.
- */
- while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
- if (trycount++ < 10)
- udelay(trycount * num_online_cpus());
- else {
- synchronize_rcu();
- return;
- }
- if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
- goto mb_ret; /* Others did our work for us. */
- }
- if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
- goto unlock_mb_ret; /* Others did our work for us. */
- /* force all RCU readers onto blocked_tasks[]. */
- synchronize_sched_expedited();
- raw_spin_lock_irqsave(&rsp->onofflock, flags);
- /* Initialize ->expmask for all non-leaf rcu_node structures. */
- rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
- raw_spin_lock(&rnp->lock); /* irqs already disabled. */
- rnp->expmask = rnp->qsmaskinit;
- raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
- }
- /* Snapshot current state of ->blocked_tasks[] lists. */
- rcu_for_each_leaf_node(rsp, rnp)
- sync_rcu_preempt_exp_init(rsp, rnp);
- if (NUM_RCU_NODES > 1)
- sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
- raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
- /* Wait for snapshotted ->blocked_tasks[] lists to drain. */
- rnp = rcu_get_root(rsp);
- wait_event(sync_rcu_preempt_exp_wq,
- sync_rcu_preempt_exp_done(rnp));
- /* Clean up and exit. */
- smp_mb(); /* ensure expedited GP seen before counter increment. */
- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
- unlock_mb_ret:
- mutex_unlock(&sync_rcu_preempt_exp_mutex);
- mb_ret:
- smp_mb(); /* ensure subsequent action seen after grace period. */
- }
- EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
- /*
- * Check to see if there is any immediate preemptable-RCU-related work
- * to be done.
- */
- static int rcu_preempt_pending(int cpu)
- {
- return __rcu_pending(&rcu_preempt_state,
- &per_cpu(rcu_preempt_data, cpu));
- }
- /*
- * Does preemptable RCU need the CPU to stay out of dynticks mode?
- */
- static int rcu_preempt_needs_cpu(int cpu)
- {
- return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
- }
- /**
- * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
- */
- void rcu_barrier(void)
- {
- _rcu_barrier(&rcu_preempt_state, call_rcu);
- }
- EXPORT_SYMBOL_GPL(rcu_barrier);
- /*
- * Initialize preemptable RCU's per-CPU data.
- */
- static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
- {
- rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
- }
- /*
- * Move preemptable RCU's callbacks from dying CPU to other online CPU.
- */
- static void rcu_preempt_send_cbs_to_online(void)
- {
- rcu_send_cbs_to_online(&rcu_preempt_state);
- }
- /*
- * Initialize preemptable RCU's state structures.
- */
- static void __init __rcu_init_preempt(void)
- {
- rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
- }
- /*
- * Check for a task exiting while in a preemptable-RCU read-side
- * critical section, clean up if so. No need to issue warnings,
- * as debug_check_no_locks_held() already does this if lockdep
- * is enabled.
- */
- void exit_rcu(void)
- {
- struct task_struct *t = current;
- if (t->rcu_read_lock_nesting == 0)
- return;
- t->rcu_read_lock_nesting = 1;
- rcu_read_unlock();
- }
- #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
- /*
- * Tell them what RCU they are running.
- */
- static void __init rcu_bootup_announce(void)
- {
- printk(KERN_INFO "Hierarchical RCU implementation.\n");
- rcu_bootup_announce_oddness();
- }
- /*
- * Return the number of RCU batches processed thus far for debug & stats.
- */
- long rcu_batches_completed(void)
- {
- return rcu_batches_completed_sched();
- }
- EXPORT_SYMBOL_GPL(rcu_batches_completed);
- /*
- * Force a quiescent state for RCU, which, because there is no preemptible
- * RCU, becomes the same as rcu-sched.
- */
- void rcu_force_quiescent_state(void)
- {
- rcu_sched_force_quiescent_state();
- }
- EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
- /*
- * Because preemptable RCU does not exist, we never have to check for
- * CPUs being in quiescent states.
- */
- static void rcu_preempt_note_context_switch(int cpu)
- {
- }
- /*
- * Because preemptable RCU does not exist, there are never any preempted
- * RCU readers.
- */
- static int rcu_preempted_readers(struct rcu_node *rnp)
- {
- return 0;
- }
- #ifdef CONFIG_HOTPLUG_CPU
- /* Because preemptible RCU does not exist, no quieting of tasks. */
- static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
- {
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
- }
- #endif /* #ifdef CONFIG_HOTPLUG_CPU */
- #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
- /*
- * Because preemptable RCU does not exist, we never have to check for
- * tasks blocked within RCU read-side critical sections.
- */
- static void rcu_print_detail_task_stall(struct rcu_state *rsp)
- {
- }
- /*
- * Because preemptable RCU does not exist, we never have to check for
- * tasks blocked within RCU read-side critical sections.
- */
- static void rcu_print_task_stall(struct rcu_node *rnp)
- {
- }
- /*
- * Because preemptible RCU does not exist, there is no need to suppress
- * its CPU stall warnings.
- */
- static void rcu_preempt_stall_reset(void)
- {
- }
- #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
- /*
- * Because there is no preemptable RCU, there can be no readers blocked,
- * so there is no need to check for blocked tasks. So check only for
- * bogus qsmask values.
- */
- static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
- {
- WARN_ON_ONCE(rnp->qsmask);
- }
- #ifdef CONFIG_HOTPLUG_CPU
- /*
- * Because preemptable RCU does not exist, it never needs to migrate
- * tasks that were blocked within RCU read-side critical sections, and
- * such non-existent tasks cannot possibly have been blocking the current
- * grace period.
- */
- static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
- struct rcu_node *rnp,
- struct rcu_data *rdp)
- {
- return 0;
- }
- /*
- * Because preemptable RCU does not exist, it never needs CPU-offline
- * processing.
- */
- static void rcu_preempt_offline_cpu(int cpu)
- {
- }
- #endif /* #ifdef CONFIG_HOTPLUG_CPU */
- /*
- * Because preemptable RCU does not exist, it never has any callbacks
- * to check.
- */
- static void rcu_preempt_check_callbacks(int cpu)
- {
- }
- /*
- * Because preemptable RCU does not exist, it never has any callbacks
- * to process.
- */
- static void rcu_preempt_process_callbacks(void)
- {
- }
- /*
- * Wait for an rcu-preempt grace period, but make it happen quickly.
- * But because preemptable RCU does not exist, map to rcu-sched.
- */
- void synchronize_rcu_expedited(void)
- {
- synchronize_sched_expedited();
- }
- EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
- #ifdef CONFIG_HOTPLUG_CPU
- /*
- * Because preemptable RCU does not exist, there is never any need to
- * report on tasks preempted in RCU read-side critical sections during
- * expedited RCU grace periods.
- */
- static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
- {
- return;
- }
- #endif /* #ifdef CONFIG_HOTPLUG_CPU */
- /*
- * Because preemptable RCU does not exist, it never has any work to do.
- */
- static int rcu_preempt_pending(int cpu)
- {
- return 0;
- }
- /*
- * Because preemptable RCU does not exist, it never needs any CPU.
- */
- static int rcu_preempt_needs_cpu(int cpu)
- {
- return 0;
- }
- /*
- * Because preemptable RCU does not exist, rcu_barrier() is just
- * another name for rcu_barrier_sched().
- */
- void rcu_barrier(void)
- {
- rcu_barrier_sched();
- }
- EXPORT_SYMBOL_GPL(rcu_barrier);
- /*
- * Because preemptable RCU does not exist, there is no per-CPU
- * data to initialize.
- */
- static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
- {
- }
- /*
- * Because there is no preemptable RCU, there are no callbacks to move.
- */
- static void rcu_preempt_send_cbs_to_online(void)
- {
- }
- /*
- * Because preemptable RCU does not exist, it need not be initialized.
- */
- static void __init __rcu_init_preempt(void)
- {
- }
- #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
- #ifndef CONFIG_SMP
- void synchronize_sched_expedited(void)
- {
- cond_resched();
- }
- EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
- #else /* #ifndef CONFIG_SMP */
- static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
- static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
- static int synchronize_sched_expedited_cpu_stop(void *data)
- {
- /*
- * There must be a full memory barrier on each affected CPU
- * between the time that try_stop_cpus() is called and the
- * time that it returns.
- *
- * In the current initial implementation of cpu_stop, the
- * above condition is already met when the control reaches
- * this point and the following smp_mb() is not strictly
- * necessary. Do smp_mb() anyway for documentation and
- * robustness against future implementation changes.
- */
- smp_mb(); /* See above comment block. */
- return 0;
- }
- /*
- * Wait for an rcu-sched grace period to elapse, but use "big hammer"
- * approach to force grace period to end quickly. This consumes
- * significant time on all CPUs, and is thus not recommended for
- * any sort of common-case code.
- *
- * Note that it is illegal to call this function while holding any
- * lock that is acquired by a CPU-hotplug notifier. Failing to
- * observe this restriction will result in deadlock.
- *
- * This implementation can be thought of as an application of ticket
- * locking to RCU, with sync_sched_expedited_started and
- * sync_sched_expedited_done taking on the roles of the halves
- * of the ticket-lock word. Each task atomically increments
- * sync_sched_expedited_started upon entry, snapshotting the old value,
- * then attempts to stop all the CPUs. If this succeeds, then each
- * CPU will have executed a context switch, resulting in an RCU-sched
- * grace period. We are then done, so we use atomic_cmpxchg() to
- * update sync_sched_expedited_done to match our snapshot -- but
- * only if someone else has not already advanced past our snapshot.
- *
- * On the other hand, if try_stop_cpus() fails, we check the value
- * of sync_sched_expedited_done. If it has advanced past our
- * initial snapshot, then someone else must have forced a grace period
- * some time after we took our snapshot. In this case, our work is
- * done for us, and we can simply return. Otherwise, we try again,
- * but keep our initial snapshot for purposes of checking for someone
- * doing our work for us.
- *
- * If we fail too many times in a row, we fall back to synchronize_sched().
- */
- void synchronize_sched_expedited(void)
- {
- int firstsnap, s, snap, trycount = 0;
- /* Note that atomic_inc_return() implies full memory barrier. */
- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
- get_online_cpus();
- /*
- * Each pass through the following loop attempts to force a
- * context switch on each CPU.
- */
- while (try_stop_cpus(cpu_online_mask,
- synchronize_sched_expedited_cpu_stop,
- NULL) == -EAGAIN) {
- put_online_cpus();
- /* No joy, try again later. Or just synchronize_sched(). */
- if (trycount++ < 10)
- udelay(trycount * num_online_cpus());
- else {
- synchronize_sched();
- return;
- }
- /* Check to see if someone else did our work for us. */
- s = atomic_read(&sync_sched_expedited_done);
- if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
- smp_mb(); /* ensure test happens before caller kfree */
- return;
- }
- /*
- * Refetching sync_sched_expedited_started allows later
- * callers to piggyback on our grace period. We subtract
- * 1 to get the same token that the last incrementer got.
- * We retry after they started, so our grace period works
- * for them, and they started after our first try, so their
- * grace period works for us.
- */
- get_online_cpus();
- snap = atomic_read(&sync_sched_expedited_started) - 1;
- smp_mb(); /* ensure read is before try_stop_cpus(). */
- }
- /*
- * Everyone up to our most recent fetch is covered by our grace
- * period. Update the counter, but only if our work is still
- * relevant -- which it won't be if someone who started later
- * than we did beat us to the punch.
- */
- do {
- s = atomic_read(&sync_sched_expedited_done);
- if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
- smp_mb(); /* ensure test happens before caller kfree */
- break;
- }
- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
- put_online_cpus();
- }
- EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
- #endif /* #else #ifndef CONFIG_SMP */
- #if !defined(CONFIG_RCU_FAST_NO_HZ)
- /*
- * Check to see if any future RCU-related work will need to be done
- * by the current CPU, even if none need be done immediately, returning
- * 1 if so. This function is part of the RCU implementation; it is -not-
- * an exported member of the RCU API.
- *
- * Because we have preemptible RCU, just check whether this CPU needs
- * any flavor of RCU. Do not chew up lots of CPU cycles with preemption
- * disabled in a most-likely vain attempt to cause RCU not to need this CPU.
- */
- int rcu_needs_cpu(int cpu)
- {
- return rcu_needs_cpu_quick_check(cpu);
- }
- /*
- * Check to see if we need to continue a callback-flush operations to
- * allow the last CPU to enter dyntick-idle mode. But fast dyntick-idle
- * entry is not configured, so we never do need to.
- */
- static void rcu_needs_cpu_flush(void)
- {
- }
- #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
- #define RCU_NEEDS_CPU_FLUSHES 5
- static DEFINE_PER_CPU(int, rcu_dyntick_drain);
- static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
- /*
- * Check to see if any future RCU-related work will need to be done
- * by the current CPU, even if none need be done immediately, returning
- * 1 if so. This function is part of the RCU implementation; it is -not-
- * an exported member of the RCU API.
- *
- * Because we are not supporting preemptible RCU, attempt to accelerate
- * any current grace periods so that RCU no longer needs this CPU, but
- * only if all other CPUs are already in dynticks-idle mode. This will
- * allow the CPU cores to be powered down immediately, as opposed to after
- * waiting many milliseconds for grace periods to elapse.
- *
- * Because it is not legal to invoke rcu_process_callbacks() with irqs
- * disabled, we do one pass of force_quiescent_state(), then do a
- * raise_softirq() to cause rcu_process_callbacks() to be invoked later.
- * The per-cpu rcu_dyntick_drain variable controls the sequencing.
- */
- int rcu_needs_cpu(int cpu)
- {
- int c = 0;
- int snap;
- int snap_nmi;
- int thatcpu;
- /* Check for being in the holdoff period. */
- if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
- return rcu_needs_cpu_quick_check(cpu);
- /* Don't bother unless we are the last non-dyntick-idle CPU. */
- for_each_online_cpu(thatcpu) {
- if (thatcpu == cpu)
- continue;
- snap = per_cpu(rcu_dynticks, thatcpu).dynticks;
- snap_nmi = per_cpu(rcu_dynticks, thatcpu).dynticks_nmi;
- smp_mb(); /* Order sampling of snap with end of grace period. */
- if (((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)) {
- per_cpu(rcu_dyntick_drain, cpu) = 0;
- per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
- return rcu_needs_cpu_quick_check(cpu);
- }
- }
- /* Check and update the rcu_dyntick_drain sequencing. */
- if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
- /* First time through, initialize the counter. */
- per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
- } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
- /* We have hit the limit, so time to give up. */
- per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
- return rcu_needs_cpu_quick_check(cpu);
- }
- /* Do one step pushing remaining RCU callbacks through. */
- if (per_cpu(rcu_sched_data, cpu).nxtlist) {
- rcu_sched_qs(cpu);
- force_quiescent_state(&rcu_sched_state, 0);
- c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
- }
- if (per_cpu(rcu_bh_data, cpu).nxtlist) {
- rcu_bh_qs(cpu);
- force_quiescent_state(&rcu_bh_state, 0);
- c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
- }
- /* If RCU callbacks are still pending, RCU still needs this CPU. */
- if (c)
- raise_softirq(RCU_SOFTIRQ);
- return c;
- }
- /*
- * Check to see if we need to continue a callback-flush operations to
- * allow the last CPU to enter dyntick-idle mode.
- */
- static void rcu_needs_cpu_flush(void)
- {
- int cpu = smp_processor_id();
- unsigned long flags;
- if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
- return;
- local_irq_save(flags);
- (void)rcu_needs_cpu(cpu);
- local_irq_restore(flags);
- }
- #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
|