@@ -1920,10 +1920,9 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
  * They are read and saved off onto struct rq in update_rq_clock().
  * This may result in other CPU reading this CPU's irq time and can
  * race with irq/account_system_vtime on this CPU. We would either get old
- * or new value (or semi updated value on 32 bit) with a side effect of
- * accounting a slice of irq time to wrong task when irq is in progress
- * while we read rq->clock. That is a worthy compromise in place of having
- * locks on each irq in account_system_time.
+ * or new value with a side effect of accounting a slice of irq time to wrong
+ * task when irq is in progress while we read rq->clock. That is a worthy
+ * compromise in place of having locks on each irq in account_system_time.
  */
 static DEFINE_PER_CPU(u64, cpu_hardirq_time);
 static DEFINE_PER_CPU(u64, cpu_softirq_time);
@@ -1941,10 +1940,48 @@ void disable_sched_clock_irqtime(void)
 	sched_clock_irqtime = 0;
 }
 
-static inline u64 irq_time_cpu(int cpu)
+#ifndef CONFIG_64BIT
+static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
+
+static inline void irq_time_write_begin(void)
+{
+	__this_cpu_inc(irq_time_seq.sequence);
+	smp_wmb();
+}
+
+static inline void irq_time_write_end(void)
+{
+	smp_wmb();
+	__this_cpu_inc(irq_time_seq.sequence);
+}
+
+static inline u64 irq_time_read(int cpu)
+{
+	u64 irq_time;
+	unsigned seq;
+
+	do {
+		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
+		irq_time = per_cpu(cpu_softirq_time, cpu) +
+			   per_cpu(cpu_hardirq_time, cpu);
+	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
+
+	return irq_time;
+}
+#else /* CONFIG_64BIT */
+static inline void irq_time_write_begin(void)
+{
+}
+
+static inline void irq_time_write_end(void)
+{
+}
+
+static inline u64 irq_time_read(int cpu)
 {
 	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
 }
+#endif /* CONFIG_64BIT */
 
 /*
  * Called before incrementing preempt_count on {soft,}irq_enter
@@ -1965,6 +2002,7 @@ void account_system_vtime(struct task_struct *curr)
 	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
 	__this_cpu_add(irq_start_time, delta);
 
+	irq_time_write_begin();
 	/*
 	 * We do not account for softirq time from ksoftirqd here.
 	 * We want to continue accounting softirq time to ksoftirqd thread
@@ -1976,6 +2014,7 @@ void account_system_vtime(struct task_struct *curr)
 	else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD))
 		__this_cpu_add(cpu_softirq_time, delta);
 
+	irq_time_write_end();
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(account_system_vtime);
@@ -1984,7 +2023,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 {
 	s64 irq_delta;
 
-	irq_delta = irq_time_cpu(cpu_of(rq)) - rq->prev_irq_time;
+	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
 
 	/*
 	 * Since irq_time is only updated on {soft,}irq_exit, we might run into