@@ -161,10 +161,9 @@ extern struct timezone sys_tz;
 static long timezone_offset;
 
 unsigned long ppc_proc_freq;
-EXPORT_SYMBOL(ppc_proc_freq);
+EXPORT_SYMBOL_GPL(ppc_proc_freq);
 unsigned long ppc_tb_freq;
-
-static DEFINE_PER_CPU(u64, last_jiffy);
+EXPORT_SYMBOL_GPL(ppc_tb_freq);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 /*
@@ -185,6 +184,8 @@ DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
 
 cputime_t cputime_one_jiffy;
 
+void (*dtl_consumer)(struct dtl_entry *, u64);
+
 static void calc_cputime_factors(void)
 {
         struct div_result res;
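The dtl_consumer pointer added above is a hook: it stays NULL unless an optional consumer (the dispatch-trace-log code under arch/powerpc/kernel) registers itself, and the accounting path then passes it each dispatch trace log entry as it is retired. A rough user-space model of that pattern follows; the struct layout here is a simplified stand-in, not the real hypervisor-defined dtl_entry, and print_entry is a hypothetical consumer:

    #include <stdio.h>
    #include <stdint.h>

    /* Simplified stand-in for struct dtl_entry; the real layout is
     * dictated by the hypervisor interface, not this sketch. */
    struct dtl_entry {
            uint64_t timebase;
            uint32_t enqueue_to_dispatch_time;
            uint32_t ready_to_enqueue_time;
    };

    /* The hook: NULL by default; an optional consumer sets it. */
    static void (*dtl_consumer)(struct dtl_entry *, uint64_t);

    /* Hypothetical consumer that just prints each retired entry. */
    static void print_entry(struct dtl_entry *dtl, uint64_t index)
    {
            printf("entry %llu: tb=%llu\n", (unsigned long long)index,
                   (unsigned long long)dtl->timebase);
    }

    int main(void)
    {
            struct dtl_entry e = { 1000, 5, 7 };

            dtl_consumer = print_entry;     /* consumer registers itself */
            if (dtl_consumer)               /* accounting path calls it if set */
                    dtl_consumer(&e, 0);
            return 0;
    }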
@@ -200,62 +201,153 @@ static void calc_cputime_factors(void)
 }
 
 /*
- * Read the PURR on systems that have it, otherwise the timebase.
+ * Read the SPURR on systems that have it, otherwise the PURR,
+ * or if that doesn't exist return the timebase value passed in.
  */
-static u64 read_purr(void)
+static u64 read_spurr(u64 tb)
 {
+        if (cpu_has_feature(CPU_FTR_SPURR))
+                return mfspr(SPRN_SPURR);
         if (cpu_has_feature(CPU_FTR_PURR))
                 return mfspr(SPRN_PURR);
-        return mftb();
+        return tb;
 }
 
+#ifdef CONFIG_PPC_SPLPAR
+
 /*
- * Read the SPURR on systems that have it, otherwise the purr
+ * Scan the dispatch trace log and count up the stolen time.
+ * Should be called with interrupts disabled.
  */
-static u64 read_spurr(u64 purr)
+static u64 scan_dispatch_log(u64 stop_tb)
 {
-        /*
-         * cpus without PURR won't have a SPURR
-         * We already know the former when we use this, so tell gcc
-         */
-        if (cpu_has_feature(CPU_FTR_PURR) && cpu_has_feature(CPU_FTR_SPURR))
-                return mfspr(SPRN_SPURR);
-        return purr;
+        u64 i = local_paca->dtl_ridx;
+        struct dtl_entry *dtl = local_paca->dtl_curr;
+        struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
+        struct lppaca *vpa = local_paca->lppaca_ptr;
+        u64 tb_delta;
+        u64 stolen = 0;
+        u64 dtb;
+
+        if (i == vpa->dtl_idx)
+                return 0;
+        while (i < vpa->dtl_idx) {
+                if (dtl_consumer)
+                        dtl_consumer(dtl, i);
+                dtb = dtl->timebase;
+                tb_delta = dtl->enqueue_to_dispatch_time +
+                        dtl->ready_to_enqueue_time;
+                barrier();
+                if (i + N_DISPATCH_LOG < vpa->dtl_idx) {
+                        /* buffer has overflowed */
+                        i = vpa->dtl_idx - N_DISPATCH_LOG;
+                        dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
+                        continue;
+                }
+                if (dtb > stop_tb)
+                        break;
+                stolen += tb_delta;
+                ++i;
+                ++dtl;
+                if (dtl == dtl_end)
+                        dtl = local_paca->dispatch_log;
+        }
+        local_paca->dtl_ridx = i;
+        local_paca->dtl_curr = dtl;
+        return stolen;
 }
 
+/*
+ * Accumulate stolen time by scanning the dispatch trace log.
+ * Called on entry from user mode.
+ */
+void accumulate_stolen_time(void)
+{
+        u64 sst, ust;
+
+        sst = scan_dispatch_log(get_paca()->starttime_user);
+        ust = scan_dispatch_log(get_paca()->starttime);
+        get_paca()->system_time -= sst;
+        get_paca()->user_time -= ust;
+        get_paca()->stolen_time += ust + sst;
+}
+
+static inline u64 calculate_stolen_time(u64 stop_tb)
+{
+        u64 stolen = 0;
+
+        if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) {
+                stolen = scan_dispatch_log(stop_tb);
+                get_paca()->system_time -= stolen;
+        }
+
+        stolen += get_paca()->stolen_time;
+        get_paca()->stolen_time = 0;
+        return stolen;
+}
+
+#else /* CONFIG_PPC_SPLPAR */
+static inline u64 calculate_stolen_time(u64 stop_tb)
+{
+        return 0;
+}
+
+#endif /* CONFIG_PPC_SPLPAR */
+
 /*
  * Account time for a transition between system, hard irq
  * or soft irq state.
  */
 void account_system_vtime(struct task_struct *tsk)
 {
-        u64 now, nowscaled, delta, deltascaled, sys_time;
+        u64 now, nowscaled, delta, deltascaled;
         unsigned long flags;
+        u64 stolen, udelta, sys_scaled, user_scaled;
 
         local_irq_save(flags);
-        now = read_purr();
+        now = mftb();
         nowscaled = read_spurr(now);
-        delta = now - get_paca()->startpurr;
+        get_paca()->system_time += now - get_paca()->starttime;
+        get_paca()->starttime = now;
         deltascaled = nowscaled - get_paca()->startspurr;
-        get_paca()->startpurr = now;
         get_paca()->startspurr = nowscaled;
-        if (!in_interrupt()) {
-                /* deltascaled includes both user and system time.
-                 * Hence scale it based on the purr ratio to estimate
-                 * the system time */
-                sys_time = get_paca()->system_time;
-                if (get_paca()->user_time)
-                        deltascaled = deltascaled * sys_time /
-                                (sys_time + get_paca()->user_time);
-                delta += sys_time;
-                get_paca()->system_time = 0;
+
+        stolen = calculate_stolen_time(now);
+
+        delta = get_paca()->system_time;
+        get_paca()->system_time = 0;
+        udelta = get_paca()->user_time - get_paca()->utime_sspurr;
+        get_paca()->utime_sspurr = get_paca()->user_time;
+
+        /*
+         * Because we don't read the SPURR on every kernel entry/exit,
+         * deltascaled includes both user and system SPURR ticks.
+         * Apportion these ticks to system SPURR ticks and user
+         * SPURR ticks in the same ratio as the system time (delta)
+         * and user time (udelta) values obtained from the timebase
+         * over the same interval. The system ticks get accounted here;
+         * the user ticks get saved up in paca->user_time_scaled to be
+         * used by account_process_tick.
+         */
+        sys_scaled = delta;
+        user_scaled = udelta;
+        if (deltascaled != delta + udelta) {
+                if (udelta) {
+                        sys_scaled = deltascaled * delta / (delta + udelta);
+                        user_scaled = deltascaled - sys_scaled;
+                } else {
+                        sys_scaled = deltascaled;
+                }
+        }
+        get_paca()->user_time_scaled += user_scaled;
+
+        if (in_irq() || idle_task(smp_processor_id()) != tsk) {
+                account_system_time(tsk, 0, delta, sys_scaled);
+                if (stolen)
+                        account_steal_time(stolen);
+        } else {
+                account_idle_time(delta + stolen);
         }
-        if (in_irq() || idle_task(smp_processor_id()) != tsk)
-                account_system_time(tsk, 0, delta, deltascaled);
-        else
-                account_idle_time(delta);
-        __get_cpu_var(cputime_last_delta) = delta;
-        __get_cpu_var(cputime_scaled_last_delta) = deltascaled;
         local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(account_system_vtime);
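The apportioning step in account_system_vtime() above rewards a worked example. Suppose the timebase shows delta = 300 system ticks and udelta = 100 user ticks over an interval in which the SPURR advanced by deltascaled = 200 (the thread got roughly half the core). Then sys_scaled = 200 * 300 / 400 = 150 and user_scaled = 50, preserving the 3:1 system-to-user ratio within the scaled total. A self-contained sketch of the same integer arithmetic, mirroring the hunk above:

    #include <stdio.h>
    #include <stdint.h>

    /* Mirror of the apportioning step in account_system_vtime(). */
    static void apportion(uint64_t delta, uint64_t udelta, uint64_t deltascaled,
                          uint64_t *sys_scaled, uint64_t *user_scaled)
    {
            *sys_scaled = delta;
            *user_scaled = udelta;
            if (deltascaled != delta + udelta) {
                    if (udelta) {
                            *sys_scaled = deltascaled * delta / (delta + udelta);
                            *user_scaled = deltascaled - *sys_scaled;
                    } else {
                            *sys_scaled = deltascaled;
                    }
            }
    }

    int main(void)
    {
            uint64_t s, u;

            apportion(300, 100, 200, &s, &u);
            printf("sys_scaled=%llu user_scaled=%llu\n",
                   (unsigned long long)s, (unsigned long long)u);  /* 150, 50 */
            return 0;
    }

On systems where read_spurr() falls back to the timebase, deltascaled typically equals delta + udelta, so the guard skips the division entirely and the raw values pass through unscaled.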
@@ -265,125 +357,26 @@ EXPORT_SYMBOL_GPL(account_system_vtime);
  * by the exception entry and exit code to the generic process
  * user and system time records.
  * Must be called with interrupts disabled.
+ * Assumes that account_system_vtime() has been called recently
+ * (i.e. since the last entry from usermode) so that
+ * get_paca()->user_time_scaled is up to date.
  */
 void account_process_tick(struct task_struct *tsk, int user_tick)
 {
         cputime_t utime, utimescaled;
 
         utime = get_paca()->user_time;
+        utimescaled = get_paca()->user_time_scaled;
         get_paca()->user_time = 0;
-        utimescaled = cputime_to_scaled(utime);
+        get_paca()->user_time_scaled = 0;
+        get_paca()->utime_sspurr = 0;
         account_user_time(tsk, utime, utimescaled);
 }
 
-/*
- * Stuff for accounting stolen time.
- */
-struct cpu_purr_data {
-        int     initialized;            /* thread is running */
-        u64     tb;                     /* last TB value read */
-        u64     purr;                   /* last PURR value read */
-        u64     spurr;                  /* last SPURR value read */
-};
-
-/*
- * Each entry in the cpu_purr_data array is manipulated only by its
- * "owner" cpu -- usually in the timer interrupt but also occasionally
- * in process context for cpu online.  As long as cpus do not touch
- * each others' cpu_purr_data, disabling local interrupts is
- * sufficient to serialize accesses.
- */
-static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);
-
-static void snapshot_tb_and_purr(void *data)
-{
-        unsigned long flags;
-        struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);
-
-        local_irq_save(flags);
-        p->tb = get_tb_or_rtc();
-        p->purr = mfspr(SPRN_PURR);
-        wmb();
-        p->initialized = 1;
-        local_irq_restore(flags);
-}
-
-/*
- * Called during boot when all cpus have come up.
- */
-void snapshot_timebases(void)
-{
-        if (!cpu_has_feature(CPU_FTR_PURR))
-                return;
-        on_each_cpu(snapshot_tb_and_purr, NULL, 1);
-}
-
-/*
- * Must be called with interrupts disabled.
- */
-void calculate_steal_time(void)
-{
-        u64 tb, purr;
-        s64 stolen;
-        struct cpu_purr_data *pme;
-
-        pme = &__get_cpu_var(cpu_purr_data);
-        if (!pme->initialized)
-                return;         /* !CPU_FTR_PURR or early in early boot */
-        tb = mftb();
-        purr = mfspr(SPRN_PURR);
-        stolen = (tb - pme->tb) - (purr - pme->purr);
-        if (stolen > 0) {
-                if (idle_task(smp_processor_id()) != current)
-                        account_steal_time(stolen);
-                else
-                        account_idle_time(stolen);
-        }
-        pme->tb = tb;
-        pme->purr = purr;
-}
-
-#ifdef CONFIG_PPC_SPLPAR
-/*
- * Must be called before the cpu is added to the online map when
- * a cpu is being brought up at runtime.
- */
-static void snapshot_purr(void)
-{
-        struct cpu_purr_data *pme;
-        unsigned long flags;
-
-        if (!cpu_has_feature(CPU_FTR_PURR))
-                return;
-        local_irq_save(flags);
-        pme = &__get_cpu_var(cpu_purr_data);
-        pme->tb = mftb();
-        pme->purr = mfspr(SPRN_PURR);
-        pme->initialized = 1;
-        local_irq_restore(flags);
-}
-
-#endif /* CONFIG_PPC_SPLPAR */
-
 #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
 #define calc_cputime_factors()
-#define calculate_steal_time() do { } while (0)
 #endif
 
-#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
-#define snapshot_purr()        do { } while (0)
-#endif
-
-/*
- * Called when a cpu comes up after the system has finished booting,
- * i.e. as a result of a hotplug cpu action.
- */
-void snapshot_timebase(void)
-{
-        __get_cpu_var(last_jiffy) = get_tb_or_rtc();
-        snapshot_purr();
-}
-
 void __delay(unsigned long loops)
 {
         unsigned long start;
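One subtlety in the account_process_tick() hunk above: utimescaled now comes from paca->user_time_scaled, which account_system_vtime() tops up on each kernel entry, instead of being recomputed with cputime_to_scaled(). The low-level entry/exit code accumulates raw timebase ticks into paca->user_time, and this function drains both counters once per tick. A toy model of that accumulate-then-drain flow, with a plain struct as a hypothetical stand-in for the paca:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical stand-in for the paca accounting fields involved here. */
    struct paca_model {
            uint64_t user_time;        /* raw timebase ticks from entry/exit code */
            uint64_t user_time_scaled; /* scaled ticks banked by account_system_vtime() */
            uint64_t utime_sspurr;     /* portion of user_time already apportioned */
    };

    /* Drain-and-reset, as the new account_process_tick() does. */
    static void process_tick(struct paca_model *p)
    {
            uint64_t utime = p->user_time;
            uint64_t utimescaled = p->user_time_scaled;

            p->user_time = 0;
            p->user_time_scaled = 0;
            p->utime_sspurr = 0;
            printf("account_user_time(utime=%llu, utimescaled=%llu)\n",
                   (unsigned long long)utime, (unsigned long long)utimescaled);
    }

    int main(void)
    {
            struct paca_model p = { 100, 60, 100 };

            process_tick(&p);
            return 0;
    }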
@@ -585,8 +578,6 @@ void timer_interrupt(struct pt_regs * regs)
         old_regs = set_irq_regs(regs);
         irq_enter();
 
-        calculate_steal_time();
-
         if (test_irq_work_pending()) {
                 clear_irq_work_pending();
                 irq_work_run();
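With stolen time now folded into account_system_vtime() via calculate_stolen_time(), the per-tick calculate_steal_time() call above becomes dead code and is dropped. Finally, the ring-buffer convention used by scan_dispatch_log() earlier is worth spelling out: dtl_ridx is a free-running 64-bit index that only ever grows, index % N_DISPATCH_LOG selects the slot, and the producer running more than N_DISPATCH_LOG entries ahead signals overflow. A minimal sketch of that indexing scheme, assuming a 4-entry log (the real size is fixed by the lppaca layout):

    #include <stdio.h>
    #include <stdint.h>

    #define N_DISPATCH_LOG 4  /* assumed size for the sketch */

    static uint64_t slot[N_DISPATCH_LOG];

    int main(void)
    {
            uint64_t ridx = 0;  /* consumer index (like paca dtl_ridx) */
            uint64_t widx = 0;  /* producer index (like vpa->dtl_idx) */

            /* Producer writes 6 entries into 4 slots: entries 0 and 1 are lost. */
            for (uint64_t v = 0; v < 6; v++)
                    slot[widx++ % N_DISPATCH_LOG] = v;

            while (ridx < widx) {
                    if (ridx + N_DISPATCH_LOG < widx) {
                            /* buffer has overflowed: resume at oldest survivor */
                            ridx = widx - N_DISPATCH_LOG;
                            continue;
                    }
                    printf("consume entry %llu from slot %llu\n",
                           (unsigned long long)slot[ridx % N_DISPATCH_LOG],
                           (unsigned long long)(ridx % N_DISPATCH_LOG));
                    ridx++;
            }
            return 0;
    }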