@@ -532,25 +532,60 @@ void __init iSeries_time_init_early(void)
 }
 #endif /* CONFIG_PPC_ISERIES */
 
-#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32)
-DEFINE_PER_CPU(u8, perf_event_pending);
+#ifdef CONFIG_PERF_EVENTS
 
-void set_perf_event_pending(void)
+/*
+ * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
+ */
+#ifdef CONFIG_PPC64
+static inline unsigned long test_perf_event_pending(void)
 {
-	get_cpu_var(perf_event_pending) = 1;
-	set_dec(1);
-	put_cpu_var(perf_event_pending);
+	unsigned long x;
+
+	asm volatile("lbz %0,%1(13)"
+		: "=r" (x)
+		: "i" (offsetof(struct paca_struct, perf_event_pending)));
+	return x;
 }
 
+static inline void set_perf_event_pending_flag(void)
+{
+	asm volatile("stb %0,%1(13)" : :
+		"r" (1),
+		"i" (offsetof(struct paca_struct, perf_event_pending)));
+}
+
+static inline void clear_perf_event_pending(void)
+{
+	asm volatile("stb %0,%1(13)" : :
+		"r" (0),
+		"i" (offsetof(struct paca_struct, perf_event_pending)));
+}
+
+#else /* 32-bit */
+
+DEFINE_PER_CPU(u8, perf_event_pending);
+
+#define set_perf_event_pending_flag()	__get_cpu_var(perf_event_pending) = 1
 #define test_perf_event_pending()	__get_cpu_var(perf_event_pending)
 #define clear_perf_event_pending()	__get_cpu_var(perf_event_pending) = 0
 
-#else  /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
+#endif /* 32 vs 64 bit */
+
+void set_perf_event_pending(void)
+{
+	preempt_disable();
+	set_perf_event_pending_flag();
+	set_dec(1);
+	preempt_enable();
+}
+
+#else  /* CONFIG_PERF_EVENTS */
 
 #define test_perf_event_pending()	0
 #define clear_perf_event_pending()
 
-#endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
+#endif /* CONFIG_PERF_EVENTS */
 
 /*
  * For iSeries shared processors, we have to let the hypervisor
@@ -582,10 +617,6 @@ void timer_interrupt(struct pt_regs * regs)
 	set_dec(DECREMENTER_MAX);
 
 #ifdef CONFIG_PPC32
-	if (test_perf_event_pending()) {
-		clear_perf_event_pending();
-		perf_event_do_pending();
-	}
 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
 		do_IRQ(regs);
 #endif
@@ -604,6 +635,11 @@ void timer_interrupt(struct pt_regs * regs)
 
 	calculate_steal_time();
 
+	if (test_perf_event_pending()) {
+		clear_perf_event_pending();
+		perf_event_do_pending();
+	}
+
 #ifdef CONFIG_PPC_ISERIES
 	if (firmware_has_feature(FW_FEATURE_ISERIES))
 		get_lppaca()->int_dword.fields.decr_int = 0;