@@ -1186,7 +1186,7 @@ static void perf_log_period(struct perf_counter *counter, u64 period);
 static void perf_adjust_freq(struct perf_counter_context *ctx)
 {
 	struct perf_counter *counter;
-	u64 interrupts, irq_period;
+	u64 interrupts, sample_period;
 	u64 events, period;
 	s64 delta;
 
@@ -1204,23 +1204,23 @@ static void perf_adjust_freq(struct perf_counter_context *ctx)
 			interrupts = 2*sysctl_perf_counter_limit/HZ;
 		}
 
-		if (!counter->hw_event.freq || !counter->hw_event.irq_freq)
+		if (!counter->hw_event.freq || !counter->hw_event.sample_freq)
 			continue;
 
-		events = HZ * interrupts * counter->hw.irq_period;
-		period = div64_u64(events, counter->hw_event.irq_freq);
+		events = HZ * interrupts * counter->hw.sample_period;
+		period = div64_u64(events, counter->hw_event.sample_freq);
 
-		delta = (s64)(1 + period - counter->hw.irq_period);
+		delta = (s64)(1 + period - counter->hw.sample_period);
 		delta >>= 1;
 
-		irq_period = counter->hw.irq_period + delta;
+		sample_period = counter->hw.sample_period + delta;
 
-		if (!irq_period)
-			irq_period = 1;
+		if (!sample_period)
+			sample_period = 1;
 
-		perf_log_period(counter, irq_period);
+		perf_log_period(counter, sample_period);
 
-		counter->hw.irq_period = irq_period;
+		counter->hw.sample_period = sample_period;
 	}
 	spin_unlock(&ctx->lock);
 }
@@ -2297,7 +2297,7 @@ static void perf_counter_output(struct perf_counter *counter,
 				int nmi, struct pt_regs *regs, u64 addr)
 {
 	int ret;
-	u64 record_type = counter->hw_event.record_type;
+	u64 sample_type = counter->hw_event.sample_type;
 	struct perf_output_handle handle;
 	struct perf_event_header header;
 	u64 ip;
@@ -2321,61 +2321,61 @@ static void perf_counter_output(struct perf_counter *counter,
 	header.misc = PERF_EVENT_MISC_OVERFLOW;
 	header.misc |= perf_misc_flags(regs);
 
-	if (record_type & PERF_RECORD_IP) {
+	if (sample_type & PERF_SAMPLE_IP) {
 		ip = perf_instruction_pointer(regs);
-		header.type |= PERF_RECORD_IP;
+		header.type |= PERF_SAMPLE_IP;
 		header.size += sizeof(ip);
 	}
 
-	if (record_type & PERF_RECORD_TID) {
+	if (sample_type & PERF_SAMPLE_TID) {
 		/* namespace issues */
 		tid_entry.pid = perf_counter_pid(counter, current);
 		tid_entry.tid = perf_counter_tid(counter, current);
 
-		header.type |= PERF_RECORD_TID;
+		header.type |= PERF_SAMPLE_TID;
 		header.size += sizeof(tid_entry);
 	}
 
-	if (record_type & PERF_RECORD_TIME) {
+	if (sample_type & PERF_SAMPLE_TIME) {
 		/*
 		 * Maybe do better on x86 and provide cpu_clock_nmi()
 		 */
 		time = sched_clock();
 
-		header.type |= PERF_RECORD_TIME;
+		header.type |= PERF_SAMPLE_TIME;
 		header.size += sizeof(u64);
 	}
 
-	if (record_type & PERF_RECORD_ADDR) {
-		header.type |= PERF_RECORD_ADDR;
+	if (sample_type & PERF_SAMPLE_ADDR) {
+		header.type |= PERF_SAMPLE_ADDR;
 		header.size += sizeof(u64);
 	}
 
-	if (record_type & PERF_RECORD_CONFIG) {
-		header.type |= PERF_RECORD_CONFIG;
+	if (sample_type & PERF_SAMPLE_CONFIG) {
+		header.type |= PERF_SAMPLE_CONFIG;
 		header.size += sizeof(u64);
 	}
 
-	if (record_type & PERF_RECORD_CPU) {
-		header.type |= PERF_RECORD_CPU;
+	if (sample_type & PERF_SAMPLE_CPU) {
+		header.type |= PERF_SAMPLE_CPU;
 		header.size += sizeof(cpu_entry);
 
 		cpu_entry.cpu = raw_smp_processor_id();
 	}
 
-	if (record_type & PERF_RECORD_GROUP) {
-		header.type |= PERF_RECORD_GROUP;
+	if (sample_type & PERF_SAMPLE_GROUP) {
+		header.type |= PERF_SAMPLE_GROUP;
 		header.size += sizeof(u64) +
 			counter->nr_siblings * sizeof(group_entry);
 	}
 
-	if (record_type & PERF_RECORD_CALLCHAIN) {
+	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		callchain = perf_callchain(regs);
 
 		if (callchain) {
 			callchain_size = (1 + callchain->nr) * sizeof(u64);
 
-			header.type |= PERF_RECORD_CALLCHAIN;
+			header.type |= PERF_SAMPLE_CALLCHAIN;
 			header.size += callchain_size;
 		}
 	}
@@ -2386,28 +2386,28 @@ static void perf_counter_output(struct perf_counter *counter,
 
 	perf_output_put(&handle, header);
 
-	if (record_type & PERF_RECORD_IP)
+	if (sample_type & PERF_SAMPLE_IP)
 		perf_output_put(&handle, ip);
 
-	if (record_type & PERF_RECORD_TID)
+	if (sample_type & PERF_SAMPLE_TID)
 		perf_output_put(&handle, tid_entry);
 
-	if (record_type & PERF_RECORD_TIME)
+	if (sample_type & PERF_SAMPLE_TIME)
 		perf_output_put(&handle, time);
 
-	if (record_type & PERF_RECORD_ADDR)
+	if (sample_type & PERF_SAMPLE_ADDR)
 		perf_output_put(&handle, addr);
 
-	if (record_type & PERF_RECORD_CONFIG)
+	if (sample_type & PERF_SAMPLE_CONFIG)
 		perf_output_put(&handle, counter->hw_event.config);
 
-	if (record_type & PERF_RECORD_CPU)
+	if (sample_type & PERF_SAMPLE_CPU)
 		perf_output_put(&handle, cpu_entry);
 
 	/*
-	 * XXX PERF_RECORD_GROUP vs inherited counters seems difficult.
+	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
 	 */
-	if (record_type & PERF_RECORD_GROUP) {
+	if (sample_type & PERF_SAMPLE_GROUP) {
 		struct perf_counter *leader, *sub;
 		u64 nr = counter->nr_siblings;
 
@@ -2702,7 +2702,7 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
 }
 
 /*
- * Log irq_period changes so that analyzing tools can re-normalize the
+ * Log sample_period changes so that analyzing tools can re-normalize the
  * event flow.
  */
 
@@ -2725,7 +2725,7 @@ static void perf_log_period(struct perf_counter *counter, u64 period)
 		.period = period,
 	};
 
-	if (counter->hw.irq_period == period)
+	if (counter->hw.sample_period == period)
 		return;
 
 	ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
@@ -2834,7 +2834,7 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
 	s64 left = atomic64_read(&hwc->period_left);
-	s64 period = hwc->irq_period;
+	s64 period = hwc->sample_period;
 
 	if (unlikely(left <= -period)) {
 		left = period;
@@ -2874,7 +2874,7 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 		ret = HRTIMER_NORESTART;
 	}
 
-	period = max_t(u64, 10000, counter->hw.irq_period);
+	period = max_t(u64, 10000, counter->hw.sample_period);
 	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
 
 	return ret;
@@ -2959,7 +2959,7 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
 {
 	int neg = atomic64_add_negative(nr, &counter->hw.count);
 
-	if (counter->hw.irq_period && !neg && regs)
+	if (counter->hw.sample_period && !neg && regs)
 		perf_swcounter_overflow(counter, nmi, regs, addr);
 }
 
@@ -3080,8 +3080,8 @@ static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
 	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
 	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hwc->hrtimer.function = perf_swcounter_hrtimer;
-	if (hwc->irq_period) {
-		u64 period = max_t(u64, 10000, hwc->irq_period);
+	if (hwc->sample_period) {
+		u64 period = max_t(u64, 10000, hwc->sample_period);
 		__hrtimer_start_range_ns(&hwc->hrtimer,
 				ns_to_ktime(period), 0,
 				HRTIMER_MODE_REL, 0);
@@ -3092,7 +3092,7 @@ static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
 
 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
 {
-	if (counter->hw.irq_period)
+	if (counter->hw.sample_period)
 		hrtimer_cancel(&counter->hw.hrtimer);
 	cpu_clock_perf_counter_update(counter);
 }
@@ -3132,8 +3132,8 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter)
 	atomic64_set(&hwc->prev_count, now);
 	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hwc->hrtimer.function = perf_swcounter_hrtimer;
-	if (hwc->irq_period) {
-		u64 period = max_t(u64, 10000, hwc->irq_period);
+	if (hwc->sample_period) {
+		u64 period = max_t(u64, 10000, hwc->sample_period);
 		__hrtimer_start_range_ns(&hwc->hrtimer,
 				ns_to_ktime(period), 0,
 				HRTIMER_MODE_REL, 0);
@@ -3144,7 +3144,7 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter)
 
 static void task_clock_perf_counter_disable(struct perf_counter *counter)
 {
-	if (counter->hw.irq_period)
+	if (counter->hw.sample_period)
 		hrtimer_cancel(&counter->hw.hrtimer);
 	task_clock_perf_counter_update(counter, counter->ctx->time);
 
@@ -3223,7 +3223,7 @@ static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 		return NULL;
 
 	counter->destroy = tp_perf_counter_destroy;
-	counter->hw.irq_period = counter->hw_event.irq_period;
+	counter->hw.sample_period = counter->hw_event.sample_period;
 
 	return &perf_ops_generic;
 }
@@ -3323,15 +3323,15 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 		pmu = NULL;
 
 	hwc = &counter->hw;
-	if (hw_event->freq && hw_event->irq_freq)
-		hwc->irq_period = div64_u64(TICK_NSEC, hw_event->irq_freq);
+	if (hw_event->freq && hw_event->sample_freq)
+		hwc->sample_period = div64_u64(TICK_NSEC, hw_event->sample_freq);
 	else
-		hwc->irq_period = hw_event->irq_period;
+		hwc->sample_period = hw_event->sample_period;
 
 	/*
-	 * we currently do not support PERF_RECORD_GROUP on inherited counters
+	 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
 	 */
-	if (hw_event->inherit && (hw_event->record_type & PERF_RECORD_GROUP))
+	if (hw_event->inherit && (hw_event->sample_type & PERF_SAMPLE_GROUP))
 		goto done;
 
 	if (perf_event_raw(hw_event)) {
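
The arithmetic being renamed in the perf_adjust_freq() hunk at the top is the part worth pausing on: it converts the interrupts seen during the last tick into an estimated event rate, derives the period that would hit hw_event.sample_freq, and then nudges hw.sample_period halfway toward it. Below is a minimal user-space sketch of that step, not kernel code; the standalone function, its name and the hard-coded HZ are assumptions made purely for illustration.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch of the adjustment step in perf_adjust_freq():
 * estimate the event rate implied by the overflow interrupts seen over
 * the last tick window, then move sample_period halfway toward the value
 * that would yield the requested sample_freq.
 */
#define HZ 1000	/* assumed tick rate for this sketch */

uint64_t adjust_sample_period(uint64_t interrupts,
			      uint64_t sample_period,
			      uint64_t sample_freq)
{
	/* events per second generated at the current period */
	uint64_t events = HZ * interrupts * sample_period;
	/* period that would have produced sample_freq samples per second */
	uint64_t period = events / sample_freq;	/* div64_u64() in the kernel */
	/* step halfway toward the target, mirroring the kernel's delta >>= 1 */
	int64_t delta = (int64_t)(1 + period - sample_period);

	delta >>= 1;
	sample_period += delta;

	if (!sample_period)	/* a zero period would stop sampling entirely */
		sample_period = 1;

	return sample_period;
}

int main(void)
{
	/* 4 overflow interrupts in the last tick at period 10000,
	 * target 1000 samples/sec: the new period lands at 25000. */
	printf("new period = %llu\n",
	       (unsigned long long)adjust_sample_period(4, 10000, 1000));
	return 0;
}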