@@ -1,5 +1,5 @@
 /*
- * Performance counters:
+ * Performance events:
  *
  * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
  * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
@@ -11,8 +11,8 @@
  *
  * For licencing details see kernel-base/COPYING
  */
-#ifndef _LINUX_PERF_COUNTER_H
-#define _LINUX_PERF_COUNTER_H
+#ifndef _LINUX_PERF_EVENT_H
+#define _LINUX_PERF_EVENT_H

 #include <linux/types.h>
 #include <linux/ioctl.h>
@@ -36,8 +36,8 @@ enum perf_type_id {
 };

 /*
- * Generalized performance counter event types, used by the
- * attr.event_id parameter of the sys_perf_counter_open()
+ * Generalized performance event event_id types, used by the
+ * attr.event_id parameter of the sys_perf_event_open()
  * syscall:
  */
 enum perf_hw_id {
@@ -56,7 +56,7 @@ enum perf_hw_id {
 };

 /*
- * Generalized hardware cache counters:
+ * Generalized hardware cache events:
  *
  * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
  * { read, write, prefetch } x
@@ -89,8 +89,8 @@ enum perf_hw_cache_op_result_id {
 };

 /*
- * Special "software" counters provided by the kernel, even if the hardware
- * does not support performance counters. These counters measure various
+ * Special "software" events provided by the kernel, even if the hardware
+ * does not support performance events. These events measure various
  * physical and sw events of the kernel (and allow the profiling of them as
  * well):
  */
@@ -110,7 +110,7 @@ enum perf_sw_ids {
  * Bits that can be set in attr.sample_type to request information
  * in the overflow packets.
  */
-enum perf_counter_sample_format {
+enum perf_event_sample_format {
 	PERF_SAMPLE_IP = 1U << 0,
 	PERF_SAMPLE_TID = 1U << 1,
 	PERF_SAMPLE_TIME = 1U << 2,
@@ -127,7 +127,7 @@ enum perf_counter_sample_format {
 };

 /*
- * The format of the data returned by read() on a perf counter fd,
+ * The format of the data returned by read() on a perf event fd,
  * as specified by attr.read_format:
  *
  * struct read_format {
@@ -146,7 +146,7 @@ enum perf_counter_sample_format {
  * } && PERF_FORMAT_GROUP
  * };
  */
-enum perf_counter_read_format {
+enum perf_event_read_format {
 	PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
 	PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
 	PERF_FORMAT_ID = 1U << 2,
@@ -158,9 +158,9 @@ enum perf_counter_read_format {
 #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */

 /*
- * Hardware event to monitor via a performance monitoring counter:
+ * Hardware event_id to monitor via a performance monitoring event:
  */
-struct perf_counter_attr {
+struct perf_event_attr {

 	/*
 	 * Major type: hardware/software/tracepoint/etc.
@@ -213,28 +213,28 @@ struct perf_counter_attr {
 };

 /*
- * Ioctls that can be done on a perf counter fd:
+ * Ioctls that can be done on a perf event fd:
  */
-#define PERF_COUNTER_IOC_ENABLE _IO ('$', 0)
-#define PERF_COUNTER_IOC_DISABLE _IO ('$', 1)
-#define PERF_COUNTER_IOC_REFRESH _IO ('$', 2)
-#define PERF_COUNTER_IOC_RESET _IO ('$', 3)
-#define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64)
-#define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5)
-
-enum perf_counter_ioc_flags {
+#define PERF_EVENT_IOC_ENABLE _IO ('$', 0)
+#define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
+#define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
+#define PERF_EVENT_IOC_RESET _IO ('$', 3)
+#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64)
+#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
+
+enum perf_event_ioc_flags {
 	PERF_IOC_FLAG_GROUP = 1U << 0,
 };

 /*
  * Structure of the page that can be mapped via mmap
  */
-struct perf_counter_mmap_page {
+struct perf_event_mmap_page {
 	__u32 version; /* version number of this structure */
 	__u32 compat_version; /* lowest version this is compat with */

 	/*
-	 * Bits needed to read the hw counters in user-space.
+	 * Bits needed to read the hw events in user-space.
 	 *
 	 * u32 seq;
 	 * s64 count;
@@ -256,10 +256,10 @@ struct perf_counter_mmap_page {
 	 * processes.
 	 */
 	__u32 lock; /* seqlock for synchronization */
-	__u32 index; /* hardware counter identifier */
-	__s64 offset; /* add to hardware counter value */
-	__u64 time_enabled; /* time counter active */
-	__u64 time_running; /* time counter on cpu */
+	__u32 index; /* hardware event identifier */
+	__s64 offset; /* add to hardware event value */
+	__u64 time_enabled; /* time event active */
+	__u64 time_running; /* time event on cpu */

 	/*
 	 * Hole for extension of the self monitor capabilities
@@ -272,7 +272,7 @@ struct perf_counter_mmap_page {
 	 *
 	 * User-space reading the @data_head value should issue an rmb(), on
 	 * SMP capable platforms, after reading this value -- see
-	 * perf_counter_wakeup().
+	 * perf_event_wakeup().
 	 *
 	 * When the mapping is PROT_WRITE the @data_tail value should be
 	 * written by userspace to reflect the last read data. In this case
@@ -282,11 +282,11 @@ struct perf_counter_mmap_page {
 	__u64 data_tail; /* user-space written tail */
 };

-#define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0)
-#define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0)
-#define PERF_EVENT_MISC_KERNEL (1 << 0)
-#define PERF_EVENT_MISC_USER (2 << 0)
-#define PERF_EVENT_MISC_HYPERVISOR (3 << 0)
+#define PERF_RECORD_MISC_CPUMODE_MASK (3 << 0)
+#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
+#define PERF_RECORD_MISC_KERNEL (1 << 0)
+#define PERF_RECORD_MISC_USER (2 << 0)
+#define PERF_RECORD_MISC_HYPERVISOR (3 << 0)

 struct perf_event_header {
 	__u32 type;
@@ -310,7 +310,7 @@ enum perf_event_type {
 	 * char filename[];
 	 * };
 	 */
-	PERF_EVENT_MMAP = 1,
+	PERF_RECORD_MMAP = 1,

 	/*
 	 * struct {
@@ -319,7 +319,7 @@ enum perf_event_type {
 	 * u64 lost;
 	 * };
 	 */
-	PERF_EVENT_LOST = 2,
+	PERF_RECORD_LOST = 2,

 	/*
 	 * struct {
@@ -329,7 +329,7 @@ enum perf_event_type {
 	 * char comm[];
 	 * };
 	 */
-	PERF_EVENT_COMM = 3,
+	PERF_RECORD_COMM = 3,

 	/*
 	 * struct {
@@ -339,7 +339,7 @@ enum perf_event_type {
 	 * u64 time;
 	 * };
 	 */
-	PERF_EVENT_EXIT = 4,
+	PERF_RECORD_EXIT = 4,

 	/*
 	 * struct {
@@ -349,8 +349,8 @@ enum perf_event_type {
 	 * u64 stream_id;
 	 * };
 	 */
-	PERF_EVENT_THROTTLE = 5,
-	PERF_EVENT_UNTHROTTLE = 6,
+	PERF_RECORD_THROTTLE = 5,
+	PERF_RECORD_UNTHROTTLE = 6,

 	/*
 	 * struct {
@@ -360,7 +360,7 @@ enum perf_event_type {
 	 * { u64 time; } && PERF_SAMPLE_TIME
 	 * };
 	 */
-	PERF_EVENT_FORK = 7,
+	PERF_RECORD_FORK = 7,

 	/*
 	 * struct {
@@ -370,7 +370,7 @@ enum perf_event_type {
 	 * struct read_format values;
 	 * };
 	 */
-	PERF_EVENT_READ = 8,
+	PERF_RECORD_READ = 8,

 	/*
 	 * struct {
@@ -395,7 +395,7 @@ enum perf_event_type {
 	 * #
 	 * # That is, the ABI doesn't make any promises wrt to
 	 * # the stability of its content, it may vary depending
-	 * # on event, hardware, kernel version and phase of
+	 * # on event_id, hardware, kernel version and phase of
 	 * # the moon.
 	 * #
 	 * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
@@ -405,9 +405,9 @@ enum perf_event_type {
 	 * char data[size];}&& PERF_SAMPLE_RAW
 	 * };
 	 */
-	PERF_EVENT_SAMPLE = 9,
+	PERF_RECORD_SAMPLE = 9,

-	PERF_EVENT_MAX, /* non-ABI */
+	PERF_RECORD_MAX, /* non-ABI */
 };

 enum perf_callchain_context {
@@ -430,8 +430,8 @@ enum perf_callchain_context {
  * Kernel-internal data types and definitions:
  */

-#ifdef CONFIG_PERF_COUNTERS
-# include <asm/perf_counter.h>
+#ifdef CONFIG_PERF_EVENTS
+# include <asm/perf_event.h>
 #endif

 #include <linux/list.h>
@@ -459,15 +459,15 @@ struct perf_raw_record {
 struct task_struct;

 /**
- * struct hw_perf_counter - performance counter hardware details:
+ * struct hw_perf_event - performance event hardware details:
  */
-struct hw_perf_counter {
-#ifdef CONFIG_PERF_COUNTERS
+struct hw_perf_event {
+#ifdef CONFIG_PERF_EVENTS
 	union {
 		struct { /* hardware */
 			u64 config;
 			unsigned long config_base;
-			unsigned long counter_base;
+			unsigned long event_base;
 			int idx;
 		};
 		union { /* software */
@@ -487,26 +487,26 @@ struct hw_perf_counter {
 #endif
 };

-struct perf_counter;
+struct perf_event;

 /**
  * struct pmu - generic performance monitoring unit
  */
 struct pmu {
-	int (*enable) (struct perf_counter *counter);
-	void (*disable) (struct perf_counter *counter);
-	void (*read) (struct perf_counter *counter);
-	void (*unthrottle) (struct perf_counter *counter);
+	int (*enable) (struct perf_event *event);
+	void (*disable) (struct perf_event *event);
+	void (*read) (struct perf_event *event);
+	void (*unthrottle) (struct perf_event *event);
 };

 /**
- * enum perf_counter_active_state - the states of a counter
+ * enum perf_event_active_state - the states of a event
  */
-enum perf_counter_active_state {
-	PERF_COUNTER_STATE_ERROR = -2,
-	PERF_COUNTER_STATE_OFF = -1,
-	PERF_COUNTER_STATE_INACTIVE = 0,
-	PERF_COUNTER_STATE_ACTIVE = 1,
+enum perf_event_active_state {
+	PERF_EVENT_STATE_ERROR = -2,
+	PERF_EVENT_STATE_OFF = -1,
+	PERF_EVENT_STATE_INACTIVE = 0,
+	PERF_EVENT_STATE_ACTIVE = 1,
 };

 struct file;
@@ -518,7 +518,7 @@ struct perf_mmap_data {
 	int nr_locked; /* nr pages mlocked */

 	atomic_t poll; /* POLL_ for wakeups */
-	atomic_t events; /* event limit */
+	atomic_t events; /* event_id limit */

 	atomic_long_t head; /* write position */
 	atomic_long_t done_head; /* completed head */
@@ -529,7 +529,7 @@ struct perf_mmap_data {

 	long watermark; /* wakeup watermark */

-	struct perf_counter_mmap_page *user_page;
+	struct perf_event_mmap_page *user_page;
 	void *data_pages[0];
 };

@@ -539,56 +539,56 @@ struct perf_pending_entry {
 };

 /**
- * struct perf_counter - performance counter kernel representation:
+ * struct perf_event - performance event kernel representation:
  */
-struct perf_counter {
-#ifdef CONFIG_PERF_COUNTERS
+struct perf_event {
+#ifdef CONFIG_PERF_EVENTS
 	struct list_head group_entry;
 	struct list_head event_entry;
 	struct list_head sibling_list;
 	int nr_siblings;
-	struct perf_counter *group_leader;
-	struct perf_counter *output;
+	struct perf_event *group_leader;
+	struct perf_event *output;
 	const struct pmu *pmu;

-	enum perf_counter_active_state state;
+	enum perf_event_active_state state;
 	atomic64_t count;

 	/*
-	 * These are the total time in nanoseconds that the counter
+	 * These are the total time in nanoseconds that the event
 	 * has been enabled (i.e. eligible to run, and the task has
-	 * been scheduled in, if this is a per-task counter)
+	 * been scheduled in, if this is a per-task event)
 	 * and running (scheduled onto the CPU), respectively.
 	 *
 	 * They are computed from tstamp_enabled, tstamp_running and
-	 * tstamp_stopped when the counter is in INACTIVE or ACTIVE state.
+	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
 	 */
 	u64 total_time_enabled;
 	u64 total_time_running;

 	/*
 	 * These are timestamps used for computing total_time_enabled
-	 * and total_time_running when the counter is in INACTIVE or
+	 * and total_time_running when the event is in INACTIVE or
 	 * ACTIVE state, measured in nanoseconds from an arbitrary point
 	 * in time.
-	 * tstamp_enabled: the notional time when the counter was enabled
-	 * tstamp_running: the notional time when the counter was scheduled on
+	 * tstamp_enabled: the notional time when the event was enabled
+	 * tstamp_running: the notional time when the event was scheduled on
 	 * tstamp_stopped: in INACTIVE state, the notional time when the
-	 * counter was scheduled off.
+	 * event was scheduled off.
 	 */
 	u64 tstamp_enabled;
 	u64 tstamp_running;
 	u64 tstamp_stopped;

-	struct perf_counter_attr attr;
-	struct hw_perf_counter hw;
+	struct perf_event_attr attr;
+	struct hw_perf_event hw;

-	struct perf_counter_context *ctx;
+	struct perf_event_context *ctx;
 	struct file *filp;

 	/*
 	 * These accumulate total time (in nanoseconds) that children
-	 * counters have been enabled and running, respectively.
+	 * events have been enabled and running, respectively.
 	 */
 	atomic64_t child_total_time_enabled;
 	atomic64_t child_total_time_running;
@@ -598,7 +598,7 @@ struct perf_counter {
 	 */
 	struct mutex child_mutex;
 	struct list_head child_list;
-	struct perf_counter *parent;
+	struct perf_event *parent;

 	int oncpu;
 	int cpu;
@@ -623,7 +623,7 @@ struct perf_counter {

 	atomic_t event_limit;

-	void (*destroy)(struct perf_counter *);
+	void (*destroy)(struct perf_event *);
 	struct rcu_head rcu_head;

 	struct pid_namespace *ns;
@@ -632,18 +632,18 @@ struct perf_counter {
 };

 /**
- * struct perf_counter_context - counter context structure
+ * struct perf_event_context - event context structure
  *
- * Used as a container for task counters and CPU counters as well:
+ * Used as a container for task events and CPU events as well:
  */
-struct perf_counter_context {
+struct perf_event_context {
 	/*
-	 * Protect the states of the counters in the list,
+	 * Protect the states of the events in the list,
 	 * nr_active, and the list:
 	 */
 	spinlock_t lock;
 	/*
-	 * Protect the list of counters. Locking either mutex or lock
+	 * Protect the list of events. Locking either mutex or lock
 	 * is sufficient to ensure the list doesn't change; to change
 	 * the list you need to lock both the mutex and the spinlock.
 	 */
@@ -651,7 +651,7 @@ struct perf_counter_context {

 	struct list_head group_list;
 	struct list_head event_list;
-	int nr_counters;
+	int nr_events;
 	int nr_active;
 	int is_active;
 	int nr_stat;
@@ -668,7 +668,7 @@ struct perf_counter_context {
 	 * These fields let us detect when two contexts have both
 	 * been cloned (inherited) from a common ancestor.
 	 */
-	struct perf_counter_context *parent_ctx;
+	struct perf_event_context *parent_ctx;
 	u64 parent_gen;
 	u64 generation;
 	int pin_count;
@@ -676,11 +676,11 @@ struct perf_counter_context {
 };

 /**
- * struct perf_counter_cpu_context - per cpu counter context structure
+ * struct perf_event_cpu_context - per cpu event context structure
  */
 struct perf_cpu_context {
-	struct perf_counter_context ctx;
-	struct perf_counter_context *task_ctx;
+	struct perf_event_context ctx;
+	struct perf_event_context *task_ctx;
 	int active_oncpu;
 	int max_pertask;
 	int exclusive;
@@ -694,7 +694,7 @@ struct perf_cpu_context {
 };

 struct perf_output_handle {
-	struct perf_counter *counter;
+	struct perf_event *event;
 	struct perf_mmap_data *data;
 	unsigned long head;
 	unsigned long offset;
@@ -704,35 +704,35 @@ struct perf_output_handle {
 	unsigned long flags;
 };

-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS

 /*
  * Set by architecture code:
  */
-extern int perf_max_counters;
+extern int perf_max_events;

-extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);
+extern const struct pmu *hw_perf_event_init(struct perf_event *event);

-extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
-extern void perf_counter_task_sched_out(struct task_struct *task,
+extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
+extern void perf_event_task_sched_out(struct task_struct *task,
 		struct task_struct *next, int cpu);
-extern void perf_counter_task_tick(struct task_struct *task, int cpu);
-extern int perf_counter_init_task(struct task_struct *child);
-extern void perf_counter_exit_task(struct task_struct *child);
-extern void perf_counter_free_task(struct task_struct *task);
-extern void set_perf_counter_pending(void);
-extern void perf_counter_do_pending(void);
-extern void perf_counter_print_debug(void);
+extern void perf_event_task_tick(struct task_struct *task, int cpu);
+extern int perf_event_init_task(struct task_struct *child);
+extern void perf_event_exit_task(struct task_struct *child);
+extern void perf_event_free_task(struct task_struct *task);
+extern void set_perf_event_pending(void);
+extern void perf_event_do_pending(void);
+extern void perf_event_print_debug(void);
 extern void __perf_disable(void);
 extern bool __perf_enable(void);
 extern void perf_disable(void);
 extern void perf_enable(void);
-extern int perf_counter_task_disable(void);
-extern int perf_counter_task_enable(void);
-extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
+extern int perf_event_task_disable(void);
+extern int perf_event_task_enable(void);
+extern int hw_perf_group_sched_in(struct perf_event *group_leader,
 		struct perf_cpu_context *cpuctx,
-		struct perf_counter_context *ctx, int cpu);
-extern void perf_counter_update_userpage(struct perf_counter *counter);
+		struct perf_event_context *ctx, int cpu);
+extern void perf_event_update_userpage(struct perf_event *event);

 struct perf_sample_data {
 	u64 type;
@@ -758,96 +758,96 @@ struct perf_sample_data {
 extern void perf_output_sample(struct perf_output_handle *handle,
 		struct perf_event_header *header,
 		struct perf_sample_data *data,
-		struct perf_counter *counter);
+		struct perf_event *event);
 extern void perf_prepare_sample(struct perf_event_header *header,
 		struct perf_sample_data *data,
-		struct perf_counter *counter,
+		struct perf_event *event,
 		struct pt_regs *regs);

-extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
+extern int perf_event_overflow(struct perf_event *event, int nmi,
 		struct perf_sample_data *data,
 		struct pt_regs *regs);

 /*
- * Return 1 for a software counter, 0 for a hardware counter
+ * Return 1 for a software event, 0 for a hardware event
  */
-static inline int is_software_counter(struct perf_counter *counter)
+static inline int is_software_event(struct perf_event *event)
 {
-	return (counter->attr.type != PERF_TYPE_RAW) &&
-		(counter->attr.type != PERF_TYPE_HARDWARE) &&
-		(counter->attr.type != PERF_TYPE_HW_CACHE);
+	return (event->attr.type != PERF_TYPE_RAW) &&
+		(event->attr.type != PERF_TYPE_HARDWARE) &&
+		(event->attr.type != PERF_TYPE_HW_CACHE);
 }

-extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
+extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

-extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
+extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);

 static inline void
-perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 {
-	if (atomic_read(&perf_swcounter_enabled[event]))
-		__perf_swcounter_event(event, nr, nmi, regs, addr);
+	if (atomic_read(&perf_swevent_enabled[event_id]))
+		__perf_sw_event(event_id, nr, nmi, regs, addr);
 }

-extern void __perf_counter_mmap(struct vm_area_struct *vma);
+extern void __perf_event_mmap(struct vm_area_struct *vma);

-static inline void perf_counter_mmap(struct vm_area_struct *vma)
+static inline void perf_event_mmap(struct vm_area_struct *vma)
 {
 	if (vma->vm_flags & VM_EXEC)
-		__perf_counter_mmap(vma);
+		__perf_event_mmap(vma);
 }

-extern void perf_counter_comm(struct task_struct *tsk);
-extern void perf_counter_fork(struct task_struct *tsk);
+extern void perf_event_comm(struct task_struct *tsk);
+extern void perf_event_fork(struct task_struct *tsk);

 extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);

-extern int sysctl_perf_counter_paranoid;
-extern int sysctl_perf_counter_mlock;
-extern int sysctl_perf_counter_sample_rate;
+extern int sysctl_perf_event_paranoid;
+extern int sysctl_perf_event_mlock;
+extern int sysctl_perf_event_sample_rate;

-extern void perf_counter_init(void);
-extern void perf_tpcounter_event(int event_id, u64 addr, u64 count,
+extern void perf_event_init(void);
+extern void perf_tp_event(int event_id, u64 addr, u64 count,
 		void *record, int entry_size);

 #ifndef perf_misc_flags
-#define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \
-		PERF_EVENT_MISC_KERNEL)
+#define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \
+		PERF_RECORD_MISC_KERNEL)
 #define perf_instruction_pointer(regs) instruction_pointer(regs)
 #endif

 extern int perf_output_begin(struct perf_output_handle *handle,
-		struct perf_counter *counter, unsigned int size,
+		struct perf_event *event, unsigned int size,
 		int nmi, int sample);
 extern void perf_output_end(struct perf_output_handle *handle);
 extern void perf_output_copy(struct perf_output_handle *handle,
 		const void *buf, unsigned int len);
 #else
 static inline void
-perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
+perf_event_task_sched_in(struct task_struct *task, int cpu) { }
 static inline void
-perf_counter_task_sched_out(struct task_struct *task,
+perf_event_task_sched_out(struct task_struct *task,
 		struct task_struct *next, int cpu) { }
 static inline void
-perf_counter_task_tick(struct task_struct *task, int cpu) { }
-static inline int perf_counter_init_task(struct task_struct *child) { return 0; }
-static inline void perf_counter_exit_task(struct task_struct *child) { }
-static inline void perf_counter_free_task(struct task_struct *task) { }
-static inline void perf_counter_do_pending(void) { }
-static inline void perf_counter_print_debug(void) { }
+perf_event_task_tick(struct task_struct *task, int cpu) { }
+static inline int perf_event_init_task(struct task_struct *child) { return 0; }
+static inline void perf_event_exit_task(struct task_struct *child) { }
+static inline void perf_event_free_task(struct task_struct *task) { }
+static inline void perf_event_do_pending(void) { }
+static inline void perf_event_print_debug(void) { }
 static inline void perf_disable(void) { }
 static inline void perf_enable(void) { }
-static inline int perf_counter_task_disable(void) { return -EINVAL; }
-static inline int perf_counter_task_enable(void) { return -EINVAL; }
+static inline int perf_event_task_disable(void) { return -EINVAL; }
+static inline int perf_event_task_enable(void) { return -EINVAL; }

 static inline void
-perf_swcounter_event(u32 event, u64 nr, int nmi,
+perf_sw_event(u32 event_id, u64 nr, int nmi,
 		struct pt_regs *regs, u64 addr) { }

-static inline void perf_counter_mmap(struct vm_area_struct *vma) { }
-static inline void perf_counter_comm(struct task_struct *tsk) { }
-static inline void perf_counter_fork(struct task_struct *tsk) { }
-static inline void perf_counter_init(void) { }
+static inline void perf_event_mmap(struct vm_area_struct *vma) { }
+static inline void perf_event_comm(struct task_struct *tsk) { }
+static inline void perf_event_fork(struct task_struct *tsk) { }
+static inline void perf_event_init(void) { }

 #endif

@@ -855,4 +855,4 @@ static inline void perf_counter_init(void) { }
 	perf_output_copy((handle), &(x), sizeof(x))

 #endif /* __KERNEL__ */
-#endif /* _LINUX_PERF_COUNTER_H */
+#endif /* _LINUX_PERF_EVENT_H */
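
Not part of the patch itself: for illustration, a minimal user-space sketch of the renamed interface, assuming a kernel whose headers already ship the resulting <linux/perf_event.h>. glibc provides no wrapper for the syscall, so the sys_perf_event_open() helper below is local to this example; it counts user-space instructions for the calling task using the renamed struct perf_event_attr and PERF_EVENT_IOC_* ioctls.

/* usage-sketch.c -- hypothetical example, not part of the patch above */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

/* Local wrapper: only the raw syscall number is exported by the kernel. */
static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
				int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	volatile int i;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;		/* created stopped, enabled via ioctl below */
	attr.exclude_kernel = 1;	/* count user space only */
	attr.exclude_hv = 1;

	/* pid = 0, cpu = -1: measure the calling task on whatever CPU it runs on. */
	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	for (i = 0; i < 1000000; i++)
		;			/* the workload being measured */

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	/* With attr.read_format left at 0, read() returns a single u64 count. */
	if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
		printf("instructions: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}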
|