@@ -75,7 +75,7 @@ static DEFINE_SPINLOCK(perf_resource_lock);
 /*
  * Architecture provided APIs - weak aliases:
  */
-extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
+extern __weak struct pmu *hw_perf_event_init(struct perf_event *event)
 {
 	return NULL;
 }
@@ -691,7 +691,7 @@ group_sched_in(struct perf_event *group_event,
 	       struct perf_event_context *ctx)
 {
 	struct perf_event *event, *partial_group = NULL;
-	const struct pmu *pmu = group_event->pmu;
+	struct pmu *pmu = group_event->pmu;
 	bool txn = false;
 
 	if (group_event->state == PERF_EVENT_STATE_OFF)
@@ -4501,7 +4501,7 @@ static int perf_swevent_int(struct perf_event *event)
 	return 0;
 }
 
-static const struct pmu perf_ops_generic = {
+static struct pmu perf_ops_generic = {
 	.enable		= perf_swevent_enable,
 	.disable	= perf_swevent_disable,
 	.start		= perf_swevent_int,
@@ -4614,7 +4614,7 @@ static void cpu_clock_perf_event_read(struct perf_event *event)
 	cpu_clock_perf_event_update(event);
 }
 
-static const struct pmu perf_ops_cpu_clock = {
+static struct pmu perf_ops_cpu_clock = {
 	.enable		= cpu_clock_perf_event_enable,
 	.disable	= cpu_clock_perf_event_disable,
 	.read		= cpu_clock_perf_event_read,
@@ -4671,7 +4671,7 @@ static void task_clock_perf_event_read(struct perf_event *event)
 	task_clock_perf_event_update(event, time);
 }
 
-static const struct pmu perf_ops_task_clock = {
+static struct pmu perf_ops_task_clock = {
 	.enable		= task_clock_perf_event_enable,
 	.disable	= task_clock_perf_event_disable,
 	.read		= task_clock_perf_event_read,
@@ -4785,7 +4785,7 @@ static int swevent_hlist_get(struct perf_event *event)
 
 #ifdef CONFIG_EVENT_TRACING
 
-static const struct pmu perf_ops_tracepoint = {
+static struct pmu perf_ops_tracepoint = {
 	.enable		= perf_trace_enable,
 	.disable	= perf_trace_disable,
 	.start		= perf_swevent_int,
@@ -4849,7 +4849,7 @@ static void tp_perf_event_destroy(struct perf_event *event)
 	perf_trace_destroy(event);
 }
 
-static const struct pmu *tp_perf_event_init(struct perf_event *event)
+static struct pmu *tp_perf_event_init(struct perf_event *event)
 {
 	int err;
 
@@ -4896,7 +4896,7 @@ static void perf_event_free_filter(struct perf_event *event)
 
 #else
 
-static const struct pmu *tp_perf_event_init(struct perf_event *event)
+static struct pmu *tp_perf_event_init(struct perf_event *event)
 {
 	return NULL;
 }
@@ -4918,7 +4918,7 @@ static void bp_perf_event_destroy(struct perf_event *event)
 	release_bp_slot(event);
 }
 
-static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+static struct pmu *bp_perf_event_init(struct perf_event *bp)
 {
 	int err;
 
@@ -4942,7 +4942,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
 	perf_swevent_add(bp, 1, 1, &sample, regs);
 }
 #else
-static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+static struct pmu *bp_perf_event_init(struct perf_event *bp)
 {
 	return NULL;
 }
@@ -4964,9 +4964,9 @@ static void sw_perf_event_destroy(struct perf_event *event)
 	swevent_hlist_put(event);
 }
 
-static const struct pmu *sw_perf_event_init(struct perf_event *event)
+static struct pmu *sw_perf_event_init(struct perf_event *event)
 {
-	const struct pmu *pmu = NULL;
+	struct pmu *pmu = NULL;
 	u64 event_id = event->attr.config;
 
 	/*
@@ -5028,7 +5028,7 @@ perf_event_alloc(struct perf_event_attr *attr,
 		   perf_overflow_handler_t overflow_handler,
 		   gfp_t gfpflags)
 {
-	const struct pmu *pmu;
+	struct pmu *pmu;
 	struct perf_event *event;
 	struct hw_perf_event *hwc;
 	long err;
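
For readers wondering why the const has to go: once the core wants to keep mutable state in struct pmu itself, any write through a const-qualified pointer is rejected at compile time. Below is a minimal userspace sketch of that failure mode; it uses a simplified stand-in for struct pmu (the real definition lives in include/linux/perf_event.h), and the hw_in_use field and perf_ops_example name are hypothetical, purely for illustration.

#include <stdio.h>

/* Simplified stand-in for the kernel's struct pmu -- illustration only. */
struct pmu {
	void (*enable)(void);
	int hw_in_use;		/* hypothetical runtime state */
};

static void dummy_enable(void) { }

static struct pmu perf_ops_example = {
	.enable = dummy_enable,
};

int main(void)
{
	/*
	 * Had this been 'const struct pmu *pmu', the store below would
	 * fail to compile ("assignment of member 'hw_in_use' in
	 * read-only object").  Dropping the const is what allows the
	 * core to keep per-pmu state at runtime.
	 */
	struct pmu *pmu = &perf_ops_example;

	pmu->hw_in_use = 1;
	printf("hw_in_use = %d\n", pmu->hw_in_use);
	return 0;
}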