@@ -2228,14 +2228,11 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
 	unsigned long flags;
 	int ctxn, err;
 
-	if (!task && cpu != -1) {
+	if (!task) {
 		/* Must be root to operate on a CPU event: */
 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 			return ERR_PTR(-EACCES);
 
-		if (cpu < 0 || cpu >= nr_cpumask_bits)
-			return ERR_PTR(-EINVAL);
-
 		/*
 		 * We could be clever and allow to attach a event to an
 		 * offline CPU and activate it when the CPU comes up, but
@@ -5541,6 +5538,11 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	struct hw_perf_event *hwc;
 	long err;
 
+	if ((unsigned)cpu >= nr_cpu_ids) {
+		if (!task || cpu != -1)
+			return ERR_PTR(-EINVAL);
+	}
+
 	event = kzalloc(sizeof(*event), GFP_KERNEL);
 	if (!event)
 		return ERR_PTR(-ENOMEM);
@@ -5589,7 +5591,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 	if (!overflow_handler && parent_event)
 		overflow_handler = parent_event->overflow_handler;
-
+
 	event->overflow_handler = overflow_handler;
 
 	if (attr->disabled)
@@ -6494,7 +6496,6 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 
 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
 	parent_ctx->rotate_disable = 0;
-	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
 
 	child_ctx = child->perf_event_ctxp[ctxn];
 
@@ -6502,12 +6503,11 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 	/*
 	 * Mark the child context as a clone of the parent
 	 * context, or of whatever the parent is a clone of.
-	 * Note that if the parent is a clone, it could get
-	 * uncloned at any point, but that doesn't matter
-	 * because the list of events and the generation
-	 * count can't have changed since we took the mutex.
+	 *
+	 * Note that if the parent is a clone, the holding of
+	 * parent_ctx->lock avoids it from being uncloned.
 	 */
-	cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
+	cloned_ctx = parent_ctx->parent_ctx;
 	if (cloned_ctx) {
 		child_ctx->parent_ctx = cloned_ctx;
 		child_ctx->parent_gen = parent_ctx->parent_gen;
@@ -6518,6 +6518,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 		get_ctx(child_ctx->parent_ctx);
 	}
 
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
 	mutex_unlock(&parent_ctx->mutex);
 
 	perf_unpin_context(parent_ctx);
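
Aside (not part of the patch): the check added to perf_event_alloc() uses the common C idiom of casting a possibly-negative int to unsigned so that a single comparison rejects both negative values and values at or above nr_cpu_ids, while cpu == -1 is still accepted when a task context is supplied. Below is a minimal stand-alone sketch of that pattern; nr_cpu_ids is stubbed and cpu_arg_valid() is a hypothetical helper for illustration, not kernel code.

	#include <stdbool.h>
	#include <stdio.h>

	static const unsigned int nr_cpu_ids = 8;	/* stand-in for the kernel variable */

	/* Hypothetical helper mirroring the check added to perf_event_alloc(). */
	static bool cpu_arg_valid(int cpu, bool have_task)
	{
		if ((unsigned int)cpu >= nr_cpu_ids) {
			/* Out of range; only the "any CPU" sentinel with a task passes. */
			if (!have_task || cpu != -1)
				return false;
		}
		return true;
	}

	int main(void)
	{
		printf("%d %d %d %d\n",
		       cpu_arg_valid(-1, true),		/* 1: per-task event, any CPU */
		       cpu_arg_valid(-1, false),	/* 0: CPU event needs a real CPU */
		       cpu_arg_valid(3, false),		/* 1: in range */
		       cpu_arg_valid(99, true));	/* 0: past nr_cpu_ids */
		return 0;
	}

The cast works because -1 converts to UINT_MAX, which can never be below nr_cpu_ids, so the inner test is the only place the "any CPU" sentinel has to be treated specially.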