@@ -31,6 +31,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
 #include <linux/ftrace_event.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/irq_regs.h>
 
@@ -2234,11 +2235,6 @@ int perf_event_release_kernel(struct perf_event *event)
 	raw_spin_unlock_irq(&ctx->lock);
 	mutex_unlock(&ctx->mutex);
 
-	mutex_lock(&event->owner->perf_event_mutex);
-	list_del_init(&event->owner_entry);
-	mutex_unlock(&event->owner->perf_event_mutex);
-	put_task_struct(event->owner);
-
 	free_event(event);
 
 	return 0;
@@ -2251,9 +2247,43 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 static int perf_release(struct inode *inode, struct file *file)
 {
 	struct perf_event *event = file->private_data;
+	struct task_struct *owner;
 
 	file->private_data = NULL;
 
+	rcu_read_lock();
+	owner = ACCESS_ONCE(event->owner);
+	/*
+	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
+	 * !owner it means the list deletion is complete and we can indeed
+	 * free this event, otherwise we need to serialize on
+	 * owner->perf_event_mutex.
+	 */
+	smp_read_barrier_depends();
+	if (owner) {
+		/*
+		 * Since delayed_put_task_struct() also drops the last
+		 * task reference we can safely take a new reference
+		 * while holding the rcu_read_lock().
+		 */
+		get_task_struct(owner);
+	}
+	rcu_read_unlock();
+
+	if (owner) {
+		mutex_lock(&owner->perf_event_mutex);
+		/*
+		 * We have to re-check the event->owner field, if it is cleared
+		 * we raced with perf_event_exit_task(), acquiring the mutex
+		 * ensured they're done, and we can proceed with freeing the
+		 * event.
+		 */
+		if (event->owner)
+			list_del_init(&event->owner_entry);
+		mutex_unlock(&owner->perf_event_mutex);
+		put_task_struct(owner);
+	}
+
 	return perf_event_release_kernel(event);
 }
 
@@ -5677,7 +5707,7 @@ SYSCALL_DEFINE5(perf_event_open,
 	mutex_unlock(&ctx->mutex);
 
 	event->owner = current;
-	get_task_struct(current);
+
 	mutex_lock(&current->perf_event_mutex);
 	list_add_tail(&event->owner_entry, &current->perf_event_list);
 	mutex_unlock(&current->perf_event_mutex);
@@ -5745,12 +5775,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 	++ctx->generation;
 	mutex_unlock(&ctx->mutex);
 
-	event->owner = current;
-	get_task_struct(current);
-	mutex_lock(&current->perf_event_mutex);
-	list_add_tail(&event->owner_entry, &current->perf_event_list);
-	mutex_unlock(&current->perf_event_mutex);
-
 	return event;
 
 err_free:
@@ -5901,8 +5925,24 @@ again:
  */
 void perf_event_exit_task(struct task_struct *child)
 {
+	struct perf_event *event, *tmp;
 	int ctxn;
 
+	mutex_lock(&child->perf_event_mutex);
+	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
+				 owner_entry) {
+		list_del_init(&event->owner_entry);
+
+		/*
+		 * Ensure the list deletion is visible before we clear
+		 * the owner, closes a race against perf_release() where
+		 * we need to serialize on the owner->perf_event_mutex.
+		 */
+		smp_wmb();
+		event->owner = NULL;
+	}
+	mutex_unlock(&child->perf_event_mutex);
+
 	for_each_task_context_nr(ctxn)
 		perf_event_exit_task_context(child, ctxn);
 }
@@ -6321,6 +6361,8 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 
 void __init perf_event_init(void)
 {
+	int ret;
+
 	perf_event_init_all_cpus();
 	init_srcu_struct(&pmus_srcu);
 	perf_pmu_register(&perf_swevent);
@@ -6328,4 +6370,7 @@ void __init perf_event_init(void)
 	perf_pmu_register(&perf_task_clock);
 	perf_tp_register();
 	perf_cpu_notifier(perf_cpu_notify);
+
+	ret = init_hw_breakpoint();
+	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
 }
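
The core of the change is the hand-off between perf_release() and perf_event_exit_task(): the exit path unlinks the event from the owner list and only then clears event->owner behind smp_wmb(), while the release path snapshots the owner under rcu_read_lock(), pins it with get_task_struct(), and re-checks event->owner under owner->perf_event_mutex before unlinking. Below is a minimal standalone userspace sketch of that ordering, not kernel code: it substitutes C11 release/acquire atomics for smp_wmb()/smp_read_barrier_depends(), a plain flag for the owner_entry list, and drops the RCU and task-refcount lifetime handling; names such as struct owner and event_on_list are made up for illustration.

/* owner_handoff.c - illustrative sketch only; see the caveats above. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct owner {
	pthread_mutex_t perf_event_mutex;	/* stands in for task->perf_event_mutex */
	bool event_on_list;			/* stands in for event->owner_entry */
};

struct event {
	_Atomic(struct owner *) owner;		/* stands in for event->owner */
};

/* Exit path: unlink first, then publish "no owner" (the smp_wmb() side). */
static void owner_exit(struct owner *o, struct event *e)
{
	pthread_mutex_lock(&o->perf_event_mutex);
	o->event_on_list = false;				/* list_del_init() */
	atomic_store_explicit(&e->owner, NULL, memory_order_release);
	pthread_mutex_unlock(&o->perf_event_mutex);
}

/* Release path: snapshot the owner, then serialize on its mutex and re-check. */
static void event_release(struct event *e)
{
	struct owner *o = atomic_load_explicit(&e->owner, memory_order_acquire);

	if (o) {
		pthread_mutex_lock(&o->perf_event_mutex);
		/* If owner_exit() won the race, owner is NULL and the unlink is done. */
		if (atomic_load_explicit(&e->owner, memory_order_relaxed))
			o->event_on_list = false;		/* list_del_init() */
		pthread_mutex_unlock(&o->perf_event_mutex);
	}
}

int main(void)
{
	struct owner o = {
		.perf_event_mutex = PTHREAD_MUTEX_INITIALIZER,
		.event_on_list = true,
	};
	struct event e;

	atomic_init(&e.owner, &o);
	event_release(&e);	/* fd close path: unlinks under the mutex */
	owner_exit(&o, &e);	/* task exit path: idempotent after the unlink */
	printf("still on owner list: %d\n", o.event_on_list);
	return 0;
}

Compile with e.g. cc -std=c11 -pthread owner_handoff.c; main() exercises both paths sequentially only to show the re-check is harmless, whereas in the kernel the two paths race from different tasks.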