@@ -2242,8 +2242,6 @@ static void trace_recursive_unlock(void)
 
 #endif
 
-static DEFINE_PER_CPU(int, rb_need_resched);
-
 /**
  * ring_buffer_lock_reserve - reserve a part of the buffer
  * @buffer: the ring buffer to reserve from
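Note: rb_need_resched was a per-CPU slot used to carry the "was a reschedule already pending?" flag from ring_buffer_lock_reserve() to the matching commit/discard. It only made sense together with the ftrace_preempt_disable()/ftrace_preempt_enable() pair that this change retires; per the comment kept below, the point was not to recurse into the scheduler while tracing it. For reference, those helpers looked roughly like the sketch below (reconstructed from memory of kernel/trace/trace.h before this change; treat the exact bodies as approximate):

	static inline int ftrace_preempt_disable(void)
	{
		int resched;

		/* remember whether NEED_RESCHED was already set */
		resched = need_resched();
		preempt_disable_notrace();

		return resched;
	}

	static inline void ftrace_preempt_enable(int resched)
	{
		/* if a resched was already pending, skip the resched check
		 * on enable so we do not re-enter the scheduler from here */
		if (resched)
			preempt_enable_no_resched_notrace();
		else
			preempt_enable_notrace();
	}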
@@ -2264,13 +2262,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
-	int cpu, resched;
+	int cpu;
 
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return NULL;
 
 	/* If we are tracing schedule, we don't want to recurse */
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 
 	if (atomic_read(&buffer->record_disabled))
 		goto out_nocheck;
@@ -2295,21 +2293,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	if (!event)
 		goto out;
 
-	/*
-	 * Need to store resched state on this cpu.
-	 * Only the first needs to.
-	 */
-
-	if (preempt_count() == 1)
-		per_cpu(rb_need_resched, cpu) = resched;
-
 	return event;
 
  out:
 	trace_recursive_unlock();
 
  out_nocheck:
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
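Note: the block deleted above is the reserve-side half of the old bookkeeping. Because a reserve/commit pair can be interrupted and nested by another pair on the same CPU, only the outermost reserve (preempt_count() == 1) stored the resched flag, and only the matching outermost commit (next hunk) restored it; nested levels used preempt_enable_no_resched_notrace(). With the plain preempt_disable_notrace()/preempt_enable_notrace() pair, every nesting level simply increments and decrements the preempt count, so nothing has to be remembered across the reserve/commit boundary. A minimal producer sketch against the API as it stands after this patch (write_sample() and the u64 payload are made up for illustration):

	static int write_sample(struct ring_buffer *buffer, u64 value)
	{
		struct ring_buffer_event *event;
		u64 *body;

		/* disables preemption via preempt_disable_notrace() on success */
		event = ring_buffer_lock_reserve(buffer, sizeof(*body));
		if (!event)
			return -EBUSY;

		body = ring_buffer_event_data(event);
		*body = value;

		/* re-enables preemption via preempt_enable_notrace() */
		return ring_buffer_unlock_commit(buffer, event);
	}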
@@ -2355,13 +2345,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
 	trace_recursive_unlock();
 
-	/*
-	 * Only the last preempt count needs to restore preemption.
-	 */
-	if (preempt_count() == 1)
-		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
-	else
-		preempt_enable_no_resched_notrace();
+	preempt_enable_notrace();
 
 	return 0;
 }
@@ -2469,13 +2453,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 
 	trace_recursive_unlock();
 
-	/*
-	 * Only the last preempt count needs to restore preemption.
-	 */
-	if (preempt_count() == 1)
-		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
-	else
-		preempt_enable_no_resched_notrace();
+	preempt_enable_notrace();
 
 }
 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
@@ -2501,12 +2479,12 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	struct ring_buffer_event *event;
 	void *body;
 	int ret = -EBUSY;
-	int cpu, resched;
+	int cpu;
 
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return -EBUSY;
 
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 
 	if (atomic_read(&buffer->record_disabled))
 		goto out;
@@ -2536,7 +2514,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	ret = 0;
  out:
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 
 	return ret;
 }
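Note: ring_buffer_write() is the one-shot path that reserves, copies the payload and commits in a single call, so after this change it is simply bracketed by preempt_disable_notrace() and preempt_enable_notrace(). A hypothetical caller (log_event() and struct my_event are illustrative, not from the tree):

	struct my_event {
		u64 ts;
		u32 value;
	};

	static int log_event(struct ring_buffer *buffer, struct my_event *ev)
	{
		/* copies sizeof(*ev) bytes from ev into the buffer and commits;
		 * returns 0 on success, -EBUSY if recording is disabled */
		return ring_buffer_write(buffer, sizeof(*ev), ev);
	}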