Browse code

ring-buffer: move calculation of event length

The event length is calculated and passed in to rb_reserve_next_event
in two different locations. Having rb_reserve_next_event do the
calculations directly makes only one location to do the change and
causes the calculation to be inlined by gcc.

Before:
   text    data     bss     dec     hex filename
  16538      24      12   16574    40be kernel/trace/ring_buffer.o

After:
   text    data     bss     dec     hex filename
  16490      24      12   16526    408e kernel/trace/ring_buffer.o

[ Impact: smaller more efficient code ]

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Steven Rostedt authored 16 years ago
Parent
Commit
be957c447f
1 changed file with 9 additions and 5 deletions
  1. +9 -5
      kernel/trace/ring_buffer.c

+ 9 - 5
kernel/trace/ring_buffer.c

@@ -367,6 +367,9 @@ static inline int test_time_stamp(u64 delta)
 
 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
 
+/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
+#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
+
 int ring_buffer_print_page_header(struct trace_seq *s)
 {
 	struct buffer_data_page field;
@@ -1396,6 +1399,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	int commit = 0;
 	int nr_loops = 0;
 
+	length = rb_calculate_event_length(length);
  again:
 	/*
 	 * We allow for interrupts to reenter here and do a trace.
@@ -1552,8 +1556,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	if (atomic_read(&cpu_buffer->record_disabled))
 		goto out;
 
-	length = rb_calculate_event_length(length);
-	if (length > BUF_PAGE_SIZE)
+	if (length > BUF_MAX_DATA_SIZE)
 		goto out;
 
 	event = rb_reserve_next_event(cpu_buffer, length);
@@ -1758,7 +1761,6 @@ int ring_buffer_write(struct ring_buffer *buffer,
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
-	unsigned long event_length;
 	void *body;
 	int ret = -EBUSY;
 	int cpu, resched;
@@ -1781,8 +1783,10 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	if (atomic_read(&cpu_buffer->record_disabled))
 		goto out;
 
-	event_length = rb_calculate_event_length(length);
-	event = rb_reserve_next_event(cpu_buffer, event_length);
+	if (length > BUF_MAX_DATA_SIZE)
+		goto out;
+
+	event = rb_reserve_next_event(cpu_buffer, length);
 	if (!event)
 		goto out;