@@ -69,6 +69,7 @@ void tracing_on(void)
 {
 	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
 }
+EXPORT_SYMBOL_GPL(tracing_on);
 
 /**
  * tracing_off - turn off all tracing buffers
@@ -82,6 +83,7 @@ void tracing_off(void)
 {
 	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
 }
+EXPORT_SYMBOL_GPL(tracing_off);
 
 /**
  * tracing_off_permanent - permanently disable ring buffers
@@ -111,12 +113,14 @@ u64 ring_buffer_time_stamp(int cpu)
 
 	return time;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
 
 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
 {
 	/* Just stupid testing the normalize function and deltas */
 	*ts >>= DEBUG_SHIFT;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
 
 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
 #define RB_ALIGNMENT_SHIFT	2
@@ -166,6 +170,7 @@ unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 {
 	return rb_event_length(event);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 
 /* inline for ring buffer fast paths */
 static inline void *
@@ -187,6 +192,7 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
 {
 	return rb_event_data(event);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 
 #define for_each_buffer_cpu(buffer, cpu)		\
 	for_each_cpu_mask(cpu, buffer->cpumask)
@@ -427,7 +433,7 @@ extern int ring_buffer_page_too_big(void);
 
 /**
  * ring_buffer_alloc - allocate a new ring_buffer
- * @size: the size in bytes that is needed.
+ * @size: the size in bytes per cpu that is needed.
  * @flags: attributes to set for the ring buffer.
  *
  * Currently the only flag that is available is the RB_FL_OVERWRITE
@@ -490,6 +496,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	kfree(buffer);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_alloc);
 
 /**
  * ring_buffer_free - free a ring buffer.
@@ -505,6 +512,7 @@ ring_buffer_free(struct ring_buffer *buffer)
 
 	kfree(buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_free);
 
 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
 
@@ -680,6 +688,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	mutex_unlock(&buffer->mutex);
 	return -ENOMEM;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_resize);
 
 static inline int rb_null_event(struct ring_buffer_event *event)
 {
@@ -1304,6 +1313,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	ftrace_preempt_enable(resched);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
 
 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 		      struct ring_buffer_event *event)
@@ -1350,6 +1360,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
 
 /**
  * ring_buffer_write - write data to the buffer without reserving
@@ -1411,6 +1422,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_write);
 
 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
 {
@@ -1437,6 +1449,7 @@ void ring_buffer_record_disable(struct ring_buffer *buffer)
 {
 	atomic_inc(&buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
 
 /**
  * ring_buffer_record_enable - enable writes to the buffer
@@ -1449,6 +1462,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
 {
 	atomic_dec(&buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
 
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
@@ -1470,6 +1484,7 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_inc(&cpu_buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
 
 /**
  * ring_buffer_record_enable_cpu - enable writes to the buffer
@@ -1489,6 +1504,7 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_dec(&cpu_buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 
 /**
  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
@@ -1505,6 +1521,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	return cpu_buffer->entries;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 
 /**
  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
@@ -1521,6 +1538,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	return cpu_buffer->overrun;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
 
 /**
  * ring_buffer_entries - get the number of entries in a buffer
@@ -1543,6 +1561,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
 
 	return entries;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_entries);
 
 /**
  * ring_buffer_overrun_cpu - get the number of overruns in buffer
@@ -1565,6 +1584,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
 
 	return overruns;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_overruns);
 
 static void rb_iter_reset(struct ring_buffer_iter *iter)
 {
@@ -1600,6 +1620,7 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 	rb_iter_reset(iter);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 
 /**
  * ring_buffer_iter_empty - check if an iterator has no more to read
@@ -1614,6 +1635,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
 	return iter->head_page == cpu_buffer->commit_page &&
 		iter->head == rb_commit_index(cpu_buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
 
 static void
 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
@@ -1880,6 +1902,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_peek);
 
 static struct ring_buffer_event *
 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
@@ -1940,6 +1963,7 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
 
 /**
  * ring_buffer_peek - peek at the next event to be read
@@ -2017,6 +2041,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 
 	return event;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_consume);
 
 /**
  * ring_buffer_read_start - start a non consuming read of the buffer
@@ -2059,6 +2084,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 
 	return iter;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read_start);
 
 /**
  * ring_buffer_finish - finish reading the iterator of the buffer
@@ -2075,6 +2101,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
 	atomic_dec(&cpu_buffer->record_disabled);
 	kfree(iter);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
 
 /**
  * ring_buffer_read - read the next item in the ring buffer by the iterator
@@ -2101,6 +2128,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 
 	return event;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read);
 
 /**
  * ring_buffer_size - return the size of the ring buffer (in bytes)
@@ -2110,6 +2138,7 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer)
 {
 	return BUF_PAGE_SIZE * buffer->pages;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_size);
 
 static void
 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
@@ -2156,6 +2185,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
 
 /**
  * ring_buffer_reset - reset a ring buffer
@@ -2168,6 +2198,7 @@ void ring_buffer_reset(struct ring_buffer *buffer)
 	for_each_buffer_cpu(buffer, cpu)
 		ring_buffer_reset_cpu(buffer, cpu);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_reset);
 
 /**
  * rind_buffer_empty - is the ring buffer empty?
@@ -2186,6 +2217,7 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 	}
 	return 1;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_empty);
 
 /**
  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
@@ -2202,6 +2234,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	return rb_per_cpu_empty(cpu_buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
 
 /**
  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
@@ -2250,6 +2283,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 
 static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
 			      struct buffer_data_page *bpage)
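
[Editor's sketch, not part of the patch: what the exports above enable. The
module below is entirely hypothetical (the name rb_demo, the payload, the
buffer size are invented); the signatures are taken from the hunk headers in
this patch -- ring_buffer_alloc(size, flags), ring_buffer_write(buffer,
length, data), ring_buffer_consume(buffer, cpu, ts) -- as they stood in this
era of the API, which has since changed. Because the symbols are
EXPORT_SYMBOL_GPL, the module must declare a GPL-compatible license.]

#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/ring_buffer.h>

static struct ring_buffer *rb;

static int __init rb_demo_init(void)
{
	struct ring_buffer_event *event;
	char payload[] = "hello";
	u64 ts;
	int cpu;

	/* Per the doc fix in this patch, @size is bytes *per cpu*. */
	rb = ring_buffer_alloc(4096, 0);
	if (!rb)
		return -ENOMEM;

	/* Write one event without a separate reserve/commit step. */
	if (ring_buffer_write(rb, sizeof(payload), payload))
		pr_warn("rb_demo: write failed\n");

	/*
	 * Events live in per-cpu buffers and the write landed on whichever
	 * cpu was current, so scan all online cpus to consume it back.
	 */
	for_each_online_cpu(cpu) {
		event = ring_buffer_consume(rb, cpu, &ts);
		if (event)
			pr_info("rb_demo: cpu%d: %u bytes: %s\n", cpu,
				ring_buffer_event_length(event),
				(char *)ring_buffer_event_data(event));
	}
	return 0;
}

static void __exit rb_demo_exit(void)
{
	ring_buffer_free(rb);
}

module_init(rb_demo_init);
module_exit(rb_demo_exit);
MODULE_LICENSE("GPL"); /* required to link against GPL-only exports */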