@@ -195,7 +195,7 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 
 #define for_each_buffer_cpu(buffer, cpu)		\
-	for_each_cpu_mask(cpu, buffer->cpumask)
+	for_each_cpu(cpu, buffer->cpumask)
 
 #define TS_SHIFT	27
 #define TS_MASK		((1ULL << TS_SHIFT) - 1)
@@ -267,7 +267,7 @@ struct ring_buffer {
 	unsigned			pages;
 	unsigned			flags;
 	int				cpus;
-	cpumask_t			cpumask;
+	cpumask_var_t			cpumask;
 	atomic_t			record_disabled;
 
 	struct mutex			mutex;
@@ -458,6 +458,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	if (!buffer)
 		return NULL;
 
+	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
+		goto fail_free_buffer;
+
 	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 	buffer->flags = flags;
 
@@ -465,14 +468,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	if (buffer->pages == 1)
 		buffer->pages++;
 
-	buffer->cpumask = cpu_possible_map;
+	cpumask_copy(buffer->cpumask, cpu_possible_mask);
 	buffer->cpus = nr_cpu_ids;
 
 	bsize = sizeof(void *) * nr_cpu_ids;
 	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
 				  GFP_KERNEL);
 	if (!buffer->buffers)
-		goto fail_free_buffer;
+		goto fail_free_cpumask;
 
 	for_each_buffer_cpu(buffer, cpu) {
 		buffer->buffers[cpu] =
@@ -492,6 +495,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	}
 	kfree(buffer->buffers);
 
+ fail_free_cpumask:
+	free_cpumask_var(buffer->cpumask);
+
  fail_free_buffer:
 	kfree(buffer);
 	return NULL;
@@ -510,6 +516,8 @@ ring_buffer_free(struct ring_buffer *buffer)
 	for_each_buffer_cpu(buffer, cpu)
 		rb_free_cpu_buffer(buffer->buffers[cpu]);
 
+	free_cpumask_var(buffer->cpumask);
+
 	kfree(buffer);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_free);
@@ -1283,7 +1291,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 
 	cpu = raw_smp_processor_id();
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1396,7 +1404,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	cpu = raw_smp_processor_id();
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1478,7 +1486,7 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1498,7 +1506,7 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1515,7 +1523,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1532,7 +1540,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1850,7 +1858,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct buffer_page *reader;
 	int nr_loops = 0;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -2025,7 +2033,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_event *event;
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2062,7 +2070,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	struct ring_buffer_iter *iter;
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
@@ -2172,7 +2180,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2228,7 +2236,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 1;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -2252,8 +2260,8 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 	struct ring_buffer_per_cpu *cpu_buffer_a;
 	struct ring_buffer_per_cpu *cpu_buffer_b;
 
-	if (!cpu_isset(cpu, buffer_a->cpumask) ||
-	    !cpu_isset(cpu, buffer_b->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
+	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
 		return -EINVAL;
 
 	/* At least make sure the two buffers are somewhat the same */