@@ -473,7 +473,7 @@ struct ring_buffer_per_cpu {
 	int				nr_pages_to_update;
 	struct list_head		new_pages; /* new pages to add */
 	struct work_struct		update_pages_work;
-	struct completion		update_completion;
+	struct completion		update_done;
 };
 
 struct ring_buffer {
@@ -1058,7 +1058,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
 	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
-	init_completion(&cpu_buffer->update_completion);
+	init_completion(&cpu_buffer->update_done);
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
@@ -1461,7 +1461,7 @@ static void update_pages_handler(struct work_struct *work)
 	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
 			struct ring_buffer_per_cpu, update_pages_work);
 	rb_update_pages(cpu_buffer);
-	complete(&cpu_buffer->update_completion);
+	complete(&cpu_buffer->update_done);
 }
 
 /**
@@ -1534,39 +1534,29 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 		get_online_cpus();
 		/*
 		 * Fire off all the required work handlers
-		 * Look out for offline CPUs
-		 */
-		for_each_buffer_cpu(buffer, cpu) {
-			cpu_buffer = buffer->buffers[cpu];
-			if (!cpu_buffer->nr_pages_to_update ||
-			    !cpu_online(cpu))
-				continue;
-
-			schedule_work_on(cpu, &cpu_buffer->update_pages_work);
-		}
-		/*
-		 * This loop is for the CPUs that are not online.
-		 * We can't schedule anything on them, but it's not necessary
+		 * We can't schedule on offline CPUs, but it's not necessary
 		 * since we can change their buffer sizes without any race.
 		 */
 		for_each_buffer_cpu(buffer, cpu) {
 			cpu_buffer = buffer->buffers[cpu];
-			if (!cpu_buffer->nr_pages_to_update ||
-			    cpu_online(cpu))
+			if (!cpu_buffer->nr_pages_to_update)
 				continue;
 
-			rb_update_pages(cpu_buffer);
+			if (cpu_online(cpu))
+				schedule_work_on(cpu,
+						&cpu_buffer->update_pages_work);
+			else
+				rb_update_pages(cpu_buffer);
 		}
 
 		/* wait for all the updates to complete */
 		for_each_buffer_cpu(buffer, cpu) {
 			cpu_buffer = buffer->buffers[cpu];
-			if (!cpu_buffer->nr_pages_to_update ||
-			    !cpu_online(cpu))
+			if (!cpu_buffer->nr_pages_to_update)
 				continue;
 
-			wait_for_completion(&cpu_buffer->update_completion);
-			/* reset this value */
+			if (cpu_online(cpu))
+				wait_for_completion(&cpu_buffer->update_done);
 			cpu_buffer->nr_pages_to_update = 0;
 		}
 
@@ -1593,13 +1583,12 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 		if (cpu_online(cpu_id)) {
 			schedule_work_on(cpu_id,
 					 &cpu_buffer->update_pages_work);
-			wait_for_completion(&cpu_buffer->update_completion);
+			wait_for_completion(&cpu_buffer->update_done);
 		} else
 			rb_update_pages(cpu_buffer);
 
-		put_online_cpus();
-		/* reset this value */
 		cpu_buffer->nr_pages_to_update = 0;
+		put_online_cpus();
 	}
 
  out: