@@ -997,13 +997,14 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 			     unsigned nr_pages)
 {
 	struct buffer_page *bpage, *tmp;
-	unsigned long addr;
 	LIST_HEAD(pages);
 	unsigned i;
 
 	WARN_ON(!nr_pages);
 
 	for (i = 0; i < nr_pages; i++) {
+		struct page *page;
+
 		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
 		if (!bpage)
@@ -1013,10 +1014,11 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 
 		list_add(&bpage->list, &pages);
 
-		addr = __get_free_page(GFP_KERNEL);
-		if (!addr)
+		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
+					GFP_KERNEL, 0);
+		if (!page)
 			goto free_pages;
-		bpage->page = (void *)addr;
+		bpage->page = page_address(page);
 		rb_init_page(bpage->page);
 	}
 
@@ -1045,7 +1047,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct buffer_page *bpage;
-	unsigned long addr;
+	struct page *page;
 	int ret;
 
 	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
@@ -1067,10 +1069,10 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	rb_check_bpage(cpu_buffer, bpage);
 
 	cpu_buffer->reader_page = bpage;
-	addr = __get_free_page(GFP_KERNEL);
-	if (!addr)
+	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+	if (!page)
 		goto fail_free_reader;
-	bpage->page = (void *)addr;
+	bpage->page = page_address(page);
 	rb_init_page(bpage->page);
 
 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
@@ -1314,7 +1316,6 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	unsigned nr_pages, rm_pages, new_pages;
 	struct buffer_page *bpage, *tmp;
 	unsigned long buffer_size;
-	unsigned long addr;
 	LIST_HEAD(pages);
 	int i, cpu;
 
@@ -1375,16 +1376,17 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 
 	for_each_buffer_cpu(buffer, cpu) {
 		for (i = 0; i < new_pages; i++) {
+			struct page *page;
 			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
 						  cache_line_size()),
 					    GFP_KERNEL, cpu_to_node(cpu));
 			if (!bpage)
 				goto free_pages;
 			list_add(&bpage->list, &pages);
-			addr = __get_free_page(GFP_KERNEL);
-			if (!addr)
+			page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+			if (!page)
 				goto free_pages;
-			bpage->page = (void *)addr;
+			bpage->page = page_address(page);
 			rb_init_page(bpage->page);
 		}
 	}
@@ -3730,16 +3732,16 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
  * Returns:
  *  The page allocated, or NULL on error.
  */
-void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
+void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
 {
 	struct buffer_data_page *bpage;
-	unsigned long addr;
+	struct page *page;
 
-	addr = __get_free_page(GFP_KERNEL);
-	if (!addr)
+	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+	if (!page)
 		return NULL;
 
-	bpage = (void *)addr;
+	bpage = page_address(page);
 
 	rb_init_page(bpage);
 
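
Illustrative caller sketch (not part of the patch above): the last hunk changes the signature of ring_buffer_alloc_read_page() to take a cpu argument, so a caller must now say which CPU's buffer the page will be used with, and the scratch page is then allocated on that CPU's NUMA node. The helper below is a hypothetical example, assuming the existing ring_buffer_read_page() and ring_buffer_free_read_page() APIs are otherwise unchanged.

#include <linux/ring_buffer.h>

/* Hypothetical example, not in the patch: read one page of events from @cpu. */
static int example_read_one_page(struct ring_buffer *buffer, int cpu)
{
	void *data;
	int ret;

	/* Scratch page now comes from @cpu's NUMA node. */
	data = ring_buffer_alloc_read_page(buffer, cpu);
	if (!data)
		return -ENOMEM;

	/* Copy up to a page of events from @cpu's buffer into the scratch page. */
	ret = ring_buffer_read_page(buffer, &data, PAGE_SIZE, cpu, 0);

	ring_buffer_free_read_page(buffer, data);
	return ret;	/* read offset on success, negative if nothing was read */
}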