@@ -1254,21 +1254,38 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	__free_pages(page, order);
 }
 
+#define need_reserve_slab_rcu						\
+	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
+
 static void rcu_free_slab(struct rcu_head *h)
 {
 	struct page *page;
 
-	page = container_of((struct list_head *)h, struct page, lru);
+	if (need_reserve_slab_rcu)
+		page = virt_to_head_page(h);
+	else
+		page = container_of((struct list_head *)h, struct page, lru);
+
 	__free_slab(page->slab, page);
 }
 
 static void free_slab(struct kmem_cache *s, struct page *page)
 {
 	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
-		/*
-		 * RCU free overloads the RCU head over the LRU
-		 */
-		struct rcu_head *head = (void *)&page->lru;
+		struct rcu_head *head;
+
+		if (need_reserve_slab_rcu) {
+			int order = compound_order(page);
+			int offset = (PAGE_SIZE << order) - s->reserved;
+
+			VM_BUG_ON(s->reserved != sizeof(*head));
+			head = page_address(page) + offset;
+		} else {
+			/*
+			 * RCU free overloads the RCU head over the LRU
+			 */
+			head = (void *)&page->lru;
+		}
 
 		call_rcu(head, rcu_free_slab);
 	} else
@@ -2356,6 +2373,9 @@ static int kmem_cache_open(struct kmem_cache *s,
 	s->flags = kmem_cache_flags(size, flags, name, ctor);
 	s->reserved = 0;
 
+	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
+		s->reserved = sizeof(struct rcu_head);
+
 	if (!calculate_sizes(s, -1))
 		goto error;
 	if (disable_higher_order_debug) {
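
The whole patch hangs on one compile-time comparison: when struct rcu_head has
grown past the two pointers of page->lru (as it can with RCU debugging
options), the head can no longer be overlaid on the page struct, so the cache
reserves sizeof(struct rcu_head) bytes at the very end of each slab and
recomputes the head's address from page_address() plus the slab size at free
time. Below is a minimal userspace sketch of that arithmetic, for readers who
want to poke at it outside the kernel tree. The mock_* types, the hard-coded
4 KiB page size, and main() are illustrative assumptions, not kernel code;
only the sizeof comparison and the end-of-slab offset computation mirror the
patch.

	/* Userspace sketch of the slab-RCU reservation logic -- not kernel code. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define MOCK_PAGE_SIZE 4096UL

	struct mock_list_head { struct mock_list_head *next, *prev; };

	struct mock_rcu_head {
		struct mock_rcu_head *next;
		void (*func)(struct mock_rcu_head *head);
		void *debug_cookie;	/* pretend a debug field grew the struct */
	};

	/* Mirrors need_reserve_slab_rcu: true once rcu_head outgrows the lru field. */
	#define mock_need_reserve \
		(sizeof(struct mock_list_head) < sizeof(struct mock_rcu_head))

	int main(void)
	{
		int order = 1;					/* two-page slab */
		size_t slab_bytes = MOCK_PAGE_SIZE << order;
		size_t reserved = mock_need_reserve ?
				  sizeof(struct mock_rcu_head) : 0;
		char *slab = malloc(slab_bytes);

		if (!slab)
			return 1;

		/*
		 * Same arithmetic as free_slab(): the head occupies the last
		 * 'reserved' bytes of the slab, so calculate_sizes() never
		 * hands that tail out as object space.
		 */
		size_t offset = slab_bytes - reserved;
		struct mock_rcu_head *head =
			(struct mock_rcu_head *)(slab + offset);

		printf("need_reserve=%d reserved=%zu head at offset %zu of %zu\n",
		       mock_need_reserve, reserved, offset, slab_bytes);

		memset(head, 0, reserved);	/* stand-in for call_rcu() setup */
		free(slab);
		return 0;
	}

On a 64-bit build with the three-member mock head the sketch prints
need_reserve=1 and places the head 24 bytes before the end of the slab; shrink
mock_rcu_head back to two pointers and reserved drops to 0, matching the
overlay-on-lru fast path that the patch keeps for the common case.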