@@ -188,25 +188,6 @@ typedef unsigned int kmem_bufctl_t;
 #define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
 #define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
 
-/*
- * struct slab_rcu
- *
- * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
- * arrange for kmem_freepages to be called via RCU. This is useful if
- * we need to approach a kernel structure obliquely, from its address
- * obtained without the usual locking. We can lock the structure to
- * stabilize it and check it's still at the given address, only if we
- * can be sure that the memory has not been meanwhile reused for some
- * other kind of object (which our subsystem's lock might corrupt).
- *
- * rcu_read_lock before reading the address, then rcu_read_unlock after
- * taking the spinlock within the structure expected at that address.
- */
-struct slab_rcu {
-	struct rcu_head head;
-	struct page *page;
-};
-
 /*
  * struct slab
  *
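The comment removed above documents the access pattern that SLAB_DESTROY_BY_RCU
callers rely on, which this patch preserves by moving the rcu_head into struct
page. A minimal kernel-style sketch of that pattern follows; my_obj, my_table,
TABLE_SIZE and my_obj_lookup are illustrative names, not part of this patch:

#define TABLE_SIZE 256				/* illustrative */

struct my_obj {
	spinlock_t lock;
	int key;
};

static struct my_obj __rcu *my_table[TABLE_SIZE];

/* Returns the object with obj->lock held, or NULL. */
static struct my_obj *my_obj_lookup(int key)
{
	struct my_obj *obj;

	rcu_read_lock();
	obj = rcu_dereference(my_table[key]);
	if (obj) {
		/*
		 * SLAB_DESTROY_BY_RCU guarantees this memory is still
		 * some struct my_obj during the grace period, so taking
		 * the lock cannot corrupt an unrelated object.
		 */
		spin_lock(&obj->lock);
		if (obj->key != key) {
			/* object was recycled; it is no longer ours */
			spin_unlock(&obj->lock);
			obj = NULL;
		}
	}
	rcu_read_unlock();
	return obj;
}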
@@ -215,14 +196,11 @@ struct slab_rcu {
  * Slabs are chained into three lists: fully used, partial, fully free slabs.
  */
 struct slab {
-	union {
-		struct {
-			struct list_head list;
-			void *s_mem;		/* including colour offset */
-			unsigned int inuse;	/* num of objs active in slab */
-			kmem_bufctl_t free;
-		};
-		struct slab_rcu __slab_cover_slab_rcu;
+	struct {
+		struct list_head list;
+		void *s_mem;		/* including colour offset */
+		unsigned int inuse;	/* num of objs active in slab */
+		kmem_bufctl_t free;
 	};
 };
 
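The union disappears because the RCU bookkeeping no longer lives in struct slab
at all; it is overlaid on struct page instead. Roughly, the overlay this series
relies on looks like the following (heavily simplified from
include/linux/mm_types.h; the real union has many more members, and the
rcu_head member is added to it elsewhere in this series, not in these hunks):

struct page {
	/* ... */
	union {
		struct list_head lru;		/* slab keeps pages on lists */
		struct rcu_head rcu_head;	/* reused for deferred free */
	};
	/* ... */
};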
@@ -1509,6 +1487,8 @@ void __init kmem_cache_init(void)
 {
 	int i;
 
+	BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
+					sizeof(struct rcu_head));
 	kmem_cache = &kmem_cache_boot;
 	setup_node_pointer(kmem_cache);
 
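The BUILD_BUG_ON added here is what makes the overlay safe: the build fails if
struct rcu_head ever outgrows the lru field it is written over. A userspace
analogue of the same compile-time check, assuming C11 and struct layouts that
mirror the kernel's two-pointer definitions:

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };
struct rcu_head  { struct rcu_head *next; void (*func)(struct rcu_head *); };

_Static_assert(sizeof(struct list_head) >= sizeof(struct rcu_head),
	       "rcu_head must fit in the lru field it overlays");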
@@ -1822,12 +1802,13 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 
 static void kmem_rcu_free(struct rcu_head *head)
 {
-	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
-	struct kmem_cache *cachep = slab_rcu->page->slab_cache;
+	struct kmem_cache *cachep;
+	struct page *page;
 
-	kmem_freepages(cachep, slab_rcu->page);
-	if (OFF_SLAB(cachep))
-		kmem_cache_free(cachep->slabp_cache, slab_rcu);
+	page = container_of(head, struct page, rcu_head);
+	cachep = page->slab_cache;
+
+	kmem_freepages(cachep, page);
 }
 
 #if DEBUG
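kmem_rcu_free now recovers the enclosing page from the embedded rcu_head with
container_of, instead of casting the head to a dedicated struct slab_rcu. For
reference, container_of boils down to pointer arithmetic over the member
offset (simplified; the kernel macro adds a typeof-based type check):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))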
@@ -2048,16 +2029,27 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 
 	slab_destroy_debugcheck(cachep, slabp);
 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
-		struct slab_rcu *slab_rcu;
+		struct rcu_head *head;
+
+		/*
+		 * RCU free overloads the RCU head over the LRU.
+		 * slab_page has been overloaded over the LRU,
+		 * but it is not used from this point on, so
+		 * we can reuse that space safely.
+		 */
+		head = (void *)&page->rcu_head;
+		call_rcu(head, kmem_rcu_free);
 
-		slab_rcu = (struct slab_rcu *)slabp;
-		slab_rcu->page = page;
-		call_rcu(&slab_rcu->head, kmem_rcu_free);
 	} else {
 		kmem_freepages(cachep, page);
-		if (OFF_SLAB(cachep))
-			kmem_cache_free(cachep->slabp_cache, slabp);
 	}
+
+	/*
+	 * The slab management structure is no longer used, although
+	 * the actual page may still be freed later, in RCU context.
+	 */
+	if (OFF_SLAB(cachep))
+		kmem_cache_free(cachep->slabp_cache, slabp);
 }
 
 /**
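The restructured slab_destroy frees the off-slab management structure
immediately in both branches, while the backing page may outlive it until the
RCU callback runs. The deferred free follows the usual call_rcu shape; a
generic sketch of that idiom (struct foo and its helpers are illustrative,
not from this patch):

struct foo {
	int data;
	struct rcu_head rcu;
};

static void foo_free_rcu(struct rcu_head *head)
{
	struct foo *f = container_of(head, struct foo, rcu);

	kfree(f);	/* runs only after a grace period has elapsed */
}

static void foo_release(struct foo *f)
{
	/* readers that found f under rcu_read_lock() remain safe */
	call_rcu(&f->rcu, foo_free_rcu);
}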