Browse code

SLUB: clean up krealloc

We really do not need all this gaga there.

ksize gives us all the information we need to figure out if the object can
cope with the new size.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Christoph Lameter, 18 years ago
Parent
Current commit
1f99a283dc
1 file changed, 4 insertions(+), 11 deletions(-)
  1. 4 11
      mm/slub.c

+ 4 - 11
mm/slub.c

@@ -2199,9 +2199,8 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  */
 void *krealloc(const void *p, size_t new_size, gfp_t flags)
 {
-	struct kmem_cache *new_cache;
 	void *ret;
-	struct page *page;
+	size_t ks;
 
 	if (unlikely(!p))
 		return kmalloc(new_size, flags);
@@ -2211,19 +2210,13 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 		return NULL;
 	}
 
-	page = virt_to_head_page(p);
-
-	new_cache = get_slab(new_size, flags);
-
-	/*
- 	 * If new size fits in the current cache, bail out.
- 	 */
-	if (likely(page->slab == new_cache))
+	ks = ksize(p);
+	if (ks >= new_size)
 		return (void *)p;
 
 	ret = kmalloc(new_size, flags);
 	if (ret) {
-		memcpy(ret, p, min(new_size, ksize(p)));
+		memcpy(ret, p, min(new_size, ks));
 		kfree(p);
 	}
 	return ret;