@@ -91,14 +91,6 @@ struct slob_block {
 };
 typedef struct slob_block slob_t;
 
-/*
- * free_slob_page: call before a slob_page is returned to the page allocator.
- */
-static inline void free_slob_page(struct page *sp)
-{
-	reset_page_mapcount(sp);
-}
-
 /*
  * All partially free slob pages go on these lists.
  */
@@ -108,29 +100,6 @@ static LIST_HEAD(free_slob_small);
 static LIST_HEAD(free_slob_medium);
 static LIST_HEAD(free_slob_large);
 
-/*
- * is_slob_page: True for all slob pages (false for bigblock pages)
- */
-static inline int is_slob_page(struct page *sp)
-{
-	return PageSlab(sp);
-}
-
-static inline void set_slob_page(struct page *sp)
-{
-	__SetPageSlab(sp);
-}
-
-static inline void clear_slob_page(struct page *sp)
-{
-	__ClearPageSlab(sp);
-}
-
-static inline struct page *slob_page(const void *addr)
-{
-	return virt_to_page(addr);
-}
-
 /*
  * slob_page_free: true for pages on free_slob_pages list.
  */
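The two hunks above delete the slob-specific one-line wrappers (free_slob_page, is_slob_page, set_slob_page, clear_slob_page, slob_page); the remaining hunks open-code the same operations at each call site through the generic struct page interfaces. As a rough illustration of the substitution pattern only (kernel C of this era; slob_grab_page_example is a made-up name, not a function in slob.c):

#include <linux/mm.h>

/*
 * Illustration only: the old slob_page()/set_slob_page() pair becomes
 * direct calls to the generic page helpers.
 */
static struct page *slob_grab_page_example(void *b)
{
	struct page *sp = virt_to_page(b);	/* was: sp = slob_page(b);  */

	__SetPageSlab(sp);			/* was: set_slob_page(sp);  */
	return sp;
}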
@@ -347,8 +316,8 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
 		if (!b)
 			return NULL;
-		sp = slob_page(b);
-		set_slob_page(sp);
+		sp = virt_to_page(b);
+		__SetPageSlab(sp);
 
 		spin_lock_irqsave(&slob_lock, flags);
 		sp->units = SLOB_UNITS(PAGE_SIZE);
@@ -380,7 +349,7 @@ static void slob_free(void *block, int size)
 		return;
 	BUG_ON(!size);
 
-	sp = slob_page(block);
+	sp = virt_to_page(block);
 	units = SLOB_UNITS(size);
 
 	spin_lock_irqsave(&slob_lock, flags);
@@ -390,8 +359,8 @@ static void slob_free(void *block, int size)
 		if (slob_page_free(sp))
 			clear_slob_page_free(sp);
 		spin_unlock_irqrestore(&slob_lock, flags);
-		clear_slob_page(sp);
-		free_slob_page(sp);
+		__ClearPageSlab(sp);
+		reset_page_mapcount(sp);
 		slob_free_pages(b, 0);
 		return;
 	}
@@ -508,8 +477,8 @@ void kfree(const void *block)
 		return;
 	kmemleak_free(block);
 
-	sp = slob_page(block);
-	if (is_slob_page(sp)) {
+	sp = virt_to_page(block);
+	if (PageSlab(sp)) {
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
@@ -527,8 +496,8 @@ size_t ksize(const void *block)
 	if (unlikely(block == ZERO_SIZE_PTR))
 		return 0;
 
-	sp = slob_page(block);
-	if (is_slob_page(sp)) {
+	sp = virt_to_page(block);
+	if (PageSlab(sp)) {
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		return SLOB_UNITS(*m) * SLOB_UNIT;
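With the wrappers gone, the PageSlab() bit is all that kfree() and ksize() consult to tell small slob objects, whose size word is stored align bytes in front of the returned pointer, from large allocations that were handed out as whole pages. A condensed sketch of the resulting kfree() flow, assuming the non-slab branch simply drops the page with put_page() as in the surrounding slob code (that branch lies outside the hunk above) and omitting tracing:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>

void kfree(const void *block)
{
	struct page *sp;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = virt_to_page(block);
	if (PageSlab(sp)) {
		/* small object: its size word sits just in front of it */
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else {
		/* whole page(s): came straight from the page allocator */
		put_page(sp);
	}
}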