@@ -247,13 +247,11 @@ static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
 }
 
 /* Decode <page, obj_idx> pair from the given object handle */
-static void obj_handle_to_location(void *handle, struct page **page,
+static void obj_handle_to_location(unsigned long handle, struct page **page,
 				unsigned long *obj_idx)
 {
-	unsigned long hval = (unsigned long)handle;
-
-	*page = pfn_to_page(hval >> OBJ_INDEX_BITS);
-	*obj_idx = hval & OBJ_INDEX_MASK;
+	*page = pfn_to_page(handle >> OBJ_INDEX_BITS);
+	*obj_idx = handle & OBJ_INDEX_MASK;
 }
 
 static unsigned long obj_idx_to_offset(struct page *page,
@@ -568,12 +566,12 @@ EXPORT_SYMBOL_GPL(zs_destroy_pool);
  * @size: size of block to allocate
  *
  * On success, handle to the allocated object is returned,
- * otherwise NULL.
+ * otherwise 0.
  * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
  */
-void *zs_malloc(struct zs_pool *pool, size_t size)
+unsigned long zs_malloc(struct zs_pool *pool, size_t size)
 {
-	void *obj;
+	unsigned long obj;
 	struct link_free *link;
 	int class_idx;
 	struct size_class *class;
@@ -582,7 +580,7 @@ void *zs_malloc(struct zs_pool *pool, size_t size)
 	unsigned long m_objidx, m_offset;
 
 	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
-		return NULL;
+		return 0;
 
 	class_idx = get_size_class_index(size);
 	class = &pool->size_class[class_idx];
@@ -595,14 +593,14 @@ void *zs_malloc(struct zs_pool *pool, size_t size)
 		spin_unlock(&class->lock);
 		first_page = alloc_zspage(class, pool->flags);
 		if (unlikely(!first_page))
-			return NULL;
+			return 0;
 
 		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
 		spin_lock(&class->lock);
 		class->pages_allocated += class->pages_per_zspage;
 	}
 
-	obj = first_page->freelist;
+	obj = (unsigned long)first_page->freelist;
 	obj_handle_to_location(obj, &m_page, &m_objidx);
 	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
 
@@ -621,7 +619,7 @@ void *zs_malloc(struct zs_pool *pool, size_t size)
 }
 EXPORT_SYMBOL_GPL(zs_malloc);
 
-void zs_free(struct zs_pool *pool, void *obj)
+void zs_free(struct zs_pool *pool, unsigned long obj)
 {
 	struct link_free *link;
 	struct page *first_page, *f_page;
@@ -648,7 +646,7 @@ void zs_free(struct zs_pool *pool, void *obj)
 							+ f_offset);
 	link->next = first_page->freelist;
 	kunmap_atomic(link);
-	first_page->freelist = obj;
+	first_page->freelist = (void *)obj;
 
 	first_page->inuse--;
 	fullness = fix_fullness_group(pool, first_page);
@@ -672,7 +670,7 @@ EXPORT_SYMBOL_GPL(zs_free);
  * this function. When done with the object, it must be unmapped using
  * zs_unmap_object
  */
-void *zs_map_object(struct zs_pool *pool, void *handle)
+void *zs_map_object(struct zs_pool *pool, unsigned long handle)
 {
 	struct page *page;
 	unsigned long obj_idx, off;
@@ -712,7 +710,7 @@ void *zs_map_object(struct zs_pool *pool, void *handle)
 }
 EXPORT_SYMBOL_GPL(zs_map_object);
 
-void zs_unmap_object(struct zs_pool *pool, void *handle)
+void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 {
 	struct page *page;
 	unsigned long obj_idx, off;