@@ -106,8 +106,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	if (layer_idr)
 		return get_from_free_list(layer_idr);
 
-	/* try to allocate directly from kmem_cache */
-	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+	/*
+	 * Try to allocate directly from kmem_cache. We want to try this
+	 * before preload buffer; otherwise, non-preloading idr_alloc()
+	 * users will end up taking advantage of preloading ones. As the
+	 * following is allowed to fail for preloaded cases, suppress
+	 * warning this time.
+	 */
+	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
 	if (new)
 		return new;
 
@@ -115,18 +121,24 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	 * Try to fetch one from the per-cpu preload buffer if in process
 	 * context. See idr_preload() for details.
 	 */
-	if (in_interrupt())
-		return NULL;
-
-	preempt_disable();
-	new = __this_cpu_read(idr_preload_head);
-	if (new) {
-		__this_cpu_write(idr_preload_head, new->ary[0]);
-		__this_cpu_dec(idr_preload_cnt);
-		new->ary[0] = NULL;
+	if (!in_interrupt()) {
+		preempt_disable();
+		new = __this_cpu_read(idr_preload_head);
+		if (new) {
+			__this_cpu_write(idr_preload_head, new->ary[0]);
+			__this_cpu_dec(idr_preload_cnt);
+			new->ary[0] = NULL;
+		}
+		preempt_enable();
+		if (new)
+			return new;
 	}
-	preempt_enable();
-	return new;
+
+	/*
+	 * Both failed. Try kmem_cache again w/o adding __GFP_NOWARN so
+	 * that memory allocation failure warning is printed as intended.
+	 */
+	return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 }
 
 static void idr_layer_rcu_free(struct rcu_head *head)
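
For context: the resulting fallback order in idr_layer_alloc() is (1) kmem_cache
with __GFP_NOWARN, (2) the per-cpu preload buffer, (3) kmem_cache again with
warnings enabled. That order serves the idr_preload() pattern, where the
allocation itself runs under a spinlock with GFP_NOWAIT and step (1) is
expected to fail under memory pressure. A minimal caller sketch (my_idr,
my_lock and my_assign_id() are hypothetical names, not part of this patch):

	static DEFINE_SPINLOCK(my_lock);
	static DEFINE_IDR(my_idr);

	int my_assign_id(void *ptr)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* may sleep; fills per-cpu buffer */
		spin_lock(&my_lock);
		id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
		spin_unlock(&my_lock);
		idr_preload_end();		/* re-enables preemption */

		return id;	/* new id >= 0, or -ENOMEM/-ENOSPC */
	}

Trying kmem_cache before the preload buffer keeps non-preloading idr_alloc()
callers from draining layers that a concurrent preloading caller has reserved.
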
@@ -184,20 +196,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
 	}
 }
 
-/**
- * idr_pre_get - reserve resources for idr allocation
- * @idp: idr handle
- * @gfp_mask: memory allocation flags
- *
- * This function should be called prior to calling the idr_get_new* functions.
- * It preallocates enough memory to satisfy the worst possible allocation. The
- * caller should pass in GFP_KERNEL if possible. This of course requires that
- * no spinning locks be held.
- *
- * If the system is REALLY out of memory this function returns %0,
- * otherwise %1.
- */
-int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
+int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
 	while (idp->id_free_cnt < MAX_IDR_FREE) {
 		struct idr_layer *new;
@@ -208,7 +207,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 	}
 	return 1;
 }
-EXPORT_SYMBOL(idr_pre_get);
+EXPORT_SYMBOL(__idr_pre_get);
 
 /**
  * sub_alloc - try to allocate an id without growing the tree depth
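
Presumably the old name survives as a deprecated wrapper in
include/linux/idr.h so existing callers keep compiling; those header changes
are not in the hunks shown here. A sketch of what such a wrapper would look
like, under that assumption:

	/* assumed compatibility shim, not part of this patch excerpt */
	static inline int __deprecated idr_pre_get(struct idr *idp, gfp_t gfp_mask)
	{
		return __idr_pre_get(idp, gfp_mask);
	}
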
@@ -375,25 +374,7 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
 	idr_mark_full(pa, id);
 }
 
-/**
- * idr_get_new_above - allocate new idr entry above or equal to a start id
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @starting_id: id to start search at
- * @id: pointer to the allocated handle
- *
- * This is the allocate id function. It should be called with any
- * required locks.
- *
- * If allocation from IDR's private freelist fails, idr_get_new_above() will
- * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
- * IDR's preallocation and then retry the idr_get_new_above() call.
- *
- * If the idr is full idr_get_new_above() will return %-ENOSPC.
- *
- * @id returns a value in the range @starting_id ... %0x7fffffff
- */
-int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
+int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
 	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
 	int rv;
@@ -406,7 +387,7 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 	*id = rv;
 	return 0;
 }
-EXPORT_SYMBOL(idr_get_new_above);
+EXPORT_SYMBOL(__idr_get_new_above);
 
 /**
  * idr_preload - preload for idr_alloc()
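
The deleted kerneldoc above was the only place the old retry protocol was
spelled out, so for the record: callers were expected to loop, refilling the
per-idr freelist with idr_pre_get() whenever idr_get_new_above() returned
-EAGAIN. Roughly (my_idr, my_lock and ptr are a hypothetical caller's state):

	int ret, id;

	do {
		if (!idr_pre_get(&my_idr, GFP_KERNEL))	/* may sleep */
			return -ENOMEM;
		spin_lock(&my_lock);
		ret = idr_get_new_above(&my_idr, ptr, 1, &id);
		spin_unlock(&my_lock);
	} while (ret == -EAGAIN);	/* freelist raced away; refill and retry */

	if (ret)
		return ret;	/* -ENOSPC: id space exhausted */

In the new API the whole loop collapses to a single
idr_alloc(&my_idr, ptr, 1, 0, GFP_NOWAIT) inside an idr_preload() section,
as in the earlier sketch.
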
@@ -907,7 +888,7 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
 {
 	/* allocate idr_layers */
-	if (!idr_pre_get(&ida->idr, gfp_mask))
+	if (!__idr_pre_get(&ida->idr, gfp_mask))
 		return 0;
 
 	/* allocate free_bitmap */
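
Note that ida_pre_get() keeps its public name and semantics here; only its
internal call switches to the renamed __idr_pre_get(), so ida users are
unaffected by the deprecation.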