@@ -38,6 +38,15 @@
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
 
+#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
+#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
+
+/* Leave the possibility of an incomplete final layer */
+#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
+
+/* Number of id_layer structs to leave in free list */
+#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
+
 static struct kmem_cache *idr_layer_cache;
 static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
 static DEFINE_PER_CPU(int, idr_preload_cnt);
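
As a side note on the new constants, the stand-alone sketch below (not part of the patch) shows what they work out to, assuming IDR_BITS is 8 (the 64-bit value; in the kernel it comes from <linux/idr.h>) and a 32-bit int:

#include <stdio.h>

/* Illustrative only: IDR_BITS normally comes from <linux/idr.h>;
 * 8 (the 64-bit value) is assumed here, as is a 32-bit int. */
#define IDR_BITS	8
#define MAX_IDR_SHIFT	(sizeof(int) * 8 - 1)
#define MAX_IDR_BIT	(1U << MAX_IDR_SHIFT)
#define MAX_IDR_LEVEL	((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
#define MAX_IDR_FREE	(MAX_IDR_LEVEL * 2)

int main(void)
{
	/* Prints: shift=31 bit=0x80000000 level=4 free=8 */
	printf("shift=%zu bit=%#x level=%zu free=%zu\n",
	       MAX_IDR_SHIFT, MAX_IDR_BIT, MAX_IDR_LEVEL, MAX_IDR_FREE);
	return 0;
}

Under those assumptions the tree is at most 4 layers deep for positive int IDs, and each idr keeps up to 8 spare idr_layer structs on its free list.
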
@@ -542,8 +551,8 @@ void idr_remove(struct idr *idp, int id)
 	struct idr_layer *p;
 	struct idr_layer *to_free;
 
-	/* Mask off upper bits we don't use for the search. */
-	id &= MAX_IDR_MASK;
+	if (WARN_ON_ONCE(id < 0))
+		return;
 
 	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
 	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
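
For idr_remove() the caller-visible difference is sketched below (stand-alone, illustrative only; MAX_IDR_MASK is reproduced from its pre-patch definition in <linux/idr.h>): the old code cleared the sign bit, so a negative id was silently turned into a large positive one and searched for, while the new code warns once and returns without touching the tree.

#include <stdio.h>

/* MAX_IDR_MASK as defined before this patch in <linux/idr.h>,
 * reproduced here only to show the arithmetic being removed. */
#define MAX_IDR_SHIFT	(sizeof(int) * 8 - 1)
#define MAX_IDR_BIT	(1U << MAX_IDR_SHIFT)
#define MAX_IDR_MASK	(MAX_IDR_BIT - 1)

int main(void)
{
	int id = -1;

	/* What the deleted line did to a negative id: the sign bit is
	 * cleared, so -1 quietly became INT_MAX and the tree was walked
	 * for an id the caller never allocated. */
	id &= MAX_IDR_MASK;
	printf("-1 masked to %#x (%d)\n", (unsigned int)id, id);
	return 0;
}
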
@@ -650,14 +659,14 @@ void *idr_find(struct idr *idp, int id)
 	int n;
 	struct idr_layer *p;
 
+	if (WARN_ON_ONCE(id < 0))
+		return NULL;
+
 	p = rcu_dereference_raw(idp->top);
 	if (!p)
 		return NULL;
 	n = (p->layer+1) * IDR_BITS;
 
-	/* Mask off upper bits we don't use for the search. */
-	id &= MAX_IDR_MASK;
-
 	if (id > idr_max(p->layer + 1))
 		return NULL;
 	BUG_ON(n == 0);
@@ -799,14 +808,15 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 	int n;
 	struct idr_layer *p, *old_p;
 
+	if (WARN_ON_ONCE(id < 0))
+		return ERR_PTR(-EINVAL);
+
 	p = idp->top;
 	if (!p)
 		return ERR_PTR(-EINVAL);
 
 	n = (p->layer+1) * IDR_BITS;
 
-	id &= MAX_IDR_MASK;
-
 	if (id >= (1 << n))
 		return ERR_PTR(-EINVAL);
 
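
Caller-visible behaviour for the other two functions, as a hedged kernel-style sketch (the function and variable names below are made up for illustration and are not part of this patch): with this change a negative id makes idr_find() return NULL and idr_replace() return ERR_PTR(-EINVAL), each after a one-shot warning, instead of being masked and looked up as a different positive id.

#include <linux/idr.h>
#include <linux/err.h>

/* Hypothetical caller, illustrative only: lookup_or_swap(), my_idr and
 * new_ptr are made-up names, not part of this patch or the kernel. */
static void *lookup_or_swap(struct idr *my_idr, int id, void *new_ptr)
{
	void *old;

	/* After this patch, a negative id warns once and idr_find()
	 * returns NULL here instead of masking the id and searching
	 * for a different, positive one. */
	if (!idr_find(my_idr, id))
		return NULL;

	/* idr_replace() likewise warns and now fails with -EINVAL
	 * for a negative id. */
	old = idr_replace(my_idr, new_ptr, id);
	if (IS_ERR(old))
		return NULL;

	return old;
}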