@@ -1725,14 +1725,6 @@ the amount of locking which needs to be done.
          if (++cache_num > MAX_CACHE_SIZE) {
                  struct object *i, *outcast = NULL;
                  list_for_each_entry(i, &cache, list) {
-@@ -85,6 +94,7 @@
-         obj->popularity = 0;
-         atomic_set(&obj->refcnt, 1); /* The cache holds a reference */
-         spin_lock_init(&obj->lock);
-+        INIT_RCU_HEAD(&obj->rcu);
-
-         spin_lock_irqsave(&cache_lock, flags);
-         __cache_add(obj);
 @@ -104,12 +114,11 @@
  struct object *cache_find(int id)
  {