@@ -33,9 +33,6 @@
  * 3) This list is guarded by a mutex,
  *    so that the gc_task and dst_dev_event() can be synchronized.
  */
-#if RT_CACHE_DEBUG >= 2
-static atomic_t dst_total = ATOMIC_INIT(0);
-#endif
 
 /*
  * We want to keep lock & list close together
@@ -69,10 +66,6 @@ static void dst_gc_task(struct work_struct *work)
 	unsigned long expires = ~0L;
 	struct dst_entry *dst, *next, head;
 	struct dst_entry *last = &head;
-#if RT_CACHE_DEBUG >= 2
-	ktime_t time_start = ktime_get();
-	struct timespec elapsed;
-#endif
 
 	mutex_lock(&dst_gc_mutex);
 	next = dst_busy_list;
@@ -146,15 +139,6 @@ loop:
 
 	spin_unlock_bh(&dst_garbage.lock);
 	mutex_unlock(&dst_gc_mutex);
-#if RT_CACHE_DEBUG >= 2
-	elapsed = ktime_to_timespec(ktime_sub(ktime_get(), time_start));
-	printk(KERN_DEBUG "dst_total: %d delayed: %d work_perf: %d"
-	       " expires: %lu elapsed: %lu us\n",
-	       atomic_read(&dst_total), delayed, work_performed,
-	       expires,
-	       elapsed.tv_sec * USEC_PER_SEC +
-	       elapsed.tv_nsec / NSEC_PER_USEC);
-#endif
 }
 
 int dst_discard(struct sk_buff *skb)
@@ -205,9 +189,6 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
 	dst->lastuse = jiffies;
 	dst->flags = flags;
 	dst->next = NULL;
-#if RT_CACHE_DEBUG >= 2
-	atomic_inc(&dst_total);
-#endif
 	dst_entries_add(ops, 1);
 	return dst;
 }
@@ -267,9 +248,6 @@ again:
 	dst->ops->destroy(dst);
 	if (dst->dev)
 		dev_put(dst->dev);
-#if RT_CACHE_DEBUG >= 2
-	atomic_dec(&dst_total);
-#endif
 	kmem_cache_free(dst->ops->kmem_cachep, dst);
 
 	dst = child;