@@ -86,33 +86,39 @@ int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
 	return __res_counter_charge(counter, val, limit_fail_at, true);
 }
 
-void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
+u64 res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
 {
 	if (WARN_ON(counter->usage < val))
 		val = counter->usage;
 
 	counter->usage -= val;
+	return counter->usage;
 }
 
-void res_counter_uncharge_until(struct res_counter *counter,
-				struct res_counter *top,
-				unsigned long val)
+u64 res_counter_uncharge_until(struct res_counter *counter,
+			       struct res_counter *top,
+			       unsigned long val)
 {
 	unsigned long flags;
 	struct res_counter *c;
+	u64 ret = 0;
 
 	local_irq_save(flags);
 	for (c = counter; c != top; c = c->parent) {
+		u64 r;
 		spin_lock(&c->lock);
-		res_counter_uncharge_locked(c, val);
+		r = res_counter_uncharge_locked(c, val);
+		if (c == counter)
+			ret = r;
 		spin_unlock(&c->lock);
 	}
 	local_irq_restore(flags);
+	return ret;
 }
 
-void res_counter_uncharge(struct res_counter *counter, unsigned long val)
+u64 res_counter_uncharge(struct res_counter *counter, unsigned long val)
 {
-	res_counter_uncharge_until(counter, NULL, val);
+	return res_counter_uncharge_until(counter, NULL, val);
 }
 
 static inline unsigned long long *
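
The point of the new u64 return value, as a minimal caller sketch (example_uncharge below is hypothetical and not part of this patch): an uncharger can learn the remaining usage from the same locked section that performed the subtraction, instead of taking the counter lock a second time to re-read usage. Note the value reported is the one for the bottom-most counter, the one passed in; usage of its ancestors is updated but not returned.

	/*
	 * Hypothetical caller sketch, assuming only the interfaces changed
	 * by this patch: detect in one step whether the uncharge emptied
	 * the counter, with no extra lock round-trip to re-read usage.
	 */
	static void example_uncharge(struct res_counter *counter,
				     unsigned long val)
	{
		u64 remaining;

		remaining = res_counter_uncharge(counter, val);
		if (remaining == 0) {
			/* counter is now empty; e.g. a cached reference
			 * pinned by outstanding charges can be dropped. */
		}
	}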