@@ -34,6 +34,10 @@ struct res_counter {
 	 * the limit that usage cannot exceed
 	 */
 	unsigned long long limit;
+	/*
+	 * the limit that usage can exceed
+	 */
+	unsigned long long soft_limit;
 	/*
 	 * the number of unsuccessful attempts to consume the resource
 	 */
@@ -87,6 +91,7 @@ enum {
 	RES_MAX_USAGE,
 	RES_LIMIT,
 	RES_FAILCNT,
+	RES_SOFT_LIMIT,
 };
 
 /*
@@ -132,6 +137,36 @@ static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
 	return false;
 }
 
+static inline bool res_counter_soft_limit_check_locked(struct res_counter *cnt)
+{
+	if (cnt->usage < cnt->soft_limit)
+		return true;
+
+	return false;
+}
+
+/**
+ * Get the difference between the usage and the soft limit
+ * @cnt: The counter
+ *
+ * Returns 0 if usage is less than or equal to soft limit
+ * The difference between usage and soft limit, otherwise.
+ */
+static inline unsigned long long
+res_counter_soft_limit_excess(struct res_counter *cnt)
+{
+	unsigned long long excess;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cnt->lock, flags);
+	if (cnt->usage <= cnt->soft_limit)
+		excess = 0;
+	else
+		excess = cnt->usage - cnt->soft_limit;
+	spin_unlock_irqrestore(&cnt->lock, flags);
+	return excess;
+}
+
 /*
  * Helper function to detect if the cgroup is within it's limit or
  * not. It's currently called from cgroup_rss_prepare()
@@ -147,6 +182,17 @@ static inline bool res_counter_check_under_limit(struct res_counter *cnt)
 	return ret;
 }
 
+static inline bool res_counter_check_under_soft_limit(struct res_counter *cnt)
+{
+	bool ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cnt->lock, flags);
+	ret = res_counter_soft_limit_check_locked(cnt);
+	spin_unlock_irqrestore(&cnt->lock, flags);
+	return ret;
+}
+
 static inline void res_counter_reset_max(struct res_counter *cnt)
 {
 	unsigned long flags;
@@ -180,4 +226,16 @@ static inline int res_counter_set_limit(struct res_counter *cnt,
 	return ret;
 }
 
+static inline int
+res_counter_set_soft_limit(struct res_counter *cnt,
+				unsigned long long soft_limit)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cnt->lock, flags);
+	cnt->soft_limit = soft_limit;
+	spin_unlock_irqrestore(&cnt->lock, flags);
+	return 0;
+}
+
 #endif
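
The hunks above only introduce the soft_limit field and its inline accessors; the patch itself contains no caller. The snippet below is a minimal, hypothetical caller sketch, not part of the patch: the function name, the 32MB value, and the pr_info() message are made up for illustration, and the struct res_counter is assumed to have been initialized and charged elsewhere.

/* Hypothetical caller sketch -- exercises only the helpers added above. */
#include <linux/kernel.h>
#include <linux/res_counter.h>

static void soft_limit_example(struct res_counter *cnt)
{
	unsigned long long excess;

	/* Illustrative soft limit of 32MB; unlike cnt->limit, usage may exceed it. */
	res_counter_set_soft_limit(cnt, 32ULL * 1024 * 1024);

	/* Locked check: nothing to report while usage stays below the soft limit. */
	if (res_counter_check_under_soft_limit(cnt))
		return;

	/* Report how far usage has gone past the soft limit (0 if not past it). */
	excess = res_counter_soft_limit_excess(cnt);
	pr_info("res_counter %p is %llu bytes over its soft limit\n", cnt, excess);
}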