@@ -17,9 +17,18 @@
 #include <linux/sysctl.h>
 
 /*
- * Have a reasonable limit on the number of tasks checked:
+ * The number of tasks checked:
  */
-unsigned long __read_mostly sysctl_hung_task_check_count = 1024;
+unsigned long __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
+
+/*
+ * Limit number of tasks checked in a batch.
+ *
+ * This value controls the preemptibility of khungtaskd since preemption
+ * is disabled during the critical section. It also controls the size of
+ * the RCU grace period. So it needs to be upper-bound.
+ */
+#define HUNG_TASK_BATCHING 1024
 
 /*
  * Zero means infinite timeout - no checking done:
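
The first hunk raises the default check count from 1024 to PID_MAX_LIMIT, so
khungtaskd now covers effectively every task on the system; the per-pass bound
on latency comes from the new HUNG_TASK_BATCHING constant rather than from the
sysctl. The limit remains tunable at runtime. As a minimal userspace sketch
(assuming the standard /proc/sys path for this sysctl), the current value can
be read back like so:

/* Read back kernel.hung_task_check_count via procfs. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/hung_task_check_count", "r");
	unsigned long count;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%lu", &count) == 1)
		printf("hung_task_check_count = %lu\n", count);
	fclose(f);
	return 0;
}
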
@@ -109,6 +118,24 @@ static void check_hung_task(struct task_struct *t, unsigned long now,
 	panic("hung_task: blocked tasks");
 }
 
+/*
+ * To avoid extending the RCU grace period for an unbounded amount of time,
+ * periodically exit the critical section and enter a new one.
+ *
+ * For preemptible RCU it is sufficient to call rcu_read_unlock in order
+ * to exit the grace period. For classic RCU, a reschedule is required.
+ */
+static void rcu_lock_break(struct task_struct *g, struct task_struct *t)
+{
+	get_task_struct(g);
+	get_task_struct(t);
+	rcu_read_unlock();
+	cond_resched();
+	rcu_read_lock();
+	put_task_struct(t);
+	put_task_struct(g);
+}
+
 /*
  * Check whether a TASK_UNINTERRUPTIBLE does not get woken up for
  * a really long time (120 seconds). If that happens, print out
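
rcu_lock_break() is the heart of the change: it takes a reference on both the
thread-group leader g and the thread t so that neither task_struct can be
freed, drops the RCU read lock so a grace period can complete (the
cond_resched() covers classic, non-preemptible RCU), then reacquires the lock
and drops the references. The sketch below is only a userspace analogy, with
hypothetical names (node, get_node, put_node, lock_break) and a mutex standing
in for rcu_read_lock(); it shows the shape of the pattern: pin the current
position, drop the lock, relock, revalidate before continuing the walk.

#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int refcount;			/* models the task_struct usage count */
	int dead;			/* models TASK_DEAD */
	struct node *next;
};

static pthread_mutex_t walk_lock = PTHREAD_MUTEX_INITIALIZER;

static void get_node(struct node *n) { n->refcount++; }

static void put_node(struct node *n)
{
	if (--n->refcount == 0)
		free(n);
}

/* The break itself: safe because the held reference keeps *n allocated. */
static int lock_break(struct node *n)
{
	int alive;

	get_node(n);
	pthread_mutex_unlock(&walk_lock);
	sched_yield();			/* stands in for cond_resched() */
	pthread_mutex_lock(&walk_lock);
	alive = !n->dead;
	put_node(n);
	return alive;
}

int main(void)
{
	/* Stack-allocated demo nodes; refcounts never reach zero here. */
	struct node b = { .refcount = 1, .dead = 0, .next = NULL };
	struct node a = { .refcount = 1, .dead = 0, .next = &b };

	pthread_mutex_lock(&walk_lock);
	for (struct node *n = &a; n; n = n->next) {
		if (!lock_break(n)) {	/* position vanished: stop the walk */
			puts("node died during break; aborting walk");
			break;
		}
		printf("visited node, refcount=%d\n", n->refcount);
	}
	pthread_mutex_unlock(&walk_lock);
	return 0;
}
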
@@ -117,6 +144,7 @@ static void check_hung_task(struct task_struct *t, unsigned long now,
 static void check_hung_uninterruptible_tasks(unsigned long timeout)
 {
 	int max_count = sysctl_hung_task_check_count;
+	int batch_count = HUNG_TASK_BATCHING;
 	unsigned long now = get_timestamp();
 	struct task_struct *g, *t;
 
@@ -131,6 +159,13 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 	do_each_thread(g, t) {
 		if (!--max_count)
 			goto unlock;
+		if (!--batch_count) {
+			batch_count = HUNG_TASK_BATCHING;
+			rcu_lock_break(g, t);
+			/* Exit if t or g was unhashed during refresh. */
+			if (t->state == TASK_DEAD || g->state == TASK_DEAD)
+				goto unlock;
+		}
 		/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
 		if (t->state == TASK_UNINTERRUPTIBLE)
 			check_hung_task(t, now, timeout);
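
Putting the pieces together: max_count still bounds the total number of tasks
examined per wakeup, batch_count bounds the number examined inside any one RCU
critical section, and the TASK_DEAD recheck after rcu_lock_break() makes the
break safe if either task was unhashed while the lock was dropped. The control
flow in isolation looks like the following userspace sketch (BATCH and the
loop body are placeholders, not kernel code):

#include <sched.h>
#include <stdio.h>

#define BATCH 1024

int main(void)
{
	long max_count = 1 << 20;	/* overall bound, like the sysctl */
	int batch_count = BATCH;	/* per-critical-section bound */

	for (long i = 0; i < 4096; i++) {
		if (!--max_count)
			break;		/* overall limit reached */
		if (!--batch_count) {
			batch_count = BATCH;
			sched_yield();	/* the "lock break" point */
		}
		/* ... examine item i here ... */
	}
	printf("done, max_count remaining = %ld\n", max_count);
	return 0;
}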