@@ -2621,6 +2621,26 @@ static inline int spin_needbreak(spinlock_t *lock)
 #endif
 }
 
+/*
+ * Idle thread specific functions to determine the need_resched
+ * polling state. We have two versions, one based on TS_POLLING in
+ * thread_info.status and one based on TIF_POLLING_NRFLAG in
+ * thread_info.flags
+ */
+#ifdef TS_POLLING
+static inline int tsk_is_polling(struct task_struct *p)
+{
+	return task_thread_info(p)->status & TS_POLLING;
+}
+#elif defined(TIF_POLLING_NRFLAG)
+static inline int tsk_is_polling(struct task_struct *p)
+{
+	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
+}
+#else
+static inline int tsk_is_polling(struct task_struct *p) { return 0; }
+#endif
+
 /*
  * Thread group CPU time accounting.
  */
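
The hunk above only adds the helper itself: depending on the architecture, the idle loop advertises that it is polling need_resched either through a TS_POLLING bit in thread_info.status or through the TIF_POLLING_NRFLAG bit in thread_info.flags, and callers such as the scheduler's resched path can use tsk_is_polling() to skip the reschedule IPI when the target CPU is already polling. As a rough illustration of the same compile-time dispatch, here is a minimal userspace sketch; it is not kernel code, and every mock_/MOCK_ identifier below is an illustrative stand-in rather than a kernel symbol.

/*
 * Userspace model of the tsk_is_polling() selection: one "architecture"
 * style uses a status word (like TS_POLLING), the other a flag bit
 * (like TIF_POLLING_NRFLAG), and a third has no polling idle at all.
 */
#include <stdio.h>

/* Pretend this build signals polling via a status bit; comment this out
 * and define MOCK_TIF_POLLING_NRFLAG instead to take the flags path. */
#define MOCK_TS_POLLING		0x0004
/* #define MOCK_TIF_POLLING_NRFLAG	16 */

struct mock_thread_info {
	unsigned int status;	/* status-word style (TS_*)  */
	unsigned long flags;	/* flag-bit style (TIF_*)    */
};

struct mock_task {
	struct mock_thread_info ti;
};

#ifdef MOCK_TS_POLLING
static inline int tsk_is_polling(struct mock_task *p)
{
	return p->ti.status & MOCK_TS_POLLING;
}
#elif defined(MOCK_TIF_POLLING_NRFLAG)
static inline int tsk_is_polling(struct mock_task *p)
{
	return !!(p->ti.flags & (1UL << MOCK_TIF_POLLING_NRFLAG));
}
#else
/* No polling idle notion: a reschedule IPI is always needed. */
static inline int tsk_is_polling(struct mock_task *p) { return 0; }
#endif

int main(void)
{
	struct mock_task idle = { .ti = { .status = 0, .flags = 0 } };

#ifdef MOCK_TS_POLLING
	idle.ti.status |= MOCK_TS_POLLING;	/* idle loop is polling */
#elif defined(MOCK_TIF_POLLING_NRFLAG)
	idle.ti.flags |= 1UL << MOCK_TIF_POLLING_NRFLAG;
#endif

	/* A resched_task()-style caller would skip the cross-CPU IPI
	 * when the target is already polling need_resched. */
	printf("send IPI? %s\n", tsk_is_polling(&idle) ? "no" : "yes");
	return 0;
}

Defining MOCK_TIF_POLLING_NRFLAG instead of MOCK_TS_POLLING exercises the flags-based branch, and defining neither falls through to the stub that always reports "not polling", mirroring architectures without a polling idle loop.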