@@ -427,6 +427,31 @@ out_unlock:
return error;
}
 
+/*
+ * As described in commit 0ccf831cb ("lockdep: annotate epoll"), the
+ * use of wait queues by epoll is done in a very controlled manner.
+ * Wake ups can nest inside each other, but are never done with the
+ * same locking. For example:
+ *
+ *   dfd = socket(...);
+ *   efd1 = epoll_create();
+ *   efd2 = epoll_create();
+ *   epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...);
+ *   epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
+ *
+ * When a packet arrives at the device underneath "dfd", the net code will
+ * issue a wake_up() on its poll wake list. Epoll (efd1) has installed a
+ * callback wakeup entry on that queue, and the wake_up() performed by the
+ * "dfd" net code will end up in ep_poll_callback(). At this point epoll
+ * (efd1) notices that it may have some event ready, so it needs to wake up
+ * the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()
+ * that ends up in another wake_up(), after having checked the recursion
+ * constraints: no more than EP_MAX_POLLWAKE_NESTS nesting levels are
+ * allowed, to avoid stack blasting.
+ *
+ * When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle
+ * this special case of epoll.
+ */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
unsigned long events, int subclass)
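
For illustration, the nesting scenario described in the comment above corresponds to userspace code along these lines. This is a minimal sketch, not part of the patch: a UDP socket stands in for "dfd", epoll_create1() is used in place of the comment's epoll_create(), and error handling is omitted.

#include <sys/epoll.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct epoll_event ev = { .events = EPOLLIN };
	int dfd  = socket(AF_INET, SOCK_DGRAM, 0);  /* the "dfd" of the comment */
	int efd1 = epoll_create1(0);
	int efd2 = epoll_create1(0);

	ev.data.fd = dfd;
	epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, &ev);   /* efd1 watches dfd */
	ev.data.fd = efd1;
	epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, &ev);  /* efd2 watches efd1 */

	/*
	 * A packet arriving on dfd now triggers the chain described above:
	 * the net code wakes dfd's poll list, ep_poll_callback() fires for
	 * efd1, and ep_poll_safewake() then wakes efd2's waiters.
	 */
	close(efd2);
	close(efd1);
	close(dfd);
	return 0;
}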