author    Philippe Schenker <philippe.schenker@toradex.com>    2022-05-19 16:29:13 +0200
committer Philippe Schenker <philippe.schenker@toradex.com>    2022-05-19 16:29:13 +0200
commit    e6fb5c32f78f99682821f91b3959e222c93e4cb9 (patch)
tree      3f6c281a7d83a6e3c0a153f93718f7cd9a39b6f7 /include/linux/wait.h
parent    585dc27c53b6c1a32a7e11b724e91d7297388a13 (diff)
parent    b7007e1d615b8d4b71c28ebf790c5164fc4491d5 (diff)
Merge remote-tracking branch 'gh-fslc/5.4-2.3.x-imx' into toradex_5.4-2.3.x-imx
Diffstat (limited to 'include/linux/wait.h')
-rw-r--r--  include/linux/wait.h | 26
1 file changed, 26 insertions, 0 deletions
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 032ae61c22a2..5903b1d17c92 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -204,6 +204,7 @@ void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
+void __wake_up_pollfree(struct wait_queue_head *wq_head);

#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
@@ -230,6 +231,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
#define wake_up_interruptible_sync_poll(x, m) \
__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m))

+/**
+ * wake_up_pollfree - signal that a polled waitqueue is going away
+ * @wq_head: the wait queue head
+ *
+ * In the very rare cases where a ->poll() implementation uses a waitqueue whose
+ * lifetime is tied to a task rather than to the 'struct file' being polled,
+ * this function must be called before the waitqueue is freed so that
+ * non-blocking polls (e.g. epoll) are notified that the queue is going away.
+ *
+ * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
+ * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
+ */
+static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+ /*
+ * For performance reasons, we don't always take the queue lock here.
+ * Therefore, we might race with someone removing the last entry from
+ * the queue, and proceed while they still hold the queue lock.
+ * However, rcu_read_lock() is required to be held in such cases, so we
+ * can safely proceed with an RCU-delayed free.
+ */
+ if (waitqueue_active(wq_head))
+ __wake_up_pollfree(wq_head);
+}
+
#define ___wait_cond_timeout(condition) \
({ \
bool __cond = (condition); \
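For readers of this backport, the contract spelled out in the kerneldoc above is: wake any non-blocking pollers via wake_up_pollfree() first, then RCU-delay the free of the object that embeds the wait_queue_head. A minimal caller-side sketch (hypothetical structure and function names, not part of this commit) could look like:

#include <linux/wait.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical per-task object whose waitqueue is handed out from ->poll(). */
struct my_poll_ctx {
	struct wait_queue_head wqh;
	struct rcu_head rcu;
};

static void my_poll_ctx_destroy(struct my_poll_ctx *ctx)
{
	/* Tell non-blocking pollers (e.g. epoll) that this waitqueue is going away. */
	wake_up_pollfree(&ctx->wqh);

	/* RCU-delay the free, as the wake_up_pollfree() kerneldoc requires. */
	kfree_rcu(ctx, rcu);
}

The RCU delay is what keeps the wait_queue_head valid for a waiter that, as the in-body comment notes, may still be accessing it under rcu_read_lock() while the waitqueue_active() check and wakeup race with it.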