From 59cb96e8022cf29f1359bec93d222995c1f5c7c5 Mon Sep 17 00:00:00 2001
From: Peter Mitsis
Date: Thu, 5 Jan 2023 16:16:13 -0500
Subject: [PATCH] kernel: pipes: Add spin lock/unlock barrier pair

Adds a spin lock/unlock barrier pair after a pipe thread wakes.
After the list of waiting threads is generated, it is possible for
threads on that list to time out and be removed from the wait queue.
However, since that list was generated before the timeout occurred,
the timed-out thread must wait until the copying is done (that is,
until the pipe's spin lock has been released).

Signed-off-by: Peter Mitsis
---
 kernel/pipes.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/kernel/pipes.c b/kernel/pipes.c
index 0c108c3c834..6bc89c0547a 100644
--- a/kernel/pipes.c
+++ b/kernel/pipes.c
@@ -478,6 +478,16 @@ int z_impl_k_pipe_put(struct k_pipe *pipe, void *data, size_t bytes_to_write,
 
 	z_sched_wait(&pipe->lock, key, &pipe->wait_q.writers, timeout, NULL);
 
+	/*
+	 * On SMP systems, threads in the processing list may timeout before
+	 * the data has finished copying. The following spin lock/unlock pair
+	 * prevents those threads from executing further until the data copying
+	 * is complete.
+	 */
+
+	key = k_spin_lock(&pipe->lock);
+	k_spin_unlock(&pipe->lock, key);
+
 	*bytes_written = bytes_to_write - src_desc->bytes_to_xfer;
 
 	int ret = pipe_return_code(min_xfer, src_desc->bytes_to_xfer,
@@ -648,6 +658,16 @@ static int pipe_get_internal(k_spinlock_key_t key, struct k_pipe *pipe,
 
 	z_sched_wait(&pipe->lock, key, &pipe->wait_q.readers, timeout, NULL);
 
+	/*
+	 * On SMP systems, threads in the processing list may timeout before
+	 * the data has finished copying. The following spin lock/unlock pair
+	 * prevents those threads from executing further until the data copying
+	 * is complete.
+	 */
+
+	key = k_spin_lock(&pipe->lock);
+	k_spin_unlock(&pipe->lock, key);
+
 	*bytes_read = bytes_to_read - dest_desc->bytes_to_xfer;
 
 	int ret = pipe_return_code(min_xfer, dest_desc->bytes_to_xfer,
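
The empty lock/unlock pair works as a barrier because the lock cannot be
acquired until the side doing the copy, which already holds it, has
released it. Below is a minimal standalone sketch of that idea, using
POSIX threads rather than Zephyr's k_spin_lock()/k_spin_unlock(); the
thread roles, buffer, and timing are illustrative assumptions, not code
from the patch.

/*
 * Standalone sketch (not part of the patch) of the lock/unlock barrier
 * idea, using POSIX threads instead of Zephyr spinlocks.
 * Build with: cc barrier_sketch.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char buffer[32];

/*
 * Models a thread that timed out while already placed on the processing
 * list: it may be scheduled at any point, but must not look at the data
 * until the copy performed under the lock has finished.
 */
static void *timed_out_waiter(void *arg)
{
	(void)arg;

	/*
	 * Empty lock/unlock pair: orders this thread after the copier's
	 * critical section, mirroring the patch's spin lock/unlock pair.
	 */
	pthread_mutex_lock(&lock);
	pthread_mutex_unlock(&lock);

	printf("waiter sees: \"%s\"\n", buffer);
	return NULL;
}

int main(void)
{
	pthread_t waiter;

	/*
	 * Play the copying side: hold the lock, let the waiter start,
	 * finish the copy, and only then release the lock.
	 */
	pthread_mutex_lock(&lock);
	pthread_create(&waiter, NULL, timed_out_waiter, NULL);
	usleep(1000);			/* let the waiter reach its lock/unlock pair */
	strcpy(buffer, "copy complete");
	pthread_mutex_unlock(&lock);	/* copy is visible; waiter may proceed */

	pthread_join(waiter, NULL);
	return 0;
}

The pipe code relies on the same ordering: by the time a timed-out
thread gets through the k_spin_lock()/k_spin_unlock() pair on
pipe->lock, the thread performing the copy has dropped the lock, so the
transfer descriptors are no longer being written.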