zephyr/include/spinlock.h
Andy Ross 7367b84f8e kernel/spinlock: Augment runtime validation
There was an existing validation layer in the spinlock implementation,
but it was only enabled when both SMP and CONFIG_DEBUG were enabled,
which meant that nothing was using it.  Replace it with a more
elaborate framework that ensures that every lock taken is not already
taken by the current CPU and is released on the same CPU by the same
thread.

This catches the much more common goof of locking a spinlock
recursively, which would "work" on uniprocessor setups but have the
side effect of releasing the lock prematurely at the end of the inner
lock.  We've done that in two spots already.
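
A sketch of that goof, with illustrative names (not code from the tree):

static struct k_spinlock lock;

void inner(void)
{
	/* With SPIN_VALIDATE this trips the "Recursive spinlock"
	 * assertion, because the lock is already held by this CPU.
	 */
	k_spinlock_key_t k = k_spin_lock(&lock);

	k_spin_unlock(&lock, k); /* outer caller still thinks it holds the lock */
}

void outer(void)
{
	k_spinlock_key_t k = k_spin_lock(&lock);

	inner();                 /* oops: takes &lock again */
	k_spin_unlock(&lock, k);
}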

Note that this patch causes k_spinlock_t to have non-zero size on
builds with CONFIG_ASSERT, so expect a little data and code size
increase.  Worth it IMHO.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-01-30 13:29:42 -08:00

/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_

#include <atomic.h>

#if defined(CONFIG_ASSERT) && (CONFIG_MP_NUM_CPUS < 4)
#include <kernel_structs.h>
#define SPIN_VALIDATE
#endif

struct k_spinlock_key {
	int key;
};

typedef struct k_spinlock_key k_spinlock_key_t;

struct k_spinlock {
#ifdef CONFIG_SMP
	atomic_t locked;
#endif

#ifdef SPIN_VALIDATE
	/* Stores the thread that holds the lock with the locking CPU
	 * ID in the bottom two bits.
	 */
	size_t thread_cpu;
#endif
};

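/* Illustrative note (not part of the upstream header): struct k_thread
 * objects are word-aligned, so the low two bits of (u32_t)_current are
 * zero and the owner can be packed as
 *
 *	thread_cpu = (u32_t)_current | _current_cpu->id
 *
 * with the owning CPU recovered as (thread_cpu & 3), which is what the
 * assertions below test.
 */
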
static inline k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
	k_spinlock_key_t k;

	/* Note that we need to use the underlying arch-specific lock
	 * implementation. The "irq_lock()" API in SMP context is
	 * actually a wrapper for a global spinlock!
	 */
	k.key = _arch_irq_lock();

#ifdef SPIN_VALIDATE
	if (l->thread_cpu) {
		__ASSERT((l->thread_cpu & 3) != _current_cpu->id,
			 "Recursive spinlock");
	}
	l->thread_cpu = _current_cpu->id | (u32_t)_current;
#endif

#ifdef CONFIG_SMP
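	/* Spin until this CPU wins the 0 -> 1 transition on the lock word */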
	while (!atomic_cas(&l->locked, 0, 1)) {
	}
#endif

	return k;
}

static inline void k_spin_unlock(struct k_spinlock *l, k_spinlock_key_t key)
{
#ifdef SPIN_VALIDATE
	__ASSERT(l->thread_cpu == (_current_cpu->id | (u32_t)_current),
		 "Not my spinlock!");
	l->thread_cpu = 0;
#endif

#ifdef CONFIG_SMP
	/* Strictly we don't need atomic_clear() here (which is an
	 * exchange operation that returns the old value). We are always
	 * setting a zero and (because we hold the lock) know the existing
	 * state won't change due to a race. But some architectures need
	 * a memory barrier when used like this, and we don't have a
	 * Zephyr framework for that.
	 */
	atomic_clear(&l->locked);
#endif

	_arch_irq_unlock(key.key);
}

#endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */
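
For context, a minimal usage sketch of the API above (illustrative code, not part of the header): the key returned by k_spin_lock() carries the saved interrupt state and must be handed back to k_spin_unlock() on the same lock.

#include <spinlock.h>

static struct k_spinlock counter_lock;
static int shared_counter;

void bump_counter(void)
{
	k_spinlock_key_t key = k_spin_lock(&counter_lock);

	shared_counter++;	/* critical section: interrupts masked, lock held */

	k_spin_unlock(&counter_lock, key);
}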