diff --git a/include/atomic.h b/include/atomic.h
index 5a05917d888..510ffdfffde 100644
--- a/include/atomic.h
+++ b/include/atomic.h
@@ -10,6 +10,7 @@
 #define ZEPHYR_INCLUDE_ATOMIC_H_
 
 #include <stdbool.h>
+#include <zephyr/types.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -45,6 +46,10 @@ static inline bool atomic_cas(atomic_t *target, atomic_val_t old_value,
 					   0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
 }
 
+#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
+__syscall int atomic_cas(atomic_t *target, atomic_val_t old_value,
+			 atomic_val_t new_value);
+
 #else
 extern int atomic_cas(atomic_t *target, atomic_val_t old_value,
 		      atomic_val_t new_value);
@@ -66,6 +71,8 @@ static inline atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
 {
 	return __atomic_fetch_add(target, value, __ATOMIC_SEQ_CST);
 }
+#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
+__syscall atomic_val_t atomic_add(atomic_t *target, atomic_val_t value);
 #else
 extern atomic_val_t atomic_add(atomic_t *target, atomic_val_t value);
 #endif
@@ -86,6 +93,8 @@ static inline atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
 {
 	return __atomic_fetch_sub(target, value, __ATOMIC_SEQ_CST);
 }
+#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
+__syscall atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value);
 #else
 extern atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value);
 #endif
@@ -100,7 +109,7 @@ extern atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value);
  *
  * @return Previous value of @a target.
  */
-#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
+#if defined(CONFIG_ATOMIC_OPERATIONS_BUILTIN) || defined(CONFIG_ATOMIC_OPERATIONS_C)
 static inline atomic_val_t atomic_inc(atomic_t *target)
 {
 	return atomic_add(target, 1);
@@ -119,7 +128,7 @@ extern atomic_val_t atomic_inc(atomic_t *target);
  *
  * @return Previous value of @a target.
  */
-#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
+#if defined(CONFIG_ATOMIC_OPERATIONS_BUILTIN) || defined(CONFIG_ATOMIC_OPERATIONS_C)
 static inline atomic_val_t atomic_dec(atomic_t *target)
 {
 	return atomic_sub(target, 1);
@@ -168,6 +177,8 @@ static inline atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
 	 */
 	return __atomic_exchange_n(target, value, __ATOMIC_SEQ_CST);
 }
+#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
+__syscall atomic_val_t atomic_set(atomic_t *target, atomic_val_t value);
 #else
 extern atomic_val_t atomic_set(atomic_t *target, atomic_val_t value);
 #endif
@@ -183,7 +194,7 @@ extern atomic_val_t atomic_set(atomic_t *target, atomic_val_t value);
  *
  * @return Previous value of @a target.
  */
-#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
+#if defined(CONFIG_ATOMIC_OPERATIONS_BUILTIN) || defined(CONFIG_ATOMIC_OPERATIONS_C)
 static inline atomic_val_t atomic_clear(atomic_t *target)
 {
 	return atomic_set(target, 0);
@@ -209,6 +220,9 @@ static inline atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
 {
 	return __atomic_fetch_or(target, value, __ATOMIC_SEQ_CST);
 }
+#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
+__syscall atomic_val_t atomic_or(atomic_t *target, atomic_val_t value);
+
 #else
 extern atomic_val_t atomic_or(atomic_t *target, atomic_val_t value);
 #endif
@@ -230,6 +244,8 @@ static inline atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
 {
 	return __atomic_fetch_xor(target, value, __ATOMIC_SEQ_CST);
 }
+#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
+__syscall atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value);
 #else
 extern atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value);
 #endif
@@ -251,6 +267,8 @@ static inline atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
 {
 	return __atomic_fetch_and(target, value, __ATOMIC_SEQ_CST);
 }
+#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
+__syscall atomic_val_t atomic_and(atomic_t *target, atomic_val_t value);
 #else
 extern atomic_val_t atomic_and(atomic_t *target, atomic_val_t value);
 #endif
@@ -272,6 +290,8 @@ static inline atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
 {
 	return __atomic_fetch_nand(target, value, __ATOMIC_SEQ_CST);
 }
+#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
+__syscall atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value);
 #else
 extern atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value);
 #endif
@@ -434,6 +454,9 @@ static inline void atomic_set_bit_to(atomic_t *target, int bit, bool val)
 	}
 }
 
+#ifdef CONFIG_ATOMIC_OPERATIONS_C
+#include <syscalls/atomic.h>
+#endif
 /**
 * @}
 */
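Note: with CONFIG_ATOMIC_OPERATIONS_C, the operations are declared __syscall
instead of extern, so Zephyr's syscall generation (scripts/gen_syscalls.py)
emits user-mode stubs for them; the generated header is pulled in by the new
#include <syscalls/atomic.h> at the bottom of the file. The sketch below shows
roughly the shape of such a stub; the real code is machine-generated and
differs in detail, and names like arch_is_user_context(),
arch_syscall_invoke2(), and K_SYSCALL_ATOMIC_ADD are stand-ins here, not
verbatim output:

/* Illustrative sketch only: the generated stub for atomic_add() traps into
 * the kernel from a user thread, and calls the z_impl_ implementation
 * directly otherwise. Helper names are placeholders for whatever the
 * generator actually emits.
 */
static inline atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
{
#ifdef CONFIG_USERSPACE
	if (arch_is_user_context()) {
		/* Trap; kernel side lands in the atomic_add syscall handler */
		return (atomic_val_t)arch_syscall_invoke2((uintptr_t)target,
							  (uintptr_t)value,
							  K_SYSCALL_ATOMIC_ADD);
	}
#endif
	return z_impl_atomic_add(target, value);
}

Either way, callers use the same atomic_add() signature; only the path taken
at runtime changes.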
diff --git a/kernel/atomic_c.c b/kernel/atomic_c.c
index 02977133f95..954388ad209 100644
--- a/kernel/atomic_c.c
+++ b/kernel/atomic_c.c
@@ -30,6 +30,30 @@
  */
 static struct k_spinlock lock;
 
+/* For those rare CPUs which support user mode, but not native atomic
+ * operations, the best we can do for them is implement the atomic
+ * functions as system calls, since in user mode locking a spinlock is
+ * forbidden.
+ */
+#ifdef CONFIG_USERSPACE
+#include <syscall_handler.h>
+
+#define ATOMIC_SYSCALL_HANDLER_TARGET(name) \
+	Z_SYSCALL_HANDLER(name, target) { \
+		Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t))); \
+		return z_impl_##name((atomic_t *)target); \
+	}
+
+#define ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(name) \
+	Z_SYSCALL_HANDLER(name, target, value) { \
+		Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t))); \
+		return z_impl_##name((atomic_t *)target, value); \
+	}
+#else
+#define ATOMIC_SYSCALL_HANDLER_TARGET(name)
+#define ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(name)
+#endif
+
 /**
  *
  * @brief Atomic compare-and-set primitive
@@ -50,8 +74,8 @@ static struct k_spinlock lock;
  * @param new_value value to compare against
  * @return Returns 1 if <new_value> is written, 0 otherwise.
  */
-int atomic_cas(atomic_t *target, atomic_val_t old_value,
-	       atomic_val_t new_value)
+int z_impl_atomic_cas(atomic_t *target, atomic_val_t old_value,
+		      atomic_val_t new_value)
 {
 	k_spinlock_key_t key;
 	int ret = 0;
@@ -68,6 +92,15 @@ int atomic_cas(atomic_t *target, atomic_val_t old_value,
 	return ret;
 }
 
+#ifdef CONFIG_USERSPACE
+Z_SYSCALL_HANDLER(atomic_cas, target, old_value, new_value)
+{
+	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t)));
+
+	return z_impl_atomic_cas((atomic_t *)target, old_value, new_value);
+}
+#endif /* CONFIG_USERSPACE */
+
 /**
  *
  * @brief Atomic addition primitive
@@ -81,7 +114,7 @@ int atomic_cas(atomic_t *target, atomic_val_t old_value,
  *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
+atomic_val_t z_impl_atomic_add(atomic_t *target, atomic_val_t value)
 {
 	k_spinlock_key_t key;
 	atomic_val_t ret;
@@ -96,6 +129,8 @@ atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
 	return ret;
 }
 
+ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_add);
+
 /**
  *
  * @brief Atomic subtraction primitive
@@ -109,7 +144,7 @@ atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
  *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
+atomic_val_t z_impl_atomic_sub(atomic_t *target, atomic_val_t value)
 {
 	k_spinlock_key_t key;
 	atomic_val_t ret;
@@ -124,57 +159,7 @@ atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
 	return ret;
 }
 
-/**
- *
- * @brief Atomic increment primitive
- *
- * @param target memory location to increment
- *
- * This routine provides the atomic increment operator. The value at <target>
- * is atomically incremented by 1, and the old value from <target> is returned.
- *
- * @return The value from <target> before the increment
- */
-atomic_val_t atomic_inc(atomic_t *target)
-{
-	k_spinlock_key_t key;
-	atomic_val_t ret;
-
-	key = k_spin_lock(&lock);
-
-	ret = *target;
-	(*target)++;
-
-	k_spin_unlock(&lock, key);
-
-	return ret;
-}
-
-/**
- *
- * @brief Atomic decrement primitive
- *
- * @param target memory location to decrement
- *
- * This routine provides the atomic decrement operator. The value at <target>
- * is atomically decremented by 1, and the old value from <target> is returned.
- *
- * @return The value from <target> prior to the decrement
- */
-atomic_val_t atomic_dec(atomic_t *target)
-{
-	k_spinlock_key_t key;
-	atomic_val_t ret;
-
-	key = k_spin_lock(&lock);
-
-	ret = *target;
-	(*target)--;
-
-	k_spin_unlock(&lock, key);
-
-	return ret;
-}
+ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_sub);
 
 /**
  *
@@ -205,7 +190,7 @@ atomic_val_t atomic_get(const atomic_t *target)
  *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
+atomic_val_t z_impl_atomic_set(atomic_t *target, atomic_val_t value)
 {
 	k_spinlock_key_t key;
 	atomic_val_t ret;
@@ -220,32 +205,7 @@ atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
 	return ret;
 }
 
-/**
- *
- * @brief Atomic clear primitive
- *
- * This routine provides the atomic clear operator. The value of 0 is atomically
- * written at <target> and the previous value at <target> is returned. (Hence,
- * atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).)
- *
- * @param target the memory location to write
- *
- * @return The previous value from <target>
- */
-atomic_val_t atomic_clear(atomic_t *target)
-{
-	k_spinlock_key_t key;
-	atomic_val_t ret;
-
-	key = k_spin_lock(&lock);
-
-	ret = *target;
-	*target = 0;
-
-	k_spin_unlock(&lock, key);
-
-	return ret;
-}
+ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_set);
 
 /**
  *
@@ -260,7 +220,7 @@ atomic_val_t atomic_clear(atomic_t *target)
  *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
+atomic_val_t z_impl_atomic_or(atomic_t *target, atomic_val_t value)
 {
 	k_spinlock_key_t key;
 	atomic_val_t ret;
@@ -275,6 +235,8 @@ atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
 	return ret;
 }
 
+ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_or);
+
 /**
  *
  * @brief Atomic bitwise exclusive OR (XOR) primitive
@@ -288,7 +250,7 @@ atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
  *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
+atomic_val_t z_impl_atomic_xor(atomic_t *target, atomic_val_t value)
 {
 	k_spinlock_key_t key;
 	atomic_val_t ret;
@@ -303,6 +265,8 @@ atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
 	return ret;
 }
 
+ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_xor);
+
 /**
  *
  * @brief Atomic bitwise AND primitive
@@ -316,7 +280,7 @@ atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
  *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
+atomic_val_t z_impl_atomic_and(atomic_t *target, atomic_val_t value)
 {
 	k_spinlock_key_t key;
 	atomic_val_t ret;
@@ -331,6 +295,8 @@ atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
 	return ret;
 }
 
+ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_and);
+
 /**
  *
  * @brief Atomic bitwise NAND primitive
@@ -344,7 +310,7 @@ atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
  *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
+atomic_val_t z_impl_atomic_nand(atomic_t *target, atomic_val_t value)
 {
 	k_spinlock_key_t key;
 	atomic_val_t ret;
@@ -358,3 +324,5 @@ atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
 
 	return ret;
 }
+
+ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_nand);
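For reference, each ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(name) invocation above
expands, per the macro added at the top of the file, to a syscall handler that
validates the user-supplied pointer before reaching the spinlock-protected
implementation. Expanding it by hand for atomic_add gives:

/* Hand expansion of ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_add):
 * verify the caller is allowed to write the atomic_t, then forward to
 * the kernel-mode implementation.
 */
Z_SYSCALL_HANDLER(atomic_add, target, value) {
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t)));
	return z_impl_atomic_add((atomic_t *)target, value);
}

atomic_cas gets a hand-written handler instead because it takes three
arguments, so neither two-argument helper macro fits; when CONFIG_USERSPACE is
off, the macros expand to nothing and only the z_impl_ functions remain.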
diff --git a/tests/kernel/common/src/main.c b/tests/kernel/common/src/main.c
index 874f393a279..bd18314799a 100644
--- a/tests/kernel/common/src/main.c
+++ b/tests/kernel/common/src/main.c
@@ -75,7 +75,7 @@ void test_main(void)
 		ztest_unit_test(test_irq_offload),
 		ztest_unit_test(test_byteorder_memcpy_swap),
 		ztest_unit_test(test_byteorder_mem_swap),
-		ztest_unit_test(test_atomic),
+		ztest_user_unit_test(test_atomic),
 		ztest_unit_test(test_bitfield),
 		ztest_unit_test(test_printk),
 		ztest_unit_test(test_slist),
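Switching the registration to ztest_user_unit_test() makes the common test
suite run test_atomic in a user-mode thread, which exercises the new syscall
path end to end. Application code is unchanged; the same calls now also work
from user threads. A minimal illustrative sketch, assuming CONFIG_USERSPACE
and CONFIG_ATOMIC_OPERATIONS_C are both enabled:

#include <atomic.h>

static atomic_t counter = ATOMIC_INIT(0);

/* Runs in user mode: each call below traps into a syscall handler
 * instead of taking the kernel spinlock directly.
 */
void user_mode_work(void)
{
	atomic_inc(&counter);            /* inline wrapper around atomic_add() */

	if (atomic_cas(&counter, 1, 10)) {
		/* counter was 1 and has been atomically set to 10 */
	}
}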