From 43b8002b14a7c1a38c19df19955911144c625b5e Mon Sep 17 00:00:00 2001
From: Stephanos Ioannidis
Date: Sat, 12 Jun 2021 21:11:22 +0900
Subject: [PATCH] lib: newlib: Add retargetable locking implementation

This commit adds the newlib retargetable locking interface function
implementations in order to make the newlib functions thread-safe.

The newlib retargetable locking interface is internally called by the
standard C library functions provided by newlib to synchronise access to
its internal shared resources. By default, the retargetable locking
interface functions defined within the newlib library are no-ops. When
multi-threading is enabled (i.e. `CONFIG_MULTITHREADING=y`), the
Zephyr-side retargetable locking interface implementations override the
default newlib implementations and provide the actual locking mechanism.

The retargetable locking interface may be called with either a static
lock (`__lock___...`) or a dynamic lock. The static locks are allocated
and initialised at build time; when userspace is enabled,
`newlib_locks_prepare` additionally grants all threads access to them
during the POST_KERNEL initialisation phase. The dynamic locks are
allocated and de-allocated on demand by the newlib functions through the
`__retarget_lock_init[_recursive]` and `__retarget_lock_close[_recursive]`
functions.

When userspace is not enabled, the dynamic locks are allocated in the
newlib heap using the `malloc` function. This is safe because the
multi-threaded malloc lock implementations (`__malloc_lock` and
`__malloc_unlock`) call the retargetable locking interface with a static
lock (`__lock___malloc_recursive_mutex`), so `malloc` itself never
depends on a dynamic lock. When userspace is enabled, the dynamic locks
are instead allocated and freed through `k_object_alloc` and
`k_object_release`.

Note that the lock implementations used here are `k_mutex` and `k_sem`
instead of `sys_mutex` and `sys_sem` because the Zephyr kernel does not
currently support dynamic allocation of the latter. These locks should
be updated to use `sys_mutex` and `sys_sem` when Zephyr becomes capable
of dynamically allocating them.

Signed-off-by: Stephanos Ioannidis
---
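Background for reviewers: newlib only ever manipulates these locks
through the opaque `struct __lock` type, so defining `k_mutex`/`k_sem`
kernel objects under the `__lock___...` symbol names is what binds the
two sides together at link time. A minimal sketch of how the pieces
line up, assuming a newlib built with retargetable locking enabled
(consult newlib's <sys/lock.h> for the authoritative declarations):

    /* newlib side (declared by newlib; not part of this patch) */
    struct __lock;                        /* opaque to newlib */
    typedef struct __lock *_LOCK_T;
    extern struct __lock __lock___malloc_recursive_mutex;

    /* Zephyr side (this patch), in a separate translation unit: the
     * same linker symbol is defined as a kernel object, so every
     * _LOCK_T that newlib passes into the hooks actually points at a
     * struct k_mutex (recursive locks) or a struct k_sem
     * (non-recursive locks). This is why the casts inside the hook
     * implementations below are safe.
     */
    struct k_mutex __lock___malloc_recursive_mutex; /* via K_MUTEX_DEFINE() */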
 lib/libc/newlib/libc-hooks.c | 145 +++++++++++++++++++++++++++++++++--
 1 file changed, 140 insertions(+), 5 deletions(-)

diff --git a/lib/libc/newlib/libc-hooks.c b/lib/libc/newlib/libc-hooks.c
index fdc8c5ead6e..a73c3f85117 100644
--- a/lib/libc/newlib/libc-hooks.c
+++ b/lib/libc/newlib/libc-hooks.c
@@ -7,6 +7,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -298,18 +299,152 @@ void *_sbrk(intptr_t count)
 }
 __weak FUNC_ALIAS(_sbrk, sbrk, void *);
 
-static LIBC_DATA SYS_MUTEX_DEFINE(heap_mutex);
+#ifdef CONFIG_MULTITHREADING
+/*
+ * Newlib Retargetable Locking Interface Implementation
+ *
+ * When multithreading is enabled, the newlib retargetable locking interface is
+ * defined below to override the default no-op implementation and provide the
+ * Zephyr-side locks.
+ *
+ * NOTE: `k_mutex` and `k_sem` are used instead of `sys_mutex` and `sys_sem`
+ * because the latter do not support dynamic allocation for now.
+ */
 
-void __malloc_lock(struct _reent *reent)
+/* Static locks */
+K_MUTEX_DEFINE(__lock___sinit_recursive_mutex);
+K_MUTEX_DEFINE(__lock___sfp_recursive_mutex);
+K_MUTEX_DEFINE(__lock___atexit_recursive_mutex);
+K_MUTEX_DEFINE(__lock___malloc_recursive_mutex);
+K_MUTEX_DEFINE(__lock___env_recursive_mutex);
+K_SEM_DEFINE(__lock___at_quick_exit_mutex, 1, 1);
+K_SEM_DEFINE(__lock___tz_mutex, 1, 1);
+K_SEM_DEFINE(__lock___dd_hash_mutex, 1, 1);
+K_SEM_DEFINE(__lock___arc4random_mutex, 1, 1);
+
+#ifdef CONFIG_USERSPACE
+/* Grant public access to all static locks after boot */
+static int newlib_locks_prepare(const struct device *unused)
 {
-	sys_mutex_lock(&heap_mutex, K_FOREVER);
+	ARG_UNUSED(unused);
+
+	/* Grant access to the recursive locks */
+	k_object_access_all_grant(&__lock___sinit_recursive_mutex);
+	k_object_access_all_grant(&__lock___sfp_recursive_mutex);
+	k_object_access_all_grant(&__lock___atexit_recursive_mutex);
+	k_object_access_all_grant(&__lock___malloc_recursive_mutex);
+	k_object_access_all_grant(&__lock___env_recursive_mutex);
+
+	/* Grant access to the non-recursive locks */
+	k_object_access_all_grant(&__lock___at_quick_exit_mutex);
+	k_object_access_all_grant(&__lock___tz_mutex);
+	k_object_access_all_grant(&__lock___dd_hash_mutex);
+	k_object_access_all_grant(&__lock___arc4random_mutex);
+
+	return 0;
 }
 
-void __malloc_unlock(struct _reent *reent)
+SYS_INIT(newlib_locks_prepare, POST_KERNEL,
+	 CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
+#endif /* CONFIG_USERSPACE */
+
+/* Create a new dynamic non-recursive lock */
+void __retarget_lock_init(_LOCK_T *lock)
 {
-	sys_mutex_unlock(&heap_mutex);
+	__ASSERT_NO_MSG(lock != NULL);
+
+	/* Allocate semaphore object */
+#ifndef CONFIG_USERSPACE
+	*lock = malloc(sizeof(struct k_sem));
+#else
+	*lock = k_object_alloc(K_OBJ_SEM);
+#endif /* !CONFIG_USERSPACE */
+	__ASSERT(*lock != NULL, "non-recursive lock allocation failed");
+
+	k_sem_init((struct k_sem *)*lock, 1, 1);
 }
 
+/* Create a new dynamic recursive lock */
+void __retarget_lock_init_recursive(_LOCK_T *lock)
+{
+	__ASSERT_NO_MSG(lock != NULL);
+
+	/* Allocate mutex object */
+#ifndef CONFIG_USERSPACE
+	*lock = malloc(sizeof(struct k_mutex));
+#else
+	*lock = k_object_alloc(K_OBJ_MUTEX);
+#endif /* !CONFIG_USERSPACE */
+	__ASSERT(*lock != NULL, "recursive lock allocation failed");
+
+	k_mutex_init((struct k_mutex *)*lock);
+}
+
+/* Close dynamic non-recursive lock */
+void __retarget_lock_close(_LOCK_T lock)
+{
+	__ASSERT_NO_MSG(lock != NULL);
+#ifndef CONFIG_USERSPACE
+	free(lock);
+#else
+	k_object_release(lock);
+#endif /* !CONFIG_USERSPACE */
+}
+
+/* Close dynamic recursive lock */
+void __retarget_lock_close_recursive(_LOCK_T lock)
+{
+	__ASSERT_NO_MSG(lock != NULL);
+#ifndef CONFIG_USERSPACE
+	free(lock);
+#else
+	k_object_release(lock);
+#endif /* !CONFIG_USERSPACE */
+}
+
+/* Acquire non-recursive lock */
+void __retarget_lock_acquire(_LOCK_T lock)
+{
+	__ASSERT_NO_MSG(lock != NULL);
+	k_sem_take((struct k_sem *)lock, K_FOREVER);
+}
+
+/* Acquire recursive lock */
+void __retarget_lock_acquire_recursive(_LOCK_T lock)
+{
+	__ASSERT_NO_MSG(lock != NULL);
+	k_mutex_lock((struct k_mutex *)lock, K_FOREVER);
+}
+
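+/*
+ * NOTE: Newlib expects the try-acquire hooks to report success with a
+ * nonzero return value (its stock no-op implementations return 1),
+ * whereas k_sem_take() and k_mutex_lock() return 0 on success and
+ * -EBUSY when called with K_NO_WAIT on a busy lock. The logical
+ * negation below converts between the two conventions.
+ */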
+/* Try acquiring non-recursive lock */
+int __retarget_lock_try_acquire(_LOCK_T lock)
+{
+	__ASSERT_NO_MSG(lock != NULL);
+	return !k_sem_take((struct k_sem *)lock, K_NO_WAIT);
+}
+
+/* Try acquiring recursive lock */
+int __retarget_lock_try_acquire_recursive(_LOCK_T lock)
+{
+	__ASSERT_NO_MSG(lock != NULL);
+	return !k_mutex_lock((struct k_mutex *)lock, K_NO_WAIT);
+}
+
+/* Release non-recursive lock */
+void __retarget_lock_release(_LOCK_T lock)
+{
+	__ASSERT_NO_MSG(lock != NULL);
+	k_sem_give((struct k_sem *)lock);
+}
+
+/* Release recursive lock */
+void __retarget_lock_release_recursive(_LOCK_T lock)
+{
+	__ASSERT_NO_MSG(lock != NULL);
+	k_mutex_unlock((struct k_mutex *)lock);
+}
+#endif /* CONFIG_MULTITHREADING */
+
 __weak int *__errno(void)
 {
 	return z_errno();