diff --git a/lib/libc/newlib/libc-hooks.c b/lib/libc/newlib/libc-hooks.c
index fdc8c5ead6e..a73c3f85117 100644
--- a/lib/libc/newlib/libc-hooks.c
+++ b/lib/libc/newlib/libc-hooks.c
@@ -7,6 +7,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -298,18 +299,152 @@ void *_sbrk(intptr_t count)
 }
 __weak FUNC_ALIAS(_sbrk, sbrk, void *);
 
-static LIBC_DATA SYS_MUTEX_DEFINE(heap_mutex);
+#ifdef CONFIG_MULTITHREADING
+/*
+ * Newlib Retargetable Locking Interface Implementation
+ *
+ * When multithreading is enabled, the newlib retargetable locking interface is
+ * defined below to override the default void implementation and provide the
+ * Zephyr-side locks.
+ *
+ * NOTE: `k_mutex` and `k_sem` are used instead of `sys_mutex` and `sys_sem`
+ * because the latter do not support dynamic allocation for now.
+ */
 
-void __malloc_lock(struct _reent *reent)
+/* Static locks */
+K_MUTEX_DEFINE(__lock___sinit_recursive_mutex);
+K_MUTEX_DEFINE(__lock___sfp_recursive_mutex);
+K_MUTEX_DEFINE(__lock___atexit_recursive_mutex);
+K_MUTEX_DEFINE(__lock___malloc_recursive_mutex);
+K_MUTEX_DEFINE(__lock___env_recursive_mutex);
+K_SEM_DEFINE(__lock___at_quick_exit_mutex, 1, 1);
+K_SEM_DEFINE(__lock___tz_mutex, 1, 1);
+K_SEM_DEFINE(__lock___dd_hash_mutex, 1, 1);
+K_SEM_DEFINE(__lock___arc4random_mutex, 1, 1);
+
+#ifdef CONFIG_USERSPACE
+/* Grant public access to all static locks after boot */
+static int newlib_locks_prepare(const struct device *unused)
 {
-	sys_mutex_lock(&heap_mutex, K_FOREVER);
+	ARG_UNUSED(unused);
+
+	/* Grant access to the recursive locks */
+	k_object_access_all_grant(&__lock___sinit_recursive_mutex);
+	k_object_access_all_grant(&__lock___sfp_recursive_mutex);
+	k_object_access_all_grant(&__lock___atexit_recursive_mutex);
+	k_object_access_all_grant(&__lock___malloc_recursive_mutex);
+	k_object_access_all_grant(&__lock___env_recursive_mutex);
+
+	/* Grant access to the non-recursive locks */
+	k_object_access_all_grant(&__lock___at_quick_exit_mutex);
+	k_object_access_all_grant(&__lock___tz_mutex);
+	k_object_access_all_grant(&__lock___dd_hash_mutex);
+	k_object_access_all_grant(&__lock___arc4random_mutex);
+
+	return 0;
 }
 
-void __malloc_unlock(struct _reent *reent)
+SYS_INIT(newlib_locks_prepare, POST_KERNEL,
+	 CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
+#endif /* CONFIG_USERSPACE */
+
+/* Create a new dynamic non-recursive lock */
+void __retarget_lock_init(_LOCK_T *lock)
 {
-	sys_mutex_unlock(&heap_mutex);
+	__ASSERT_NO_MSG(lock != NULL);
+
+	/* Allocate semaphore object */
+#ifndef CONFIG_USERSPACE
+	*lock = malloc(sizeof(struct k_sem));
+#else
+	*lock = k_object_alloc(K_OBJ_SEM);
+#endif /* !CONFIG_USERSPACE */
+	__ASSERT(*lock != NULL, "non-recursive lock allocation failed");
+
+	k_sem_init((struct k_sem *)*lock, 1, 1);
 }
 
+/* Create a new dynamic recursive lock */
+void __retarget_lock_init_recursive(_LOCK_T *lock)
+{
+	__ASSERT_NO_MSG(lock != NULL);
+
+	/* Allocate mutex object */
+#ifndef CONFIG_USERSPACE
+	*lock = malloc(sizeof(struct k_mutex));
+#else
+	*lock = k_object_alloc(K_OBJ_MUTEX);
+#endif /* !CONFIG_USERSPACE */
+	__ASSERT(*lock != NULL, "recursive lock allocation failed");
+
+	k_mutex_init((struct k_mutex *)*lock);
+}
+
+/* Close dynamic non-recursive lock */
+void __retarget_lock_close(_LOCK_T lock)
+{
+	__ASSERT_NO_MSG(lock != NULL);
+#ifndef CONFIG_USERSPACE
+	free(lock);
+#else
+	k_object_release(lock);
+#endif /* !CONFIG_USERSPACE */
+}
+
+/* Close dynamic recursive lock */
+void __retarget_lock_close_recursive(_LOCK_T lock)
+{
+	__ASSERT_NO_MSG(lock != NULL);
+#ifndef CONFIG_USERSPACE
+	free(lock);
+#else
+	k_object_release(lock);
+#endif /* !CONFIG_USERSPACE */
+}
+
+/* Acquire non-recursive lock */
+void __retarget_lock_acquire(_LOCK_T lock)
+{
+	__ASSERT_NO_MSG(lock != NULL);
+	k_sem_take((struct k_sem *)lock, K_FOREVER);
+}
+
+/* Acquire recursive lock */
+void __retarget_lock_acquire_recursive(_LOCK_T lock)
+{
+	__ASSERT_NO_MSG(lock != NULL);
+	k_mutex_lock((struct k_mutex *)lock, K_FOREVER);
+}
+
+/* Try acquiring non-recursive lock */
+int __retarget_lock_try_acquire(_LOCK_T lock)
+{
+	__ASSERT_NO_MSG(lock != NULL);
+	return !k_sem_take((struct k_sem *)lock, K_NO_WAIT);
+}
+
+/* Try acquiring recursive lock */
+int __retarget_lock_try_acquire_recursive(_LOCK_T lock)
+{
+	__ASSERT_NO_MSG(lock != NULL);
+	return !k_mutex_lock((struct k_mutex *)lock, K_NO_WAIT);
+}
+
+/* Release non-recursive lock */
+void __retarget_lock_release(_LOCK_T lock)
+{
+	__ASSERT_NO_MSG(lock != NULL);
+	k_sem_give((struct k_sem *)lock);
+}
+
+/* Release recursive lock */
+void __retarget_lock_release_recursive(_LOCK_T lock)
+{
+	__ASSERT_NO_MSG(lock != NULL);
+	k_mutex_unlock((struct k_mutex *)lock);
+}
+#endif /* CONFIG_MULTITHREADING */
+
 __weak int *__errno(void)
 {
 	return z_errno();
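
Note for context: newlib routes its internal locking (stdio, malloc, env, timezone, etc.) through this interface when built with _RETARGETABLE_LOCKING, declaring _LOCK_T and the __retarget_lock_*() functions in its <sys/lock.h>. As a minimal sketch of what the dynamic half of the interface does after this patch, the lock_demo() function below (a hypothetical name, for illustration only, not part of the change) walks a non-recursive lock through its life cycle:

	#include <sys/lock.h>	/* newlib: _LOCK_T, __retarget_lock_*() */

	static void lock_demo(void)
	{
		_LOCK_T lock;

		/* Allocates a struct k_sem (malloc() or k_object_alloc()) */
		__retarget_lock_init(&lock);

		__retarget_lock_acquire(lock);	/* k_sem_take(..., K_FOREVER) */
		/* ... critical section ... */
		__retarget_lock_release(lock);	/* k_sem_give() */

		/* Non-blocking attempt; returns non-zero on success */
		if (__retarget_lock_try_acquire(lock)) {
			__retarget_lock_release(lock);
		}

		/* Releases the semaphore object (free() or k_object_release()) */
		__retarget_lock_close(lock);
	}

In normal operation user code never calls these directly; newlib itself does, e.g. its __malloc_lock() acquiring the static __lock___malloc_recursive_mutex defined above, which is why the removed Zephyr-side __malloc_lock()/__malloc_unlock() wrappers around heap_mutex are no longer needed.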