Picolibc's retargetable locking is based on the user owning the lock
type (struct __lock, along with typedef struct __lock *_LOCK_T), with
picolibc's internal code referring to that type only through the _LOCK_T
pointer typedef and leaving the actual struct undefined there.
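For reference, the relevant part of picolibc's sys/lock.h looks roughly
like this (simplified sketch; the real header also declares the
__retarget_lock_* entry points implemented below):

struct __lock;                  /* deliberately left incomplete */
typedef struct __lock *_LOCK_T; /* libc code only passes this pointer around */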
Zephyr wants to use 'struct k_mutex' for this type; the initial picolibc
port handled this by trying to redefine the picolibc locking to use 'void
*' instead of 'struct __lock *' via '#define _LOCK_T void *'. That
'works' only as long as the Zephyr code never actually includes
picolibc's sys/lock.h.
A recent picolibc change adding POSIX stdio locking makes picolibc's
stdio.h include sys/lock.h, which breaks Zephyr's hack.
To fix this, create a real 'struct __lock' type as
struct __lock { struct k_mutex m; };
Define all of the required picolibc locking API with this real type,
referring to the mutex inside without needing any casts.
This required switching the definition of the C library global lock from
K_MUTEX_DEFINE to the open-coded equivalent, STRUCT_SECTION_ITERABLE_ALTERNATE,
so that the lock has the correct type while still landing in the same ELF section.
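For comparison, K_MUTEX_DEFINE(name) is approximately a wrapper around
STRUCT_SECTION_ITERABLE with the k_mutex type baked in; the open-coded form
keeps the same section placement but lets the variable carry the libc's own
type (a sketch, not the exact Zephyr macro expansion):

/* Roughly what K_MUTEX_DEFINE(name) expands to; type fixed to struct k_mutex */
STRUCT_SECTION_ITERABLE(k_mutex, name) = Z_MUTEX_INITIALIZER(name);

/* Open-coded equivalent: same k_mutex section, but type struct __lock */
STRUCT_SECTION_ITERABLE_ALTERNATE(k_mutex, __lock, name) = {
	.m = Z_MUTEX_INITIALIZER(name.m),
};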
The only mildly inappropriate code left is that locks are allocated using
k_object_alloc(K_OBJ_MUTEX), which "works" because the size of 'struct
__lock' exactly matches the size of 'struct k_mutex' under C's struct
layout rules.
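If desired, that size assumption could be made explicit with a compile-time
check along these lines (not part of this patch):

BUILD_ASSERT(sizeof(struct __lock) == sizeof(struct k_mutex),
	     "struct __lock must have the same size as struct k_mutex");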
Signed-off-by: Keith Packard <keithp@keithp.com>
/*
 * Copyright © 2021, Keith Packard <keithp@keithp.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "picolibc-hooks.h"

#ifdef CONFIG_MULTITHREADING

#include <sys/lock.h>

/* Define the picolibc lock type */
struct __lock {
	struct k_mutex m;
};

STRUCT_SECTION_ITERABLE_ALTERNATE(k_mutex, __lock,
				  __lock___libc_recursive_mutex) = {
	.m = Z_MUTEX_INITIALIZER(__lock___libc_recursive_mutex.m),
};

#ifdef CONFIG_USERSPACE
/* Grant public access to picolibc lock after boot */
static int picolibc_locks_prepare(void)
{
	/* Initialise recursive locks */
	k_object_access_all_grant(&__lock___libc_recursive_mutex);

	return 0;
}

SYS_INIT(picolibc_locks_prepare, POST_KERNEL,
	 CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_USERSPACE */

/* Create a new dynamic recursive lock */
void __retarget_lock_init_recursive(_LOCK_T *lock)
{
	__ASSERT_NO_MSG(lock != NULL);

	/* Allocate mutex object */
#ifndef CONFIG_USERSPACE
	*lock = malloc(sizeof(struct __lock));
#else
	*lock = k_object_alloc(K_OBJ_MUTEX);
#endif /* !CONFIG_USERSPACE */
	__ASSERT(*lock != NULL, "recursive lock allocation failed");

	k_mutex_init(&(*lock)->m);
}

/* Create a new dynamic non-recursive lock */
void __retarget_lock_init(_LOCK_T *lock)
{
	__retarget_lock_init_recursive(lock);
}

/* Close dynamic recursive lock */
void __retarget_lock_close_recursive(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
#ifndef CONFIG_USERSPACE
	free(lock);
#else
	k_object_release(lock);
#endif /* !CONFIG_USERSPACE */
}

/* Close dynamic non-recursive lock */
void __retarget_lock_close(_LOCK_T lock)
{
	__retarget_lock_close_recursive(lock);
}

/* Acquire recursive lock */
void __retarget_lock_acquire_recursive(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	k_mutex_lock(&lock->m, K_FOREVER);
}

/* Acquire non-recursive lock */
void __retarget_lock_acquire(_LOCK_T lock)
{
	__retarget_lock_acquire_recursive(lock);
}

/* Try acquiring recursive lock */
int __retarget_lock_try_acquire_recursive(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	return !k_mutex_lock(&lock->m, K_NO_WAIT);
}

/* Try acquiring non-recursive lock */
int __retarget_lock_try_acquire(_LOCK_T lock)
{
	return __retarget_lock_try_acquire_recursive(lock);
}

/* Release recursive lock */
void __retarget_lock_release_recursive(_LOCK_T lock)
{
	__ASSERT_NO_MSG(lock != NULL);
	k_mutex_unlock(&lock->m);
}

/* Release non-recursive lock */
void __retarget_lock_release(_LOCK_T lock)
{
	__retarget_lock_release_recursive(lock);
}

#endif /* CONFIG_MULTITHREADING */