The scheduler needs a few tweaks to work in SMP mode:

1. The "cache" field just doesn't work. With more than one CPU,
   caching the single highest-priority thread isn't useful, as you may
   need N of them at any given time before another thread is returned
   to the scheduler. You could recalculate it on every change, but
   that provides no performance benefit. Remove it.

2. The "bitmask" designed to avoid checking priorities individually is
   likewise dropped. This could work, but in fact on our only current
   SMP system and with current K_NUM_PRIORITIES values it provides no
   real benefit.

3. Individual threads now have a "current cpu" and an "active" flag so
   that the choice of the next thread to run can correctly skip
   threads that are active on other CPUs (a sketch of this selection
   loop follows below).

The upshot is that a decent amount of code gets #if'd out, and the new
SMP implementations of _get_highest_ready_prio() and
_get_next_ready_thread() are simpler and smaller, at the expense of
dropping older optimizations.

Note that scheduler synchronization is unchanged: all scheduler APIs
used to require that an irq_lock() be held, which means they now
require the global spinlock via the same API. This should be a very
early candidate for lock-granularity attention!

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
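For illustration, here is a minimal sketch of the selection loop that point 3 enables: walk the ready queues in priority order and skip anything already running on another CPU. Only base.active, base.cpu, and _arch_curr_cpu()->id come from this patch; the per-priority dlist layout (_ready_q.q[]), the base.k_q_node link field, and K_NUM_PRIORITIES as the loop bound are assumptions made for this example, not the exact kernel internals.

/* Illustrative sketch only, not the code from this patch.  Assumes a
 * per-priority dlist ready queue (_ready_q.q[]) linked through
 * base.k_q_node; the real queue layout may differ.
 */
static struct k_thread *_get_next_ready_thread_sketch(void)
{
        int this_cpu = _arch_curr_cpu()->id;

        for (int prio = 0; prio < K_NUM_PRIORITIES; prio++) {
                sys_dnode_t *node;

                SYS_DLIST_FOR_EACH_NODE(&_ready_q.q[prio], node) {
                        struct k_thread *th =
                                CONTAINER_OF(node, struct k_thread,
                                             base.k_q_node);

                        /* Skip threads active on another CPU; a thread
                         * active on *this* CPU (the caller) may be
                         * re-selected
                         */
                        if (th->base.active && th->base.cpu != this_cpu) {
                                continue;
                        }

                        return th;
                }
        }

        return NULL; /* unreachable if an idle thread is always ready */
}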
kswap.h | 78 lines | 1.6 KiB | C
/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef _KSWAP_H
#define _KSWAP_H

#include <ksched.h>
#include <kernel_arch_func.h>

#ifdef CONFIG_TIMESLICING
extern void _update_time_slice_before_swap(void);
#else
#define _update_time_slice_before_swap() /**/
#endif

#ifdef CONFIG_STACK_SENTINEL
extern void _check_stack_sentinel(void);
#else
#define _check_stack_sentinel() /**/
#endif

/* context switching and scheduling-related routines */
#ifdef CONFIG_USE_SWITCH

/* New style context switching. _arch_switch() is a lower level
 * primitive that doesn't know about the scheduler or return value.
 * Needed for SMP, where the scheduler requires spinlocking that we
 * don't want to have to do in per-architecture assembly.
 */
static inline unsigned int _Swap(unsigned int key)
{
        struct k_thread *new_thread, *old_thread;
        int ret;

        old_thread = _current;

        _check_stack_sentinel();
        _update_time_slice_before_swap();

        new_thread = _get_next_ready_thread();

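        /* Set a default return value: if nothing overwrites swap_retval
         * before this thread is switched back in, _Swap() reports -EAGAIN
         */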
        old_thread->swap_retval = -EAGAIN;

#ifdef CONFIG_SMP
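        /* SMP bookkeeping from this patch: mark the outgoing thread
         * inactive so another CPU may schedule it, mark the incoming
         * thread active, and record which CPU it now runs on
         */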
        old_thread->base.active = 0;
        new_thread->base.active = 1;

        new_thread->base.cpu = _arch_curr_cpu()->id;
#endif

        _current = new_thread;
        _arch_switch(new_thread->switch_handle,
                     &old_thread->switch_handle);

        ret = _current->swap_retval;

        irq_unlock(key);

        return ret;
}

#else /* !CONFIG_USE_SWITCH */

extern unsigned int __swap(unsigned int key);

static inline unsigned int _Swap(unsigned int key)
{
        _check_stack_sentinel();
        _update_time_slice_before_swap();

        return __swap(key);
}
#endif

#endif /* _KSWAP_H */
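For context on the calling convention (which this patch leaves unchanged): a hedged sketch of how a blocking primitive might drive _Swap(). The caller takes the interrupt lock, which under SMP is the global spinlock noted in the commit message, queues the current thread, and swaps out; _Swap() releases the lock via irq_unlock(key), and its return value is whatever swap_retval holds when the thread is switched back in. The function name below is hypothetical and the pend helpers are elided.

/* Hypothetical caller, for illustration only */
static int block_current_thread(sys_dlist_t *waitq)
{
        unsigned int key = irq_lock();  /* global spinlock under SMP */

        /* ... move _current from the ready queue onto waitq
         * (pend helpers elided) ...
         */

        /* -EAGAIN unless a waker stores a different swap_retval
         * before this thread runs again
         */
        return _Swap(key);
}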