tests/kernel: Simplify timer spinning

There is actually nothing wrong with this test code idiom.  But it's
tickling a qemu emulator bug with the hpet driver and x86_64[1].  The
rapidly spinning calls to k_uptime_get_32() need to disable
interrupts, read the timer hardware state, and re-enable them.
Something goes wrong in qemu with this sequence and the timer
interrupt gets lost: the counter blows right past the comparator
without the interrupt ever firing, so it isn't delivered until the
counter is next reset in idle after exit from the busy loop, which is
obviously too late to interrupt the timeslicing thread.
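
For reference, the spinning idiom in question looks roughly like this
(a simplified sketch; BUSY_MS is the tests' busy-spin duration in
milliseconds):

    u32_t t32 = k_uptime_get_32();

    /* Spin until BUSY_MS milliseconds have elapsed.  Each call to
     * k_uptime_get_32() locks interrupts, reads the timer hardware
     * and unlocks again, which is what trips up the emulated hpet.
     */
    while (k_uptime_get_32() - t32 < BUSY_MS) {
        ;
    }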

Just replace the loops with a single call to k_busy_wait().  The
resulting code ends up being much simpler anyway.  An added bonus is
that we can remove the special case handling for native_posix (which
was an entirely unrelated thing, but with a similar symptom).
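
The replacement is a one-liner (k_busy_wait() takes microseconds, so
the millisecond constants get scaled by 1000), and it burns real time
on native_posix as well, which is what lets the CONFIG_ARCH_POSIX
special case go away:

    /* Busy-spin for BUSY_MS milliseconds of real time */
    k_busy_wait(1000 * BUSY_MS);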

[1] But oddly not with the same emulated hardware running the same
driver under the same qemu binary when used with a 32 bit kernel.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Andy Ross 2018-12-10 15:15:03 -08:00 committed by Anas Nashif
parent 870e8188a8
commit 0cc362f873
3 changed files with 4 additions and 39 deletions

@@ -46,22 +46,10 @@ static void thread_tslice(void *p1, void *p2, void *p3)
zassert_true(t <= expected_slice_max, NULL);
thread_idx = (thread_idx + 1) % NUM_THREAD;
u32_t t32 = k_uptime_get_32();
/* Keep the current thread busy for more than one slice, even though,
* when timeslice used up the next thread should be scheduled in.
*/
while (k_uptime_get_32() - t32 < BUSY_MS) {
#if defined(CONFIG_ARCH_POSIX)
/*
* In the posix arch, a busy loop takes no time, so let's make
* it take some
*/
k_busy_wait(50);
#else
;
#endif
}
k_busy_wait(1000 * BUSY_MS);
k_sem_give(&sema);
}

@@ -50,20 +50,12 @@ static void thread_tslice(void *p1, void *p2, void *p3)
(tdelta <= expected_slice_max) &&
((int)p1 == thread_idx)), NULL);
thread_idx = (thread_idx + 1) % (NUM_THREAD);
u32_t t32 = k_uptime_get_32();
/* Keep the current thread busy for more than one slice,
* even though, when timeslice used up the next thread
* should be scheduled in.
*/
while (k_uptime_get_32() - t32 < BUSY_MS) {
#if defined(CONFIG_ARCH_POSIX)
k_busy_wait(50);
#else
;
#endif
}
k_busy_wait(1000 * BUSY_MS);
k_sem_give(&sema1);
}
@@ -83,7 +75,6 @@ static void thread_tslice(void *p1, void *p2, void *p3)
*/
void test_slice_scheduling(void)
{
u32_t t32;
k_tid_t tid[NUM_THREAD];
int old_prio = k_thread_priority_get(k_current_get());
int count = 0;
@@ -111,14 +102,7 @@ void test_slice_scheduling(void)
* even though, when timeslice used up the next thread
* should be scheduled in.
*/
t32 = k_uptime_get_32();
while (k_uptime_get_32() - t32 < BUSY_MS) {
#if defined(CONFIG_ARCH_POSIX)
k_busy_wait(50);
#else
;
#endif
}
k_busy_wait(1000 * BUSY_MS);
/* relinquish CPU and wait for each thread to complete*/
for (int i = 0; i < NUM_THREAD; i++) {

@@ -58,15 +58,8 @@ static void thread_tslice(void *p1, void *p2, void *p3)
/*less than one tick delay*/
zassert_true(t <= SLICE_SIZE_LIMIT, NULL);
u32_t t32 = k_uptime_get_32();
/*keep the current thread busy for more than one slice*/
while (k_uptime_get_32() - t32 < SLEEP_TICKLESS)
#if defined(CONFIG_ARCH_POSIX)
k_busy_wait(50);
#else
;
#endif
k_busy_wait(1000 * SLEEP_TICKLESS);
k_sem_give(&sema);
}
/**