arch/x86: Fix dcache enable/disable code

The code did not build on x86_64 because cr0 is a 64-bit register on that
architecture instead of a 32-bit one, so the placeholder variable has to
follow that size. Such a placeholder must also be initialized to 0 to make
sure no upper 32 bits end up set, which would result in a general
protection fault.

The operand size suffix (l, q, ...) is also unnecessary in this context,
since the register operand already determines the operation size.

Clean up the masks by using proper macros.
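
For reference, a minimal self-contained sketch of the corrected pattern is
below. It mirrors the enable path of the patch; the function name and the
local macro definitions are only illustrative, since in the tree BIT()
comes from Zephyr's utility headers and the CR0 macros are added by this
patch itself:

/* Illustrative sketch, not the patch itself. */
#define X86_REG_CR0_NW (1UL << 29)	/* Not Write-through bit */
#define X86_REG_CR0_CD (1UL << 30)	/* Cache Disable bit */

static inline void dcache_enable_sketch(void)
{
	/* unsigned long matches the natural width of cr0 on both ia32 and
	 * x86_64; initializing it to 0 guarantees no stray upper 32 bits
	 * can be written back to cr0, which would trigger a general
	 * protection fault.
	 */
	unsigned long cr0 = 0;

	/* No 'l'/'q' size suffix: the register operand fixes the size,
	 * and the mask is passed as an immediate operand.
	 */
	__asm__ volatile("mov %%cr0, %0;\n\t"
			 "and %1, %0;\n\t"
			 "mov %0, %%cr0;\n\t"
			 : "=r" (cr0)
			 : "i" (~(X86_REG_CR0_NW | X86_REG_CR0_CD)));
}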

Signed-off-by: Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>

@@ -18,6 +18,11 @@
 #include <zephyr/cache.h>
 #include <stdbool.h>
 
+/* Not Write-through bit */
+#define X86_REG_CR0_NW BIT(29)
+/* Cache Disable bit */
+#define X86_REG_CR0_CD BIT(30)
+
 static inline void z_x86_wbinvd(void)
 {
 	__asm__ volatile("wbinvd;\n\t" : : : "memory");
@@ -25,25 +30,28 @@ static inline void z_x86_wbinvd(void)
 
 void arch_dcache_enable(void)
 {
-	uint32_t cr0;
+	unsigned long cr0 = 0;
 
 	/* Enable write-back caching by clearing the NW and CD bits */
-	__asm__ volatile("movl %%cr0, %0;\n\t"
-			 "andl $0x9fffffff, %0;\n\t"
-			 "movl %0, %%cr0;\n\t"
-			 : "=r" (cr0));
+	__asm__ volatile("mov %%cr0, %0;\n\t"
+			 "and %1, %0;\n\t"
+			 "mov %0, %%cr0;\n\t"
+			 : "=r" (cr0)
+			 : "i" (~(X86_REG_CR0_NW | X86_REG_CR0_CD)));
 }
 
 void arch_dcache_disable(void)
 {
-	uint32_t cr0;
+	unsigned long cr0 = 0;
 
 	/* Enter the no-fill mode by setting NW=0 and CD=1 */
-	__asm__ volatile("movl %%cr0, %0;\n\t"
-			 "andl $0xdfffffff, %0;\n\t"
-			 "orl $0x40000000, %0;\n\t"
-			 "movl %0, %%cr0;\n\t"
-			 : "=r" (cr0));
+	__asm__ volatile("mov %%cr0, %0;\n\t"
+			 "and %1, %0;\n\t"
+			 "or %2, %0;\n\t"
+			 "mov %0, %%cr0;\n\t"
+			 : "=r" (cr0)
+			 : "i" (~(X86_REG_CR0_NW)),
+			   "i" (X86_REG_CR0_CD));
 
 	/* Flush all caches */
 	z_x86_wbinvd();
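
As a quick sanity check (not part of the patch), the macro-based masks
reduce to the same hex literals the old code hard-coded, assuming BIT(n)
is Zephyr's usual (1UL << (n)) helper:

#include <stdint.h>

#define BIT(n) (1UL << (n))	/* stand-in for Zephyr's util macro */
#define X86_REG_CR0_NW BIT(29)
#define X86_REG_CR0_CD BIT(30)

/* Enable path: the old "andl $0x9fffffff" cleared NW and CD. */
_Static_assert((uint32_t)~(X86_REG_CR0_NW | X86_REG_CR0_CD) == 0x9fffffffU, "");
/* Disable path: "andl $0xdfffffff" kept NW clear, "orl $0x40000000" set CD. */
_Static_assert((uint32_t)~X86_REG_CR0_NW == 0xdfffffffU, "");
_Static_assert((uint32_t)X86_REG_CR0_CD == 0x40000000U, "");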