arch/x86_64: convert all asm() to __asm__()

asm() is not supported with -std=c99; __asm__() is more portable

Signed-off-by: p-szafonimateusz <p-szafonimateusz@xiaomi.com>
p-szafonimateusz 2024-08-20 14:23:01 +02:00 committed by Petro Karashchenko
parent 8b81689f2c
commit 882c0d0a47
14 changed files with 126 additions and 120 deletions
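
A note on the rationale before the per-file diffs: with gcc -std=c99 (strict ISO mode) the asm keyword is a GNU extension and is not recognized, while the reserved-identifier spellings __asm__ and __volatile__ are valid in every dialect. A minimal stand-alone sketch, not part of the commit:

/* sketch.c -- hypothetical demo
 *
 *   gcc -std=gnu99 -c sketch.c   -> both spellings compile
 *   gcc -std=c99   -c sketch.c   -> only the __asm__ spelling compiles;
 *                                   plain `asm` is rejected in ISO mode
 */

static inline void cpu_relax(void)
{
  __asm__ __volatile__("pause");   /* accepted under any -std= setting */

  /* asm volatile("pause"); */     /* error when built with -std=c99 */
}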

View File

@@ -50,7 +50,7 @@
static inline void outb(uint8_t regval, uint16_t port)
{
asm volatile(
__asm__ volatile(
"\toutb %0,%1\n"
:
: "a" (regval), "dN" (port)
@@ -60,17 +60,17 @@ static inline void outb(uint8_t regval, uint16_t port)
static inline uint8_t inb(uint16_t port)
{
uint8_t regval;
asm volatile(
__asm__ volatile(
"\tinb %1,%0\n"
: "=a" (regval)
: "dN" (port)
);
return regval;
}
static inline void outw(uint16_t regval, uint16_t port)
{
asm volatile(
__asm__ volatile(
"\toutw %0,%1\n"
:
: "a" (regval), "dN" (port)
@@ -81,7 +81,7 @@ static inline uint16_t inw(uint16_t port)
{
uint16_t regval;
asm volatile(
__asm__ volatile(
"\tinw %1,%0\n"
: "=a" (regval)
: "dN" (port)
@@ -91,7 +91,7 @@ static inline uint16_t inw(uint16_t port)
static inline void outl(uint32_t regval, uint16_t port)
{
asm volatile(
__asm__ volatile(
"\toutl %0,%1\n"
:
: "a" (regval), "dN" (port)
@@ -101,7 +101,7 @@ static inline void outl(uint32_t regval, uint16_t port)
static inline uint32_t inl(uint16_t port)
{
uint32_t regval;
asm volatile(
__asm__ volatile(
"\tinl %1,%0\n"
: "=a" (regval)
: "dN" (port)
@@ -127,7 +127,7 @@ static inline uint32_t mmio_read32(void *address)
/* Assembly-encoded to match the hypervisor MMIO parser support */
asm volatile("movl (%1),%0" : "=r" (value) : "r" (address));
__asm__ volatile("movl (%1),%0" : "=r" (value) : "r" (address));
return value;
}
@@ -150,7 +150,7 @@ static inline void mmio_write32(void *address, uint32_t value)
{
/* Assembly-encoded to match the hypervisor MMIO parser support */
asm volatile("movl %0,(%1)" : : "r" (value), "r" (address));
__asm__ volatile("movl %0,(%1)" : : "r" (value), "r" (address));
}
static inline void mmio_write64(void *address, uint64_t value)
@@ -162,10 +162,10 @@ static inline void up_trash_cpu(void)
{
for (; ; )
{
asm volatile ("cli;hlt;");
__asm__ volatile ("cli;hlt;");
}
asm("ud2":::"memory");
__asm__ volatile ("ud2":::"memory");
}
static inline void up_invalid_tlb(uintptr_t start, uintptr_t end)
@@ -177,7 +177,7 @@ static inline void up_invalid_tlb(uintptr_t start, uintptr_t end)
for (i = start; i < end; i += PAGE_SIZE)
{
asm("invlpg %0;":: "m"(i):"memory");
__asm__ volatile ("invlpg %0;":: "m"(i):"memory");
}
}
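
The interesting detail in the accessors above is the constraint choice: "a" pins the data operand to AL/AX/EAX, which the in/out instructions require, and "dN" allows either DX or an 8-bit immediate, so constant port numbers get the short immediate encoding. A self-contained sketch of the same pattern (hypothetical my_* names; running it needs ring 0, or ioperm() under Linux):

#include <stdint.h>

static inline void my_outb(uint8_t val, uint16_t port)
{
  /* "a": value must be in AL; "dN": port in DX or an immediate 0..255 */

  __asm__ volatile("outb %0, %1" : : "a" (val), "dN" (port));
}

static inline uint8_t my_inb(uint16_t port)
{
  uint8_t val;

  __asm__ volatile("inb %1, %0" : "=a" (val) : "dN" (port));
  return val;
}

/* Usage: read the legacy POST diagnostic port */

uint8_t read_post_code(void)
{
  return my_inb(0x80);
}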

View File

@@ -518,7 +518,7 @@ static inline void setgdt(void *gdt, int size)
gdt_ptr.limit = size;
gdt_ptr.base = (uintptr_t)gdt;
asm volatile ("lgdt %0"::"m"(gdt_ptr):"memory");
__asm__ volatile ("lgdt %0"::"m"(gdt_ptr):"memory");
}
static inline void setidt(void *idt, int size)
@@ -527,7 +527,7 @@ static inline void setidt(void *idt, int size)
idt_ptr.limit = size;
idt_ptr.base = (uintptr_t)idt;
asm volatile ("lidt %0"::"m"(idt_ptr):"memory");
__asm__ volatile ("lidt %0"::"m"(idt_ptr):"memory");
}
static inline uint64_t rdtscp(void)
@@ -535,7 +535,7 @@ static inline uint64_t rdtscp(void)
uint32_t lo;
uint32_t hi;
asm volatile("rdtscp" : "=a" (lo), "=d" (hi)::"ecx", "memory");
__asm__ volatile("rdtscp" : "=a" (lo), "=d" (hi)::"ecx", "memory");
return (uint64_t)lo | (((uint64_t)hi) << 32);
}
@@ -544,29 +544,29 @@ static inline uint64_t rdtsc(void)
uint32_t lo;
uint32_t hi;
asm volatile("rdtsc" : "=a" (lo), "=d" (hi)::"memory");
__asm__ volatile("rdtsc" : "=a" (lo), "=d" (hi)::"memory");
return (uint64_t)lo | (((uint64_t)hi) << 32);
}
static inline void set_pcid(uint64_t pcid)
{
if (pcid < 4095)
{
asm volatile("mov %%cr3, %%rbx; andq $-4096, %%rbx; or %0, "
"%%rbx; mov %%rbx, %%cr3;"
::"g"(pcid):"memory", "rbx", "rax");
}
if (pcid < 4095)
{
__asm__ volatile("mov %%cr3, %%rbx; andq $-4096, %%rbx; or %0, "
"%%rbx; mov %%rbx, %%cr3;"
::"g"(pcid):"memory", "rbx", "rax");
}
}
static inline void set_cr3(uint64_t cr3)
{
asm volatile("mov %0, %%cr3" : "=rm"(cr3) : : "memory");
__asm__ volatile("mov %0, %%cr3" : "=rm"(cr3) : : "memory");
}
static inline uint64_t get_cr3(void)
{
uint64_t cr3;
asm volatile("mov %%cr3, %0" : "=rm"(cr3) : : "memory");
__asm__ volatile("mov %%cr3, %0" : "=rm"(cr3) : : "memory");
return cr3;
}
@@ -582,54 +582,54 @@ static inline unsigned long read_msr(unsigned int msr)
uint32_t low;
uint32_t high;
asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
__asm__ volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
return low | ((unsigned long)high << 32);
}
static inline void write_msr(unsigned int msr, unsigned long val)
{
asm volatile("wrmsr"
: /* no output */
: "c" (msr), "a" (val), "d" (val >> 32)
: "memory");
__asm__ volatile("wrmsr"
: /* no output */
: "c" (msr), "a" (val), "d" (val >> 32)
: "memory");
}
static inline uint64_t read_fsbase(void)
{
uint64_t val;
asm volatile("rdfsbase %0"
: "=r" (val)
: /* no output */
: "memory");
__asm__ volatile("rdfsbase %0"
: "=r" (val)
: /* no output */
: "memory");
return val;
}
static inline void write_fsbase(unsigned long val)
{
asm volatile("wrfsbase %0"
: /* no output */
: "r" (val)
: "memory");
__asm__ volatile("wrfsbase %0"
: /* no output */
: "r" (val)
: "memory");
}
static inline uint64_t read_gsbase(void)
{
uint64_t val;
asm volatile("rdgsbase %0"
: "=r" (val)
: /* no output */
: "memory");
__asm__ volatile("rdgsbase %0"
: "=r" (val)
: /* no output */
: "memory");
return val;
}
static inline void write_gsbase(unsigned long val)
{
asm volatile("wrgsbase %0"
: /* no output */
: "r" (val)
: "memory");
__asm__ volatile("wrgsbase %0"
: /* no output */
: "r" (val)
: "memory");
}
/* Return stack pointer */
@@ -638,7 +638,7 @@ static inline uint64_t up_getsp(void)
{
uint64_t regval;
asm volatile(
__asm__ volatile(
"\tmovq %%rsp, %0\n"
: "=rm" (regval)
:
@@ -652,7 +652,7 @@ static inline uint32_t up_getds(void)
{
uint32_t regval;
asm volatile(
__asm__ volatile(
"\tmov %%ds, %0\n"
: "=rm" (regval)
:
@@ -664,7 +664,7 @@ static inline uint32_t up_getcs(void)
{
uint32_t regval;
asm volatile(
__asm__ volatile(
"\tmov %%cs, %0\n"
: "=rm" (regval)
:
@@ -676,7 +676,7 @@ static inline uint32_t up_getss(void)
{
uint32_t regval;
asm volatile(
__asm__ volatile(
"\tmov %%ss, %0\n"
: "=rm" (regval)
:
@@ -688,7 +688,7 @@ static inline uint32_t up_getes(void)
{
uint32_t regval;
asm volatile(
__asm__ volatile(
"\tmov %%es, %0\n"
: "=rm" (regval)
:
@@ -700,7 +700,7 @@ static inline uint32_t up_getfs(void)
{
uint32_t regval;
asm volatile(
__asm__ volatile(
"\tmov %%fs, %0\n"
: "=rm" (regval)
:
@@ -712,7 +712,7 @@ static inline uint32_t up_getgs(void)
{
uint32_t regval;
asm volatile(
__asm__ volatile(
"\tmov %%gs, %0\n"
: "=rm" (regval)
:
@@ -735,7 +735,7 @@ static inline irqstate_t irqflags()
{
irqstate_t flags;
asm volatile(
__asm__ volatile(
"\tpushfq\n"
"\tpopq %0\n"
: "=rm" (flags)
@@ -763,14 +763,14 @@ static inline bool up_irq_enabled(irqstate_t flags)
static inline void up_irq_disable(void)
{
asm volatile("cli": : :"memory");
__asm__ volatile("cli": : :"memory");
}
/* Enable interrupts unconditionally */
static inline void up_irq_enable(void)
{
asm volatile("sti": : :"memory");
__asm__ volatile("sti": : :"memory");
}
/* Disable interrupts, but return previous interrupt state */
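
The diff cuts off at the primitive that comment introduces; for illustration, here is a sketch of how the pushfq/popq read and the cli/sti primitives above compose into the usual save/restore pair (hypothetical my_* names, not the functions in this header; IF is bit 9 of RFLAGS):

#include <stdint.h>

typedef uint64_t irqstate_t;

#define MY_RFLAGS_IF (1 << 9)       /* RFLAGS interrupt-enable flag */

static inline irqstate_t my_irq_save(void)
{
  irqstate_t flags;

  __asm__ volatile("pushfq\n\tpopq %0" : "=rm" (flags) : : "memory");
  __asm__ volatile("cli" : : : "memory");
  return flags;
}

static inline void my_irq_restore(irqstate_t flags)
{
  /* Re-enable only if interrupts were enabled at save time */

  if ((flags & MY_RFLAGS_IF) != 0)
    {
      __asm__ volatile("sti" : : : "memory");
    }
}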

View File

@@ -100,7 +100,7 @@ static inline_function int up_cpu_index(void)
{
int cpu;
asm volatile(
__asm__ volatile(
"\tmovl %%gs:(%c1), %0\n"
: "=r" (cpu)
: "i" (offsetof(struct intel64_cpu_s, id))
@@ -119,17 +119,17 @@ static inline_function int up_cpu_index(void)
static inline_function uint64_t *up_current_regs(void)
{
uint64_t *regs;
asm volatile("movq %%gs:(%c1), %0"
: "=rm" (regs)
: "i" (offsetof(struct intel64_cpu_s, current_regs)));
__asm__ volatile("movq %%gs:(%c1), %0"
: "=rm" (regs)
: "i" (offsetof(struct intel64_cpu_s, current_regs)));
return regs;
}
static inline_function void up_set_current_regs(uint64_t *regs)
{
asm volatile("movq %0, %%gs:(%c1)"
:: "r" (regs), "i" (offsetof(struct intel64_cpu_s,
current_regs)));
__asm__ volatile("movq %0, %%gs:(%c1)"
:: "r" (regs), "i" (offsetof(struct intel64_cpu_s,
current_regs)));
}
/****************************************************************************
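
An aside on the idiom in this file: the "i" constraint turns the offsetof() into an immediate operand, and the %c operand modifier prints it without the leading $, so it can serve as the displacement of a %gs-relative access. A cut-down sketch around a hypothetical per-CPU struct:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical per-CPU block; the kernel points the GS base at the
 * current CPU's copy of this structure during boot.
 */

struct my_cpu_s
{
  int       id;
  uint64_t *current_regs;
};

static inline int my_cpu_index(void)
{
  int cpu;

  /* "i" makes the offsetof() an immediate; %c1 prints it as a bare
   * constant (no leading $), giving the displacement of a GS-relative
   * load.
   */

  __asm__ volatile("movl %%gs:(%c1), %0"
                   : "=r" (cpu)
                   : "i" (offsetof(struct my_cpu_s, id)));
  return cpu;
}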

View File

@@ -43,7 +43,7 @@
static inline void x86_64_wbindv(void)
{
asm volatile("wbinvd" : : : "memory");
__asm__ volatile("wbinvd" : : : "memory");
}
/****************************************************************************
@@ -52,7 +52,7 @@ static inline void x86_64_wbindv(void)
static inline void x86_64_wbnoinvd(void)
{
asm volatile("wbnoinvd" : : : "memory");
__asm__ volatile("wbnoinvd" : : : "memory");
}
/****************************************************************************
@@ -61,7 +61,7 @@ static inline void x86_64_wbnoinvd(void)
static inline void x86_64_invd(void)
{
asm volatile("invd" : : : "memory");
__asm__ volatile("invd" : : : "memory");
}
/****************************************************************************
@@ -79,9 +79,9 @@ static size_t x86_64_cache_linesize(void)
unsigned long ebx = 0;
eax = 1;
asm volatile("cpuid\n\t"
: "=b" (ebx)
: "a" (eax));
__asm__ volatile("cpuid\n\t"
: "=b" (ebx)
: "a" (eax));
return ((ebx >> 8) & 0xff) * 8;
#else
@@ -104,9 +104,9 @@ static size_t x86_64_cache_size(int leaf)
eax = 4;
ecx = leaf;
asm volatile("cpuid"
: "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
: "a"(eax), "c"(ecx));
__asm__ volatile("cpuid"
: "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
: "a"(eax), "c"(ecx));
/* (Ways + 1) * (Partitions + 1) * (Line_Size + 1) * (Sets + 1) */
@@ -124,11 +124,11 @@ static void x86_64_cache_enable(void)
{
/* Clear "Not-write through" (NW) and "Cache disable" (CD) bits */
asm volatile("\t mov %%cr0, %%rax\n"
"\t mov $0x9fffffff, %%rbx\n"
"\t and %%rbx, %%rax\n"
"\t mov %%rax, %%cr0\n"
::: "memory", "rax", "rbx");
__asm__ volatile("\t mov %%cr0, %%rax\n"
"\t mov $0x9fffffff, %%rbx\n"
"\t and %%rbx, %%rax\n"
"\t mov %%rax, %%cr0\n"
::: "memory", "rax", "rbx");
}
/****************************************************************************
@@ -139,13 +139,13 @@ static void x86_64_cache_disable(void)
{
/* Set "Not-write through" (NW) and "Cache disable" (CD) bits */
asm volatile("\t mov %%cr0, %%rax\n"
"\t mov $0x9fffffff, %%rbx \n"
"\t and %%rbx, %%rax \n"
"\t mov $0x60000000, %%rbx\n"
"\t or %%rbx, %%rax\n"
"\t mov %%rax, %%cr0\n"
:::"memory", "rax", "rbx");
__asm__ volatile("\t mov %%cr0, %%rax\n"
"\t mov $0x9fffffff, %%rbx \n"
"\t and %%rbx, %%rax \n"
"\t mov $0x60000000, %%rbx\n"
"\t or %%rbx, %%rax\n"
"\t mov %%rax, %%cr0\n"
:::"memory", "rax", "rbx");
/* And flush all caches */
@@ -452,11 +452,11 @@ void up_clean_dcache(uintptr_t start, uintptr_t end)
start &= ~(lsize - 1);
asm volatile("mfence" : : : "memory");
__asm__ volatile("mfence" : : : "memory");
do
{
asm volatile("\tclwb %0;\n" : "+m" (start));
__asm__ volatile("\tclwb %0;\n" : "+m" (start));
/* Increment the address by the size of one cache line. */
@@ -464,7 +464,7 @@ void up_clean_dcache(uintptr_t start, uintptr_t end)
}
while (start < end);
asm volatile("mfence" : : : "memory");
__asm__ volatile("mfence" : : : "memory");
#else
x86_64_wbnoinvd();
#endif
@@ -516,11 +516,11 @@ void up_flush_dcache(uintptr_t start, uintptr_t end)
start &= ~(lsize - 1);
asm volatile("mfence" : : : "memory");
__asm__ volatile("mfence" : : : "memory");
do
{
asm volatile("\tclflush %0;\n" : "+m" (start));
__asm__ volatile("\tclflush %0;\n" : "+m" (start));
/* Increment the address by the size of one cache line. */
@@ -528,7 +528,7 @@ void up_flush_dcache(uintptr_t start, uintptr_t end)
}
while (start < end);
asm volatile("mfence" : : : "memory");
__asm__ volatile("mfence" : : : "memory");
}
#endif /* CONFIG_ARCH_DCACHE */
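
For reference, the leaf-04H formula quoted in the hunk above unpacks from the cpuid outputs as follows (field layout per the Intel SDM: EBX[31:22] = Ways-1, EBX[21:12] = Partitions-1, EBX[11:0] = LineSize-1, ECX = Sets-1). A sketch with the bit fields made explicit, using a hypothetical my_cache_size:

#include <stddef.h>
#include <stdint.h>

static size_t my_cache_size(int leaf)
{
  uint32_t eax = 4;                 /* CPUID leaf 04H: cache parameters */
  uint32_t ebx;
  uint32_t ecx = (uint32_t)leaf;    /* subleaf selects the cache level  */
  uint32_t edx;

  __asm__ volatile("cpuid"
                   : "+a" (eax), "=b" (ebx), "+c" (ecx), "=d" (edx));
  (void)edx;

  /* (Ways + 1) * (Partitions + 1) * (Line_Size + 1) * (Sets + 1) */

  return (size_t)(((ebx >> 22) & 0x3ff) + 1)
       * (size_t)(((ebx >> 12) & 0x3ff) + 1)
       * (size_t)(((ebx >>  0) & 0xfff) + 1)
       * (size_t)(ecx + 1);
}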

View File

@@ -124,8 +124,8 @@ void x86_64_check_and_enable_capability(void)
require |= X86_64_CPUID_01_RDRAND;
#endif
asm volatile("cpuid" : "=c" (ecx) : "a" (X86_64_CPUID_CAP)
: "rdx", "memory");
__asm__ volatile("cpuid" : "=c" (ecx) : "a" (X86_64_CPUID_CAP)
: "rdx", "memory");
/* Check features availability from ECX */
@@ -150,8 +150,8 @@ void x86_64_check_and_enable_capability(void)
require |= X86_64_CPUID_07_CLWB;
#endif
asm volatile("cpuid" : "=b" (ebx) : "a" (X86_64_CPUID_EXTCAP), "c" (0)
: "rdx", "memory");
__asm__ volatile("cpuid" : "=b" (ebx) : "a" (X86_64_CPUID_EXTCAP), "c" (0)
: "rdx", "memory");
/* Check features availability */
@@ -168,9 +168,9 @@ void x86_64_check_and_enable_capability(void)
#ifdef CONFIG_ARCH_X86_64_HAVE_XSAVE
/* Check XSAVE state area size for the current XCR0 state */
asm volatile("cpuid" : "=b" (ebx)
: "a" (X86_64_CPUID_XSAVE), "c" (0)
: "rdx", "memory");
__asm__ volatile("cpuid" : "=b" (ebx)
: "a" (X86_64_CPUID_XSAVE), "c" (0)
: "rdx", "memory");
if (XCPTCONTEXT_XMM_AREA_SIZE < ebx)
{
@@ -190,8 +190,9 @@ void x86_64_check_and_enable_capability(void)
return;
err:
asm volatile ("cli");
asm volatile ("hlt");
__asm__ volatile ("cli");
__asm__ volatile ("hlt");
goto err;
}
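
Worth noting while reading these hunks: cpuid overwrites all four of EAX, EBX, ECX and EDX, so a general-purpose wrapper usually declares every register as an output rather than relying on clobber lists. A hedged sketch (hypothetical my_cpuid, not the code used in this file):

#include <stdint.h>

static inline void my_cpuid(uint32_t leaf, uint32_t subleaf,
                            uint32_t *a, uint32_t *b,
                            uint32_t *c, uint32_t *d)
{
  *a = leaf;
  *c = subleaf;

  /* All four registers are outputs; EAX and ECX are also inputs */

  __asm__ volatile("cpuid"
                   : "+a" (*a), "=b" (*b), "+c" (*c), "=d" (*d));
}

A call such as my_cpuid(X86_64_CPUID_CAP, 0, &a, &b, &c, &d) would then stand in for the open-coded blocks above.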

View File

@@ -95,7 +95,8 @@ static void x86_64_cpu_tss_load(int cpu)
addr = X86_GDT_ISTL_SEL_NUM * 8 + 16 * cpu;
asm volatile ("mov %0, %%ax; ltr %%ax":: "m"(addr) : "memory", "rax");
__asm__ volatile ("mov %0, %%ax; ltr %%ax"
:: "m"(addr) : "memory", "rax");
}
/****************************************************************************
@@ -189,7 +190,8 @@ struct tss_s *x86_64_cpu_tss_now_get(void)
/* Get TSS associated with this CPU */
asm volatile ("str %%ax; mov %%ax, %0": "=rm"(seg) :: "memory", "rax");
__asm__ volatile ("str %%ax; mov %%ax, %0": "=rm"(seg)
:: "memory", "rax");
/* This is BSP if TSS not configured yet */
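
The selector arithmetic above follows from long-mode descriptor sizes: a 64-bit TSS descriptor occupies 16 bytes (two 8-byte GDT slots), hence the 16 * cpu stride on top of the X86_GDT_ISTL_SEL_NUM base. A minimal sketch (the fallback value below is a placeholder, not the kernel's real slot number; ltr requires ring 0):

#include <stdint.h>

#ifndef X86_GDT_ISTL_SEL_NUM
#  define X86_GDT_ISTL_SEL_NUM 6    /* placeholder GDT slot for the demo */
#endif

static inline void my_tss_load(int cpu)
{
  uint16_t sel = (uint16_t)(X86_GDT_ISTL_SEL_NUM * 8 + 16 * cpu);

  /* Load the task register; the CPU caches the selected descriptor */

  __asm__ volatile("ltr %0" : : "rm" (sel) : "memory");
}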

View File

@@ -77,10 +77,11 @@ void x86_64_timer_calibrate_freq(void)
unsigned long numerator;
unsigned long denominator;
asm volatile("cpuid"
: "=c" (crystal_freq), "=b" (numerator), "=a" (denominator)
: "a" (X86_64_CPUID_TSC)
: "rdx", "memory");
__asm__ volatile("cpuid"
: "=c" (crystal_freq), "=b" (numerator),
"=a" (denominator)
: "a" (X86_64_CPUID_TSC)
: "rdx", "memory");
if (numerator == 0 || denominator == 0 || crystal_freq == 0)
{

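CPUID leaf 15H, queried above, reports the TSC/core-crystal relationship; the nominal TSC rate falls out as crystal_freq * numerator / denominator. A sketch of the arithmetic (hypothetical helper; the example figures are illustrative only):

#include <stdint.h>

static inline uint64_t tsc_hz_from_cpuid(uint32_t crystal_freq,
                                         uint32_t numerator,
                                         uint32_t denominator)
{
  if (numerator == 0 || denominator == 0 || crystal_freq == 0)
    {
      return 0;                     /* fall back to another source */
    }

  /* e.g. a 24 MHz crystal with a 176/2 ratio -> 2112 MHz nominal TSC */

  return (uint64_t)crystal_freq * numerator / denominator;
}
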
View File

@@ -156,7 +156,7 @@ uint64_t *isr_handler(uint64_t *regs, uint64_t irq)
{
case 0:
case 16:
asm volatile("fnclex":::"memory");
__asm__ volatile("fnclex":::"memory");
nxsig_kill(this_task()->pid, SIGFPE);
break;

View File

@@ -66,7 +66,7 @@ void up_idle(void)
sched_process_timer();
#else
asm volatile("hlt");
__asm__ volatile("hlt");
#endif
}
#endif

View File

@@ -112,10 +112,10 @@ void up_initial_state(struct tcb_s *tcb)
#else
/* Initialize XSAVE region with a valid state */
asm volatile("xsave %0"
: "=m" (*xcp->regs)
: "a" (XSAVE_STATE_COMPONENTS), "d" (0)
: "memory");
__asm__ volatile("xsave %0"
: "=m" (*xcp->regs)
: "a" (XSAVE_STATE_COMPONENTS), "d" (0)
: "memory");
#endif
/* Save the initial stack pointer... the value of the stackpointer before

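xsave takes its requested-feature bitmap in EDX:EAX, which is why the block above passes XSAVE_STATE_COMPONENTS in EAX and zero in EDX. A sketch of the matching restore path (hypothetical my_* names; assumes the same mask convention and the architectural 64-byte alignment requirement):

#include <stdint.h>

/* Minimum layout: 512-byte legacy x87/SSE region + 64-byte XSAVE header */

struct my_xsave_area_s
{
  uint8_t data[512 + 64];
} __attribute__((aligned(64)));

static inline void my_xrstor(struct my_xsave_area_s *area, uint64_t mask)
{
  __asm__ volatile("xrstor %0"
                   : /* no output */
                   : "m" (*area),
                     "a" ((uint32_t)mask), "d" ((uint32_t)(mask >> 32))
                   : "memory");
}
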
View File

@@ -115,8 +115,10 @@ void up_dump_register(void *dumpregs)
uint64_t mxcsr;
uint64_t cr2;
asm volatile ("stmxcsr %0"::"m"(mxcsr):"memory");
asm volatile ("mov %%cr2, %%rax; mov %%rax, %0"::"m"(cr2):"memory", "rax");
__asm__ volatile ("stmxcsr %0"::"m"(mxcsr):"memory");
__asm__ volatile ("mov %%cr2, %%rax; mov %%rax, %0"
::"m"(cr2):"memory", "rax");
_alert("----------------CUT HERE-----------------\n");
_alert("General Information:\n");
_alert("CPL: %" PRId64 ", RPL: %" PRId64 "\n",

View File

@@ -57,6 +57,6 @@ void up_systemreset(void)
while (1)
{
asm volatile("hlt");
__asm__ volatile("hlt");
}
}

View File

@@ -101,7 +101,7 @@ void up_mask_tmr(void)
/* Required when using TSC deadline mode. */
asm volatile("mfence" : : : "memory");
__asm__ volatile("mfence" : : : "memory");
}
void up_unmask_tmr(void)
@@ -116,7 +116,7 @@ void up_unmask_tmr(void)
/* Required when using TSC deadline mode. */
asm volatile("mfence" : : : "memory");
__asm__ volatile("mfence" : : : "memory");
}
#ifndef CONFIG_SCHED_TICKLESS_ALARM

View File

@@ -122,7 +122,7 @@ void up_timer_initialize(void)
write_msr(MSR_X2APIC_LVTT, vector);
asm volatile("mfence" : : : "memory");
__asm__ volatile("mfence" : : : "memory");
apic_timer_set(NS_PER_MSEC);
}
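
The mfence here and in the tickless hunks orders the LVT and deadline MSR writes when the local APIC runs in TSC-deadline mode. A sketch of arming a one-shot deadline under that assumption (hypothetical apic_deadline_arm; assumes rdtsc() and write_msr() as defined in the intel64 header earlier in this commit):

#include <stdint.h>

#define MSR_IA32_TSC_DEADLINE 0x6e0 /* architectural MSR number */

static inline void apic_deadline_arm(uint64_t delta)
{
  /* Fire one shot `delta` TSC ticks from now */

  write_msr(MSR_IA32_TSC_DEADLINE, rdtsc() + delta);

  /* Order the MSR write against surrounding memory operations, matching
   * the mfence usage in the timer code above
   */

  __asm__ volatile("mfence" : : : "memory");
}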