Commit 49eb21a6 authored by Léo Grange

change asm to __asm__ for C99 compliance

parent 850e8a7b
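
Background on the motivation: in strict ISO mode (gcc -std=c99), GCC does not recognize the extension keyword `asm`, while the alternate keyword `__asm__` lives in the implementation-reserved namespace and is accepted in every mode. A minimal sketch (not from this repository):

	void keyword_demo(void)
	{
		/* Rejected under gcc -std=c99: `asm` is only a GNU extension. */
		/* asm volatile ("nop"); */

		/* Always accepted, including under -std=c99 and -ansi. */
		__asm__ volatile ("nop");
	}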
@@ -35,8 +35,8 @@ void exception_handler()
 	struct process *cur;
 	(void)(tea);
-	asm volatile ("stc spc, %0" : "=r"(spcval) );
-	asm volatile ("mov r15, %0" : "=r"(stackval) );
+	__asm__ volatile ("stc spc, %0" : "=r"(spcval) );
+	__asm__ volatile ("mov r15, %0" : "=r"(stackval) );
 	cur = process_get_current();
@@ -64,7 +64,7 @@ void exception_handler()
 	interrupt_inhibit_all(0);
 	// call given function, and store return value in context-saved r0
-	asm volatile ("mov %0, r0;"
+	__asm__ volatile ("mov %0, r0;"
 			"mov.l @(16, r0), r4;"
 			"mov.l @(20, r0), r5;"
 			"mov.l @(24, r0), r6;"
@@ -119,7 +119,7 @@ void exception_handler()
 		break;
 	}
-	asm volatile ("stc spc, %0" : "=r"(spcval) );
+	__asm__ volatile ("stc spc, %0" : "=r"(spcval) );
 	// avoid verbosity for TRAPA
 	if(evt != EXP_CODE_TRAPA)
@@ -158,7 +158,7 @@ void tlbmiss_handler()
 	if(_recurcive_tlbfault) {
 		void *spcval;
-		asm volatile("stc spc, %0":"=r"(spcval));
+		__asm__ volatile("stc spc, %0":"=r"(spcval));
 		printk(LOG_EMERG, "> [%d] Page fault %p, PC=%p\n", MMU.PTEH.BIT.ASID,
 				PM_PHYSICAL_ADDR(MMU.PTEH.BIT.VPN), spcval);
@@ -248,8 +248,8 @@ void tlbmiss_handler()
 		int spcval;
 		void *stack;
-		asm volatile("stc spc, %0":"=r"(spcval));
-		asm volatile("mov r15, %0":"=r"(stack));
+		__asm__ volatile("stc spc, %0":"=r"(spcval));
+		__asm__ volatile("mov r15, %0":"=r"(stack));
 		printk(LOG_ERR, "> Dereference %p\n> With PC=%p\n",
 				(void*)TEA, (void*)spcval);
 		kdebug_oops("Access to a forbiden page");
@@ -51,8 +51,7 @@ void interrupt_init() {
 void interrupt_set_vbr(void *vbr)
 {
-	asm("mov %0, r2"::"r"(vbr):"r2");
-	asm("ldc r2,vbr");
+	__asm__ volatile ("ldc %0, vbr" : : "r"(vbr));
 }
@@ -60,8 +59,7 @@ void interrupt_set_vbr(void *vbr)
 void* interrupt_get_vbr(void)
 {
 	void *ptr;
-	asm("stc vbr,r2":::"r2");
-	asm("mov.l r2, %0":"=m"(ptr));
+	__asm__ volatile ("stc vbr, %0" : "=r"(ptr));
 	return ptr;
 }
@@ -71,7 +69,7 @@ void arch_int_weak_atomic_block(int mode) {
 	// use I3~I0 of SR register to set interrupt priorities
 	int value = mode == 0 ? INTERRUPT_PVALUE_LOW-1 : INTERRUPT_PVALUE_CRITICAL-1;
-	asm volatile ( "stc sr, r1;"
+	__asm__ volatile ( "stc sr, r1;"
			"mov %0, r0;"
			"and #0xF, r0;"
			"shll2 r0;"
@@ -187,26 +185,26 @@ interrupt_callback_t interrupt_get_callback(unsigned int interruptID) {
 // defined in sys/interrupt.h
 void interrupt_atomic_save(int *state) {
 	unsigned int sr;
-	asm volatile ( "stc sr, %0" : "=r"(sr) : : );
+	__asm__ volatile ( "stc sr, %0" : "=r"(sr) : : );
 	*state = sr & 0x000000F0; // only I3-I0 bits
 	// re-write SR with highest priority
 	sr = sr | 0x000000F0;
-	asm volatile ( "ldc %0, sr" : : "r"(sr) : );
+	__asm__ volatile ( "ldc %0, sr" : : "r"(sr) : );
 }
 
 void interrupt_atomic_restore(int state) {
 	unsigned int sr;
-	asm volatile ( "stc sr, %0" : "=r"(sr) : : );
+	__asm__ volatile ( "stc sr, %0" : "=r"(sr) : : );
 	sr = (sr & (~0x000000F0) ) | (state & 0x000000F0);
-	asm volatile ( "ldc %0, sr" : : "r"(sr) : );
+	__asm__ volatile ( "ldc %0, sr" : : "r"(sr) : );
 }
 
 int interrupt_in_atomic() {
 	unsigned int sr;
-	asm volatile ( "stc sr, %0" : "=r"(sr) : : );
+	__asm__ volatile ( "stc sr, %0" : "=r"(sr) : : );
 	return ( sr & 0x000000F0) == 0xF0;
 }
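
The three helpers above manipulate the I3-I0 interrupt-mask field of the SH status register. A usage sketch, with a hypothetical caller: bracket a critical section so nothing below the highest priority can preempt it.

	void shared_counter_update(void)
	{
		int irq_state;

		interrupt_atomic_save(&irq_state);    /* raise I3-I0 to the maximum */
		/* ... update data shared with interrupt handlers ... */
		interrupt_atomic_restore(irq_state);  /* put the saved I-bits back */
	}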
@@ -58,7 +58,7 @@ extern inline unsigned char mmu_getasid() {
 // PPN must be given like a 1K page number (even for 4K page!)
 extern inline void mmu_tlb_fillload(unsigned int ppn, unsigned short flags) {
 	MMU.PTEL.LONG = (ppn << 10) | flags;
-	asm volatile ("ldtlb":::"memory" );
+	__asm__ volatile ("ldtlb":::"memory" );
 }
@@ -48,7 +48,7 @@ static void arch_print_trace(uint32 *stack, uint32 *bottom) {
 	int i;
 	/*
-	asm volatile ("mov r14, %0;"
+	__asm__ volatile ("mov r14, %0;"
 			"mov r15, %1"
 			: "=r"(cur), "=r"(stack) : : );
 	*/
@@ -75,7 +75,7 @@ void kdebug_print_trace() {
 	proc = process_get_current();
-	asm volatile ("mov r15, %0" : "=r"(stack) : : );
+	__asm__ volatile ("mov r15, %0" : "=r"(stack) : : );
 	arch_print_trace(stack, (uint32*)(proc->acnt));//proc->kernel_stack);
 }
@@ -18,7 +18,7 @@ void arch_kernel_contextjmp(struct _context_info *cnt, struct _context_info **old_cnt) {
 	if(old_cnt != NULL)
 		*old_cnt = cnt->previous;
-	asm volatile (
+	__asm__ volatile (
			"mov %0, r0 ;"
			"mov r0, r15;"
			"add #64, r0;"
@@ -65,7 +65,7 @@ int arch_process_mode(struct process *proc) {
 void arch_idle_func();
-asm (
+__asm__ (
 	" .section \".text\" ;"
 	" .align 1 ;"
 	"_arch_idle_func:"
@@ -56,9 +56,9 @@ int elf_load_kernel(const char *path, const char *cmdline, void *cmd_addr,
 	unsigned int new_sr;
 	// TODO avoid any exception during kernel copy!!!
-	asm volatile ("stc sr, %0" : "=r"(old_sr) );
+	__asm__ volatile ("stc sr, %0" : "=r"(old_sr) );
 	new_sr = old_sr | (1 << 28); // BL bit set to 1
-	asm volatile ("ldc %0, sr" : : "r"(new_sr) );
+	__asm__ volatile ("ldc %0, sr" : : "r"(new_sr) );
 	if(smem_open(path, &kernel) == 0) {
 		// check for ELF informations
@@ -116,7 +116,7 @@ int elf_load_kernel(const char *path, const char *cmdline, void *cmd_addr,
 		ret = -2;
 	}
-	asm volatile ("ldc %0, sr" : : "r"(old_sr) );
+	__asm__ volatile ("ldc %0, sr" : : "r"(old_sr) );
 	return ret;
 }
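
Taken together, the two elf_load_kernel() hunks save, set, and restore the SR.BL bit (bit 28), which blocks interrupts and exceptions on SH while the kernel image is copied. Condensed from the hunks above, with the copy itself elided:

	static void blocking_copy_sketch(void)
	{
		unsigned int old_sr, new_sr;

		__asm__ volatile ("stc sr, %0" : "=r"(old_sr));
		new_sr = old_sr | (1 << 28);                     /* set BL: block exceptions */
		__asm__ volatile ("ldc %0, sr" : : "r"(new_sr));
		/* ... copy that must not be interrupted ... */
		__asm__ volatile ("ldc %0, sr" : : "r"(old_sr)); /* restore previous SR */
	}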
@@ -330,7 +330,7 @@ pid_t sys_fork() {
 			+ ( ((unsigned int)cur->acnt) % PM_PAGE_BYTES);
 	// compute the position in stack
-	asm volatile ("mov r15, %0" : "=r"(cur_stack));
+	__asm__ volatile ("mov r15, %0" : "=r"(cur_stack));
 	// WARNING : only works if *ONE* page is used for kernel stack!
 	kstack -= PM_PAGE_BYTES - ((unsigned int)(cur_stack) % PM_PAGE_BYTES);
@@ -623,7 +623,7 @@ int sys_execve(const char *filename, char *const argv[], char *const envp[]) {
 	printk(LOG_DEBUG, "exec: ready, r15=%p\n", (void*)(cur->acnt->reg[15]));
 	// this job is done using inline assembly to avoid GCC stack usage
-	asm volatile (
+	__asm__ volatile (
			"mov %0, r15;"
			"mov %1, r0;"
			"mov %2, r4;"
@@ -6,7 +6,7 @@
 #define _SYSCALL_(name,id) \
 	void name(int a, int b, int c, int d) \
 	{ \
-		asm volatile ("trapa %0;" : : "n"(id) ); \
+		__asm__ volatile ("trapa %0;" : : "n"(id) ); \
 	}
 #endif //_SYSCALL_ARCH_SYSCALL_H
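
Each use of this macro defines a user-side syscall stub that issues a trapa with a constant vector; the "n" constraint requires id to be a compile-time constant, matching the immediate operand of the SH trapa instruction. A sketch, with a hypothetical stub name and trap number:

	_SYSCALL_(test_syscall, 32)
	/* expands to roughly:
	 * void test_syscall(int a, int b, int c, int d)
	 * {
	 *     __asm__ volatile ("trapa %0;" : : "n"(32));
	 * }
	 * The four int arguments already sit in r4-r7 per the SH calling
	 * convention, where the kernel's trap handler can pick them up. */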