x86-64: Run asmtest as well
This fixes the asm test that's part of tcctest.c and activates it on x86-64 as well, which required a small fix for the 'm' constraint.
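For context, a minimal sketch of the kind of statement the 'm' constraint fix enables (my own illustration, not code from the commit; the variable names are arbitrary): an 'm'-constrained operand naming a local variable is substituted as a frame-pointer-relative address, which on x86-64 must be N(%rbp) rather than the 32-bit N(%ebp).

#include <stdio.h>

int main(void)
{
    long v = 41;
#ifdef __x86_64__
    /* tcc substitutes something like -8(%rbp) for %0 here;
       before this fix the x86-64 path still printed an %ebp-based address */
    __asm__ __volatile__("incq %0" : "+m" (v));
#else
    __asm__ __volatile__("incl %0" : "+m" (v));
#endif
    printf("%ld\n", v); /* 42 */
    return 0;
}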
@@ -1366,7 +1366,11 @@ ST_FUNC void subst_asm_operand(CString *add_str,
             snprintf(buf, sizeof(buf), "%d", (int)sv->c.i);
             cstr_cat(add_str, buf, -1);
         } else if ((r & VT_VALMASK) == VT_LOCAL) {
+#ifdef TCC_TARGET_X86_64
+            snprintf(buf, sizeof(buf), "%d(%%rbp)", (int)sv->c.i);
+#else
             snprintf(buf, sizeof(buf), "%d(%%ebp)", (int)sv->c.i);
+#endif
             cstr_cat(add_str, buf, -1);
         } else if (r & VT_LVAL) {
             reg = r & VT_VALMASK;
@@ -2411,24 +2411,24 @@ void local_label_test(void)
     printf("bb2\n");
     goto l4;
 }
 
 /* inline assembler test */
-#ifdef __i386__
+#if defined(__i386__) || defined(__x86_64__)
 
 /* from linux kernel */
 static char * strncat1(char * dest,const char * src,size_t count)
 {
-int d0, d1, d2, d3;
+long d0, d1, d2, d3;
 __asm__ __volatile__(
         "repne\n\t"
         "scasb\n\t"
-        "decl %1\n\t"
+        "dec %1\n\t"
         "mov %8,%3\n"
         "1:\tdec %3\n\t"
         "js 2f\n\t"
         "lodsb\n\t"
         "stosb\n\t"
         "testb %%al,%%al\n\t"
         "jne 1b\n"
         "2:\txor %2,%2\n\t"
         "stosb"
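A note on the two edits above, as I read them: the scratch variables become long because on x86-64 the output constraints bind them to 64-bit registers, so the hard-coded l suffix of decl would name the wrong operand size; the suffix-less dec lets the assembler infer the width from the register operand. A hedged standalone illustration (not from the commit):

#include <stdio.h>

int main(void)
{
    long n = 10;
    /* no size suffix: the assembler emits decl for a 32-bit register
       operand and decq for a 64-bit one, so the template is portable */
    __asm__("dec %0" : "+r" (n));
    printf("%ld\n", n); /* 9 */
    return 0;
}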
@@ -2438,18 +2438,18 @@ return dest;
 return dest;
 }
 
 static char * strncat2(char * dest,const char * src,size_t count)
 {
-int d0, d1, d2, d3;
+long d0, d1, d2, d3;
 __asm__ __volatile__(
         "repne scasb\n\t" /* one-line repne prefix + string op */
-        "decl %1\n\t"
+        "dec %1\n\t"
         "mov %8,%3\n"
         "1:\tdec %3\n\t"
         "js 2f\n\t"
         "lodsb\n\t"
         "stosb\n\t"
         "testb %%al,%%al\n\t"
         "jne 1b\n"
         "2:\txor %2,%2\n\t"
         "stosb"
@@ -2459,7 +2459,7 @@ return dest;
 return dest;
 }
 
 static inline void * memcpy1(void * to, const void * from, size_t n)
 {
-int d0, d1, d2;
+long d0, d1, d2;
 __asm__ __volatile__(
@@ -2477,7 +2477,7 @@ return (to);
 return (to);
 }
 
 static inline void * memcpy2(void * to, const void * from, size_t n)
 {
-int d0, d1, d2;
+long d0, d1, d2;
 __asm__ __volatile__(
@@ -2516,14 +2516,28 @@ static __inline__ __const__ unsigned int swab32(unsigned int x)
 }
 
 static __inline__ unsigned long long mul64(unsigned int a, unsigned int b)
 {
 unsigned long long res;
+#ifdef __x86_64__
+/* Using the A constraint is wrong (it means rdx:rax, which is too large)
+   but still test the 32bit->64bit mull. */
+unsigned int resh, resl;
+__asm__("mull %2" : "=a" (resl), "=d" (resh) : "a" (a), "r" (b));
+res = ((unsigned long long)resh << 32) | resl;
+#else
 __asm__("mull %2" : "=A" (res) : "a" (a), "r" (b));
+#endif
 return res;
 }
 
 static __inline__ unsigned long long inc64(unsigned long long a)
 {
 unsigned long long res;
+#ifdef __x86_64__
+/* Using the A constraint is wrong, and increments are tested
+   elsewhere. */
+res = a + 1;
+#else
 __asm__("addl $1, %%eax ; adcl $0, %%edx" : "=A" (res) : "A" (a));
+#endif
 return res;
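The comments in this last hunk carry the key reasoning: on i386 the "A" constraint means the edx:eax pair, which is exactly where mull leaves its 64-bit product, but on x86-64 "A" means rdx:rax, a 128-bit pair that a 64-bit result cannot fill. A standalone sketch of the portable two-output pattern the test switches to (my illustration with a hypothetical name, not part of the commit):

#include <stdio.h>

/* 32x32 -> 64 multiply; mull leaves the low half in eax and the high half
   in edx, so binding the two halves separately works on both i386 and
   x86-64, with no "A" constraint involved. */
static unsigned long long mul32x32(unsigned int a, unsigned int b)
{
#if defined(__i386__) || defined(__x86_64__)
    unsigned int lo, hi;
    __asm__("mull %2" : "=a" (lo), "=d" (hi) : "a" (a), "r" (b));
    return ((unsigned long long)hi << 32) | lo;
#else
    return (unsigned long long)a * b;
#endif
}

int main(void)
{
    printf("%llu\n", mul32x32(0x12345678u, 0x9abcdef0u));
    return 0;
}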