/* Push a double argument (ARM32 Thumb EABI): 64-bit values must sit on an
   8-byte boundary, so pad the arg vector with 4 bytes when needed. */
static void dc_callvm_argDouble_arm32_thumb_eabi(DCCallVM* in_self, DCdouble x)
{
  DCCallVM_arm32_thumb* vm = (DCCallVM_arm32_thumb*)in_self;
  /* skip 4 pad bytes iff current size is congruent to 4 (mod 8) */
  dcVecSkip(&vm->mVecHead, dcVecSize(&vm->mVecHead) & 4);
  dcVecAppend(&vm->mVecHead, &x, sizeof(DCdouble));
}
/* Push a long long argument (MIPS o32): 8-byte-align the arg vector,
   append the value, and count it toward the argument total. */
static void dc_callvm_argLongLong_mips_o32(DCCallVM* in_self, DClonglong Lv)
{
  DCCallVM_mips_o32* vm = (DCCallVM_mips_o32*)in_self;
  /* pad with 4 bytes when the vector ends on an odd 4-byte slot */
  dcVecSkip(&vm->mVecHead, dcVecSize(&vm->mVecHead) & 4);
  dcVecAppend(&vm->mVecHead, &Lv, sizeof(DClonglong));
  ++vm->mArgCount;
}
/* Push a double argument (MIPS o32). The value always goes on the arg
   vector (8-byte aligned); additionally, the first two arguments are
   mirrored into FP registers. */
static void dc_callvm_argDouble_mips_o32(DCCallVM* in_self, DCdouble x)
{
  DCCallVM_mips_o32* vm = (DCCallVM_mips_o32*)in_self;
  /* keep 8-byte alignment for 64-bit values */
  dcVecSkip(&vm->mVecHead, dcVecSize(&vm->mVecHead) & 4);
  dcVecAppend(&vm->mVecHead, &x, sizeof(DCdouble));
  /* only arguments 0 and 1 get an FP-register copy */
  if(vm->mArgCount < 2)
    vm->mRegData.doubles[vm->mArgCount] = x;
  ++vm->mArgCount;
}
/* Issue the call (PPC64). The ABI reserves a parameter-save area of at
   least 64 bytes (8 doublewords), so grow the arg vector to that minimum
   before handing off to the assembly stub. */
void dc_callvm_call_ppc64(DCCallVM* in_self, DCpointer target)
{
  DCCallVM_ppc64* vm = (DCCallVM_ppc64*) in_self;
  int used = dcVecSize(&vm->mVecHead);
  if(used < 64)
    dcVecSkip(&vm->mVecHead, 64 - used);
  dcCall_ppc64(target, &vm->mRegData, dcVecSize(&vm->mVecHead), dcVecData(&vm->mVecHead));
}
/* Push a float argument (MIPS n64): use one of the 8 FP registers while
   available; otherwise spill 4 bytes to the stack and pad the slot to the
   ABI's 8-byte stride. */
static void dc_callvm_argFloat_mips_n64(DCCallVM* in_self, DCfloat x)
{
  DCCallVM_mips_n64* vm = (DCCallVM_mips_n64*)in_self;
  if(vm->mRegCount < 8) {
    /* room left in the FP register file */
    vm->mRegData.mFloatData[vm->mRegCount++].f = x;
  } else {
    /* stack slots are 8 bytes wide: write the float, then skip the pad */
    dcVecAppend(&vm->mVecHead, &x, sizeof(DCfloat));
    dcVecSkip(&vm->mVecHead, sizeof(DCfloat));
  }
}
/* Push a float argument (ARM64 AAPCS): the first 8 FP args occupy V
   registers; the single goes into the low S slot of its register pair.
   Overflow args go on the stack in 8-byte slots. */
static void a_float(DCCallVM* in_p, DCfloat x)
{
  DCCallVM_arm64* vm = (DCCallVM_arm64*)in_p;
  if(vm->f < 8) {
    /* low half of V register vm->f (two S slots per register) */
    vm->u.S[vm->f * 2] = x;
    ++vm->f;
  } else {
    dcVecAppend(&vm->mVecHead, &x, sizeof(DCfloat));
    dcVecSkip(&vm->mVecHead, 4); /* align to 8-bytes */
  }
}
/* Push a long long vararg (PPC64 ellipsis mode). Varargs are always
   mirrored on the stack; the first stack write reserves space matching
   the arguments already placed in GPRs. */
static void dc_callvm_argLongLong_ppc64_ellipsis(DCCallVM* in_self, DClonglong L)
{
  DCCallVM_ppc64* vm = (DCCallVM_ppc64*)in_self;
  /* lazily reserve the register-shadow area on first use of the stack */
  if(dcVecSize(&vm->mVecHead) == 0)
    dcVecSkip(&vm->mVecHead, sizeof(DClonglong) * vm->mIntRegs);
  if(vm->mIntRegs < 8)
    vm->mRegData.mIntData[vm->mIntRegs++] = L;
  /* push on stack */
  dcVecAppend(&vm->mVecHead, &L, sizeof(DClonglong));
}
/* Push a long long argument (ARM32 armhf). The first 16 bytes of the arg
   vector model r0-r3; a 64-bit value occupies an even/odd register pair,
   so the byte offset is rounded up to 8 first. */
static void a_longlong(DCCallVM* in_self, DClonglong x)
{
  DCCallVM_arm32_armhf* vm = (DCCallVM_arm32_armhf*)in_self;
  /* round the core-register byte offset up to an 8-byte boundary */
  vm->i = (vm->i + 4) & -8;
  if(vm->i < 16) {
    /* still fits in a register pair */
    *(DClonglong*)dcVecAt(&vm->mVecHead, vm->i) = x;
    vm->i += 8;
  } else {
    /* 64 bit values need to be aligned on 8 byte boundaries */
    dcVecSkip(&vm->mVecHead, dcVecSize(&vm->mVecHead) & 4);
    dcVecAppend(&vm->mVecHead, &x, sizeof(DClonglong));
  }
}
/* Push a double vararg (PPC64 ellipsis mode): fill an FP register (and
   mirror its bit pattern into the matching GPR) while space remains, and
   always append a stack copy as well. */
static void dc_callvm_argDouble_ppc64_ellipsis(DCCallVM* in_self, DCdouble d)
{
  DCCallVM_ppc64* vm = (DCCallVM_ppc64*)in_self;
  /* lazily reserve stack space shadowing args already placed in GPRs */
  if(dcVecSize(&vm->mVecHead) == 0)
    dcVecSkip(&vm->mVecHead, sizeof(DClonglong) * vm->mIntRegs);
  if(vm->mFloatRegs < 13) {
    vm->mRegData.mFloatData[vm->mFloatRegs++] = d;
    /* NOTE(review): raw bit copy of the double into the GPR image */
    if(vm->mIntRegs < 8)
      vm->mRegData.mIntData[vm->mIntRegs++] = *( (DClonglong*) &d );
  }
  /* push on stack */
  dcVecAppend(&vm->mVecHead, (DCpointer)&d, sizeof(DCdouble));
}
/* Push a long long argument (MIPS EABI): split across an even/odd integer
   register pair while room remains; afterwards close the register file and
   spill 8-byte-aligned to the stack. */
static void dc_callvm_argLongLong_mips_eabi(DCCallVM* in_self, DClonglong Lv)
{
  DCCallVM_mips_eabi* vm = (DCCallVM_mips_eabi*)in_self;
  if(vm->mIntRegs < 7) {
    DCint* half = (DCint*) &Lv;
    /* 64-bit values start at an even register: skip an odd slot if needed */
    if(vm->mIntRegs & 1)
      ++vm->mIntRegs;
    vm->mRegData.mIntData[vm->mIntRegs++] = half[0];
    vm->mRegData.mIntData[vm->mIntRegs++] = half[1];
  } else {
    vm->mIntRegs = 8; /* register file is full from here on */
    /* 64 bit values need to be aligned on 8 byte boundaries */
    dcVecSkip(&vm->mVecHead, dcVecSize(&vm->mVecHead) & 4);
    dcVecAppend(&vm->mVecHead, &Lv, sizeof(DClonglong));
  }
}
/* Pushes a long long argument (PPC64). The first 8 integer args go into
   GPRs. Under ELFv1 every argument is additionally mirrored on the stack;
   under ELFv2 (DC__ABI_PPC64_ELF_V == 2) register-passed args need no
   stack copy, and the first genuinely stack-passed arg reserves the
   8-doubleword shadow area first. */
static void dc_callvm_argLongLong_ppc64(DCCallVM* in_self, DClonglong L)
{
  DCCallVM_ppc64* self = (DCCallVM_ppc64*)in_self;
  /* fillup integer register file */
  if (self->mIntRegs < 8) {
    self->mRegData.mIntData[self->mIntRegs++] = L;
#if DC__ABI_PPC64_ELF_V == 2
    /* ELFv2: register-passed args are not duplicated on the stack */
    return;
#endif
  }
#if DC__ABI_PPC64_ELF_V == 2
  /* ELFv2: reserve space shadowing the 8 GPRs before the first stack arg */
  if (dcVecSize(&self->mVecHead) == 0) {
    dcVecSkip(&self->mVecHead,sizeof(DClonglong)*8);
  }
#endif
  /* push on stack */
  dcVecAppend(&self->mVecHead,&L,sizeof(DClonglong));
}
/* Pushes a double argument (PPC64). The first 13 FP args go into FP
   registers, and while GPRs remain the raw bit pattern is mirrored there
   too. Under ELFv1 the value is always mirrored on the stack as well;
   under ELFv2 (DC__ABI_PPC64_ELF_V == 2) a value that landed in both an
   FP register and a GPR needs no stack copy, and the first stack-passed
   arg reserves the 8-doubleword shadow area first. */
static void dc_callvm_argDouble_ppc64(DCCallVM* in_self, DCdouble d)
{
  DCCallVM_ppc64* self = (DCCallVM_ppc64*)in_self;
  if (self->mFloatRegs < 13) {
    self->mRegData.mFloatData[self->mFloatRegs++] = d;
    if (self->mIntRegs < 8) {
      /* raw bit copy of the double into the GPR image */
      self->mRegData.mIntData[self->mIntRegs++] = *( (DClonglong*) &d );
#if DC__ABI_PPC64_ELF_V == 2
      /* ELFv2: register-passed args are not duplicated on the stack */
      return;
#endif
    }
  }
#if DC__ABI_PPC64_ELF_V == 2
  /* ELFv2: reserve space shadowing the 8 GPRs before the first stack arg */
  if (dcVecSize(&self->mVecHead) == 0) {
    dcVecSkip(&self->mVecHead,sizeof(DClonglong)*8);
  }
#endif
  /* push on stack */
  dcVecAppend(&self->mVecHead,(DCpointer) &d,sizeof(DCdouble));
}
/* Push a double argument (ARM32 armhf). While VFP D registers remain, the
   value takes one D register (two S slots); once exhausted, spill to the
   stack 8-byte aligned, copied via a union to avoid aliasing the double. */
static void a_double(DCCallVM* in_p, DCdouble x)
{
  union { DCdouble d; DCchar b[8]; } bits;
  DCCallVM_arm32_armhf* vm = (DCCallVM_arm32_armhf*)in_p;
  if(vm->d < 16) {
    /* claim D register vm->d/2, i.e. S slots vm->d and vm->d+1 */
    *(DCdouble*)&vm->S[vm->d] = x;
    vm->d += 2;
    /* s tracks the next single slot: when even it moves in lockstep with
       d; an odd s marks a back-fillable single register and is kept */
    if((vm->s & 1) == 0)
      vm->s = vm->d;
  } else {
    vm->s = 16; /* FP file full: stop back-filling single-precision gaps */
    bits.d = x;
    /* 64 bit values need to be aligned on 8 byte boundaries */
    dcVecSkip(&vm->mVecHead, dcVecSize(&vm->mVecHead) & 4);
    dcVecAppend(&vm->mVecHead, &bits.b[0], sizeof(DCdouble));
  }
}
/* Push a double argument (ARM32 armhf). While VFP D registers remain, the
   value occupies one D register (two S slots); once exhausted, it is
   spilled to the stack 8-byte aligned, copied through a union to sidestep
   strict-aliasing issues.
   Cleanup: removed the dead '#if 0' byte-swap experiment and the
   commented-out ',w' union declaration; normalized the stray '(double*)'
   cast to '(DCdouble*)' for consistency with the sibling implementation. */
static void a_double(DCCallVM* in_p, DCdouble x)
{
  DCCallVM_arm32_armhf* p = (DCCallVM_arm32_armhf*)in_p;
  union { DCdouble d; DCchar b[8]; } v;
  if(p->d < 16) {
    /* claim D register p->d/2, i.e. S slots p->d and p->d+1 */
    * (DCdouble*) &p->S[p->d] = x;
    p->d += 2;
    /* if s is even it always equals d; otherwise s points to an odd,
       still-free single-precision register and must be preserved */
    if(!(p->s & 1))
      p->s = p->d;
  } else {
    p->s = 16; /* FP file full: stop back-filling single-precision gaps */
    v.d = x;
    /* 64 bit values need to be aligned on 8 byte boundaries */
    dcVecSkip(&p->mVecHead, dcVecSize(&p->mVecHead) & 4);
    dcVecAppend(&p->mVecHead, &v.b[0], sizeof(DCdouble));
  }
}