/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int loc_kind, btype, type_word, disp;

    type_word = v->type.t;
    disp = v->c.ul;
    loc_kind = v->r & VT_VALMASK;
    btype = type_word & VT_BTYPE;

    /* XXX: incorrect if float reg to reg */
    if (btype == VT_FLOAT) {
        o(0xd9); /* fsts */
        r = 2;
    } else if (btype == VT_DOUBLE) {
        o(0xdd); /* fstpl */
        r = 2;
    } else if (btype == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(0xdb);   /* fstpt */
        r = 7;
    } else {
        if (btype == VT_SHORT)
            o(0x66); /* 16 bit operand size prefix */
        /* movb for byte-sized stores, movl otherwise */
        o((btype == VT_BYTE || btype == VT_BOOL) ? 0x88 : 0x89);
    }

    if (loc_kind == VT_CONST || loc_kind == VT_LOCAL || (v->r & VT_LVAL)) {
        gen_modrm(r, v->r, v->sym, disp);
    } else if (loc_kind != r) {
        o(0xc0 + loc_kind + r * 8); /* mov r, fr */
    }
}
// Store register 'r' in lvalue 'v' void store(int r, SValue *v) { int fr, bt, ft, fc; ft = v->type.t; fc = v->c.ul; fr = v->r & VT_VALMASK; bt = ft & VT_BTYPE; regs_used |= 1 << r; // TODO: incorrect if float reg to reg if (bt == VT_FLOAT) { o(0xd9); // fsts r = 2; } else if (bt == VT_DOUBLE) { o(0xdd); // fstpl r = 2; } else if (bt == VT_LDOUBLE) { o(0xc0d9); // fld %st(0) o(0xdb); // fstpt r = 7; } else { if (bt == VT_SHORT) o(0x66); if (bt == VT_BYTE || bt == VT_BOOL) { o(0x88); } else { o(0x89); } } if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) { gen_modrm(r, v->r, v->sym, fc); } else if (fr != r) { o(0xc0 + fr + r * 8); // mov r, fr } }
/* Encode a "reg, r/m" operand pair into '*out': emits the ModRM byte,
   an optional SIB byte, and an optional 8- or 32-bit displacement.
   'r' is the register field; 'rm' describes the r/m operand (pure
   register, or base/index/scale/displacement memory form, with -1
   meaning "no base" / "no index"). */
static __forceinline void gen_modrm_sib(uint8_t **out, int r, struct modrm_rm_t rm)
{
    /* direct register operand: mod = 3, no memory reference follows */
    if (rm.flags == MODRM_PURE_REGISTER) {
        gen_modrm(out, 3, r, rm.base);
        return;
    }
    /* index field 4 means "no index" in the SIB byte, so rsp cannot be
       scaled.  NOTE(review): with REX.X set, index field 4 selects r12,
       which IS a legal index register; this rejection is only correct if
       rm.index is a pre-REX 3-bit field value -- confirm against callers. */
    if (rm.index == 4) {
        log_error("gen_modrm(): rsp or r12 cannot be used as an index register.\n");
        return;
    }
    /* does the displacement survive a round-trip through a sign-extended
       8-bit immediate? */
    int is_disp8 = (((int8_t)rm.disp) == rm.disp);
    if (rm.base == -1 && rm.index == -1) /* disp32 */
    {
        /* mod = 0, r/m = 5: disp32 with no registers (on x86-64 this
           encoding is RIP-relative -- presumably intended; verify) */
        gen_modrm(out, 0, r, 5);
        gen_dword(out, rm.disp);
    }
    else if (rm.base == -1) /* [scaled index] + disp32 */
    {
        /* SIB with base field 5 under mod = 0 means "no base, disp32" */
        gen_modrm(out, 0, r, 4);
        gen_sib(out, 5, rm.index, rm.scale);
        gen_dword(out, rm.disp);
    }
    else if (rm.base == 4 || rm.index != -1) /* SIB required */
    {
        /* r/m = 4 escapes to a SIB byte; index field 4 there encodes
           "no index" when the caller supplied none */
        gen_modrm(out, is_disp8? 1: 2, r, 4);
        gen_sib(out, rm.base, rm.index == -1? 4: rm.index, rm.scale);
        if (is_disp8)
            gen_byte(out, (int8_t)rm.disp);
        else
            gen_dword(out, rm.disp);
    }
    else /* [base] + disp */
    {
        /* plain base register; pick the shortest displacement width */
        if (is_disp8)
        {
            gen_modrm(out, 1, r, rm.base);
            gen_byte(out, (int8_t)rm.disp);
        }
        else
        {
            gen_modrm(out, 2, r, rm.base);
            gen_dword(out, rm.disp);
        }
    }
}
// Generate function prolog of type 't' void gfunc_prolog(CType *func_type) { int addr, align, size, func_call, fastcall_nb_regs; int param_index, param_addr; uint8_t *fastcall_regs_ptr; Sym *sym; CType *type; #ifdef DEBUG_BRANCH printf("compile %s\n", func_name); #endif reset_code_buf(); gbranch(CodeStart); sym = func_type->ref; func_naked = FUNC_NAKED(sym->r); func_call = FUNC_CALL(sym->r); addr = 8; loc = 0; regs_used = 0; if (func_call >= FUNC_FASTCALL1 && func_call <= FUNC_FASTCALL3) { fastcall_nb_regs = func_call - FUNC_FASTCALL1 + 1; fastcall_regs_ptr = fastcall_regs; } else if (func_call == FUNC_FASTCALLW) { fastcall_nb_regs = 2; fastcall_regs_ptr = fastcallw_regs; } else { fastcall_nb_regs = 0; fastcall_regs_ptr = NULL; } param_index = 0; // If the function returns a structure, then add an implicit pointer parameter func_vt = sym->type; if ((func_vt.t & VT_BTYPE) == VT_STRUCT) { // TODO: fastcall case? func_vc = addr; addr += 4; param_index++; } // Define parameters while ((sym = sym->next) != NULL) { type = &sym->type; size = type_size(type, &align); size = (size + 3) & ~3; if (param_index < fastcall_nb_regs) { // Save FASTCALL register if (!func_naked) { loc -= 4; o(0x89); // movl gen_modrm(fastcall_regs_ptr[param_index], VT_LOCAL, NULL, loc); param_addr = loc; } } else { param_addr = addr; addr += size; } sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL, param_addr); param_index++; } // pascal type call? func_ret_sub = 0; if (func_call == FUNC_STDCALL) func_ret_sub = addr - 8; func_noargs = (addr == 8); }
// Load 'r' from value 'sv' void load(int r, SValue *sv) { int v, t, ft, fc, fr, a; SValue v1; fr = sv->r; ft = sv->type.t; fc = sv->c.ul; regs_used |= 1 << r; v = fr & VT_VALMASK; if (fr & VT_LVAL) { if (v == VT_LLOCAL) { v1.type.t = VT_INT; v1.r = VT_LOCAL | VT_LVAL; v1.c.ul = fc; load(r, &v1); fr = r; } if ((ft & VT_BTYPE) == VT_FLOAT) { o(0xd9); // flds r = 0; } else if ((ft & VT_BTYPE) == VT_DOUBLE) { o(0xdd); // fldl r = 0; } else if ((ft & VT_BTYPE) == VT_LDOUBLE) { o(0xdb); // fldt r = 5; } else if ((ft & VT_TYPE) == VT_BYTE) { o(0xbe0f); // movsbl } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) { o(0xb60f); // movzbl } else if ((ft & VT_TYPE) == VT_SHORT) { o(0xbf0f); // movswl } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) { o(0xb70f); // movzwl } else { o(0x8b); // movl } gen_modrm(r, fr, sv->sym, fc); } else { if (v == VT_CONST) { if (fc == 0 && (fr & VT_SYM) == 0) { o(0x33); // xor r, r o(0xc0 + r + r * 8); } else { o(0xb8 + r); // mov $xx, r gen_addr32(fr, sv->sym, fc); } } else if (v == VT_LOCAL) { o(0x8d); // lea xxx(%ebp), r gen_modrm(r, VT_LOCAL, sv->sym, fc); } else if (v == VT_CMP) { o(0x0f); // setxx br o(fc); o(0xc0 + r); o(0x0f); // movzx r,br o(0xb6); o(0xc0 + r + r * 8); } else if (v == VT_JMP || v == VT_JMPI) { t = v & 1; oad(0xb8 + r, t); // mov $1, r a = gjmp(0, 0); // jmp after gsym(fc); oad(0xb8 + r, t ^ 1); // mov $0, r gsym(a); } else if (v != r) { o(0x89); o(0xc0 + r + v * 8); // mov v, r } } }
// Generate a floating point operation 'v = t1 op t2' instruction. The
// two operands are guaranteed to have the same floating point type.
// Comparisons lower to fucompp + fnstsw and leave a VT_CMP value;
// arithmetic lowers to an x87 op with one memory operand when possible.
// TODO: need to use ST1 too
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;

    // Convert constants to memory references
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(RC_FLOAT);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(RC_FLOAT);

    // Must put at least one value in the floating point register
    if ((vtop[-1].r & VT_LVAL) && (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(RC_FLOAT);
        vswap();
    }
    swapped = 0;
    // Swap the stack if needed so that t1 is the register and t2 is the memory reference
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if (op >= TOK_ULT && op <= TOK_GT) {
        // Load on stack second operand
        load(TREG_ST0, vtop);
        save_reg(TREG_EAX); // eax is used by FP comparison code
        // fucompp is not symmetric: compensate for operand order
        if (op == TOK_GE || op == TOK_GT) {
            swapped = !swapped;
        } else if (op == TOK_EQ || op == TOK_NE) {
            swapped = 0; // equality does not care about operand order
        }
        if (swapped)
            o(0xc9d9); // fxch %st(1)
        o(0xe9da); // fucompp
        o(0xe0df); // fnstsw %ax
        // Translate the x87 condition bits (C0/C2/C3 in %ah) into a
        // flags test that the integer setcc machinery can consume
        if (op == TOK_EQ) {
            o(0x45e480); // and $0x45, %ah
            o(0x40fC80); // cmp $0x40, %ah
        } else if (op == TOK_NE) {
            o(0x45e480); // and $0x45, %ah
            o(0x40f480); // xor $0x40, %ah
            op = TOK_NE;
        } else if (op == TOK_GE || op == TOK_LE) {
            o(0x05c4f6); // test $0x05, %ah
            op = TOK_EQ;
        } else {
            o(0x45c4f6); // test $0x45, %ah
            op = TOK_EQ;
        }
        vtop--;
        vtop->r = VT_CMP; // result is the pending comparison, code in c.i
        vtop->c.i = op;
    } else {
        // No memory reference possible for long double operations
        if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
            load(TREG_ST0, vtop);
            swapped = !swapped;
        }
        // 'a' selects the x87 arithmetic opcode extension; subtraction
        // and division get the reversed form (+1) when operands were swapped
        switch (op) {
        case '+':
            a = 0;
            break;
        case '-':
            a = 4;
            if (swapped)
                a++;
            break;
        case '*':
            a = 1;
            break;
        case '/':
            a = 6;
            if (swapped)
                a++;
            break;
        default:
            a = 0;
        }
        ft = vtop->type.t;
        fc = vtop->c.ul;
        if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            o(0xde); // fxxxp %st, %st(1)
            o(0xc1 + (a << 3));
        } else {
            // If saved lvalue, then we must reload it
            r = vtop->r;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_INT;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.ul = fc;
                load(r, &v1);
                fc = 0;
            }
            // 0xdc: double memory operand, 0xd8: float memory operand
            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xdc);
            } else {
                o(0xd8);
            }
            gen_modrm(a, r, vtop->sym, fc);
        }
        vtop--;
    }
}
/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    int stack_addr, align, size, call_conv, nb_reg_params;
    int index, home;
    uint8_t *reg_params;
    Sym *param;
    CType *ptype;

    param = func_type->ref;
    call_conv = FUNC_CALL(param->r);
    stack_addr = 8; /* first stack argument sits above saved %ebp + return address */
    loc = 0;

    /* pick the register set used for fastcall parameter passing */
    if (call_conv >= FUNC_FASTCALL1 && call_conv <= FUNC_FASTCALL3) {
        nb_reg_params = call_conv - FUNC_FASTCALL1 + 1;
        reg_params = fastcall_regs;
    } else if (call_conv == FUNC_FASTCALLW) {
        nb_reg_params = 2;
        reg_params = fastcallw_regs;
    } else {
        nb_reg_params = 0;
        reg_params = NULL;
    }
    index = 0;
    ind += FUNC_PROLOG_SIZE; /* reserve room; the prolog is patched in later */
    func_sub_sp_offset = ind;

    /* if the function returns a structure, then add an implicit pointer
       parameter */
    func_vt = param->type;
    if ((func_vt.t & VT_BTYPE) == VT_STRUCT) {
        /* XXX: fastcall case ? */
        func_vc = stack_addr;
        stack_addr += 4;
        index++;
    }

    /* define parameters */
    while ((param = param->next) != NULL) {
        ptype = &param->type;
        size = type_size(ptype, &align);
        size = (size + 3) & ~3; /* round up to whole stack words */
#ifdef FUNC_STRUCT_PARAM_AS_PTR
        /* structs are passed as pointer */
        if ((ptype->t & VT_BTYPE) == VT_STRUCT) {
            size = 4;
        }
#endif
        if (index < nb_reg_params) {
            /* save FASTCALL register into a fresh local slot */
            loc -= 4;
            o(0x89); /* movl */
            gen_modrm(reg_params[index], VT_LOCAL, NULL, loc);
            home = loc;
        } else {
            home = stack_addr;
            stack_addr += size;
        }
        sym_push(param->v & ~SYM_FIELD, ptype,
                 VT_LOCAL | lvalue_type(ptype->t), home);
        index++;
    }

    func_ret_sub = 0;
    /* pascal type call ? */
    if (call_conv == FUNC_STDCALL)
        func_ret_sub = stack_addr - 8;

    /* leave some room for bound checking code */
    if (tcc_state->do_bounds_check) {
        oad(0xb8, 0); /* lbound section pointer */
        oad(0xb8, 0); /* call to function */
        func_bound_offset = lbounds_section->data_offset;
    }
}
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int kind, inv, type_word, disp, src;
    SValue indirect;

    src = sv->r;
    type_word = sv->type.t;
    disp = sv->c.ul;
    kind = src & VT_VALMASK;

    if (src & VT_LVAL) {
        if (kind == VT_LLOCAL) {
            /* the address itself lives in a stack slot: fetch it first */
            indirect.type.t = VT_INT;
            indirect.r = VT_LOCAL | VT_LVAL;
            indirect.c.ul = disp;
            load(r, &indirect);
            src = r;
        }
        if ((type_word & VT_BTYPE) == VT_FLOAT) {
            o(0xd9); /* flds */
            r = 0;
        } else if ((type_word & VT_BTYPE) == VT_DOUBLE) {
            o(0xdd); /* fldl */
            r = 0;
        } else if ((type_word & VT_BTYPE) == VT_LDOUBLE) {
            o(0xdb); /* fldt */
            r = 5;
        } else if ((type_word & VT_TYPE) == VT_BYTE) {
            o(0xbe0f); /* movsbl */
        } else if ((type_word & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            o(0xb60f); /* movzbl */
        } else if ((type_word & VT_TYPE) == VT_SHORT) {
            o(0xbf0f); /* movswl */
        } else if ((type_word & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            o(0xb70f); /* movzwl */
        } else {
            o(0x8b); /* movl */
        }
        gen_modrm(r, src, sv->sym, disp);
        return;
    }

    if (kind == VT_CONST) {
        o(0xb8 + r); /* mov $xx, r */
        gen_addr32(src, sv->sym, disp);
    } else if (kind == VT_LOCAL) {
        o(0x8d); /* lea xxx(%ebp), r */
        gen_modrm(r, VT_LOCAL, sv->sym, disp);
    } else if (kind == VT_CMP) {
        oad(0xb8 + r, 0); /* mov $0, r */
        o(0x0f);          /* setxx %br */
        o(disp);
        o(0xc0 + r);
    } else if (kind == VT_JMP || kind == VT_JMPI) {
        inv = kind & 1;
        oad(0xb8 + r, inv); /* mov $1, r */
        o(0x05eb);          /* jmp after */
        gsym(disp);
        oad(0xb8 + r, inv ^ 1); /* mov $0, r */
    } else if (kind != r) {
        o(0x89);
        o(0xc0 + r + kind * 8); /* mov v, r */
    }
}