/*
 * Assign stack offsets to this scope's local variables and argument
 * offsets to its parameters, then recurse into child scopes.
 *
 * "current" is the running byte offset into the frame at which this
 * scope's locals may start. Returns (and stores in tab->auto_total_size)
 * the total automatic-storage size of this scope plus its deepest child.
 */
int symtab_fold(symtable *tab, int current)
{
	const int this_start = current;

	if(tab->decls){
		const int word_size = platform_word_size();
		decl **diter;
		int arg_offset;

		arg_offset = 0;

		/* need to walk backwards for args */
		for(diter = tab->decls; *diter; diter++);

		for(diter--; diter >= tab->decls; diter--){
			sym *s = (*diter)->sym;
			/*enum type_primitive last = type_int; TODO: packing */

			/* only non-extern, non-static locals occupy frame space */
			if(s->type == sym_local
			&& (s->decl->type->spec & (spec_extern | spec_static)) == 0){
				int siz = decl_size(s->decl);

				if(siz <= word_size)
					s->offset = current;
				else
					s->offset = current + siz - word_size;
					/* an array and structs start at the bottom */

				/* need to increase by a multiple of word_size */
				if(siz % word_size)
					siz += word_size - siz % word_size;

				current += siz;

				/* static analysis on sym (only auto-vars) */
				if(s->nwrites == 0 && !decl_has_array(s->decl)){
					cc1_warn_at(&s->decl->where, 0, WARN_SYM_NEVER_WRITTEN,
							"\"%s\" never written to", s->decl->spel);
					s->nwrites++; /* only warn once */
				}

			}else if(s->type == sym_arg){
				/* args sit at successive word-size slots */
				s->offset = arg_offset;
				arg_offset += word_size;
			}
		}
	}

	{
		symtable **tabi;
		int subtab_max = 0;

		/* sibling scopes overlap: only the largest child adds to our size */
		for(tabi = tab->children; tabi && *tabi; tabi++){
			int this = symtab_fold(*tabi, current);
			if(this > subtab_max)
				subtab_max = this;
		}

		tab->auto_total_size = current - this_start + subtab_max;
	}

	return tab->auto_total_size;
}
/*
 * Decorate "name" per the calling convention of fnty (may be NULL).
 *
 * fastcall:  "@name@N", stdcall: "name@N" - where N is the byte count of
 * the arguments; otherwise only the optional leading underscore applies.
 * Returns a fresh heap string when any decoration was added, else the
 * original "name" pointer unchanged (cast from const).
 */
char *func_mangle(const char *name, type *fnty)
{
	const char *prefix = (fopt_mode & FOPT_LEADING_UNDERSCORE) ? "_" : "";
	char suffix[8];

	suffix[0] = '\0';

	if(fnty){
		funcargs *fa = type_funcargs(fnty);

		if(fa->conv == conv_fastcall || fa->conv == conv_stdcall){
			/* both conventions append "@<arg-bytes>" */
			snprintf(suffix, sizeof suffix, "@%d",
					dynarray_count(fa->arglist) * platform_word_size());

			if(fa->conv == conv_fastcall)
				prefix = "@"; /* fastcall replaces any leading underscore */
		}
		/* conv_x64_sysv / conv_x64_ms / conv_cdecl: no decoration */
	}

	if(*prefix || *suffix)
		return ustrprintf("%s%s%s", prefix, name, suffix);

	return (char *)name;
}
/*
 * Emit code for a function-call expression "e".
 *
 * When inline-asm is enabled and the callee is the __asm__ pseudo-function,
 * its single string-literal argument is validated and pasted verbatim into
 * the text section. Otherwise: arguments are generated (pushed) in reverse
 * order, the call is emitted - directly by name when the callee is a plain
 * symbol, else indirectly through rax - the argument space is popped and
 * the return value is pushed.
 */
void gen_expr_funcall(expr *e, symtable *stab)
{
	const char *const fname = e->expr->spel;
	expr **iter;
	int nargs = 0;

	if((fopt_mode & FOPT_ENABLE_ASM)
	&& fname && !strcmp(fname, ASM_INLINE_FNAME)){
		const char *str;
		expr *arg1;
		int i;

		/* exactly one argument, and it must be an address (string) expr */
		if(!e->funcargs || e->funcargs[1] || !expr_kind(e->funcargs[0], addr))
			die_at(&e->where, "invalid __asm__ arguments");

		arg1 = e->funcargs[0];
		str = arg1->array_store->data.str;

		/* validate every character; cast to unsigned char - passing a
		 * (possibly negative) plain char to isprint()/isspace() is
		 * undefined behaviour */
		for(i = 0; i < arg1->array_store->len - 1; i++){
			const char ch = str[i];

			if(!isprint((unsigned char)ch) && !isspace((unsigned char)ch))
				die_at(&arg1->where,
						"invalid __asm__ string (character %d)", ch);
		}

		/* the string must end exactly at its NUL terminator; report the
		 * actual offending byte - the old "goto invalid" jumped back into
		 * the loop body's scope where ch held an indeterminate value */
		if(str[i])
			die_at(&arg1->where,
					"invalid __asm__ string (character %d)", str[i]);

		asm_temp(0, "; start manual __asm__");
		fprintf(cc_out[SECTION_TEXT], "%s\n", arg1->array_store->data.str);
		asm_temp(0, "; end manual __asm__");
	}else{
		/* continue with normal funcall */

		if(e->funcargs){
			/* need to push on in reverse order */
			for(iter = e->funcargs; *iter; iter++);
			for(iter--; iter >= e->funcargs; iter--){
				gen_expr(*iter, stab);
				nargs++;
			}
		}

		if(e->sym && !e->sym->decl->decl_ptr && e->sym->decl->spel){
			/* simple - direct call by symbol name */
			asm_temp(1, "call %s", e->sym->decl->spel);
		}else{
			/* computed callee: evaluate, then call through rax */
			gen_expr(e->expr, stab);
			asm_temp(1, "pop rax ; function address");
			asm_temp(1, "call rax ; duh");
		}

		if(nargs)
			asm_temp(1, "add rsp, %d ; %d arg%s",
					nargs * platform_word_size(),
					nargs,
					nargs == 1 ? "" : "s");

		asm_temp(1, "push rax ; ret");
	}
}
/*
 * Byte size of type r; "from" locates the use for any diagnostics.
 * Dies (die_at) on incomplete arrays and arrays of void.
 */
unsigned type_size(type *r, const where *from)
{
	switch(r->type){
		case type_auto:
			ICE("__auto_type");

		case type_btype:
			return btype_size(r->bits.type, from);

		case type_tdef:
		{
			decl *d = r->bits.tdef.decl;
			type *sub;

			/* folded typedef: size of the aliased decl's type */
			if(d)
				return type_size(d->ref, from);

			/* typeof(expr): size of the expression's (folded) type */
			sub = r->bits.tdef.type_of->tree_type;
			UCC_ASSERT(sub, "type_size for unfolded typedef");
			return type_size(sub, from);
		}

		case type_attr:
		case type_cast:
		case type_where:
			/* transparent wrappers - size of the wrapped type */
			return type_size(r->ref, from);

		case type_ptr:
		case type_block:
			return platform_word_size();

		case type_func:
			/* function size is one, sizeof(main) is valid */
			return 1;

		case type_array:
		{
			integral_t sz;

			if(type_is_void(r->ref))
				die_at(from, "array of void");

			if(!r->bits.array.size)
				die_at(from, "array has an incomplete size");

			sz = const_fold_val_i(r->bits.array.size);

			return sz * type_size(r->ref, from);
		}
	}

	ucc_unreach(0);
}
/*
 * Emit a mov/lea between symbol "s" and register "reg".
 *
 * Globals are addressed by name (heap-built operand string); stack symbols
 * are addressed relative to rbp (fixed-size local buffer). ASM_SET stores
 * reg into the symbol; other types load/lea the symbol into reg.
 */
void asm_sym(enum asm_sym_type t, sym *s, const char *reg)
{
	const int is_global = s->type == sym_global
		|| (s->decl->type->spec & (spec_extern | spec_static));
	char *const dsp = s->decl->spel;
	int is_auto = s->type == sym_local;
	char stackbrackets[16];
	char *brackets;

	if(is_global){
		const int bracket_len = strlen(dsp) + 16;
		brackets = umalloc(bracket_len + 1);

		if(t == ASM_LEA || s->decl->func_code){
			snprintf(brackets, bracket_len, "%s", dsp);
			/* int (*p)() = printf; for example */
			/*
			 * either:
			 * we want lea rax, [a]
			 * and convert this to mov rax, a // this is because Macho-64 is an awful binary format
			 * force a mov for funcs (i.e. &func == func)
			 */
			t = ASM_LOAD;
		}else{
			const char *type_s = "";

			if(asm_type_size(s->decl) == ASM_SIZE_WORD)
				type_s = "qword ";

			/* get warnings for "lea rax, [qword tim]", just do "lea rax, [tim]" */
			snprintf(brackets, bracket_len, "[%s%s]",
					t == ASM_LEA ? "" : type_s, dsp);
		}
	}else{
		/* stack symbol: locals below rbp ('-'), args above ('+', skipping
		 * the saved rbp and return address - hence the 2 * word_size) */
		brackets = stackbrackets;
		snprintf(brackets, sizeof stackbrackets, "[rbp %c %d]",
				is_auto ? '-' : '+',
				((is_auto ? 1 : 2) * platform_word_size()) + s->offset);
	}

	asm_temp(1, "%s %s, %s ; %s%s",
			t == ASM_LEA ? "lea" : "mov",
			t == ASM_SET ? brackets : reg,
			t == ASM_SET ? reg : brackets,
			t == ASM_LEA ? "&" : "",
			dsp
		);

	/* only the global path heap-allocates its operand string */
	if(brackets != stackbrackets)
		free(brackets);
}
/*
 * Bytes of per-declaration bookkeeping space needed for VLA decl "d":
 * a base slot set (pointer, and original stack pointer when the decl
 * itself is a VLA), plus one word per VLA dimension in its type chain.
 */
unsigned vla_decl_space(decl *d)
{
	const unsigned word = platform_word_size();
	unsigned total;
	type *ty;

	if(STORE_IS_TYPEDEF(d->store)){
		total = 0; /* just the sizes */
	}else if(type_is_vla(d->ref, VLA_ANY_DIMENSION)){
		total = word * 2; /* T *ptr; void *orig_sp; */
	}else{
		total = word; /* T *ptr; - no stack res, no orig_sp */
	}

	/* one extra word for each VLA dimension's saved size */
	for(ty = d->ref; ty; ty = type_next(ty))
		total += type_is_vla(ty, VLA_TOP_DIMENSION) ? word : 0;

	return total;
}
/*
 * Find the asm_type_table index matching the byte size of type r.
 * A NULL type is treated as long/pointer sized; arrays and functions
 * decay to word size. ICEs (then returns -1) if no entry matches.
 */
int asm_table_lookup(type *r)
{
	int idx;
	int want_sz;

	if(!r){
		want_sz = type_primitive_size(type_long); /* or ptr */
	}else if(type_is(r, type_array) || type_is(r, type_func)){
		/* special case for funcs and arrays */
		want_sz = platform_word_size();
	}else{
		want_sz = type_size(r, NULL);
	}

	for(idx = 0; idx < ASM_TABLE_LEN; idx++){
		if(asm_type_table[idx].sz == want_sz)
			return idx;
	}

	ICE("no asm type index for byte size %d", want_sz);
	return -1;
}
/*
 * Alignment in bytes of type r; "from" locates the use for diagnostics.
 *
 * An aligned attribute wins outright (explicit N, or the platform max for
 * a bare attribute); then struct/union alignment, pointer/block word
 * alignment, basic-type alignment, the element alignment for arrays,
 * and 1 as the fallback.
 */
unsigned type_align(type *r, const where *from)
{
	attribute *aligned_attr = type_attr_present(r, attr_aligned);
	struct_union_enum_st *s_or_u;
	type *matched;

	if(aligned_attr){
		if(aligned_attr->bits.align){
			/* explicit aligned(N) - constant-fold the expression */
			consty k;

			const_fold(aligned_attr->bits.align, &k);
			assert(k.type == CONST_NUM && K_INTEGRAL(k.bits.num));
			return k.bits.num.val.i;
		}
		/* bare "aligned" attribute */
		return platform_align_max();
	}

	s_or_u = type_is_s_or_u(r);
	if(s_or_u){
		/* safe - can't have an instance without a ->sue */
		return s_or_u->align;
	}

	if(type_is(r, type_ptr) || type_is(r, type_block))
		return platform_word_size();

	matched = type_is(r, type_btype);
	if(matched)
		return btype_align(matched->bits.type, from);

	matched = type_is(r, type_array);
	if(matched)
		return type_align(matched->ref, from); /* aligns as its element */

	return 1;
}
/*
 * Copy "memsz" bytes between the memory at "ptr" and the register set
 * "regs" (nregs entries): mem2reg != 0 loads memory into the registers,
 * mem2reg == 0 stores the registers out to memory.
 *
 * Each step picks the widest type that fits the remaining size (float/
 * double for float registers, the max integer type otherwise), transfers
 * it, then advances ptr by a word. Registers being written are reserved
 * up front so intermediate codegen can't clobber them; all are unreserved
 * at the end. Consumes (releases) "ptr".
 */
static void impl_overlay_mem_reg(
		out_ctx *octx,
		unsigned memsz, unsigned nregs,
		struct vreg regs[], int mem2reg,
		const out_val *ptr)
{
	const unsigned pws = platform_word_size();
	struct vreg *cur_reg = regs;
	unsigned reg_i = 0;

	if(memsz == 0){
		out_val_release(octx, ptr);
		return;
	}

	UCC_ASSERT(
			nregs * pws >= memsz,
			"not enough registers for memory overlay");

	out_comment(octx,
			"overlay, %s2%s(%u)",
			mem2reg ? "mem" : "reg",
			mem2reg ? "reg" : "mem",
			memsz);

	if(!mem2reg){
		/* reserve all registers so we don't accidentally wipe before the spill */
		for(reg_i = 0; reg_i < nregs; reg_i++)
			v_reserve_reg(octx, &regs[reg_i]);
	}

	for(;; cur_reg++, reg_i++){
		/* read/write whatever size is required */
		type *this_ty;
		unsigned this_sz;

		if(cur_reg->is_float){
			UCC_ASSERT(memsz >= 4, "float for memsz %u?", memsz);

			this_ty = type_nav_btype(
					cc1_type_nav,
					memsz > 4 ? type_double : type_float);
		}else{
			this_ty = type_nav_MAX_FOR(cc1_type_nav, memsz);
		}

		this_sz = type_size(this_ty, NULL);

		UCC_ASSERT(this_sz <= memsz, "reading/writing too much memory");

		/* view the memory as this step's transfer type */
		ptr = out_change_type(octx, ptr, type_ptr_to(this_ty));

		out_val_retain(octx, ptr);

		if(mem2reg){
			const out_val *fetched;

			/* can use impl_deref, as we have a register already,
			 * and know that the memory is an lvalue and not a bitfield
			 *
			 * this means we can load straight into the desired register */
			fetched = impl_deref(octx, ptr, cur_reg);

			UCC_ASSERT(reg_i < nregs, "reg oob");

			if(fetched->type != V_REG
			|| !vreg_eq(&fetched->bits.regoff.reg, cur_reg)){
				/* move to register */
				v_freeup_reg(octx, cur_reg);
				fetched = v_to_reg_given(octx, fetched, cur_reg);
			}

			out_flush_volatile(octx, fetched);
			v_reserve_reg(octx, cur_reg); /* prevent changes */
		}else{
			const out_val *vreg = v_new_reg(octx, NULL, this_ty, cur_reg);

			out_store(octx, ptr, vreg);
		}

		memsz -= this_sz;

		/* early termination */
		if(memsz == 0)
			break;

		/* increment our memory pointer */
		ptr = out_change_type(
				octx,
				ptr,
				type_ptr_to(type_nav_btype(cc1_type_nav, type_uchar)));

		ptr = out_op(octx, op_plus, ptr,
				out_new_l(
					octx,
					type_nav_btype(cc1_type_nav, type_intptr_t),
					pws));
	}

	out_val_release(octx, ptr);

	/* done, unreserve all registers */
	for(reg_i = 0; reg_i < nregs; reg_i++)
		v_unreserve_reg(octx, &regs[reg_i]);
}