/*
 * arg_info_desc:
 *
 *   Return a newly allocated, human readable description of INFO for debug
 * output. The caller owns the returned string and must g_free () it.
 */
static char *
arg_info_desc (ArgInfo *info)
{
	GString *str = g_string_new ("");

	g_string_append_printf (str, "offset %d reg %s storage %s nregs %d", info->offset, mono_arch_regname (info->reg), storage_name (info->storage), info->nregs);
	if (info->storage == ArgValuetypeInReg)
		/* FIX: previously the opening '{' was emitted without a matching '}' */
		g_string_append_printf (str, " {(%s %s), (%s %s)}",
								storage_name (info->pair_storage [0]),
								mono_arch_regname (info->pair_regs [0]),
								storage_name (info->pair_storage [1]),
								mono_arch_regname (info->pair_regs [1]));

	/* FALSE: free only the GString wrapper, hand the character data to the caller */
	return g_string_free (str, FALSE);
}
/*
 * mono_print_label:
 *
 *   Recursively print a description of the instruction tree rooted at TREE
 * to FP in parenthesized prefix form, e.g. "(op (left) (right))".
 * The "\\ " sequences emit a backslash-escaped space — presumably so the
 * output can be embedded in a graphviz/dot label; TODO confirm against the
 * caller that produces the graph file.
 */
static void
mono_print_label (FILE *fp, MonoInst *tree)
{
	int arity;

	if (!tree)
		return;

	/* Number of operand subtrees (0, 1 or 2) for this opcode */
	arity = mono_burg_arity [tree->opcode];

	/* Open a paren only for non-leaf nodes; closed at the bottom */
	fprintf (fp, "\\ %s%s", arity? "(": "", mono_inst_name (tree->opcode));

	switch (tree->opcode) {
	case OP_ICONST:
		fprintf (fp, "[%ld]", (long)tree->inst_c0);
		break;
	case OP_I8CONST:
		fprintf (fp, "[%lld]", (long long)tree->inst_l);
		break;
	case OP_R8CONST:
		fprintf (fp, "[%f]", *(double*)tree->inst_p0);
		break;
	case OP_R4CONST:
		fprintf (fp, "[%f]", *(float*)tree->inst_p0);
		break;
	case OP_ARG:
	case OP_LOCAL:
		fprintf (fp, "[%d]", (int)tree->inst_c0);
		break;
	case OP_REGOFFSET:
		/* Stack slot: offset(basereg) */
		fprintf (fp, "[0x%x(%s)]", (int)tree->inst_offset, mono_arch_regname (tree->inst_basereg));
		break;
	case OP_REGVAR:
		fprintf (fp, "[%s]", mono_arch_regname (tree->dreg));
		break;
	case CEE_NEWARR:
		/* Element class name, then the length subtree */
		fprintf (fp, "[%s]", tree->inst_newa_class->name);
		mono_print_label (fp, tree->inst_newa_len);
		break;
	case OP_CALL:
	case OP_CALL_MEMBASE:
	case OP_FCALL:
	case OP_FCALL_MEMBASE:
	case OP_LCALL:
	case OP_LCALL_MEMBASE:
	case OP_VCALL:
	case OP_VCALL_MEMBASE:
	case OP_VOIDCALL:
	case OP_VOIDCALL_MEMBASE: {
		MonoCallInst *call = (MonoCallInst*)tree;
		if (call->method) {
			/* For instance calls, print the 'this' subtree before the method name */
			if (mono_method_signature (call->method)->hasthis && tree->inst_left) {
				mono_print_label (fp, tree->inst_left);
			}
			fprintf (fp, "[%s]", call->method->name);
		}
		break;
	}
	case OP_PHI: {
		int i;
		/* inst_phi_args [0] is the argument count; the sregs follow */
		fprintf (fp, "[%d\\ (", (int)tree->inst_c0);
		for (i = 0; i < tree->inst_phi_args [0]; i++) {
			if (i)
				fprintf (fp, ",\\ ");
			fprintf (fp, "%d", tree->inst_phi_args [i + 1]);
		}
		fprintf (fp, ")]");
		break;
	}
	case OP_NOP:
	case OP_JMP:
	case OP_BREAK:
		/* No operands to print */
		break;
	case OP_BR:
		fprintf (fp, "[B%d]", tree->inst_target_bb->block_num);
		break;
	case OP_SWITCH:
	case CEE_ISINST:
	case CEE_CASTCLASS:
	case OP_CALL_REG:
	case OP_FCALL_REG:
	case OP_LCALL_REG:
	case OP_VCALL_REG:
	case OP_VOIDCALL_REG:
		/* Single operand, no extra annotation */
		mono_print_label (fp, tree->inst_left);
		break;
	case CEE_BNE_UN:
	case CEE_BEQ:
	case CEE_BLT:
	case CEE_BLT_UN:
	case CEE_BGT:
	case CEE_BGT_UN:
	case CEE_BGE:
	case CEE_BGE_UN:
	case CEE_BLE:
	case CEE_BLE_UN:
		/* Conditional branch: both target basic blocks, then the condition */
		fprintf (fp, "[B%dB%d]", tree->inst_true_bb->block_num, tree->inst_false_bb->block_num);
		mono_print_label (fp, tree->inst_left);
		break;
	default:
		/* Generic node: recurse into whatever operands arity says exist */
		if (arity) {
			mono_print_label (fp, tree->inst_left);
			if (arity > 1)
				mono_print_label (fp, tree->inst_right);
		}
		break;
	}

	if (arity)
		fprintf (fp, ")");
}
/*
 * mono_linear_scan:
 *
 *   Allocate the hardware registers in REGS to the variables in VARS using a
 * linear scan over their live ranges. Variables that win a register have their
 * varinfo opcode changed to OP_REGVAR with dreg set to the hard register; the
 * bitmask of registers actually used is OR-ed into *USED_MASK. VARS, REGS and
 * the internal active list are freed on exit. If the variables carry liveness
 * intervals (vmv->interval != NULL) and register reuse is enabled, the work is
 * delegated to the interval-based mono_linear_scan2 ().
 */
void
mono_linear_scan (MonoCompile *cfg, GList *vars, GList *regs, regmask_t *used_mask)
{
	GList *l, *a, *active = NULL;
	MonoMethodVar *vmv, *amv;
	int max_regs, n_regvars;
	/* Accumulated spill-cost benefit per hard register */
	int gains [sizeof (regmask_t) * 8];
	regmask_t used_regs = 0;
	gboolean cost_driven;

	/* Interval info present -> use the second-generation allocator instead */
	if (!cfg->disable_reuse_registers && vars && (((MonoMethodVar*)vars->data)->interval != NULL)) {
		mono_linear_scan2 (cfg, vars, regs, used_mask);
		return;
	}

	/* Spill decisions are made on spill cost, not on range end position */
	cost_driven = TRUE;

#ifdef DEBUG_LSCAN
	printf ("Linears scan for %s\n", mono_method_full_name (cfg->method, TRUE));
#endif

#ifdef DEBUG_LSCAN
	for (l = vars; l; l = l->next) {
		vmv = l->data;
		printf ("VAR %d %08x %08x C%d\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos, vmv->spill_costs);
	}
#endif

	max_regs = g_list_length (regs);

	/* Reset the gain counters for the registers we may hand out */
	for (l = regs; l; l = l->next) {
		int regnum = GPOINTER_TO_INT (l->data);
		g_assert (regnum < G_N_ELEMENTS (gains));
		gains [regnum] = 0;
	}

	/* linear scan */
	for (l = vars; l; l = l->next) {
		vmv = (MonoMethodVar *)l->data;
#ifdef DEBUG_LSCAN
		printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
#endif
		/* expire old intervals in active */
		if (!cfg->disable_reuse_registers) {
			/* active is kept sorted, so expired entries are at the front */
			while (active) {
				amv = (MonoMethodVar *)active->data;
				if (amv->range.last_use.abs_pos > vmv->range.first_use.abs_pos)
					break;
#ifdef DEBUG_LSCAN
				printf ("EXPIR %2d %08x %08x C%d R%d\n", amv->idx, amv->range.first_use.abs_pos, amv->range.last_use.abs_pos, amv->spill_costs, amv->reg);
#endif
				active = g_list_delete_link (active, active);
				/* Return the register to the free pool and bank its gain */
				regs = g_list_prepend (regs, GINT_TO_POINTER (amv->reg));
				gains [amv->reg] += amv->spill_costs;
			}
		}

		if (active && g_list_length (active) == max_regs) {
			/* Spill */
			/* All registers taken: evict the last active entry if it is cheaper */
			a = g_list_nth (active, max_regs - 1);
			amv = (MonoMethodVar *)a->data;

			if ((cost_driven && amv->spill_costs < vmv->spill_costs) || (!cost_driven && amv->range.last_use.abs_pos > vmv->range.last_use.abs_pos)) {
				/* Steal amv's register for the current variable */
				vmv->reg = amv->reg;
				amv->reg = -1;
				active = g_list_delete_link (active, a);

				/* Insert key 2 = spill cost order, key 1 = range end order */
				if (cost_driven)
					active = mono_varlist_insert_sorted (cfg, active, vmv, 2);
				else
					active = mono_varlist_insert_sorted (cfg, active, vmv, 1);

#ifdef DEBUG_LSCAN
				printf ("SPILL0 %2d %08x %08x C%d\n", amv->idx, amv->range.first_use.abs_pos, amv->range.last_use.abs_pos, amv->spill_costs);
#endif
			} else {
#ifdef DEBUG_LSCAN
				printf ("SPILL1 %2d %08x %08x C%d\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos, vmv->spill_costs);
#endif
				/* Current variable loses: it stays in memory */
				vmv->reg = -1;
			}
		} else {
			/* assign register */
			g_assert (regs);
			vmv->reg = GPOINTER_TO_INT (regs->data);
			used_regs |= 1LL << vmv->reg;
			regs = g_list_delete_link (regs, regs);
#ifdef DEBUG_LSCAN
			printf ("ADD %2d %08x %08x C%d R%d\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos, vmv->spill_costs, vmv->reg);
#endif
			active = mono_varlist_insert_sorted (cfg, active, vmv, TRUE);
		}
#ifdef DEBUG_LSCAN
		for (a = active; a; a = a->next) {
			amv = (MonoMethodVar *)a->data;
			printf ("ACT %2d %08x %08x C%d R%d\n", amv->idx, amv->range.first_use.abs_pos, amv->range.last_use.abs_pos, amv->spill_costs, amv->reg);
		}
		printf ("NEXT\n");
#endif
	}

	/* Bank the gains of whatever is still live at the end */
	for (a = active; a; a = a->next) {
		amv = (MonoMethodVar *)a->data;
		gains [amv->reg] += amv->spill_costs;
	}

	/* Commit: keep a register only if its gain beats the allocation cost */
	n_regvars = 0;
	for (l = vars; l; l = l->next) {
		vmv = (MonoMethodVar *)l->data;
		if (vmv->reg >= 0) {
			if ((gains [vmv->reg] > mono_arch_regalloc_cost (cfg, vmv)) && (cfg->varinfo [vmv->idx]->opcode != OP_REGVAR)) {
				if (cfg->verbose_level > 2) {
					printf ("ALLOCATED R%d(%d) TO HREG %d COST %d\n", cfg->varinfo [vmv->idx]->dreg, vmv->idx, vmv->reg, vmv->spill_costs);
				}
				cfg->varinfo [vmv->idx]->opcode = OP_REGVAR;
				cfg->varinfo [vmv->idx]->dreg = vmv->reg;
				n_regvars ++;
			} else {
				if (cfg->verbose_level > 2)
					printf ("COSTLY: R%d C%d C%d %s\n", vmv->idx, vmv->spill_costs, mono_arch_regalloc_cost (cfg, vmv), mono_arch_regname (vmv->reg));
				vmv->reg = -1;
			}
		}
		if (vmv->reg == -1) {
			if (cfg->verbose_level > 2)
				printf ("NOT REGVAR: %d\n", vmv->idx);
		}
	}

	cfg->stat_n_regvars = n_regvars;

	/* Compute used regs */
	/* Recompute from scratch: the earlier accumulation may include vars
	 * that were later demoted in the commit loop above */
	used_regs = 0;
	for (l = vars; l; l = l->next) {
		vmv = (MonoMethodVar *)l->data;
		if (vmv->reg >= 0)
			used_regs |= 1LL << vmv->reg;
	}

	*used_mask |= used_regs;

#ifdef DEBUG_LSCAN
	if (cfg->verbose_level > 2)
		printf ("EXIT: final used mask: %08x\n", *used_mask);
#endif

	/* This function owns the lists: free them all */
	g_list_free (regs);
	g_list_free (active);
	g_list_free (vars);
}
/*
 * mono_linear_scan2:
 *
 *   Interval-based linear scan register allocation, used when the variables in
 * VARS carry precise liveness intervals (vmv->interval). During the scan,
 * vmv->reg holds an INDEX into the REGS list; it is translated to the real
 * hard register number in the commit loop at the end. Winners get their
 * varinfo opcode set to OP_REGVAR and the used-register bits are OR-ed into
 * *USED_MASK.
 *
 * NOTE(review): unlike mono_linear_scan (), VARS and REGS are not freed here,
 * only the internal active/inactive lists — presumably the caller keeps
 * ownership; verify against the call site in mono_linear_scan ().
 */
void
mono_linear_scan2 (MonoCompile *cfg, GList *vars, GList *regs, regmask_t *used_mask)
{
	GList *unhandled, *active, *inactive, *l;
	MonoMethodVar *vmv;
	/* Per-register: position at which the register becomes free for 'current' */
	gint32 free_pos [sizeof (regmask_t) * 8];
	/* Per-register: accumulated spill-cost benefit */
	gint32 gains [sizeof (regmask_t) * 8];
	regmask_t used_regs = 0;
	int n_regs, n_regvars, i;

	for (l = vars; l; l = l->next) {
		vmv = (MonoMethodVar *)l->data;
		LSCAN_DEBUG (printf ("VAR R%d %08x %08x C%d\n", cfg->varinfo [vmv->idx]->dreg, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos, vmv->spill_costs));
	}

	LSCAN_DEBUG (printf ("Linear Scan 2 for %s:\n", mono_method_full_name (cfg->method, TRUE)));

	n_regs = g_list_length (regs);
	memset (gains, 0, n_regs * sizeof (gint32));
	/* Process intervals in order of increasing start position */
	unhandled = g_list_sort (g_list_copy (vars), compare_by_interval_start_pos_func);
	active = NULL;
	inactive = NULL;

	while (unhandled) {
		MonoMethodVar *current = (MonoMethodVar *)unhandled->data;
		int pos, reg, max_free_pos;
		gboolean changed;

		unhandled = g_list_delete_link (unhandled, unhandled);

		LSCAN_DEBUG (printf ("Processing R%d: ", cfg->varinfo [current->idx]->dreg));
		LSCAN_DEBUG (mono_linterval_print (current->interval));
		LSCAN_DEBUG (printf ("\n"));

		/* Variables with no live range never need a register */
		if (!current->interval->range)
			continue;

		pos = current->interval->range->from;

		/* Check for intervals in active which expired or inactive */
		changed = TRUE;
		/* FIXME: Optimize this */
		/* Restart the list walk after every mutation since the link we
		 * iterate over may have been deleted */
		while (changed) {
			changed = FALSE;
			for (l = active; l != NULL; l = l->next) {
				MonoMethodVar *v = (MonoMethodVar*)l->data;

				if (v->interval->last_range->to < pos) {
					active = g_list_delete_link (active, l);
					LSCAN_DEBUG (printf ("Interval R%d has expired\n", cfg->varinfo [v->idx]->dreg));
					changed = TRUE;
					break;
				} else if (!mono_linterval_covers (v->interval, pos)) {
					/* Alive later, but not at pos: move to inactive */
					inactive = g_list_append (inactive, v);
					active = g_list_delete_link (active, l);
					LSCAN_DEBUG (printf ("Interval R%d became inactive\n", cfg->varinfo [v->idx]->dreg));
					changed = TRUE;
					break;
				}
			}
		}

		/* Check for intervals in inactive which expired or active */
		changed = TRUE;
		/* FIXME: Optimize this */
		while (changed) {
			changed = FALSE;
			for (l = inactive; l != NULL; l = l->next) {
				MonoMethodVar *v = (MonoMethodVar*)l->data;

				if (v->interval->last_range->to < pos) {
					inactive = g_list_delete_link (inactive, l);
					LSCAN_DEBUG (printf ("\tInterval R%d has expired\n", cfg->varinfo [v->idx]->dreg));
					changed = TRUE;
					break;
				} else if (mono_linterval_covers (v->interval, pos)) {
					active = g_list_append (active, v);
					inactive = g_list_delete_link (inactive, l);
					LSCAN_DEBUG (printf ("\tInterval R%d became active\n", cfg->varinfo [v->idx]->dreg));
					changed = TRUE;
					break;
				}
			}
		}

		/* Find a register for the current interval */
		for (i = 0; i < n_regs; ++i)
			free_pos [i] = ((gint32)0x7fffffff);

		/* Registers held by active intervals are busy right now */
		for (l = active; l != NULL; l = l->next) {
			MonoMethodVar *v = (MonoMethodVar*)l->data;

			if (v->reg >= 0) {
				free_pos [v->reg] = 0;
				LSCAN_DEBUG (printf ("\threg %d is busy (cost %d)\n", v->reg, v->spill_costs));
			}
		}

		/* Registers held by inactive intervals are free until the holes end */
		for (l = inactive; l != NULL; l = l->next) {
			MonoMethodVar *v = (MonoMethodVar*)l->data;
			gint32 intersect_pos;

			if (v->reg >= 0) {
				intersect_pos = mono_linterval_get_intersect_pos (current->interval, v->interval);
				if (intersect_pos != -1) {
					free_pos [v->reg] = intersect_pos;
					LSCAN_DEBUG (printf ("\threg %d becomes free at %d\n", v->reg, intersect_pos));
				}
			}
		}

		/* Pick the register which stays free the longest */
		max_free_pos = -1;
		reg = -1;
		for (i = 0; i < n_regs; ++i)
			if (free_pos [i] > max_free_pos) {
				reg = i;
				max_free_pos = free_pos [i];
			}

		g_assert (reg != -1);

		if (free_pos [reg] >= current->interval->last_range->to) {
			/* Register available for whole interval */
			current->reg = reg;
			LSCAN_DEBUG (printf ("\tAssigned hreg %d to R%d\n", reg, cfg->varinfo [current->idx]->dreg));

			active = g_list_append (active, current);
			gains [current->reg] += current->spill_costs;
		} else {
			/*
			 * free_pos [reg] > 0 means there is a register available for parts
			 * of the interval, so splitting it is possible. This is not yet
			 * supported, so we spill in this case too.
			 */

			/* Spill an interval */

			/* FIXME: Optimize the selection of the interval */

			if (active) {
				GList *min_spill_pos;
#if 0
				/*
				 * This favors registers with big spill costs, thus larger liveness ranges,
				 * thus actually leading to worse code size.
				 */
				guint32 min_spill_value = G_MAXINT32;

				for (l = active; l != NULL; l = l->next) {
					vmv = (MonoMethodVar*)l->data;
					if (vmv->spill_costs < min_spill_value) {
						min_spill_pos = l;
						min_spill_value = vmv->spill_costs;
					}
				}
#else
				/* Spill either the first active or the current interval */
				min_spill_pos = active;
#endif
				vmv = (MonoMethodVar*)min_spill_pos->data;
				if (vmv->spill_costs < current->spill_costs) {
					// if (vmv->interval->last_range->to < current->interval->last_range->to) {
					/* Evict vmv; note current does NOT inherit its register here */
					gains [vmv->reg] -= vmv->spill_costs;
					vmv->reg = -1;
					LSCAN_DEBUG (printf ("\tSpilled R%d\n", cfg->varinfo [vmv->idx]->dreg));
					active = g_list_delete_link (active, min_spill_pos);
				} else
					LSCAN_DEBUG (printf ("\tSpilled current (cost %d)\n", current->spill_costs));
			} else
				LSCAN_DEBUG (printf ("\tSpilled current\n"));
		}
	}

	/* Decrease the gains by the cost of saving+restoring the register */
	for (i = 0; i < n_regs; ++i) {
		if (gains [i]) {
			/* FIXME: This is x86 only */
			gains [i] -= cfg->method->save_lmf ? 1 : 2;
			if (gains [i] < 0)
				gains [i] = 0;
		}
	}

	/* Do the actual register assignment */
	n_regvars = 0;
	for (l = vars; l; l = l->next) {
		vmv = (MonoMethodVar *)l->data;

		if (vmv->reg >= 0) {
			int reg_index = vmv->reg;

			/* During allocation, vmv->reg is an index into the regs list */
			vmv->reg = GPOINTER_TO_INT (g_list_nth_data (regs, vmv->reg));

			if ((gains [reg_index] > regalloc_cost (cfg, vmv)) && (cfg->varinfo [vmv->idx]->opcode != OP_REGVAR)) {
				if (cfg->verbose_level > 2)
					printf ("REGVAR R%d G%d C%d %s\n", cfg->varinfo [vmv->idx]->dreg, gains [reg_index], regalloc_cost (cfg, vmv), mono_arch_regname (vmv->reg));

				cfg->varinfo [vmv->idx]->opcode = OP_REGVAR;
				cfg->varinfo [vmv->idx]->dreg = vmv->reg;
				n_regvars ++;
			} else {
				if (cfg->verbose_level > 2)
					printf ("COSTLY: %s R%d G%d C%d %s\n", mono_method_full_name (cfg->method, TRUE), cfg->varinfo [vmv->idx]->dreg, gains [reg_index], regalloc_cost (cfg, vmv), mono_arch_regname (vmv->reg));
				vmv->reg = -1;
			}
		}
	}

	cfg->stat_n_regvars = n_regvars;

	/* Compute used regs */
	used_regs = 0;
	for (l = vars; l; l = l->next) {
		vmv = (MonoMethodVar *)l->data;

		if (vmv->reg >= 0)
			used_regs |= 1LL << vmv->reg;
	}

	*used_mask |= used_regs;

	g_list_free (active);
	g_list_free (inactive);
}
/*
 * mono_arch_get_gsharedvt_call_info:
 *
 *   Build the GSharedVtCallInfo structure describing how to marshal a call
 * between normal and gsharedvt calling conventions. ADDR is the address to
 * transfer control to. If GSHAREDVT_IN is true, the caller uses the normal
 * signature NORMAL_SIG and the callee the gsharedvt signature GSHAREDVT_SIG;
 * otherwise the roles are reversed. The result contains a slot-by-slot map
 * from the caller's argument area to the callee's, plus return-value
 * marshalling info, and is allocated from the current domain's mempool
 * (so it is not freed by the caller).
 */
gpointer
mono_arch_get_gsharedvt_call_info (gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, gboolean gsharedvt_in, gint32 vcall_offset, gboolean calli)
{
	GSharedVtCallInfo *info;
	CallInfo *caller_cinfo, *callee_cinfo;
	MonoMethodSignature *caller_sig, *callee_sig;
	int aindex, i;
	gboolean var_ret = FALSE;
	CallInfo *cinfo, *gcinfo;
	MonoMethodSignature *sig, *gsig;
	GPtrArray *map;

	/* Compute the call-convention layout for both sides of the transition */
	if (gsharedvt_in) {
		caller_sig = normal_sig;
		callee_sig = gsharedvt_sig;
		caller_cinfo = mono_arch_get_call_info (NULL, caller_sig);
		callee_cinfo = mono_arch_get_call_info (NULL, callee_sig);
	} else {
		callee_sig = normal_sig;
		caller_sig = gsharedvt_sig;
		callee_cinfo = mono_arch_get_call_info (NULL, callee_sig);
		caller_cinfo = mono_arch_get_call_info (NULL, caller_sig);
	}

	/*
	 * If GSHAREDVT_IN is true, this means we are transitioning from normal to gsharedvt code. The caller uses the
	 * normal call signature, while the callee uses the gsharedvt signature.
	 * If GSHAREDVT_IN is false, its the other way around.
	 */

	/* sig/cinfo describes the normal call, while gsig/gcinfo describes the gsharedvt call */
	if (gsharedvt_in) {
		sig = caller_sig;
		gsig = callee_sig;
		cinfo = caller_cinfo;
		gcinfo = callee_cinfo;
	} else {
		sig = callee_sig;
		gsig = caller_sig;
		cinfo = callee_cinfo;
		gcinfo = caller_cinfo;
	}

	DEBUG_AMD64_GSHAREDVT_PRINT ("source sig: (%s) return (%s)\n", mono_signature_get_desc (caller_sig, FALSE), mono_type_full_name (mono_signature_get_return_type (caller_sig))); // Leak
	DEBUG_AMD64_GSHAREDVT_PRINT ("dest sig: (%s) return (%s)\n", mono_signature_get_desc (callee_sig, FALSE), mono_type_full_name (mono_signature_get_return_type (callee_sig)));

	if (gcinfo->ret.storage == ArgGsharedvtVariableInReg) {
		/*
		 * The return type is gsharedvt
		 */
		var_ret = TRUE;
	}

	/*
	 * The stack looks like this:
	 * <arguments>
	 * <trampoline frame>
	 * <call area>
	 * We have to map the stack slots in <arguments> to the stack slots in <call area>.
	 */
	map = g_ptr_array_new ();
	for (aindex = 0; aindex < cinfo->nargs; ++aindex) {
		ArgInfo *src_info = &caller_cinfo->args [aindex];
		ArgInfo *dst_info = &callee_cinfo->args [aindex];
		/* Slot descriptors for this argument on each side; g_free-d below */
		int *src = NULL, *dst = NULL;
		int nsrc = -1, ndst = -1, nslots = 0;
		int arg_marshal = GSHAREDVT_ARG_NONE;
		int arg_slots = 0; // Size in quadwords

		DEBUG_AMD64_GSHAREDVT_PRINT ("-- arg %d in (%s) out (%s)\n", aindex, arg_info_desc (src_info), arg_info_desc (dst_info));

		/* Decode where the caller puts this argument */
		switch (src_info->storage) {
		case ArgInIReg:
		case ArgInDoubleSSEReg:
		case ArgInFloatSSEReg:
		case ArgValuetypeInReg:
		case ArgOnStack:
			nsrc = get_arg_slots (src_info, &src, TRUE);
			break;
		case ArgGSharedVtInReg:
			handle_marshal_when_src_gsharedvt (dst_info, &arg_marshal, &arg_slots);
			handle_map_when_gsharedvt_in_reg (src_info, &nsrc, &src);
			break;
		case ArgGSharedVtOnStack:
			handle_marshal_when_src_gsharedvt (dst_info, &arg_marshal, &arg_slots);
			handle_map_when_gsharedvt_on_stack (src_info, &nsrc, &src, TRUE);
			break;
		case ArgValuetypeAddrInIReg:
		case ArgValuetypeAddrOnStack:
			nsrc = get_arg_slots (src_info, &src, TRUE);
			break;
		default:
			g_error ("Gsharedvt can't handle source arg type %d", (int)src_info->storage); // Inappropriate value: ArgValuetypeAddrInIReg is for returns only
		}

		/* Decode where the callee expects this argument */
		switch (dst_info->storage) {
		case ArgInIReg:
		case ArgInDoubleSSEReg:
		case ArgInFloatSSEReg:
		case ArgOnStack:
		case ArgValuetypeInReg:
			ndst = get_arg_slots (dst_info, &dst, FALSE);
			break;
		case ArgGSharedVtInReg:
			handle_marshal_when_dst_gsharedvt (src_info, &arg_marshal);
			handle_map_when_gsharedvt_in_reg (dst_info, &ndst, &dst);
			break;
		case ArgGSharedVtOnStack:
			handle_marshal_when_dst_gsharedvt (src_info, &arg_marshal);
			handle_map_when_gsharedvt_on_stack (dst_info, &ndst, &dst, FALSE);
			break;
		case ArgValuetypeAddrInIReg:
		case ArgValuetypeAddrOnStack:
			ndst = get_arg_slots (dst_info, &dst, FALSE);
			break;
		default:
			g_error ("Gsharedvt can't handle dest arg type %d", (int)dst_info->storage); // See above
		}

		/* Encode the marshalling kind and quadword count into the first slot */
		if (nsrc)
			src [0] |= (arg_marshal << SRC_DESCRIPTOR_MARSHAL_SHIFT) | (arg_slots << SLOT_COUNT_SHIFT);

		/* Merge and add to the global list*/
		nslots = MIN (nsrc, ndst);
		DEBUG_AMD64_GSHAREDVT_PRINT ("nsrc %d ndst %d\n", nsrc, ndst);

		for (i = 0; i < nslots; ++i)
			add_to_map (map, src [i], dst [i]);

		g_free (src);
		g_free (dst);
	}

	DEBUG_AMD64_GSHAREDVT_PRINT ("-- return in (%s) out (%s) var_ret %d\n", arg_info_desc (&caller_cinfo->ret), arg_info_desc (&callee_cinfo->ret), var_ret);

	if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
		/* Both the caller and the callee pass the vtype ret address in r8 (System V) and RCX or RDX (Windows) */
		g_assert (gcinfo->ret.storage == ArgValuetypeAddrInIReg || gcinfo->ret.storage == ArgGsharedvtVariableInReg);
		add_to_map (map, map_reg (cinfo->ret.reg), map_reg (cinfo->ret.reg));
	}

	/* The map entries are stored inline after the struct (flexible tail) */
	info = mono_domain_alloc0 (mono_domain_get (), sizeof (GSharedVtCallInfo) + (map->len * sizeof (int)));
	info->addr = addr;
	info->stack_usage = callee_cinfo->stack_usage;
	info->ret_marshal = GSHAREDVT_RET_NONE;
	info->gsharedvt_in = gsharedvt_in ? 1 : 0;
	info->vret_slot = -1;
	info->calli = calli;

	if (var_ret) {
		g_assert (gcinfo->ret.storage == ArgGsharedvtVariableInReg);
		info->vret_arg_reg = map_reg (gcinfo->ret.reg);
		DEBUG_AMD64_GSHAREDVT_PRINT ("mapping vreg_arg_reg to %d in reg %s\n", info->vret_arg_reg, mono_arch_regname (gcinfo->ret.reg));
	} else {
		info->vret_arg_reg = -1;
	}

#ifdef DEBUG_AMD64_GSHAREDVT
	printf ("final map:\n");
	for (i = 0; i < map->len; i += 2) {
		printf ("\t[%d] src %x dst %x\n ", i / 2, GPOINTER_TO_UINT (g_ptr_array_index (map, i)), GPOINTER_TO_UINT (g_ptr_array_index (map, i + 1)));
	}
#endif

	info->vcall_offset = vcall_offset;
	/* Map entries are (src, dst) pairs, hence len / 2 */
	info->map_count = map->len / 2;
	for (i = 0; i < map->len; ++i)
		info->map [i] = GPOINTER_TO_UINT (g_ptr_array_index (map, i));
	g_ptr_array_free (map, TRUE);

	/* Compute return value marshalling */
	if (var_ret) {
		/* Compute return value marshalling */
		switch (cinfo->ret.storage) {
		case ArgInIReg:
			if (!gsharedvt_in || sig->ret->byref) {
				info->ret_marshal = GSHAREDVT_RET_IREGS_1;
			} else {
				MonoType *ret = sig->ret;

				// Unwrap enums
				if (ret->type == MONO_TYPE_VALUETYPE)
					ret = mini_type_get_underlying_type (ret);

				/* Narrow integer returns must be sign/zero-extended on read-back */
				switch (ret->type) {
				case MONO_TYPE_I1:
					info->ret_marshal = GSHAREDVT_RET_I1;
					break;
				case MONO_TYPE_BOOLEAN:
				case MONO_TYPE_U1:
					info->ret_marshal = GSHAREDVT_RET_U1;
					break;
				case MONO_TYPE_I2:
					info->ret_marshal = GSHAREDVT_RET_I2;
					break;
				case MONO_TYPE_CHAR:
				case MONO_TYPE_U2:
					info->ret_marshal = GSHAREDVT_RET_U2;
					break;
				case MONO_TYPE_I4:
					info->ret_marshal = GSHAREDVT_RET_I4;
					break;
				case MONO_TYPE_U4:
					info->ret_marshal = GSHAREDVT_RET_U4;
					break;
				case MONO_TYPE_I:
				case MONO_TYPE_U:
				case MONO_TYPE_PTR:
				case MONO_TYPE_FNPTR:
				case MONO_TYPE_CLASS:
				case MONO_TYPE_OBJECT:
				case MONO_TYPE_SZARRAY:
				case MONO_TYPE_ARRAY:
				case MONO_TYPE_STRING:
				case MONO_TYPE_U8:
				case MONO_TYPE_I8:
					info->ret_marshal = GSHAREDVT_RET_I8;
					break;
				case MONO_TYPE_GENERICINST:
					g_assert (!mono_type_generic_inst_is_valuetype (ret));
					info->ret_marshal = GSHAREDVT_RET_I8;
					break;
				default:
					g_error ("Gsharedvt can't handle dst type [%d]", (int)sig->ret->type);
				}
			}
			break;
		case ArgValuetypeInReg:
			info->ret_marshal = GSHAREDVT_RET_IREGS_1 - 1 + cinfo->ret.nregs;
			g_assert (cinfo->ret.nregs == 1); // ABI supports 2-register return but we do not implement this.
			break;
		case ArgInDoubleSSEReg:
		case ArgInFloatSSEReg:
			info->ret_marshal = GSHAREDVT_RET_R8;
			break;
		case ArgValuetypeAddrInIReg:
			/* Nothing to do: the address is already passed explicitly */
			break;
		default:
			g_error ("Can't marshal return of storage [%d] %s", (int)cinfo->ret.storage, storage_name (cinfo->ret.storage));
		}

		if (gsharedvt_in && cinfo->ret.storage != ArgValuetypeAddrInIReg) {
			/* Allocate stack space for the return value */
			info->vret_slot = map_stack_slot (info->stack_usage / sizeof (gpointer));
			info->stack_usage += mono_type_stack_size_internal (normal_sig->ret, NULL, FALSE) + sizeof (gpointer);
		}
		DEBUG_AMD64_GSHAREDVT_PRINT ("RET marshal is %s\n", ret_marshal_name [info->ret_marshal]);
	}

	info->stack_usage = ALIGN_TO (info->stack_usage, MONO_ARCH_FRAME_ALIGNMENT);

	g_free (callee_cinfo);
	g_free (caller_cinfo);

	DEBUG_AMD64_GSHAREDVT_PRINT ("allocated an info at %p stack usage %d\n", info, info->stack_usage);

	return info;
}
/*
 * mono_print_unwind_info:
 *
 *   Decode the DWARF CFA opcodes in UNWIND_INFO (UNWIND_INFO_LEN bytes) and
 * print a human readable description of each one to stdout. Unrecognized
 * opcodes trigger an assertion.
 */
void
mono_print_unwind_info (guint8 *unwind_info, int unwind_info_len)
{
	guint8 *ip = unwind_info;
	guint8 *end = unwind_info + unwind_info_len;
	int loc = 0;
	int regnum, off, cfa_regnum, cfa_off;

	while (ip < end) {
		/* The top two bits select the primary opcode; 0 selects an extended one */
		int primary = *ip & 0xc0;

		if (primary == DW_CFA_advance_loc) {
			/* The low six bits hold the code position delta */
			loc += *ip & 0x3f;
			ip ++;
		} else if (primary == DW_CFA_offset) {
			/* The low six bits hold the register number */
			regnum = *ip & 0x3f;
			ip ++;
			off = decode_uleb128 (ip, &ip) * DWARF_DATA_ALIGN;
			if (regnum == DWARF_PC_REG)
				printf ("CFA: [%x] offset: %s at cfa-0x%x\n", loc, "pc", -off);
			else
				printf ("CFA: [%x] offset: %s at cfa-0x%x\n", loc, mono_arch_regname (mono_dwarf_reg_to_hw_reg (regnum)), -off);
		} else if (primary == 0) {
			/* Extended opcode: the whole byte identifies the operation */
			int ext = *ip;
			ip ++;

			switch (ext) {
			case DW_CFA_def_cfa:
				cfa_regnum = decode_uleb128 (ip, &ip);
				cfa_off = decode_uleb128 (ip, &ip);
				printf ("CFA: [%x] def_cfa: %s+0x%x\n", loc, mono_arch_regname (mono_dwarf_reg_to_hw_reg (cfa_regnum)), cfa_off);
				break;
			case DW_CFA_def_cfa_offset:
				cfa_off = decode_uleb128 (ip, &ip);
				printf ("CFA: [%x] def_cfa_offset: 0x%x\n", loc, cfa_off);
				break;
			case DW_CFA_def_cfa_register:
				cfa_regnum = decode_uleb128 (ip, &ip);
				printf ("CFA: [%x] def_cfa_reg: %s\n", loc, mono_arch_regname (mono_dwarf_reg_to_hw_reg (cfa_regnum)));
				break;
			case DW_CFA_offset_extended_sf:
				regnum = decode_uleb128 (ip, &ip);
				off = decode_sleb128 (ip, &ip) * DWARF_DATA_ALIGN;
				printf ("CFA: [%x] offset_extended_sf: %s at cfa-0x%x\n", loc, mono_arch_regname (mono_dwarf_reg_to_hw_reg (regnum)), -off);
				break;
			case DW_CFA_same_value:
				regnum = decode_uleb128 (ip, &ip);
				printf ("CFA: [%x] same_value: %s\n", loc, mono_arch_regname (mono_dwarf_reg_to_hw_reg (regnum)));
				break;
			case DW_CFA_advance_loc4:
				/* Four byte position delta */
				loc += read32 (ip);
				ip += 4;
				break;
			case DW_CFA_remember_state:
				printf ("CFA: [%x] remember_state\n", loc);
				break;
			case DW_CFA_restore_state:
				printf ("CFA: [%x] restore_state\n", loc);
				break;
			case DW_CFA_mono_advance_loc:
				printf ("CFA: [%x] mono_advance_loc\n", loc);
				break;
			default:
				g_assert_not_reached ();
			}
		} else {
			g_assert_not_reached ();
		}
	}
}