// Register an entry point named funcName on pModule and return the new entry
// record. When bHost is set, the entry is also recorded as a plain (non-fork)
// call on the first module in m_modList so the host can reach it.
CCxrEntry * CDsnInfo::AddEntry(CModule * pModule, string funcName, string entryInstr, string reserve, bool &bHost)
{
	if (bHost) {
		bool isCall = true;   // host entries are ordinary calls...
		bool isFork = false;  // ...never forks
		AddCall(m_modList[0], funcName, isCall, isFork, string("0"), string("auto"));
	}
	return &pModule->AddEntry(funcName, entryInstr, reserve);
}
// Tear down a method-state frame and release its thread-stack storage.
// On return, *ppMethodState is NULL.
void MethodState_Delete(tThread *pThread, tMethodState **ppMethodState) {
	tMethodState *pState = *ppMethodState;

#ifdef GEN_COMBINED_OPCODES
	// The frame was running the combined JIT iff its pJIT differs from the
	// method's plain JIT; only then was callStackCount incremented on entry.
	if (pState->pJIT != pState->pMethod->pJITted) {
		pState->pMethod->callStackCount--;
	}
	// Credit the method being returned to with a call. This gives a more
	// correct usage heuristic for long-running methods that call lots of
	// other methods.
	if (pState->pCaller != NULL) {
		AddCall(pState->pCaller->pMethod);
	}
#endif
#ifdef DIAG_METHOD_CALLS
	// Accumulate wall-clock time spent in this invocation.
	pState->pMethod->totalTime += microTime() - pState->startTime;
#endif
	// If this frame was a finalizer, tell the heap the finalizer has run.
	if (pState->finalizerThis != NULL) {
		Heap_UnmarkFinalizer(pState->finalizerThis);
	}
	if (pState->pDelegateParams != NULL) {
		free(pState->pDelegateParams);
	}
	// The thread stack-free routine just resets the allocation offset to the
	// address given, so freeing the first chunk (the frame itself) also
	// releases the eval stack and params/locals allocated after it.
	Thread_StackFree(pThread, pState);
	*ppMethodState = NULL;
}
// Allocate and initialize a method-state frame for pMethod on pThread's stack,
// JIT-compiling the method first if needed. pCaller is the frame to return to
// (may be NULL); isInternalNewObjCall flags runtime-internal constructor calls.
// Returns the new frame. All storage comes from Thread_StackAlloc, so it is
// released as a unit by MethodState_Delete.
tMethodState* MethodState_Direct(tThread *pThread, tMD_MethodDef *pMethod, tMethodState *pCaller, U32 isInternalNewObjCall) {
	tMethodState *pThis;

	// Ensure the declaring type's metadata is filled before executing the method.
	if (!pMethod->isFilled) {
		tMD_TypeDef *pTypeDef;
		pTypeDef = MetaData_GetTypeDefFromMethodDef(pMethod);
		MetaData_Fill_TypeDef(pTypeDef, NULL, NULL);
	}

	pThis = (tMethodState*)Thread_StackAlloc(pThread, sizeof(tMethodState));
	pThis->finalizerThis = NULL;
	pThis->pCaller = pCaller;
	pThis->pMetaData = pMethod->pMetaData;
	pThis->pMethod = pMethod;
	if (pMethod->pJITted == NULL) {
		// If method has not already been JITted
		JIT_Prepare(pMethod, 0);
	}
	pThis->pJIT = pMethod->pJITted;
	pThis->ipOffset = 0;
	// Eval stack sized from the plain JIT's maxStack (combined JIT, if chosen
	// below, presumably needs no more — TODO confirm).
	pThis->pEvalStack = (PTR)Thread_StackAlloc(pThread, pThis->pMethod->pJITted->maxStack);
	pThis->stackOfs = 0;
	pThis->isInternalNewObjCall = isInternalNewObjCall;
	pThis->pNextDelegate = NULL;
	pThis->pDelegateParams = NULL;
	// Parameters and locals share one zero-initialized allocation.
	pThis->pParamsLocals = (PTR)Thread_StackAlloc(pThread, pMethod->parameterStackSize + pMethod->pJITted->localsStackSize);
	memset(pThis->pParamsLocals, 0, pMethod->parameterStackSize + pMethod->pJITted->localsStackSize);

#ifdef GEN_COMBINED_OPCODES
	// Bump this method's call-frequency counter used by the heuristic below.
	AddCall(pMethod);

	/*if (combinedJITSize < GEN_COMBINED_OPCODES_MAX_MEMORY) {
		if (pMethod->genCallCount > GEN_COMBINED_OPCODES_CALL_TRIGGER) {
			if (pMethod->pJITtedCombined == NULL) {
				JIT_Prepare(pMethod, 1);
				combinedJITSize += pMethod->pJITtedCombined->opsMemSize;
			}
		}
	}*/

	// Consider building a combined-opcode JIT for this method: it must not
	// already have one, must be called often enough, and must rank high enough
	// in the highest-calls list (ends of the list, or neighbours already
	// combined) that spending the memory budget on it looks worthwhile.
	if (pMethod->pJITtedCombined == NULL &&
		pMethod->genCallCount >= GEN_COMBINED_OPCODES_CALL_TRIGGER &&
		(pMethod->pNextHighestCalls == NULL ||
		 pMethod->pPrevHighestCalls == NULL ||
		 pMethod->pPrevHighestCalls->pJITtedCombined != NULL ||
		 (combinedJITSize < GEN_COMBINED_OPCODES_MAX_MEMORY &&
		  pMethod->pNextHighestCalls->pJITtedCombined != NULL))) {
		// Do a combined JIT, if there's enough room after removing combined JIT from previous
		if (combinedJITSize > GEN_COMBINED_OPCODES_MAX_MEMORY) {
			// Remove the least-called function's combined JIT
			tMD_MethodDef *pToRemove = pMethod;
			while (pToRemove->pPrevHighestCalls != NULL && pToRemove->pPrevHighestCalls->pJITtedCombined != NULL) {
				pToRemove = pToRemove->pPrevHighestCalls;
			}
			if (pToRemove != pMethod) {
				RemoveCombinedJIT(pToRemove);
			}
		}
		if (combinedJITSize < GEN_COMBINED_OPCODES_MAX_MEMORY) {
			// If there's enough room, then create new combined JIT
			AddCombinedJIT(pMethod);
		}
	}

	// See if there is a combined opcode JIT ready to use; if so, run it and
	// count this frame on the method's combined-JIT call stack (undone in
	// MethodState_Delete).
	if (pMethod->pJITtedCombined != NULL) {
		pThis->pJIT = pMethod->pJITtedCombined;
		pMethod->callStackCount++;
	}
#endif
#ifdef DIAG_METHOD_CALLS
	// Keep track of the number of times this method is called
	pMethod->callCount++;
	pThis->startTime = microTime();
#endif

	return pThis;
}
/* Emit the intermediate code for a procedure call described by 'call'.
 * use_return - caller wants the return value; in_line - parms for an
 * in-line routine. Returns an address name for the temp holding the
 * result. NOTE(review): 'an'/'cn' are Watcom cg typedefs (address/call
 * name) — semantics assumed from usage here; confirm against bgdefs.
 */
an BGCall( cn call, bool use_return, bool in_line )
/******************************************************/
{
    instruction *call_ins;
    call_state  *state;
    name        *ret_ptr = NULL;
    name        *result;
    name        *temp;
    name        *reg_name;
    instruction *ret_ins = NULL;
    hw_reg_set  return_reg;
    hw_reg_set  zap_reg;

    if( call->name->tipe == TypeProcParm ) {
        SaveDisplay( OP_PUSH );
    }
    state = call->state;
    result = BGNewTemp( call->tipe );
    call_ins = call->ins;
    /* If we have a return value that won't fit in a register*/
    /* pass a pointer to result as the first parm*/
    if( call_ins->type_class == XX ) {
        if( _RoutineIsFar16( state->attr ) ) {
            /* Far16 convention fixes which register carries the struct-return
               pointer, depending on who allocates the return buffer. */
            if( state->attr & ROUTINE_ALLOCS_RETURN ) {
                HW_CAsgn( state->return_reg, HW_EAX );
            } else {
                HW_CAsgn( state->return_reg, HW_EBX );
            }
        }
        if( ( state->attr & ROUTINE_ALLOCS_RETURN ) == 0 ) {
            /* Caller allocates: materialize &result either in a temp (pushed
               later) or directly in the convention's return register. */
            if( HW_CEqual( state->return_reg, HW_EMPTY ) ) {
                ret_ptr = AllocTemp( WD );
            } else {
                ret_ptr = AllocRegName( state->return_reg );
            }
            ret_ins = MakeUnary( OP_LA, result, ret_ptr, WD );
            HW_TurnOn( state->parm.used, state->return_reg );
            call_ins->flags.call_flags |= CALL_RETURNS_STRUCT;
        }
    }
    if( _IsTargetModel(FLOATING_DS) && (state->attr&ROUTINE_NEEDS_DS_LOADED) ) {
        HW_CTurnOn( state->parm.used, HW_DS );
    }
    if( _RoutineIsFar16( state->attr ) ) {
#if _TARGET & _TARG_80386
        Far16Parms( call );
#endif
    } else {
        if( AssgnParms( call, in_line ) ) {
            if( state->attr & ROUTINE_REMOVES_PARMS ) {
                call_ins->flags.call_flags |= CALL_POPS_PARMS;
            }
        }
    }
    /* Translate routine attributes into call-instruction flags for the
       optimizer. */
    if( state->attr & (ROUTINE_MODIFIES_NO_MEMORY | ROUTINE_NEVER_RETURNS) ) {
        /* a routine that never returns can not write any memory as far
           as this routine is concerned */
        call_ins->flags.call_flags |= CALL_WRITES_NO_MEMORY;
    }
    if( state->attr & ROUTINE_READS_NO_MEMORY ) {
        call_ins->flags.call_flags |= CALL_READS_NO_MEMORY;
    }
    if( state->attr & ROUTINE_NEVER_RETURNS ) {
        call_ins->flags.call_flags |= CALL_ABORTS;
    }
    if( _RoutineIsInterrupt( state->attr ) ) {
        call_ins->flags.call_flags |= CALL_INTERRUPT | CALL_POPS_PARMS;
    }
    if( !use_return ) {
        call_ins->flags.call_flags |= CALL_IGNORES_RETURN;
    }
    if( call_ins->type_class == XX ) {
        /* Structure return. */
        reg_name = AllocRegName( state->return_reg );
        if( state->attr & ROUTINE_ALLOCS_RETURN ) {
            /* Callee allocates: it hands back a pointer in return_reg;
               copy the pointed-at value into our result temp. */
            call_ins->result = reg_name;
            AddCall( call_ins, call );
            if( use_return ) {
                temp = AllocTemp( WD );        /* assume near pointer*/
                AddIns( MakeMove( reg_name, temp, WD ) );
                temp = SAllocIndex( temp, NULL, 0, result->n.type_class, call->tipe->length );
                AddIns( MakeMove( temp, result, result->n.type_class ) );
            }
        } else {
            /* Caller allocates: emit the OP_LA built earlier; if there is no
               register for the pointer, push it as a hidden first parameter
               and account for its stack space in the pop count. */
            call_ins->result = result;
            AddIns( ret_ins );
            if( HW_CEqual( state->return_reg, HW_EMPTY ) ) {
                AddIns( MakeUnary( OP_PUSH, ret_ptr, NULL, WD ) );
                state->parm.offset += TypeClassSize[WD];
                call_ins->operands[CALL_OP_POPS] = AllocS32Const( call_ins->operands[CALL_OP_POPS]->c.lo.int_value + TypeClassSize[WD] );
                if( state->attr & ROUTINE_REMOVES_PARMS ) {
                    call_ins->flags.call_flags |= CALL_POPS_PARMS;
                }
            }
            AddCall( call_ins, call );
        }
    } else {
        /* Scalar return: the call's result register set is the return
           register limited to what the call is allowed to zap (floating-point
           stack regs always count as zapped). */
        return_reg = state->return_reg;
        zap_reg = call_ins->zap->reg;
        HW_CTurnOn( zap_reg, HW_FLTS );
        HW_OnlyOn( return_reg, zap_reg );
        call_ins->result = AllocRegName( return_reg );
        reg_name = AllocRegName( state->return_reg );
        AddCall( call_ins, call );
        if( use_return ) {
            ret_ins = MakeMove( reg_name, result, result->n.type_class );
            if( HW_COvlap( reg_name->r.reg, HW_FLTS ) ) {
                /* Value arrives on the x87 stack; the move consumes it. */
                ret_ins->stk_entry = 1;
                ret_ins->stk_exit = 0;
            }
            AddIns( ret_ins );
        }
    }
    /* Caller-pops convention: restore SP past the pushed parameters. */
    if( state->parm.offset != 0 && ( state->attr & ROUTINE_REMOVES_PARMS ) == 0 ) {
        reg_name = AllocRegName( HW_SP );
        AddIns( MakeBinary( OP_ADD, reg_name, AllocS32Const( state->parm.offset ), reg_name, WD ) );
    }
    return( MakeTempAddr( result ) );
}