Example #1
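CLR_RT_StackFrame::Pop() tears down the current managed stack frame: it updates the call-chain profiler, notifies the debugger, runs any on-pop callback, releases the synchronization the frame holds, propagates the return value (or a constructor's 'this') to the caller, unwinds nested exception handlers and any sub-thread owned by the frame, and finally returns the frame's memory to the event cache.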
void CLR_RT_StackFrame::Pop()
{
    NATIVE_PROFILE_CLR_CORE();

#if defined(TINYCLR_PROFILE_NEW_CALLS)
    {
        //
        // This passivates any outstanding handler.
        //
        CLR_PROF_HANDLER_CALLCHAIN(pm2,m_callchain);

        m_callchain.Leave();
    }
#endif

#if defined(TINYCLR_PROFILE_NEW_CALLS)
    g_CLR_PRF_Profiler.RecordFunctionReturn( m_owningThread, m_callchain );
#endif

#if defined(TINYCLR_ENABLE_SOURCELEVELDEBUGGING)
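    //
    // If a Just-My-Code stepper or a breakpoint is active on this frame,
    // tell the debugger that the frame is about to be popped.
    //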
    if(m_owningThread->m_fHasJMCStepper || (m_flags & c_HasBreakpoint))
    {
        g_CLR_RT_ExecutionEngine.Breakpoint_StackFrame_Pop( this, false );
    }
#endif

    const CLR_UINT32 c_flagsToCheck = CLR_RT_StackFrame::c_CallOnPop | CLR_RT_StackFrame::c_Synchronized | CLR_RT_StackFrame::c_SynchronizedGlobally | CLR_RT_StackFrame::c_NativeProfiled;

    if(m_flags & c_flagsToCheck)
    {
        if(m_flags & CLR_RT_StackFrame::c_CallOnPop)
        {
            m_flags |= CLR_RT_StackFrame::c_CalledOnPop;

            if(m_nativeMethod)
            {
                (void)m_nativeMethod( *this );
            }
        }

        if(m_flags & CLR_RT_StackFrame::c_Synchronized)
        {
            m_flags &= ~CLR_RT_StackFrame::c_Synchronized;

            (void)HandleSynchronized( false, false );
        }

        if(m_flags & CLR_RT_StackFrame::c_SynchronizedGlobally)
        {
            m_flags &= ~CLR_RT_StackFrame::c_SynchronizedGlobally;

            (void)HandleSynchronized( false, true );
        }

#if defined(ENABLE_NATIVE_PROFILER)
        if(m_flags & CLR_RT_StackFrame::c_NativeProfiled)
        {
            m_owningThread->m_fNativeProfiled = false;
            m_flags &= ~CLR_RT_StackFrame::c_NativeProfiled;
            Native_Profiler_Stop();
        }
#endif
    }

    CLR_RT_StackFrame* caller = Caller();

    if(caller->Prev() != NULL)
    {
#if defined(TINYCLR_ENABLE_SOURCELEVELDEBUGGING)
        if(caller->m_flags & CLR_RT_StackFrame::c_HasBreakpoint)
        {
            g_CLR_RT_ExecutionEngine.Breakpoint_StackFrame_Step( caller, caller->m_IP );
        }
#endif

        //
        // Constructors are slightly different, they push the 'this' pointer back into the caller stack.
        //
        // This is to enable the special case for strings, where the object can be recreated by the constructor...
        //
        if(caller->m_flags & CLR_RT_StackFrame::c_ExecutingConstructor)
        {
            CLR_RT_HeapBlock& src = this  ->Arg0              (     );
            CLR_RT_HeapBlock& dst = caller->PushValueAndAssign( src );

            dst.Promote();

            //
            // Undo the special "object -> reference" hack done by CEE_NEWOBJ.
            //
            if(dst.DataType() == DATATYPE_BYREF)
            {
                dst.ChangeDataType( DATATYPE_OBJECT );
            }

            caller->m_flags &= ~CLR_RT_StackFrame::c_ExecutingConstructor;

            _ASSERTE((m_flags & CLR_RT_StackFrame::c_AppDomainTransition) == 0);
        }
        else
        {   //Note that ExecutingConstructor is checked on 'caller', whereas the other two flags are checked on 'this'
            const CLR_UINT32 c_moreFlagsToCheck = CLR_RT_StackFrame::c_PseudoStackFrameForFilter | CLR_RT_StackFrame::c_AppDomainTransition;

            if(m_flags & c_moreFlagsToCheck)
            {
                if(m_flags & CLR_RT_StackFrame::c_PseudoStackFrameForFilter)
                {
                    //Do nothing here. Pushing return values onto stack frames that don't expect them is a bad idea.
                }
#if defined(TINYCLR_APPDOMAINS)
                else if((m_flags & CLR_RT_StackFrame::c_AppDomainTransition) != 0)
                {
                    (void)PopAppDomainTransition();
                }
#endif
            }
            else //!c_moreFlagsToCheck
            {
                //
                // Push the return, if any.
                //
                if(m_call.m_target->retVal != DATATYPE_VOID)
                {
                    if(m_owningThread->m_currentException.Dereference() == NULL)
                    {
                        CLR_RT_HeapBlock& src = this  ->TopValue          (     );
                        CLR_RT_HeapBlock& dst = caller->PushValueAndAssign( src );

                        dst.Promote();
                    }
                }
            }
        }
    }
#if defined(TINYCLR_ENABLE_SOURCELEVELDEBUGGING)
    else
    {
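        //
        // This was the bottom frame of the thread. If a scratch-pad slot was assigned
        // (e.g. for a debugger-initiated call), store the pending exception or the
        // method's return value there, boxing value types and enums as needed.
        //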
        int idx = m_owningThread->m_scratchPad;

        if(idx >= 0)
        {
            CLR_RT_HeapBlock_Array* array = g_CLR_RT_ExecutionEngine.m_scratchPadArray;

            if(array && array->m_numOfElements > (CLR_UINT32)idx)
            {
                CLR_RT_HeapBlock* dst       = (CLR_RT_HeapBlock*)array->GetElement( (CLR_UINT32)idx );
                CLR_RT_HeapBlock* exception = m_owningThread->m_currentException.Dereference();

                dst->SetObjectReference( NULL );

                if(exception != NULL)
                {
                    dst->SetObjectReference( exception );
                }
                else if(m_call.m_target->retVal != DATATYPE_VOID)
                {
                    CLR_RT_SignatureParser sig;
                    sig.Initialize_MethodSignature( this->m_call.m_assm, this->m_call.m_target );
                    CLR_RT_SignatureParser::Element res;
                    CLR_RT_TypeDescriptor           desc;

                    dst->Assign( this->TopValue() );

                    //Perform boxing, if needed.

                    //Box to the return value type
                    _SIDE_ASSERTE(SUCCEEDED(sig.Advance( res )));
                    _SIDE_ASSERTE(SUCCEEDED(desc.InitializeFromType( res.m_cls )));


                    if(c_CLR_RT_DataTypeLookup[ this->DataType() ].m_flags & CLR_RT_DataTypeLookup::c_OptimizedValueType ||
                       desc.m_handlerCls.m_target->IsEnum())
                    {
                        if(FAILED(dst->PerformBoxing( desc.m_handlerCls )))
                        {
                            dst->SetObjectReference( NULL );
                        }
                    }
                }
            }
        }
    }
#endif

    //
    // We could be jumping outside of a nested exception handler.
    //

    m_owningThread->PopEH( this, NULL );


    //
    // If this StackFrame owns a SubThread, kill it.
    //
    {
        CLR_RT_SubThread* sth = (CLR_RT_SubThread*)m_owningSubThread->Next();

        if(sth->Next() && sth->m_owningStackFrame == this)
        {
            CLR_RT_SubThread::DestroyInstance( sth->m_owningThread, sth, CLR_RT_SubThread::MODE_IncludeSelf );
        }
    }

    g_CLR_RT_EventCache.Append_Node( this );
}
Example #2
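CLR_RT_StackFrame::Push() builds a new stack frame for a method call: it allocates the frame from the event cache, lays out the locals and evaluation stack, picks the execution path (delegate invoke, native, jitted, or interpreted IL), records debugging, profiling, and synchronization flags, and, when the arguments were pushed on the caller's evaluation stack, links the two frames together.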
HRESULT CLR_RT_StackFrame::Push( CLR_RT_Thread* th, const CLR_RT_MethodDef_Instance& callInst, CLR_INT32 extraBlocks )
{
    NATIVE_PROFILE_CLR_CORE();
    TINYCLR_HEADER();

    CLR_RT_StackFrame*               stack;
    CLR_RT_StackFrame*               caller;
    CLR_RT_Assembly*                 assm;
    const CLR_RECORD_METHODDEF*      md;
    const CLR_RT_MethodDef_Instance* callInstPtr = &callInst;
    CLR_UINT32                       sizeLocals;
    CLR_UINT32                       sizeEvalStack;

#if defined(PLATFORM_WINDOWS)
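    //
    // When simulating device speed on the Windows build, charge a fixed
    // per-call delay (the time is excluded from profiling).
    //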
    if(s_CLR_RT_fTrace_SimulateSpeed > c_CLR_RT_Trace_None)
    {
        CLR_PROF_Handler::SuspendTime();

        HAL_Windows_FastSleep( g_HAL_Configuration_Windows.TicksPerMethodCall );

        CLR_PROF_Handler::ResumeTime();
    }
#endif

    assm          = callInstPtr->m_assm;
    md            = callInstPtr->m_target;

    sizeLocals    = md->numLocals;
    sizeEvalStack = md->lengthEvalStack + CLR_RT_StackFrame::c_OverheadForNewObjOrInteropMethod;

    //--//

    caller = th->CurrentFrame();

    //--//

    //
    // Allocate memory for the runtime state.
    //
    {
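        //
        // Room for the locals and the evaluation stack (plus any extra blocks), with a
        // floor of c_MinimumStack, plus the frame header itself expressed in heap blocks.
        //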
        CLR_UINT32 memorySize = sizeLocals + sizeEvalStack;

        if(extraBlocks > 0             ) memorySize += extraBlocks;
        if(memorySize  < c_MinimumStack) memorySize  = c_MinimumStack;

        memorySize += CONVERTFROMSIZETOHEAPBLOCKS(offsetof(CLR_RT_StackFrame,m_extension));

        stack = EVENTCACHE_EXTRACT_NODE_AS_BLOCKS(g_CLR_RT_EventCache,CLR_RT_StackFrame,DATATYPE_STACK_FRAME,0,memorySize);
        CHECK_ALLOCATION(stack);
    }

    //--//

    {   //
        stack->m_owningSubThread = th->CurrentSubThread();  // CLR_RT_SubThread*         m_owningSubThread;  // EVENT HEAP - NO RELOCATION -
        stack->m_owningThread    = th;                      // CLR_RT_Thread*            m_owningThread;     // EVENT HEAP - NO RELOCATION -
        // CLR_UINT32                m_flags;
        //
        stack->m_call            = *callInstPtr;            // CLR_RT_MethodDef_Instance m_call;
        //
        // CLR_RT_MethodHandler      m_nativeMethod;
        // CLR_PMETADATA             m_IPstart;          // ANY   HEAP - DO RELOCATION -
        // CLR_PMETADATA             m_IP;               // ANY   HEAP - DO RELOCATION -
        //
        stack->m_locals          = stack->m_extension;      // CLR_RT_HeapBlock*         m_locals;           // EVENT HEAP - NO RELOCATION -
        stack->m_evalStack       = stack->m_extension + sizeLocals;                      // CLR_RT_HeapBlock*         m_evalStack;        // EVENT HEAP - NO RELOCATION -
        stack->m_evalStackPos    = stack->m_evalStack;      // CLR_RT_HeapBlock*         m_evalStackPos;     // EVENT HEAP - NO RELOCATION -
        stack->m_evalStackEnd    = stack->m_evalStack + sizeEvalStack;                      // CLR_RT_HeapBlock*         m_evalStackEnd;     // EVENT HEAP - NO RELOCATION -
        stack->m_arguments       = NULL;                    // CLR_RT_HeapBlock*         m_arguments;        // EVENT HEAP - NO RELOCATION -
        //
        // union
        // {
        stack->m_customState     = 0;                       //    CLR_UINT32             m_customState;
        //    void*                  m_customPointer;
        // };
        //
#if defined(TINYCLR_PROFILE_NEW_CALLS)
        stack->m_callchain.Enter( stack );                  // CLR_PROF_CounterCallChain m_callchain;
#endif
        //
        // CLR_RT_HeapBlock          m_extension[1];
        //
#if defined(ENABLE_NATIVE_PROFILER)
        stack->m_fNativeProfiled = stack->m_owningThread->m_fNativeProfiled;
#endif
        CLR_RT_MethodHandler impl;

#if defined(TINYCLR_APPDOMAINS)
        stack->m_appDomain = g_CLR_RT_ExecutionEngine.GetCurrentAppDomain();
#endif

        if(md->flags & CLR_RECORD_METHODDEF::MD_DelegateInvoke) // Special case for delegate calls.
        {
            stack->m_nativeMethod = (CLR_RT_MethodHandler)CLR_RT_Thread::Execute_DelegateInvoke;

            stack->m_flags   = CLR_RT_StackFrame::c_MethodKind_Native;
            stack->m_IPstart = NULL;
        }
        else if(assm->m_nativeCode && (impl = assm->m_nativeCode[ stack->m_call.Method() ]) != NULL)
        {
            stack->m_nativeMethod = impl;

            stack->m_flags   = CLR_RT_StackFrame::c_MethodKind_Native;
            stack->m_IPstart = NULL;
            stack->m_IP      = NULL;
        }
#if defined(TINYCLR_JITTER)
        else if(assm->m_jittedCode && (impl = assm->m_jittedCode[ stack->m_call.Method() ]) != NULL)
        {
            stack->m_nativeMethod = (CLR_RT_MethodHandler)(size_t)g_thunkTable.m_address__Internal_Initialize;

            stack->m_flags   = CLR_RT_StackFrame::c_MethodKind_Jitted;
            stack->m_IPstart = (CLR_PMETADATA)impl;

            if(md->flags & CLR_RECORD_METHODDEF::MD_HasExceptionHandlers)
            {
                CLR_UINT32 numEh = *(CLR_UINT32*)stack->m_IPstart;

                stack->m_IP = stack->m_IPstart + sizeof(CLR_UINT32) + numEh * sizeof(CLR_RT_ExceptionHandler);
            }
            else
            {
                stack->m_IP = stack->m_IPstart;
            }
        }
#endif
        else
        {
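            //
            // No native or jitted body is available: interpret the IL, starting at the method's RVA.
            //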
            stack->m_nativeMethod = (CLR_RT_MethodHandler)CLR_RT_Thread::Execute_IL;

            if(md->RVA == CLR_EmptyIndex) TINYCLR_SET_AND_LEAVE(CLR_E_NOT_SUPPORTED);

            stack->m_flags   = CLR_RT_StackFrame::c_MethodKind_Interpreted;
            stack->m_IPstart = assm->GetByteCode( md->RVA );
            stack->m_IP      = stack->m_IPstart;
        }

#if defined(ENABLE_NATIVE_PROFILER)
        if(stack->m_owningThread->m_fNativeProfiled == false && md->flags & CLR_RECORD_METHODDEF::MD_NativeProfiled)
        {
            stack->m_flags |= CLR_RT_StackFrame::c_NativeProfiled;
            stack->m_owningThread->m_fNativeProfiled = true;
        }
#endif

        //--//

        th->m_stackFrames.LinkAtBack( stack );

#if defined(TINYCLR_PROFILE_NEW_CALLS)
        g_CLR_PRF_Profiler.RecordFunctionCall( th, callInst );
#endif
    }

    if(md->numLocals)
    {
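        //
        // Initialize the frame's local variables to their default values.
        //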
        g_CLR_RT_ExecutionEngine.InitializeLocals( stack->m_locals, assm, md );
    }

    {
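        //
        // If the method is declared Synchronized and/or GloballySynchronized, mark the
        // frame; the locks themselves are acquired later and released in Pop().
        //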
        CLR_UINT32 flags = md->flags & (md->MD_Synchronized | md->MD_GloballySynchronized);

        if(flags)
        {
            if(flags & md->MD_Synchronized        ) stack->m_flags |= c_NeedToSynchronize;
            if(flags & md->MD_GloballySynchronized) stack->m_flags |= c_NeedToSynchronizeGlobally;
        }
    }

#if defined(TINYCLR_ENABLE_SOURCELEVELDEBUGGING)
    stack->m_depth = stack->Caller()->Prev() ? stack->Caller()->m_depth + 1 : 0;

    if(g_CLR_RT_ExecutionEngine.m_breakpointsNum)
    {
        if(stack->m_call.DebuggingInfo().HasBreakpoint())
        {
            stack->m_flags |= CLR_RT_StackFrame::c_HasBreakpoint;
        }

        if(stack->m_owningThread->m_fHasJMCStepper || (stack->m_flags & c_HasBreakpoint) || (caller->Prev() != NULL && (caller->m_flags & c_HasBreakpoint)))
        {
            g_CLR_RT_ExecutionEngine.Breakpoint_StackFrame_Push( stack, CLR_DBG_Commands::Debugging_Execution_BreakpointDef::c_DEPTH_STEP_CALL );
        }
    }
#endif

    //--//

#if defined(TINYCLR_JITTER)
    if(s_CLR_RT_fJitter_Enabled && (stack->m_flags & CLR_RT_StackFrame::c_MethodKind_Mask) == CLR_RT_StackFrame::c_MethodKind_Interpreted)
    {
        CLR_RT_ExecutionEngine::ExecutionConstraint_Suspend();

        g_CLR_RT_ExecutionEngine.Compile( stack->m_call, CLR_RT_ExecutionEngine::c_Compile_ARM );

        CLR_RT_ExecutionEngine::ExecutionConstraint_Resume();

        if(assm->m_jittedCode)
        {
            CLR_PMETADATA ipStart = (CLR_PMETADATA)assm->m_jittedCode[ stack->m_call.Method() ];

            if(ipStart != NULL)
            {
                stack->m_nativeMethod = (CLR_RT_MethodHandler)(size_t)g_thunkTable.m_address__Internal_Initialize;

                stack->m_IPstart = ipStart;

                if(md->flags & CLR_RECORD_METHODDEF::MD_HasExceptionHandlers)
                {
                    CLR_UINT32 numEh = *(CLR_UINT32*)ipStart;

                    stack->m_IP = ipStart + sizeof(CLR_UINT32) + numEh * sizeof(CLR_RT_ExceptionHandler);
                }
                else
                {
                    stack->m_IP = ipStart;
                }

                stack->m_flags &= ~CLR_RT_StackFrame::c_MethodKind_Mask;
                stack->m_flags |=  CLR_RT_StackFrame::c_MethodKind_Jitted;
            }
        }
    }
#endif

    if(caller->Prev() != NULL && caller->m_nativeMethod == stack->m_nativeMethod)
    {
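        //
        // The caller is driven by the same execution handler, so calls and returns
        // between the two frames can be fast-pathed; pending synchronization work
        // still forces the slow path on the call side.
        //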
        if(stack->m_flags & CLR_RT_StackFrame::c_ProcessSynchronize)
        {
            stack->m_flags |= CLR_RT_StackFrame::c_CallerIsCompatibleForRet;
        }
        else
        {
            stack->m_flags |= CLR_RT_StackFrame::c_CallerIsCompatibleForCall | CLR_RT_StackFrame::c_CallerIsCompatibleForRet;
        }
    }

    //
    // If the arguments are in the caller's stack frame (extraBlocks < 0), let's link the two.
    //
    if(extraBlocks < 0)
    {
#if defined(PLATFORM_WINDOWS) || (defined(PLATFORM_WINCE) && defined(_DEBUG))
        if(caller->m_evalStackPos > caller->m_evalStackEnd)
        {
            TINYCLR_SET_AND_LEAVE(CLR_E_STACK_OVERFLOW);
        }
#endif

        //
        // Everything is set up correctly, pop the operands.
        //
        stack->m_arguments = &caller->m_evalStackPos[ -md->numArgs ];

        caller->m_evalStackPos = stack->m_arguments;

#if defined(PLATFORM_WINDOWS) || (defined(PLATFORM_WINCE) && defined(_DEBUG))
        if(stack->m_arguments < caller->m_evalStack)
        {
            TINYCLR_SET_AND_LEAVE(CLR_E_STACK_UNDERFLOW);
        }
#endif
    }
    else
    {
        stack->m_arguments = stack->m_evalStackEnd;
    }

    TINYCLR_CLEANUP();


    TINYCLR_CLEANUP_END();
}