// Runs a CompiledCode as a top-level script body: self is the main object,
// there is no block, and the lexical module is Object. Returns the script's
// result, or NULL if an interrupt check aborts before execution.
Object* MachineCode::execute_as_script(STATE, CompiledCode* code, CallFrame* previous) {
  MachineCode* mcode = code->machine_code();

  // Locals live on the C stack for the duration of this frame.
  StackVariables* scope = ALLOCA_STACKVARIABLES(mcode->number_of_locals);
  // Originally, I tried using msg.module directly, but what happens is if
  // super is used, that field is read. If you combine that with the method
  // being called recursively, msg.module can change, causing super() to
  // look in the wrong place.
  //
  // Thus, we have to cache the value in the StackVariables.
  scope->initialize(G(main), cNil, G(object), mcode->number_of_locals);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(mcode->stack_size);
  frame->prepare(mcode->stack_size);

  // Synthetic arguments: scripts receive no real args; the name is just a
  // marker for backtraces/tools.
  Arguments args(state->symbol("__script__"), G(main), cNil, 0, 0);

  frame->previous = previous;
  frame->constant_scope_ = 0;
  frame->dispatch_data = 0;
  frame->compiled_code = code;
  frame->flags = 0;
  frame->optional_jit_data = 0;
  frame->top_scope_ = 0;
  frame->scope = scope;
  frame->arguments = &args;

  // Do NOT check if we should JIT this. We NEVER want to jit a script.

  // Check the stack and interrupts here rather than in the interpreter
  // loop itself. The frame must be fully initialized first, since the
  // checkpoint may trigger GC which walks this frame.
  GCTokenImpl gct;
  if(!state->check_interrupts(gct, frame, frame)) return NULL;

  state->checkpoint(gct, frame);

  // Don't generate profiling info here, it's expected
  // to be done by the caller.
  return (*mcode->run)(state, mcode, frame);
}
// Installed by default in BlockEnvironment::execute, it runs the bytecodes
// for the block in the interpreter.
//
// Future code will detect hot blocks and queue them in the JIT, whereby the
// JIT will install a newly minted machine function into ::execute.
//
// Returns the block's result, or NULL on argument mismatch / interrupt.
Object* BlockEnvironment::execute_interpreter(STATE, CallFrame* previous,
                                              BlockEnvironment* env, Arguments& args,
                                              BlockInvocation& invocation)
{
  // Don't use env->machine_code() because it might lock and the work should
  // already be done.
  MachineCode* const mcode = env->compiled_code_->machine_code();

  if(!mcode) {
    Exception::internal_error(state, previous, "invalid bytecode method");
    return 0;
  }

#ifdef ENABLE_LLVM
  // Usage-based JIT: a negative call_count disables compilation for this
  // block; otherwise queue it once the threshold is reached.
  if(mcode->call_count >= 0) {
    if(mcode->call_count >= state->shared().config.jit_threshold_compile) {
      // Root env across the allocation-capable compile_soon call.
      OnStack<1> os(state, env);

      G(jit)->compile_soon(state, env->compiled_code(), previous,
                           invocation.self->direct_class(state), env, true);
    } else {
      mcode->call_count++;
    }
  }
#endif

  StackVariables* scope = ALLOCA_STACKVARIABLES(mcode->number_of_locals);

  // The invocation may override the module (e.g. define_method blocks);
  // otherwise use the block's home module.
  Module* mod = invocation.module;
  if(!mod) mod = env->module();

  // Pull the block argument from the home (top) scope, if there is one.
  // NOTE: this inner `scope` deliberately shadows the StackVariables above.
  Object* block = cNil;
  if(VariableScope* scope = env->top_scope_) {
    if(!scope->nil_p()) block = scope->block();
  }

  scope->initialize(invocation.self, block, mod, mcode->number_of_locals);
  scope->set_parent(env->scope_);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(mcode->stack_size);

  frame->prepare(mcode->stack_size);

  frame->previous = previous;
  frame->constant_scope_ = invocation.constant_scope;
  frame->arguments = &args;
  frame->dispatch_data = env;
  frame->compiled_code = env->compiled_code_;
  frame->scope = scope;
  frame->top_scope_ = env->top_scope_;
  frame->flags = invocation.flags | CallFrame::cMultipleScopes | CallFrame::cBlock;

  // Destructure/bind the incoming arguments into the scope's locals.
  if(!GenericArguments::call(state, frame, mcode, scope, args, invocation.flags)) {
    // Only raise an ArgumentError if nothing is already being raised
    // (GenericArguments may have set its own exception state).
    if(state->vm()->thread_state()->raise_reason() == cNone) {
      Exception* exc =
        Exception::make_argument_error(state, mcode->required_args, args.total(),
                                       mcode->name());
      exc->locations(state, Location::from_call_stack(state, previous));
      state->raise_exception(exc);
    }
    return NULL;
  }

#ifdef RBX_PROFILER
  if(unlikely(state->vm()->tooling())) {
    // Report the attached module, not the singleton class, to tooling.
    Module* mod = scope->module();
    if(SingletonClass* sc = try_as<SingletonClass>(mod)) {
      if(Module* ma = try_as<Module>(sc->singleton())) {
        mod = ma;
      }
    }

    OnStack<2> os(state, env, mod);

    // Check the stack and interrupts here rather than in the interpreter
    // loop itself.
    GCTokenImpl gct;
    if(!state->check_interrupts(gct, frame, frame)) return NULL;

    state->checkpoint(gct, frame);

    tooling::BlockEntry method(state, env, mod);
    return (*mcode->run)(state, mcode, frame);
  } else {
    // Check the stack and interrupts here rather than in the interpreter
    // loop itself.
    GCTokenImpl gct;
    if(!state->check_interrupts(gct, frame, frame)) return NULL;

    state->checkpoint(gct, frame);

    return (*mcode->run)(state, mcode, frame);
  }
#else
  // Check the stack and interrupts here rather than in the interpreter
  // loop itself.
  GCTokenImpl gct;
  if(!state->check_interrupts(gct, frame, frame)) return NULL;

  state->checkpoint(gct, frame);

  return (*mcode->run)(state, mcode, frame);
#endif
}
// Installed by default in BlockEnvironment::execute, it runs the bytecodes
// for the block in the interpreter.
//
// Future code will detect hot blocks and queue them in the JIT, whereby the
// JIT will install a newly minted machine function into ::execute.
//
// Returns the block's result, or NULL when an interrupt check aborts.
Object* BlockEnvironment::execute_interpreter(STATE, CallFrame* previous,
                                              BlockEnvironment* const env, Arguments& args,
                                              BlockInvocation& invocation)
{
  VMMethod* const vmm = env->vmmethod(state);

  if(!vmm) {
    Exception::internal_error(state, previous, "invalid bytecode method");
    return 0;
  }

#ifdef ENABLE_LLVM
  // Usage-based JIT: a negative call_count disables compilation; otherwise
  // queue the block once it has been called often enough.
  if(vmm->call_count >= 0) {
    if(vmm->call_count >= state->shared.config.jit_call_til_compile) {
      LLVMState* ls = LLVMState::get(state);
      ls->compile_soon(state, env->code(), env);
    } else {
      vmm->call_count++;
    }
  }
#endif

  // Locals live on the C stack: StackVariables header plus one Object* slot
  // per local, sized and alloca'd manually here.
  size_t scope_size = sizeof(StackVariables) +
    (vmm->number_of_locals * sizeof(Object*));
  StackVariables* scope =
    reinterpret_cast<StackVariables*>(alloca(scope_size));

  // The invocation may override the module; otherwise use the block's home.
  Module* mod = invocation.module;
  if(!mod) mod = env->module();
  scope->initialize(invocation.self, env->top_scope_->block(), mod, vmm->number_of_locals);
  scope->set_parent(env->scope_);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(vmm->stack_size);

  frame->prepare(vmm->stack_size);

  frame->previous = previous;
  frame->static_scope_ = invocation.static_scope;
  frame->arguments = &args;
  frame->dispatch_data = reinterpret_cast<BlockEnvironment*>(env);
  frame->cm = env->code_;
  frame->scope = scope;
  frame->top_scope_ = env->top_scope_;
  frame->flags = invocation.flags | CallFrame::cCustomStaticScope
    | CallFrame::cMultipleScopes | CallFrame::cBlock;

  // Check the stack and interrupts here rather than in the interpreter
  // loop itself. The frame must be fully set up first since these may GC.
  if(state->detect_stack_condition(frame)) {
    if(!state->check_interrupts(frame, frame)) return NULL;
  }

  // Possible stop point for other threads holding the global lock.
  state->global_lock().checkpoint(state, frame);

  // Poll pending interrupts and service a requested GC before entering
  // the interpreter loop.
  if(unlikely(state->interrupts.check)) {
    state->interrupts.checked();
    if(state->interrupts.perform_gc) {
      state->interrupts.perform_gc = false;
      state->collect_maybe(frame);
    }
  }

#ifdef RBX_PROFILER
  if(unlikely(state->tooling())) {
    // Report the attached module, not the singleton class, to tooling.
    Module* mod = scope->module();
    if(SingletonClass* sc = try_as<SingletonClass>(mod)) {
      if(Module* ma = try_as<Module>(sc->attached_instance())) {
        mod = ma;
      }
    }

    tooling::BlockEntry method(state, env, mod);
    return (*vmm->run)(state, vmm, frame);
  } else {
    return (*vmm->run)(state, vmm, frame);
  }
#else
  return (*vmm->run)(state, vmm, frame);
#endif
}
// Runs the finalizer for the current process item, dispatching on the
// finalizer kind: a Ruby-level finalizer (send), a native (C extension)
// finalizer, or the final release of CAPI handles / remembered-set state.
// Advances process_item_->status to the corresponding finished state.
void FinalizerHandler::finalize(STATE) {
  switch(process_item_kind_) {
  case eRuby: {
    if(process_item_->ruby_finalizer) {
      // No Ruby call frame exists on this (finalizer) thread.
      CallFrame* call_frame = 0;
      // Rubinius specific code. If the finalizer is cTrue, then send the
      // object the __finalize__ message.
      if(process_item_->ruby_finalizer == cTrue) {
        process_item_->object->send(state, call_frame, state->symbol("__finalize__"));
      } else {
        // Otherwise call the finalizer with the object's id, matching
        // ObjectSpace.define_finalizer semantics.
        Array* ary = Array::create(state, 1);
        ary->set(state, 0, process_item_->object->id(state));
        process_item_->ruby_finalizer->send(state, call_frame, G(sym_call), ary);
      }
    }
    process_item_->status = FinalizeObject::eRubyFinalized;
    break;
  }
  case eNative:
    if(process_item_->finalizer) {
      // Build a minimal native-method frame so the C finalizer runs under
      // a CallFrame the GC can walk.
      NativeMethodEnvironment* env = NativeMethodEnvironment::get();
      NativeMethodFrame nmf(0, 0);
      CallFrame* call_frame = ALLOCA_CALLFRAME(0);
      call_frame->previous = 0;
      call_frame->constant_scope_ = 0;
      call_frame->dispatch_data = (void*)&nmf;
      call_frame->compiled_code = 0;
      call_frame->flags = CallFrame::cNativeMethod;
      call_frame->optional_jit_data = 0;
      call_frame->top_scope_ = 0;
      call_frame->scope = 0;
      call_frame->arguments = 0;

      // NOTE(review): passes 0 rather than call_frame here, unlike
      // NativeMethod::executor_implementation — confirm intentional.
      env->set_current_call_frame(0);
      env->set_current_native_frame(&nmf);

      // Register the CallFrame, because we might GC below this.
      state->set_call_frame(call_frame);

      nmf.setup(Qnil, Qnil, Qnil, Qnil);

      (*process_item_->finalizer)(state, process_item_->object);

      // Tear down the temporary frame registration in reverse order.
      state->set_call_frame(0);
      env->set_current_call_frame(0);
      env->set_current_native_frame(0);
    }
    process_item_->status = FinalizeObject::eNativeFinalized;
    break;
  case eRelease:
    // Unhook any handle used by fi->object so that we don't accidentally
    // try and mark it later (after we've finalized it)
    if(capi::Handle* handle = process_item_->object->handle(state)) {
      handle->forget_object();
      process_item_->object->clear_handle(state);
    }

    // If the object was remembered, unremember it.
    if(process_item_->object->remembered_p()) {
      state->memory()->unremember_object(process_item_->object);
    }
    process_item_->status = FinalizeObject::eReleased;
    break;
  }
}
// Installed by default in BlockEnvironment::execute, it runs the bytecodes
// for the block in the interpreter.
//
// Future code will detect hot blocks and queue them in the JIT, whereby the
// JIT will install a newly minted machine function into ::execute.
Object* BlockEnvironment::execute_interpreter(STATE, CallFrame* previous,
                                              BlockEnvironment* const env, Arguments& args,
                                              BlockInvocation& invocation)
{
  // Lazily materialize the backend VMMethod if the block hasn't been
  // formalized yet.
  if(!env->vmm) {
    env->method_->formalize(state, false);
    env->vmm = env->method_->backend_method();

    // Not sure why we hit this case currently, so just disable the JIT
    // for them all together.
    env->vmm->call_count = -1;
  }

  VMMethod* const vmm = env->vmm;

#ifdef ENABLE_LLVM
  // Usage-based JIT: negative call_count disables compilation.
  if(vmm->call_count >= 0) {
    if(vmm->call_count >= state->shared.config.jit_call_til_compile) {
      LLVMState* ls = LLVMState::get(state);

      if(state->shared.config.jit_inline_blocks) {
        // Walk up to the outermost enclosing method; if it's hot too,
        // compile it as well so this block can be inlined into it.
        if(VMMethod* parent = vmm->parent()) {
          while(VMMethod* next = parent->parent()) {
            parent = next;
          }

          if(parent->call_count >= 200) {
            ls->compile_soon(state, parent);
          }
        }
      }

      ls->compile_soon(state, vmm, env);
    } else {
      vmm->call_count++;
    }
  }
#endif

  // Locals live on the C stack: StackVariables header plus one Object* slot
  // per local.
  size_t scope_size = sizeof(StackVariables) +
    (vmm->number_of_locals * sizeof(Object*));
  StackVariables* scope =
    reinterpret_cast<StackVariables*>(alloca(scope_size));

  // The invocation may override the module; otherwise use the block's home.
  Module* mod = invocation.module;
  if(!mod) mod = env->module();
  scope->initialize(invocation.self, env->top_scope_->block(), mod, vmm->number_of_locals);
  scope->set_parent(env->scope_);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(vmm->stack_size);

  frame->prepare(vmm->stack_size);

  frame->previous = previous;
  frame->static_scope_ = invocation.static_scope;
  frame->msg = NULL;
  frame->cm = env->method_;
  frame->scope = scope;
  frame->top_scope_ = env->top_scope_;
  frame->flags = invocation.flags | CallFrame::cCustomStaticScope
    | CallFrame::cMultipleScopes;

#ifdef RBX_PROFILER
  if(unlikely(state->shared.profiling())) {
    // Attribute the block to its enclosing method's name for the profiler.
    profiler::MethodEntry method(state,
        env->top_scope_->method()->name(), scope->module(), env->method_);
    return (*vmm->run)(state, vmm, frame, args);
  } else {
    return (*vmm->run)(state, vmm, frame, args);
  }
#else
  return (*vmm->run)(state, vmm, frame, args);
#endif
}
// Interpreter entry point for an ordinary method call: binds arguments,
// builds the call frame, optionally queues JIT compilation, then runs the
// bytecode. Returns the method's result, or NULL on argument error or
// aborted interrupt check.
Object* MachineCode::execute_specialized(STATE, CallFrame* previous,
                                         Executable* exec, Module* mod, Arguments& args)
{
  CompiledCode* code = as<CompiledCode>(exec);
  MachineCode* mcode = code->machine_code();

  StackVariables* scope = ALLOCA_STACKVARIABLES(mcode->number_of_locals);
  // Originally, I tried using msg.module directly, but what happens is if
  // super is used, that field is read. If you combine that with the method
  // being called recursively, msg.module can change, causing super() to
  // look in the wrong place.
  //
  // Thus, we have to cache the value in the StackVariables.
  scope->initialize(args.recv(), args.block(), mod, mcode->number_of_locals);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(mcode->stack_size);

  // If argument handling fails..
  if(ArgumentHandler::call(state, mcode, scope, args) == false) {
    Exception* exc =
      Exception::make_argument_error(state, mcode->total_args, args.total(),
                                     args.name());
    exc->locations(state, Location::from_call_stack(state, previous));
    state->raise_exception(exc);
    return NULL;
  }

  frame->prepare(mcode->stack_size);

  frame->previous = previous;
  frame->constant_scope_ = 0;
  frame->dispatch_data = 0;
  frame->compiled_code = code;
  frame->flags = 0;
  frame->optional_jit_data = 0;
  frame->top_scope_ = 0;
  frame->scope = scope;
  frame->arguments = &args;

  GCTokenImpl gct;

#ifdef ENABLE_LLVM
  // A negative call_count means we've disabled usage based JIT
  // for this method.
  if(mcode->call_count >= 0) {
    if(mcode->call_count >= state->shared().config.jit_call_til_compile) {
      LLVMState* ls = LLVMState::get(state);

      // Root the handles across the allocation-capable compile call.
      OnStack<3> os(state, exec, mod, code);

      ls->compile_callframe(state, gct, code, frame);
    } else {
      mcode->call_count++;
    }
  }
#endif

  // Root exec/mod/code for the remainder of the call (checkpoints may GC).
  OnStack<3> os(state, exec, mod, code);

#ifdef RBX_PROFILER
  if(unlikely(state->vm()->tooling())) {
    // Check the stack and interrupts here rather than in the interpreter
    // loop itself.
    if(!state->check_interrupts(gct, frame, frame)) return NULL;

    state->checkpoint(gct, frame);

    tooling::MethodEntry method(state, exec, mod, args, code);

    // Static dtrace/systemtap probes around the bytecode run.
    RUBINIUS_METHOD_ENTRY_HOOK(state, mod, args.name(), previous);
    Object* result = (*mcode->run)(state, mcode, frame);
    RUBINIUS_METHOD_RETURN_HOOK(state, mod, args.name(), previous);
    return result;
  } else {
    if(!state->check_interrupts(gct, frame, frame)) return NULL;

    state->checkpoint(gct, frame);

    RUBINIUS_METHOD_ENTRY_HOOK(state, mod, args.name(), previous);
    Object* result = (*mcode->run)(state, mcode, frame);
    RUBINIUS_METHOD_RETURN_HOOK(state, mod, args.name(), previous);
    return result;
  }
#else
  if(!state->check_interrupts(gct, frame, frame)) return NULL;

  state->checkpoint(gct, frame);

  RUBINIUS_METHOD_ENTRY_HOOK(state, mod, args.name(), previous);
  Object* result = (*mcode->run)(state, mcode, frame);
  RUBINIUS_METHOD_RETURN_HOOK(state, mod, args.name(), previous);
  return result;
#endif
}
// Installed by default in BlockEnvironment::execute, it runs the bytecodes
// for the block in the interpreter.
//
// Future code will detect hot blocks and queue them in the JIT, whereby the
// JIT will install a newly minted machine function into ::execute.
Object* BlockEnvironment::execute_interpreter(STATE, CallFrame* previous,
                                              BlockEnvironment* env, Arguments& args,
                                              BlockInvocation& invocation)
{
  // Don't use env->machine_code() because it might lock and the work should
  // already be done.
  MachineCode* const mcode = env->compiled_code_->machine_code();

  if(!mcode) {
    Exception::internal_error(state, previous, "invalid bytecode method");
    return 0;
  }

#ifdef ENABLE_LLVM
  // Usage-based JIT: negative call_count disables compilation for this block.
  if(mcode->call_count >= 0) {
    if(mcode->call_count >= state->shared().config.jit_call_til_compile) {
      LLVMState* ls = LLVMState::get(state);
      GCTokenImpl gct;
      // Root env across the allocation-capable compile_soon call.
      OnStack<1> os(state, env);
      ls->compile_soon(state, gct, env->compiled_code(), previous,
                       invocation.self->lookup_begin(state), env, true);
    } else {
      mcode->call_count++;
    }
  }
#endif

  StackVariables* scope = ALLOCA_STACKVARIABLES(mcode->number_of_locals);

  // The invocation may override the module; otherwise use the block's home.
  Module* mod = invocation.module;
  if(!mod) mod = env->module();
  scope->initialize(invocation.self, env->top_scope_->block(), mod, mcode->number_of_locals);
  scope->set_parent(env->scope_);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(mcode->stack_size);

  frame->prepare(mcode->stack_size);

  frame->previous = previous;
  frame->constant_scope_ = invocation.constant_scope;
  frame->arguments = &args;
  frame->dispatch_data = env;
  frame->compiled_code = env->compiled_code_;
  frame->scope = scope;
  frame->top_scope_ = env->top_scope_;
  frame->flags = invocation.flags | CallFrame::cCustomConstantScope
    | CallFrame::cMultipleScopes | CallFrame::cBlock;

  // TODO: this is a quick hack to process block arguments in 1.9.
  if(!LANGUAGE_18_ENABLED(state)) {
    if(!GenericArguments::call(state, frame, mcode, scope, args, invocation.flags)) {
      return NULL;
    }
  }

#ifdef RBX_PROFILER
  if(unlikely(state->vm()->tooling())) {
    // Report the attached module, not the singleton class, to tooling.
    Module* mod = scope->module();
    if(SingletonClass* sc = try_as<SingletonClass>(mod)) {
      if(Module* ma = try_as<Module>(sc->attached_instance())) {
        mod = ma;
      }
    }

    OnStack<2> os(state, env, mod);

    // Check the stack and interrupts here rather than in the interpreter
    // loop itself.
    GCTokenImpl gct;
    if(!state->check_interrupts(gct, frame, frame)) return NULL;

    state->checkpoint(gct, frame);

    tooling::BlockEntry method(state, env, mod);
    return (*mcode->run)(state, mcode, frame);
  } else {
    // Check the stack and interrupts here rather than in the interpreter
    // loop itself.
    GCTokenImpl gct;
    if(!state->check_interrupts(gct, frame, frame)) return NULL;

    state->checkpoint(gct, frame);

    return (*mcode->run)(state, mcode, frame);
  }
#else
  // Check the stack and interrupts here rather than in the interpreter
  // loop itself.
  GCTokenImpl gct;
  if(!state->check_interrupts(gct, frame, frame)) return NULL;

  state->checkpoint(gct, frame);

  return (*mcode->run)(state, mcode, frame);
#endif
}
// Installed by default in BlockEnvironment::execute, it runs the bytecodes
// for the block in the interpreter.
//
// Future code will detect hot blocks and queue them in the JIT, whereby the
// JIT will install a newly minted machine function into ::execute.
Object* BlockEnvironment::execute_interpreter(STATE, CallFrame* previous,
                                              BlockEnvironment* env, Arguments& args,
                                              BlockInvocation& invocation)
{
  // Don't use env->vmmethod() because it might lock and the work should already
  // be done.
  VMMethod* const vmm = env->code_->backend_method();

  if(!vmm) {
    Exception::internal_error(state, previous, "invalid bytecode method");
    return 0;
  }

#ifdef ENABLE_LLVM
  // Usage-based JIT: negative call_count disables compilation for this block.
  if(vmm->call_count >= 0) {
    if(vmm->call_count >= state->shared().config.jit_call_til_compile) {
      LLVMState* ls = LLVMState::get(state);
      ls->compile_soon(state, env->code(), env, true);
    } else {
      vmm->call_count++;
    }
  }
#endif

  // Locals live on the C stack: StackVariables header plus one Object* slot
  // per local.
  size_t scope_size = sizeof(StackVariables) +
    (vmm->number_of_locals * sizeof(Object*));
  StackVariables* scope =
    reinterpret_cast<StackVariables*>(alloca(scope_size));

  // The invocation may override the module; otherwise use the block's home.
  Module* mod = invocation.module;
  if(!mod) mod = env->module();
  scope->initialize(invocation.self, env->top_scope_->block(), mod, vmm->number_of_locals);
  scope->set_parent(env->scope_);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(vmm->stack_size);

  frame->prepare(vmm->stack_size);

  frame->previous = previous;
  frame->static_scope_ = invocation.static_scope;
  frame->arguments = &args;
  frame->dispatch_data = reinterpret_cast<BlockEnvironment*>(env);
  frame->cm = env->code_;
  frame->scope = scope;
  frame->top_scope_ = env->top_scope_;
  frame->flags = invocation.flags | CallFrame::cCustomStaticScope
    | CallFrame::cMultipleScopes | CallFrame::cBlock;
  frame->stack_top_ptr_ptr = NULL;

  // TODO: this is a quick hack to process block arguments in 1.9.
  if(!LANGUAGE_18_ENABLED(state)) {
    if(!GenericArguments::call(state, frame, vmm, scope, args, invocation.flags)) {
      return NULL;
    }
  }

  // Check the stack and interrupts here rather than in the interpreter
  // loop itself.
  GCTokenImpl gct;
  if(state->detect_stack_condition(frame)) {
    if(!state->check_interrupts(gct, frame, frame)) return NULL;
  }

  state->checkpoint(gct, frame);

#ifdef RBX_PROFILER
  if(unlikely(state->vm()->tooling())) {
    // Report the attached module, not the singleton class, to tooling.
    Module* mod = scope->module();
    if(SingletonClass* sc = try_as<SingletonClass>(mod)) {
      if(Module* ma = try_as<Module>(sc->attached_instance())) {
        mod = ma;
      }
    }

    tooling::BlockEntry method(state, env, mod);
    return (*vmm->run)(state, vmm, frame);
  } else {
    return (*vmm->run)(state, vmm, frame);
  }
#else
  return (*vmm->run)(state, vmm, frame);
#endif
}
// Interpreter entry point for an ordinary method call (Dispatch-based
// variant): binds arguments, builds the call frame, optionally queues JIT
// compilation, then runs the bytecode. Returns the method's result, or
// NULL on argument error.
Object* VMMethod::execute_specialized(STATE, CallFrame* previous,
                                      Dispatch& msg, Arguments& args)
{
  CompiledMethod* cm = as<CompiledMethod>(msg.method);
  VMMethod* vmm = cm->backend_method();

#ifdef ENABLE_LLVM
  // A negative call_count means we've disabled usage based JIT
  // for this method.
  if(vmm->call_count >= 0) {
    if(vmm->call_count >= state->shared.config.jit_call_til_compile) {
      LLVMState* ls = LLVMState::get(state);
      ls->compile_callframe(state, cm, previous);
    } else {
      vmm->call_count++;
    }
  }
#endif

  // Locals live on the C stack: StackVariables header plus one Object* slot
  // per local.
  size_t scope_size = sizeof(StackVariables) +
    (vmm->number_of_locals * sizeof(Object*));
  StackVariables* scope =
    reinterpret_cast<StackVariables*>(alloca(scope_size));
  // Originally, I tried using msg.module directly, but what happens is if
  // super is used, that field is read. If you combine that with the method
  // being called recursively, msg.module can change, causing super() to
  // look in the wrong place.
  //
  // Thus, we have to cache the value in the StackVariables.
  scope->initialize(args.recv(), args.block(), msg.module, vmm->number_of_locals);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(vmm->stack_size);

  // If argument handling fails..
  if(ArgumentHandler::call(state, vmm, scope, args) == false) {
    Exception* exc =
      Exception::make_argument_error(state, vmm->required_args, args.total(), msg.name);
    exc->locations(state, Location::from_call_stack(state, previous));
    state->thread_state()->raise_exception(exc);
    return NULL;
  }

  frame->prepare(vmm->stack_size);

  frame->previous = previous;
  frame->flags = 0;
  frame->arguments = &args;
  frame->dispatch_data = &msg;
  frame->cm = cm;
  frame->scope = scope;

#ifdef RBX_PROFILER
  if(unlikely(state->shared.profiling())) {
    profiler::MethodEntry method(state, msg, args, cm);
    return (*vmm->run)(state, vmm, frame);
  } else {
    return (*vmm->run)(state, vmm, frame);
  }
#else
  return (*vmm->run)(state, vmm, frame);
#endif
}
// Entry point for calling a C-extension method: checks arity, builds a
// native-method call frame the GC can walk, hands CAPI handles for the
// receiver/block/method/module to the extension, and invokes it under an
// exception point so longjmp-style CAPI raises are caught. Returns the
// method's result, or NULL on argument error / raised exception / pending
// signal.
Object* NativeMethod::executor_implementation(STATE, CallFrame* previous,
                                              Executable* exec, Module* mod, Arguments& args)
{
  NativeMethod* nm = as<NativeMethod>(exec);

  // A negative arity means variadic; only fixed arities are checked here.
  int arity = nm->arity()->to_int();
  if(arity >= 0 && (size_t)arity != args.total()) {
    Exception* exc = Exception::make_argument_error(
        state, arity, args.total(), args.name());
    exc->locations(state, Location::from_call_stack(state, previous));
    state->raise_exception(exc);
    return NULL;
  }

  NativeMethodEnvironment* env = native_method_environment.get();

  // Optionally get the handles back to the proper state.
  if(state->shared().config.capi_global_flush) {
    std::list<capi::Handle*>* handles = env->state()->memory()->cached_capi_handles();

    for(std::list<capi::Handle*>::iterator i = handles->begin();
        i != handles->end();
        ++i) {
      (*i)->update(env);
    }
  }

  // Chain a new native frame onto the current one.
  NativeMethodFrame nmf(env->current_native_frame());
  CallFrame* call_frame = ALLOCA_CALLFRAME(0);
  call_frame->previous = previous;
  call_frame->constant_scope_ = 0;
  call_frame->dispatch_data = (void*)&nmf;
  call_frame->compiled_code = 0;
  call_frame->flags = CallFrame::cNativeMethod;
  call_frame->optional_jit_data = 0;
  call_frame->top_scope_ = 0;
  call_frame->scope = 0;
  call_frame->arguments = &args;

  // Save the caller's frame so it can be restored after the extension runs.
  CallFrame* saved_frame = env->current_call_frame();
  env->set_current_call_frame(call_frame);
  env->set_current_native_frame(&nmf);

  // Register the CallFrame, because we might GC below this.
  state->set_call_frame(call_frame);

  // Be sure to do this after installing nmf as the current
  // native frame.
  nmf.setup(
      env->get_handle(args.recv()),
      env->get_handle(args.block()),
      env->get_handle(exec),
      env->get_handle(mod));

  // We've got things setup (they can be GC'd properly), so we need to
  // wait before entering the extension code.
  ENTER_CAPI(state);

  Object* ret;
  ExceptionPoint ep(env);

#ifdef RBX_PROFILER
  // This is organized like this so that we don't jump past the destructor of
  // MethodEntry. It's duplicated, but it's much easier to understand than
  // trying to de-dup it.

  OnStack<2> os(state, exec, mod);
  if(unlikely(state->vm()->tooling())) {
    tooling::MethodEntry method(state, exec, mod, args);
    RUBINIUS_METHOD_NATIVE_ENTRY_HOOK(state, mod, args.name(), call_frame);

    // If the extension raises, control longjmps back here with NULL.
    PLACE_EXCEPTION_POINT(ep);

    if(unlikely(ep.jumped_to())) {
      ret = NULL;
    } else {
      ret = ArgumentHandler::invoke(state, nm, env, args);
    }
    RUBINIUS_METHOD_NATIVE_RETURN_HOOK(state, mod, args.name(), call_frame);
  } else {
    RUBINIUS_METHOD_NATIVE_ENTRY_HOOK(state, mod, args.name(), call_frame);

    PLACE_EXCEPTION_POINT(ep);

    if(unlikely(ep.jumped_to())) {
      ret = NULL;
    } else {
      ret = ArgumentHandler::invoke(state, nm, env, args);
    }
    RUBINIUS_METHOD_NATIVE_RETURN_HOOK(state, mod, args.name(), call_frame);
  }
#else
  RUBINIUS_METHOD_NATIVE_ENTRY_HOOK(state, mod, args.name(), call_frame);

  PLACE_EXCEPTION_POINT(ep);

  if(unlikely(ep.jumped_to())) {
    ret = NULL;
  } else {
    ret = ArgumentHandler::invoke(state, nm, env, args);
  }
  RUBINIUS_METHOD_NATIVE_RETURN_HOOK(state, mod, args.name(), call_frame);
#endif

  // Restore the caller's frames and unwind the exception point / CAPI state.
  env->set_current_call_frame(saved_frame);
  env->set_current_native_frame(nmf.previous());
  ep.pop(env);

  LEAVE_CAPI(state);

  // Root ret across the allocation-capable check_async call.
  OnStack<1> os_ret(state, ret);

  // Handle any signals that occurred while the native method
  // was running.
  if(!state->check_async(call_frame)) return NULL;

  return ret;
}