// Trampoline invoked by the dispatcher to run a C-API native method.
// Checks arity, pushes a NativeMethodFrame, installs the exception point
// used by the C-API's longjmp-based error raising, calls the function,
// and restores the environment exactly as it was found.
Object* NativeMethod::executor_implementation(STATE, CallFrame* call_frame, Dispatch& msg, Arguments& args) {
  NativeMethod* native = as<NativeMethod>(msg.method);

  // Fixed (non-negative) arities must match the argument count exactly;
  // negative special arities accept variable counts and skip this check.
  int declared_arity = native->arity()->to_int();
  if(declared_arity >= 0 && (size_t)declared_arity != args.total()) {
    Exception* exc = Exception::make_argument_error(
        state, declared_arity, args.total(), msg.name);
    exc->locations(state, System::vm_backtrace(state, Fixnum::from(1), call_frame));
    state->thread_state()->raise_exception(exc);
    return NULL;
  }

  NativeMethodEnvironment* env = native_method_environment.get();

  // Push a fresh native frame, capturing everything we must restore on exit.
  NativeMethodFrame frame(env->current_native_frame());
  CallFrame* prev_call_frame = env->current_call_frame();
  Object* prev_block = env->block();

  env->set_current_call_frame(call_frame);
  env->set_current_native_frame(&frame);
  env->set_current_block(args.block());

  Object* result;
  ExceptionPoint ep(env);
  PLACE_EXCEPTION_POINT(ep);

  if(unlikely(ep.jumped_to())) {
    // The C function raised via the C-API and longjmp'd back here;
    // NULL tells the caller an exception is pending.
    result = NULL;
  } else {
#ifdef RBX_PROFILER
    if(unlikely(state->shared.profiling())) {
      profiler::MethodEntry method(state, msg, args);
      result = native->call(state, env, args);
    } else {
      result = native->call(state, env, args);
    }
#else
    result = native->call(state, env, args);
#endif
  }

  // Unwind in reverse: restore block, call frame, native frame, then
  // pop the exception point.
  env->set_current_block(prev_block);
  env->set_current_call_frame(prev_call_frame);
  env->set_current_native_frame(frame.previous());
  ep.pop(env);

  return result;
}
namespace rubinius {

  // Class-wide flags/limits shared by all VM instances.
  bool GlobalLock::debug_locking = false;
  int VM::cStackDepthMax = 655300;

  /**
   * Construct a VM bound to the process-wide SharedState. Most fields
   * start out empty/false; real setup happens in initialize()/boot().
   */
  VM::VM(SharedState& shared)
    : ManagedThread(shared)
    , saved_call_frame_(0)
    , stack_start_(0)
    , profiler_(0)
    , run_signals_(false)
    , shared(shared)
    , waiter_(NULL)
    , om(shared.om)
    , interrupts(shared.interrupts)
    , check_local_interrupts(false)
    , thread_state_(this)
    , thread(this, (Thread*)Qnil)
    , current_fiber(this, (Fiber*)Qnil)
  {
    probe.set(Qnil, &globals().roots);
    set_stack_size(cStackDepthMax);
  }

  /**
   * Tear down a VM: detach its profiler (if any), unregister it from the
   * shared state, and delete it. Static because it destroys +vm+ itself.
   */
  void VM::discard(VM* vm) {
    vm->saved_call_frame_ = 0;
    if(vm->profiler_) {
      vm->shared.remove_profiler(vm, vm->profiler_);
    }

    vm->shared.remove_vm(vm);
    delete vm;
  }

  /**
   * First-stage setup: register this VM as the global state and create
   * the ObjectMemory, publishing it into the shared state.
   */
  void VM::initialize() {
    VM::register_state(this);

    om = new ObjectMemory(this, shared.config);
    shared.om = om;

    /** @todo Done by Environment::boot_vm(), and Thread::s_new()
     *        does not boot at all. Should this be removed? --rue */
    // this->boot();

    shared.set_initialized();

    // This seems like we should do this in VM(), ie, for every VM and
    // therefore every Thread object in the process. But in fact, because
    // we're using the GIL atm, we only do it once. When the GIL goes
    // away, this needs to be moved to VM().
    shared.gc_dependent();
  }

  /**
   * Second-stage setup: learn type layouts, build the core ontology,
   * initialize the interpreter and create the initial Thread.
   */
  void VM::boot() {
    TypeInfo::auto_learn_fields(this);

    bootstrap_ontology();

    VMMethod::init(this);

    /** @todo Should a thread be starting a VM or is it the other way around? */
    boot_threads();

    // Force these back to false because creating the default Thread
    // turns preemption on as a side effect.
    interrupts.enable_preempt = false;

    GlobalLock::debug_locking = shared.config.gil_debug;
  }

  /**
   * Publish build/runtime configuration (interpreter flavor, JIT flags)
   * as constants under the Rubinius module.
   */
  void VM::initialize_config() {
#ifdef USE_DYNAMIC_INTERPRETER
    if(shared.config.dynamic_interpreter_enabled) {
      G(rubinius)->set_const(this, "INTERPRETER", symbol("dynamic"));
    } else {
      G(rubinius)->set_const(this, "INTERPRETER", symbol("static"));
    }
#else
    G(rubinius)->set_const(this, "INTERPRETER", symbol("static"));
#endif

#ifdef ENABLE_LLVM
    if(!shared.config.jit_disabled) {
      Array* ary = Array::create(this, 3);
      ary->append(this, symbol("usage"));
      if(shared.config.jit_inline_generic) {
        ary->append(this, symbol("inline_generic"));
      }
      if(shared.config.jit_inline_blocks) {
        ary->append(this, symbol("inline_blocks"));
      }

      G(rubinius)->set_const(this, "JIT", ary);
    } else {
      G(rubinius)->set_const(this, "JIT", Qfalse);
    }
#else
    G(rubinius)->set_const(this, "JIT", Qnil);
#endif
  }

  // HACK so not thread safe or anything!
  static VM* __state = NULL;

  // Process-global "current state" accessor (see HACK note above).
  VM* VM::current_state() {
    return __state;
  }

  void VM::register_state(VM *vm) {
    __state = vm;
  }

  // Thread-local mapping from OS thread to its VM instance.
  thread::ThreadData<VM*> _current_vm;

  VM* VM::current() {
    return _current_vm.get();
  }

  void VM::set_current(VM* vm) {
    _current_vm.set(vm);
  }

  // Create the initial Ruby Thread mirroring the booting pthread and
  // bind this VM to the calling OS thread.
  void VM::boot_threads() {
    thread.set(Thread::create(this, this, pthread_self()), &globals().roots);

    thread->sleep(this, Qfalse);

    VM::set_current(this);
  }

  // Allocation entry points — all delegate to ObjectMemory.
  Object* VM::new_object_typed(Class* cls, size_t bytes, object_type type) {
    return om->new_object_typed(cls, bytes, type);
  }

  Object* VM::new_object_typed_mature(Class* cls, size_t bytes, object_type type) {
    return om->new_object_typed_mature(cls, bytes, type);
  }

  Object* VM::new_object_from_type(Class* cls, TypeInfo* ti) {
    return om->new_object_typed(cls, ti->instance_size, ti->type);
  }

  /**
   * Allocate a bare Class (enduring, so it survives young GC) with a new
   * class id. Instance type/type-info are inherited from +sup+, or
   * default to ObjectType when +sup+ is nil.
   */
  Class* VM::new_basic_class(Class* sup) {
    Class *cls = om->new_object_enduring<Class>(G(klass));
    cls->set_class_id(shared.inc_class_count());
    cls->set_packed_size(0);

    if(sup->nil_p()) {
      cls->instance_type(this, Fixnum::from(ObjectType));
      cls->set_type_info(find_type(ObjectType));
    } else {
      cls->instance_type(this, sup->instance_type()); // HACK test that this is always true
      cls->set_type_info(sup->type_info());
    }
    cls->superclass(this, sup);

    return cls;
  }

  // new_class overloads: default superclass and/or enclosing scope to Object.
  Class* VM::new_class(const char* name) {
    return new_class(name, G(object), G(object));
  }

  Class* VM::new_class(const char* name, Class* super_class) {
    return new_class(name, super_class, G(object));
  }

  Class* VM::new_class(const char* name, Class* sup, Module* under) {
    Class* cls = new_basic_class(sup);
    cls->setup(this, name, under);

    // HACK test that we've got the MOP setup properly
    MetaClass::attach(this, cls, sup->metaclass(this));
    return cls;
  }

  Class* VM::new_class_under(const char* name, Module* under) {
    return new_class(name, G(object), under);
  }

  Module* VM::new_module(const char* name, Module* under) {
    Module *mod = new_object<Module>(G(module));
    mod->setup(this, name, under);
    return mod;
  }

  // Symbol interning — both overloads go through the shared symbol table.
  Symbol* VM::symbol(const char* str) {
    return shared.symbols.lookup(this, str);
  }

  Symbol* VM::symbol(String* str) {
    return shared.symbols.lookup(this, str);
  }

  /**
   * Raise a TypeError unless +obj+ has the expected type. Fixnum needs a
   * special case because fixnums are tagged values, not references.
   */
  void type_assert(STATE, Object* obj, object_type type, const char* reason) {
    if((obj->reference_p() && obj->type_id() != type)
        || (type == FixnumType && !obj->fixnum_p())) {
      Exception::type_error(state, type, obj, reason);
    }
  }

  // Raise the preallocated StackError (allocating here could recurse).
  void VM::raise_stack_error(CallFrame* call_frame) {
    G(stack_error)->locations(this, System::vm_backtrace(this, Fixnum::from(0), call_frame));
    thread_state()->raise_exception(G(stack_error));
  }

  /**
   * Derive the usable stack depth from the process RLIMIT_STACK, leaving
   * 20% (capped at 1MB) of headroom for the error-handling machinery.
   */
  void VM::init_stack_size() {
    struct rlimit rlim;
    if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
      unsigned int space = rlim.rlim_cur/5;

      if (space > 1024*1024) space = 1024*1024;
      cStackDepthMax = (rlim.rlim_cur - space);
    }
  }

  TypeInfo* VM::find_type(int type) {
    return om->type_info[type];
  }

  Thread *VM::current_thread() {
    return globals().current_thread.get();
  }

  // Request a full GC at the next safe point.
  void VM::run_gc_soon() {
    om->collect_young_now = true;
    om->collect_mature_now = true;
    interrupts.set_perform_gc();
  }

  /**
   * Unconditionally run young + mature collections and finalizers.
   * Stops all other threads for the duration.
   */
  void VM::collect(CallFrame* call_frame) {
    this->set_call_frame(call_frame);

    // Don't go any further unless we're allowed to GC.
    if(!om->can_gc()) return;

    // Stops all other threads, so we're only here by ourselves.
    StopTheWorld guard(this);

    GCData gc_data(this);

    om->collect_young(gc_data);
    om->collect_mature(gc_data);

    om->run_finalizers(this);
  }

  /**
   * Run whichever collections have been flagged (collect_young_now /
   * collect_mature_now), with optional timing output and profiler hooks.
   */
  void VM::collect_maybe(CallFrame* call_frame) {
    this->set_call_frame(call_frame);

    // Don't go any further unless we're allowed to GC.
    if(!om->can_gc()) return;

    // Stops all other threads, so we're only here by ourselves.
    StopTheWorld guard(this);

    GCData gc_data(this);

    uint64_t start_time = 0;

    if(om->collect_young_now) {
      if(shared.config.gc_show) {
        start_time = get_current_time();
      }

      YoungCollectStats stats;

#ifdef RBX_PROFILER
      if(unlikely(shared.profiling())) {
        profiler::MethodEntry method(this, profiler::kYoungGC);
        om->collect_young(gc_data, &stats);
      } else {
        om->collect_young(gc_data, &stats);
      }
#else
      om->collect_young(gc_data, &stats);
#endif

      if(shared.config.gc_show) {
        uint64_t fin_time = get_current_time();
        int diff = (fin_time - start_time) / 1000000;

        fprintf(stderr, "[GC %0.1f%% %d/%d %d %2dms]\n",
                stats.percentage_used, stats.promoted_objects,
                stats.excess_objects, stats.lifetime, diff);
      }
    }

    if(om->collect_mature_now) {
      int before_kb = 0;

      if(shared.config.gc_show) {
        start_time = get_current_time();
        before_kb = om->mature_bytes_allocated() / 1024;
      }

#ifdef RBX_PROFILER
      if(unlikely(shared.profiling())) {
        profiler::MethodEntry method(this, profiler::kMatureGC);
        om->collect_mature(gc_data);
      } else {
        om->collect_mature(gc_data);
      }
#else
      om->collect_mature(gc_data);
#endif

      if(shared.config.gc_show) {
        uint64_t fin_time = get_current_time();
        int diff = (fin_time - start_time) / 1000000;
        int kb = om->mature_bytes_allocated() / 1024;

        fprintf(stderr, "[Full GC %dkB => %dkB %2dms]\n", before_kb, kb, diff);
      }
    }

    om->run_finalizers(this);
  }

  // Define a top-level constant (on Object) or one scoped under +mod+.
  void VM::set_const(const char* name, Object* val) {
    globals().object->set_const(this, (char*)name, val);
  }

  void VM::set_const(Module* mod, const char* name, Object* val) {
    mod->set_const(this, (char*)name, val);
  }

  // Placeholder: backtrace printing is not implemented for this build.
  void VM::print_backtrace() {
    abort();
  }

  // Park/wake support: another thread may call wakeup() to run and clear
  // the installed waiter.
  void VM::install_waiter(Waiter& waiter) {
    waiter_ = &waiter;
  }

  bool VM::wakeup() {
    if(waiter_) {
      waiter_->run();
      waiter_ = NULL;
      return true;
    }

    return false;
  }

  void VM::clear_waiter() {
    waiter_ = NULL;
  }

  /**
   * Service pending asynchronous events (signals). Returns false when an
   * exception/raise is pending and the caller should unwind.
   */
  bool VM::process_async(CallFrame* call_frame) {
    check_local_interrupts = false;

    if(run_signals_) {
      shared.signal_handler()->deliver_signals(call_frame);
    }

    if(thread_state_.raise_reason() != cNone) return false;

    return true;
  }

  void VM::register_raise(Exception* exc) {
    thread_state_.raise_exception(exc);
    check_local_interrupts = true;
  }

  // Sanity check: called where an exception must be pending; aborts if not.
  void VM::check_exception(CallFrame* call_frame) {
    if(thread_state()->raise_reason() == cNone) {
      std::cout << "Exception propogating, but none registered!\n";
      call_frame->print_backtrace(this);
      rubinius::abort();
    }
  }

  // Lazily create this VM's profiler and register it with the shared state.
  profiler::Profiler* VM::profiler() {
    if(unlikely(!profiler_)) {
      profiler_ = new profiler::Profiler(this);
      shared.add_profiler(this, profiler_);
    }

    return profiler_;
  }

  // NOTE(review): only drops the pointer; ownership/deletion is presumably
  // handled by SharedState — confirm against remove_profiler's contract.
  void VM::remove_profiler() {
    profiler_ = 0;
  }

  // Switch this VM onto a Fiber's stack bounds and record it as current.
  void VM::set_current_fiber(Fiber* fib) {
    set_stack_start(fib->stack());
    set_stack_size(fib->stack_size());
    current_fiber.set(fib);
  }

};
// Bind +vm+ as the VM instance for the calling OS thread.
void VM::set_current(VM* vm) {
  _current_vm.set(vm);
}
// Fetch the VM instance bound to the calling OS thread.
VM* VM::current() {
  return _current_vm.get();
}
/**
 * Dispatcher trampoline for a C-API native method: checks arity, syncs
 * cached C-API handles, pushes a NativeMethodFrame with an exception
 * point, invokes the function via ArgumentHandler, then restores state.
 *
 * NOTE(review): this body references ArgumentHandler but no template
 * header is visible here — presumably it belongs to an enclosing
 * template context; confirm against the full file.
 */
Object* NativeMethod::executor_implementation(STATE, CallFrame* call_frame, Dispatch& msg, Arguments& args) {
  NativeMethod* nm = as<NativeMethod>(msg.method);

  // Fixed (non-negative) arities must match the argument count exactly.
  int arity = nm->arity()->to_int();
  if(arity >= 0 && (size_t)arity != args.total()) {
    Exception* exc = Exception::make_argument_error(
        state, arity, args.total(), msg.name);
    exc->locations(state, Location::from_call_stack(state, call_frame));
    state->thread_state()->raise_exception(exc);
    return NULL;
  }

  NativeMethodEnvironment* env = native_method_environment.get();

  // Optionally get the handles back to the proper state.
  if(state->shared.config.capi_global_flush) {
    capi::Handles* handles = state->shared.cached_handles();
    if(handles->size() > 0) {
      for(capi::Handles::Iterator i(*handles); i.more(); i.advance()) {
        i->update(env);
      }
    }
  }

  // Register the CallFrame, because we might GC below this.
  state->set_call_frame(call_frame);

  NativeMethodFrame nmf(env->current_native_frame());
  CallFrame* saved_frame = env->current_call_frame();
  env->set_current_call_frame(call_frame);
  env->set_current_native_frame(&nmf);

  // Be sure to do this after installing nmf as the current
  // native frame, so the handles are tracked by this frame.
  nmf.setup(
      env->get_handle(args.recv()),
      env->get_handle(args.block()),
      env->get_handle(msg.method),
      env->get_handle(msg.module));

  Object* ret;
  ExceptionPoint ep(env);
  PLACE_EXCEPTION_POINT(ep);

  if(unlikely(ep.jumped_to())) {
    // A C-API raise longjmp'd back here; NULL signals a pending exception.
    ret = NULL;
  } else {
#ifdef RBX_PROFILER
    if(unlikely(state->tooling())) {
      tooling::MethodEntry method(state, msg, args);
      ret = ArgumentHandler::invoke(state, nm, env, args);
    } else {
      ret = ArgumentHandler::invoke(state, nm, env, args);
    }
#else
    ret = ArgumentHandler::invoke(state, nm, env, args);
#endif
  }

  // Restore saved state in reverse order, then pop the exception point.
  env->set_current_call_frame(saved_frame);
  env->set_current_native_frame(nmf.previous());
  ep.pop(env);

  // Handle any signals that occurred while the native method
  // was running.
  if(!state->check_async(call_frame)) return NULL;

  return ret;
}
// Accessor for the calling thread's NativeMethodEnvironment.
NativeMethodEnvironment* NativeMethodEnvironment::get() {
  return native_method_environment.get();
}
namespace rubinius {

  /** Thread-local NativeMethodEnvironment instance. */
  thread::ThreadData<NativeMethodEnvironment*> native_method_environment;

  /* Class methods */

  NativeMethodEnvironment* NativeMethodEnvironment::get() {
    return native_method_environment.get();
  }

  // Flush any cached C data back into managed objects, then drop this
  // frame's reference on every handle it tracked.
  NativeMethodFrame::~NativeMethodFrame() {
    flush_cached_data();
    for(capi::HandleSet::iterator i = handles_.begin();
        i != handles_.end();
        i++) {
      capi::Handle* handle = *i;
      handle->deref();
    }
  }

  /**
   * Track +handle+ in this frame, marking the frame for flushing when
   * +need_update+ is set.
   */
  void NativeMethodFrame::check_tracked_handle(capi::Handle* handle, bool need_update) {
    if(need_update) {
      check_handles_ = true;
    }

    // ref() ONLY if it's not already in there!
    // otherwise the refcount is wrong and we leak handles.
    capi::HandleSet::iterator pos = handles_.find(handle);
    if(pos == handles_.end()) {
      // We're seeing this object for the first time in this function.
      // Be sure that it's updated.
      handle->ref();
      handles_.insert(handle);
      handle->update(NativeMethodEnvironment::get());
    }
  }

  /**
   * Return (creating if necessary) the C-API VALUE handle for +obj+.
   * New handles are registered globally; either way the handle is
   * ref'd into this frame exactly once.
   */
  VALUE NativeMethodFrame::get_handle(STATE, Object* obj) {
    InflatedHeader* ih = state->om->inflate_header(obj);

    capi::Handle* handle = ih->handle();
    if(handle) {
      // ref() ONLY if it's not already in there!
      // otherwise the refcount is wrong and we leak handles.
      capi::HandleSet::iterator pos = handles_.find(handle);
      if(pos == handles_.end()) {
        // We're seeing this object for the first time in this function.
        // Be sure that it's updated.
        handle->ref();
        handles_.insert(handle);
        handle->update(NativeMethodEnvironment::get());
      }
    } else {
      handle = new capi::Handle(state, obj);
      ih->set_handle(handle);

      state->shared.global_handles()->add(handle);

      handle->ref();
      handles_.insert(handle);
    }

    return handle->as_value();
  }

  Object* NativeMethodFrame::get_object(VALUE val) {
    return capi::Handle::from(val)->object();
  }

  /**
   * Push cached C-side data back into the managed heap: this frame's
   * tracked handles (when flagged), and optionally the shared cache.
   */
  void NativeMethodFrame::flush_cached_data() {
    NativeMethodEnvironment* env = NativeMethodEnvironment::get();

    if(check_handles_) {
      for(capi::HandleSet::iterator i = handles_.begin();
          i != handles_.end();
          i++) {
        capi::Handle* handle = *i;
        handle->flush(env);
      }
    }

    if(env->state()->shared.config.capi_global_flush) {
      capi::Handles* handles = env->state()->shared.cached_handles();

      if(handles->size() > 0) {
        for(capi::Handles::Iterator i(*handles); i.more(); i.advance()) {
          i->flush(env);
        }
      }
    }
  }

  /**
   * Mirror of flush_cached_data in the other direction: refresh the
   * C-side caches from the managed heap.
   */
  void NativeMethodFrame::update_cached_data() {
    NativeMethodEnvironment* env = NativeMethodEnvironment::get();

    if(check_handles_) {
      for(capi::HandleSet::iterator i = handles_.begin();
          i != handles_.end();
          i++) {
        capi::Handle* handle = *i;
        handle->update(env);
      }
    }

    if(env->state()->shared.config.capi_global_flush) {
      capi::Handles* handles = env->state()->shared.cached_handles();

      if(handles->size() > 0) {
        for(capi::Handles::Iterator i(*handles); i.more(); i.advance()) {
          i->update(env);
        }
      }
    }
  }

  /**
   * Convert +obj+ to a C-API VALUE. References go through the current
   * native frame's handle table; immediates (fixnums, symbols, nil,
   * false, true, undef) map to tagged/sentinel values directly.
   */
  VALUE NativeMethodEnvironment::get_handle(Object* obj) {
    if(obj->reference_p()) {
      return current_native_frame_->get_handle(state_, obj);
    } else if(obj->fixnum_p() || obj->symbol_p()) {
      return reinterpret_cast<VALUE>(obj);
    } else if(obj->nil_p()) {
      return cCApiHandleQnil;
    } else if(obj->false_p()) {
      return cCApiHandleQfalse;
    } else if(obj->true_p()) {
      return cCApiHandleQtrue;
    } else if(obj == Qundef) {
      return cCApiHandleQundef;
    }

    capi::capi_raise_runtime_error("NativeMethod handle requested for unknown object type");
    return 0; // keep compiler happy
  }

  // Not implemented for this build.
  void NativeMethodEnvironment::delete_global(VALUE val) {
    abort();
  }

  // Thin delegators onto the current native frame.
  Object* NativeMethodEnvironment::block() {
    return get_object(current_native_frame_->block());
  }

  capi::HandleSet& NativeMethodEnvironment::handles() {
    return current_native_frame_->handles();
  }

  void NativeMethodEnvironment::flush_cached_data() {
    current_native_frame_->flush_cached_data();
  }

  void NativeMethodEnvironment::check_tracked_handle(capi::Handle* hdl, bool need_update) {
    current_native_frame_->check_tracked_handle(hdl, need_update);
  }

  void NativeMethodEnvironment::update_cached_data() {
    current_native_frame_->update_cached_data();
  }

  // Register the NativeMethod class and set up this thread's environment.
  void NativeMethod::init(STATE) {
    GO(nmethod).set(state->new_class("NativeMethod", G(executable), G(rubinius)));
    G(nmethod)->set_object_type(state, NativeMethodType);

    init_thread(state);
  }

  void NativeMethod::init_thread(STATE) {
    NativeMethodEnvironment* env = new NativeMethodEnvironment(state);
    native_method_environment.set(env);
  }

  void NativeMethod::cleanup_thread(STATE) {
    delete native_method_environment.get();
    native_method_environment.set(NULL);
  }

  /**
   * Arity -3:   VALUE func(VALUE argument_array);
   * Arity -2:   VALUE func(VALUE receiver, VALUE argument_array);
   * Arity -1:   VALUE func(int argument_count, VALUE*, VALUE receiver);
   * Otherwise:  VALUE func(VALUE receiver, [VALUE arg1, VALUE arg2, ...]);
   *
   * There is also a special-case arity, INIT_FUNCTION, which corresponds
   * to void (*)(void) and should never appear in user code.
   *
   * @note Currently supports functions with up to receiver + 15 (separate) arguments only!
   *       Anything beyond that should use one of the special arities instead.
   *       15 is the limit in MRI as well.
   */

  // Specialized fast-path invokers for the common low arities.
  class ZeroArguments {
  public:
    static Object* invoke(STATE, NativeMethod* nm, NativeMethodEnvironment* env,
                          Arguments& args) {
      VALUE receiver = env->get_handle(args.recv());
      return env->get_object(nm->func()(receiver));
    }
  };

  class OneArgument {
  public:
    static Object* invoke(STATE, NativeMethod* nm, NativeMethodEnvironment* env,
                          Arguments& args) {
      VALUE receiver = env->get_handle(args.recv());
      VALUE a1 = env->get_handle(args.get_argument(0));
      return env->get_object(nm->func()(receiver, a1));
    }
  };

  class TwoArguments {
  public:
    static Object* invoke(STATE, NativeMethod* nm, NativeMethodEnvironment* env,
                          Arguments& args) {
      VALUE receiver = env->get_handle(args.recv());
      VALUE a1 = env->get_handle(args.get_argument(0));
      VALUE a2 = env->get_handle(args.get_argument(1));
      return env->get_object(nm->func()(receiver, a1, a2));
    }
  };

  class ThreeArguments {
  public:
    static Object* invoke(STATE, NativeMethod* nm, NativeMethodEnvironment* env,
                          Arguments& args) {
      VALUE receiver = env->get_handle(args.recv());
      VALUE a1 = env->get_handle(args.get_argument(0));
      VALUE a2 = env->get_handle(args.get_argument(1));
      VALUE a3 = env->get_handle(args.get_argument(2));
      return env->get_object(nm->func()(receiver, a1, a2, a3));
    }
  };

  /**
   * Catch-all invoker: dispatches on the declared arity, covering the
   * special negative arities and fixed arities 0..15.
   */
  class GenericArguments {
  public:
    static Object* invoke(STATE, NativeMethod* nm, NativeMethodEnvironment* env,
                          Arguments& args) {
      VALUE receiver = env->get_handle(args.recv());

      switch(nm->arity()->to_int()) {

      // This one is not in MRI.
      case ARGS_IN_RUBY_ARRAY: {  /* Braces required to create objects in a switch */
        VALUE ary = env->get_handle(args.as_array(state));
        VALUE ret = nm->func()(ary);
        return env->get_object(ret);
      }

      case RECEIVER_PLUS_ARGS_IN_RUBY_ARRAY: {
        VALUE ary = env->get_handle(args.as_array(state));
        VALUE ret = nm->func()(receiver, ary);
        return env->get_object(ret);
      }

      case ARG_COUNT_ARGS_IN_C_ARRAY_PLUS_RECEIVER: {
        VALUE* ary = (VALUE*)alloca(sizeof(VALUE) * args.total());
        for (std::size_t i = 0; i < args.total(); ++i) {
          ary[i] = env->get_handle(args.get_argument(i));
        }
        VALUE ret = nm->func_as<ArgcFunction>()(args.total(), ary, receiver);
        return env->get_object(ret);
      }

      /*
       *  Normal arg counts
       */

      case 0:
        return env->get_object(nm->func()(receiver));

      case 1: {
        VALUE a1 = env->get_handle(args.get_argument(0));
        VALUE ret = nm->func()(receiver, a1);
        return env->get_object(ret);
      }

      case 2: {
        VALUE a1 = env->get_handle(args.get_argument(0));
        VALUE a2 = env->get_handle(args.get_argument(1));
        VALUE ret = nm->func()(receiver, a1, a2);
        return env->get_object(ret);
      }

      case 3: {
        VALUE a1 = env->get_handle(args.get_argument(0));
        VALUE a2 = env->get_handle(args.get_argument(1));
        VALUE a3 = env->get_handle(args.get_argument(2));
        VALUE ret = nm->func()(receiver, a1, a2, a3);
        return env->get_object(ret);
      }

      case 4: {
        VALUE a1 = env->get_handle(args.get_argument(0));
        VALUE a2 = env->get_handle(args.get_argument(1));
        VALUE a3 = env->get_handle(args.get_argument(2));
        VALUE a4 = env->get_handle(args.get_argument(3));
        VALUE ret = nm->func()(receiver, a1, a2, a3, a4);
        return env->get_object(ret);
      }

      case 5: {
        VALUE a1 = env->get_handle(args.get_argument(0));
        VALUE a2 = env->get_handle(args.get_argument(1));
        VALUE a3 = env->get_handle(args.get_argument(2));
        VALUE a4 = env->get_handle(args.get_argument(3));
        VALUE a5 = env->get_handle(args.get_argument(4));
        VALUE ret = nm->func()(receiver, a1, a2, a3, a4, a5);
        return env->get_object(ret);
      }

      case 6: {
        VALUE a1 = env->get_handle(args.get_argument(0));
        VALUE a2 = env->get_handle(args.get_argument(1));
        VALUE a3 = env->get_handle(args.get_argument(2));
        VALUE a4 = env->get_handle(args.get_argument(3));
        VALUE a5 = env->get_handle(args.get_argument(4));
        VALUE a6 = env->get_handle(args.get_argument(5));
        VALUE ret = nm->func()(receiver, a1, a2, a3, a4, a5, a6);
        return env->get_object(ret);
      }

      case 7: {
        VALUE a1 = env->get_handle(args.get_argument(0));
        VALUE a2 = env->get_handle(args.get_argument(1));
        VALUE a3 = env->get_handle(args.get_argument(2));
        VALUE a4 = env->get_handle(args.get_argument(3));
        VALUE a5 = env->get_handle(args.get_argument(4));
        VALUE a6 = env->get_handle(args.get_argument(5));
        VALUE a7 = env->get_handle(args.get_argument(6));
        VALUE ret = nm->func()(receiver, a1, a2, a3, a4, a5, a6, a7);
        return env->get_object(ret);
      }

      case 8: {
        VALUE a1 = env->get_handle(args.get_argument(0));
        VALUE a2 = env->get_handle(args.get_argument(1));
        VALUE a3 = env->get_handle(args.get_argument(2));
        VALUE a4 = env->get_handle(args.get_argument(3));
        VALUE a5 = env->get_handle(args.get_argument(4));
        VALUE a6 = env->get_handle(args.get_argument(5));
        VALUE a7 = env->get_handle(args.get_argument(6));
        VALUE a8 = env->get_handle(args.get_argument(7));
        VALUE ret = nm->func()(receiver, a1, a2, a3, a4, a5, a6, a7, a8);
        return env->get_object(ret);
      }

      case 9: {
        VALUE a1 = env->get_handle(args.get_argument(0));
        VALUE a2 = env->get_handle(args.get_argument(1));
        VALUE a3 = env->get_handle(args.get_argument(2));
        VALUE a4 = env->get_handle(args.get_argument(3));
        VALUE a5 = env->get_handle(args.get_argument(4));
        VALUE a6 = env->get_handle(args.get_argument(5));
        VALUE a7 = env->get_handle(args.get_argument(6));
        VALUE a8 = env->get_handle(args.get_argument(7));
        VALUE a9 = env->get_handle(args.get_argument(8));
        VALUE ret = nm->func()(receiver, a1, a2, a3, a4, a5, a6, a7, a8, a9);
        return env->get_object(ret);
      }

      case 10: {
        VALUE a1 = env->get_handle(args.get_argument(0));
        VALUE a2 = env->get_handle(args.get_argument(1));
        VALUE a3 = env->get_handle(args.get_argument(2));
        VALUE a4 = env->get_handle(args.get_argument(3));
        VALUE a5 = env->get_handle(args.get_argument(4));
        VALUE a6 = env->get_handle(args.get_argument(5));
        VALUE a7 = env->get_handle(args.get_argument(6));
        VALUE a8 = env->get_handle(args.get_argument(7));
        VALUE a9 = env->get_handle(args.get_argument(8));
        VALUE a10 = env->get_handle(args.get_argument(9));
        VALUE ret = nm->func()(receiver, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10);
        return env->get_object(ret);
      }

      case 11: {
        VALUE a1 = env->get_handle(args.get_argument(0));
        VALUE a2 = env->get_handle(args.get_argument(1));
        VALUE a3 = env->get_handle(args.get_argument(2));
        VALUE a4 = env->get_handle(args.get_argument(3));
        VALUE a5 = env->get_handle(args.get_argument(4));
        VALUE a6 = env->get_handle(args.get_argument(5));
        VALUE a7 = env->get_handle(args.get_argument(6));
        VALUE a8 = env->get_handle(args.get_argument(7));
        VALUE a9 = env->get_handle(args.get_argument(8));
        VALUE a10 = env->get_handle(args.get_argument(9));
        VALUE a11 = env->get_handle(args.get_argument(10));
        VALUE ret = nm->func()(receiver, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10,
                               a11);
        return env->get_object(ret);
      }

      case 12: {
        VALUE a1 = env->get_handle(args.get_argument(0));
        VALUE a2 = env->get_handle(args.get_argument(1));
        VALUE a3 = env->get_handle(args.get_argument(2));
        VALUE a4 = env->get_handle(args.get_argument(3));
        VALUE a5 = env->get_handle(args.get_argument(4));
        VALUE a6 = env->get_handle(args.get_argument(5));
        VALUE a7 = env->get_handle(args.get_argument(6));
        VALUE a8 = env->get_handle(args.get_argument(7));
        VALUE a9 = env->get_handle(args.get_argument(8));
        VALUE a10 = env->get_handle(args.get_argument(9));
        VALUE a11 = env->get_handle(args.get_argument(10));
        VALUE a12 = env->get_handle(args.get_argument(11));
        VALUE ret = nm->func()(receiver, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10,
                               a11, a12);
        return env->get_object(ret);
      }

      case 13: {
        VALUE a1 = env->get_handle(args.get_argument(0));
        VALUE a2 = env->get_handle(args.get_argument(1));
        VALUE a3 = env->get_handle(args.get_argument(2));
        VALUE a4 = env->get_handle(args.get_argument(3));
        VALUE a5 = env->get_handle(args.get_argument(4));
        VALUE a6 = env->get_handle(args.get_argument(5));
        VALUE a7 = env->get_handle(args.get_argument(6));
        VALUE a8 = env->get_handle(args.get_argument(7));
        VALUE a9 = env->get_handle(args.get_argument(8));
        VALUE a10 = env->get_handle(args.get_argument(9));
        VALUE a11 = env->get_handle(args.get_argument(10));
        VALUE a12 = env->get_handle(args.get_argument(11));
        VALUE a13 = env->get_handle(args.get_argument(12));
        VALUE ret = nm->func()(receiver, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10,
                               a11, a12, a13);
        return env->get_object(ret);
      }

      case 14: {
        VALUE a1 = env->get_handle(args.get_argument(0));
        VALUE a2 = env->get_handle(args.get_argument(1));
        VALUE a3 = env->get_handle(args.get_argument(2));
        VALUE a4 = env->get_handle(args.get_argument(3));
        VALUE a5 = env->get_handle(args.get_argument(4));
        VALUE a6 = env->get_handle(args.get_argument(5));
        VALUE a7 = env->get_handle(args.get_argument(6));
        VALUE a8 = env->get_handle(args.get_argument(7));
        VALUE a9 = env->get_handle(args.get_argument(8));
        VALUE a10 = env->get_handle(args.get_argument(9));
        VALUE a11 = env->get_handle(args.get_argument(10));
        VALUE a12 = env->get_handle(args.get_argument(11));
        VALUE a13 = env->get_handle(args.get_argument(12));
        VALUE a14 = env->get_handle(args.get_argument(13));
        VALUE ret = nm->func()(receiver, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10,
                               a11, a12, a13, a14);
        return env->get_object(ret);
      }

      case 15: {
        VALUE a1 = env->get_handle(args.get_argument(0));
        VALUE a2 = env->get_handle(args.get_argument(1));
        VALUE a3 = env->get_handle(args.get_argument(2));
        VALUE a4 = env->get_handle(args.get_argument(3));
        VALUE a5 = env->get_handle(args.get_argument(4));
        VALUE a6 = env->get_handle(args.get_argument(5));
        VALUE a7 = env->get_handle(args.get_argument(6));
        VALUE a8 = env->get_handle(args.get_argument(7));
        VALUE a9 = env->get_handle(args.get_argument(8));
        VALUE a10 = env->get_handle(args.get_argument(9));
        VALUE a11 = env->get_handle(args.get_argument(10));
        VALUE a12 = env->get_handle(args.get_argument(11));
        VALUE a13 = env->get_handle(args.get_argument(12));
        VALUE a14 = env->get_handle(args.get_argument(13));
        VALUE a15 = env->get_handle(args.get_argument(14));
        VALUE ret = nm->func()(receiver, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10,
                               a11, a12, a13, a14, a15);
        return env->get_object(ret);
      }

      /* Extension entry point, should never occur for user code. */
      case INIT_FUNCTION: {
        nm->func_as<InitFunction>()();
        return Qnil;
      }

      /* A C function being used as a block */
      case ITERATE_BLOCK: {
        VALUE cb = env->get_handle(nm->get_ivar(state, state->symbol("cb_data")));

        VALUE val;
        switch(args.total()) {
        case 0:
          val = env->get_handle(Qnil);
          break;
        case 1:
          val = env->get_handle(args.get_argument(0));
          break;
        default:
          val = env->get_handle(args.as_array(state));
          break;
        }

        VALUE ret = nm->func()(val, cb, receiver);
        return env->get_object(ret);
      }

      case C_LAMBDA: {
        VALUE cb = env->get_handle(nm->get_ivar(state, state->symbol("cb_data")));
        VALUE val = env->get_handle(args.as_array(state));
        VALUE ret = nm->func()(val, cb);
        return env->get_object(ret);
      }

      case C_CALLBACK: {
        VALUE cb = env->get_handle(nm->get_ivar(state, state->symbol("cb_data")));
        nm->func()(cb);
        return Qnil;
      }

      default:
        capi::capi_raise_runtime_error("unrecognized arity for NativeMethod call");
        return Qnil;
      }
    }
  };

  /**
   * Dispatcher trampoline, parameterized on the invoker class chosen in
   * NativeMethod::create(): arity check, handle sync, native frame +
   * exception point setup, invoke, restore, async check.
   */
  template <class ArgumentHandler>
  Object* NativeMethod::executor_implementation(STATE, CallFrame* call_frame, Dispatch& msg, Arguments& args) {
    NativeMethod* nm = as<NativeMethod>(msg.method);

    int arity = nm->arity()->to_int();
    if(arity >= 0 && (size_t)arity != args.total()) {
      Exception* exc = Exception::make_argument_error(
          state, arity, args.total(), msg.name);
      exc->locations(state, Location::from_call_stack(state, call_frame));
      state->thread_state()->raise_exception(exc);

      return NULL;
    }

    NativeMethodEnvironment* env = native_method_environment.get();

    // Optionally get the handles back to the proper state.
    if(state->shared.config.capi_global_flush) {
      capi::Handles* handles = state->shared.cached_handles();

      if(handles->size() > 0) {
        for(capi::Handles::Iterator i(*handles); i.more(); i.advance()) {
          i->update(env);
        }
      }
    }

    // Register the CallFrame, because we might GC below this.
    state->set_call_frame(call_frame);

    NativeMethodFrame nmf(env->current_native_frame());

    CallFrame* saved_frame = env->current_call_frame();
    env->set_current_call_frame(call_frame);
    env->set_current_native_frame(&nmf);

    // Be sure to do this after installing nmf as the current
    // native frame.
    nmf.setup(
        env->get_handle(args.recv()),
        env->get_handle(args.block()),
        env->get_handle(msg.method),
        env->get_handle(msg.module));

    Object* ret;
    ExceptionPoint ep(env);
    PLACE_EXCEPTION_POINT(ep);

    if(unlikely(ep.jumped_to())) {
      ret = NULL;
    } else {
#ifdef RBX_PROFILER
      if(unlikely(state->tooling())) {
        tooling::MethodEntry method(state, msg, args);
        ret = ArgumentHandler::invoke(state, nm, env, args);
      } else {
        ret = ArgumentHandler::invoke(state, nm, env, args);
      }
#else
      ret = ArgumentHandler::invoke(state, nm, env, args);
#endif
    }

    env->set_current_call_frame(saved_frame);
    env->set_current_native_frame(nmf.previous());
    ep.pop(env);

    // Handle any signals that occurred while the native method
    // was running.
    if(!state->check_async(call_frame)) return NULL;

    return ret;
  }

  // Wrap an extension's Init_* entry point as a callable NativeMethod.
  NativeMethod* NativeMethod::load_extension_entry_point(STATE, Pointer* ptr) {
    void* func = ptr->pointer;

    return NativeMethod::create(state, nil<String>(), G(rubinius),
                                state->symbol("__init__"), func,
                                Fixnum::from(INIT_FUNCTION));
  }

  /**
   * Build a NativeMethod object for +func+, selecting the specialized
   * executor for arities 0-3 and the generic one for everything else.
   */
  NativeMethod* NativeMethod::create(VM* state, String* file_name,
                                     Module* module, Symbol* method_name,
                                     void* func, Fixnum* arity) {
    NativeMethod* nmethod = state->new_object<NativeMethod>(G(nmethod));

    nmethod->arity(state, arity);
    nmethod->file(state, file_name);
    nmethod->name(state, method_name);
    nmethod->module(state, module);

    nmethod->func_ = func;

    switch(arity->to_native()) {
    case 0:
      nmethod->set_executor(&NativeMethod::executor_implementation<ZeroArguments>);
      break;
    case 1:
      nmethod->set_executor(&NativeMethod::executor_implementation<OneArgument>);
      break;
    case 2:
      nmethod->set_executor(&NativeMethod::executor_implementation<TwoArguments>);
      break;
    case 3:
      nmethod->set_executor(&NativeMethod::executor_implementation<ThreeArguments>);
      break;
    default:
      nmethod->set_executor(&NativeMethod::executor_implementation<GenericArguments>);
      break;
    }

    nmethod->primitive(state, state->symbol("nativemethod_call"));
    nmethod->serial(state, Fixnum::from(0));

    return nmethod;
  }

}
// Destroy the calling thread's NativeMethodEnvironment and clear the
// thread-local slot so stale pointers cannot be observed afterwards.
void NativeMethod::cleanup_thread(STATE) {
  NativeMethodEnvironment* env = native_method_environment.get();
  native_method_environment.set(NULL);
  delete env;
}
// Allocate a fresh NativeMethodEnvironment for the calling thread and
// install it in the thread-local slot.
void NativeMethod::init_thread(STATE) {
  native_method_environment.set(new NativeMethodEnvironment(state));
}
namespace rubinius {

  // When true, GlobalLock logs/asserts its acquire/release activity.
  bool GlobalLock::debug_locking = false;

  // Default maximum stack depth; may be lowered by init_stack_size()
  // based on getrlimit(RLIMIT_STACK).
  unsigned long VM::cStackDepthMax = 655300;

  // getrlimit can report there is 4G of stack (ie, unlimited).
  // Even when there is unlimited stack, we clamp the max to
  // this value (currently 128M)
  static rlim_t cMaxStack = (1024 * 1024 * 128);

  /**
   * Construct a VM bound to +shared+. Young-generation bounds and the
   * local allocation slab are only initialized here when a shared
   * ObjectMemory already exists; otherwise initialize() sets them up.
   */
  VM::VM(SharedState& shared)
    : ManagedThread(shared, ManagedThread::eRuby)
    , saved_call_frame_(0)
    , stack_start_(0)
    , run_signals_(false)
    , thread_step_(false)
    , shared(shared)
    , waiter_(NULL)
    , interrupt_with_signal_(false)
    , om(shared.om)
    , interrupts(shared.interrupts)
    , check_local_interrupts(false)
    , thread_state_(this)
    , thread(this, nil<Thread>())
    , current_fiber(this, nil<Fiber>())
    , root_fiber(this, nil<Fiber>())
  {
    set_stack_size(cStackDepthMax);
    os_thread_ = pthread_self(); // initial value

    if(shared.om) {
      young_start_ = shared.om->young_start();
      // NOTE(review): 'yound_end' reads like a typo'd accessor name —
      // presumably it matches the declaration in ObjectMemory; confirm
      // before renaming anywhere.
      young_end_ = shared.om->yound_end();
      shared.om->refill_slab(local_slab_);
    }

    tooling_env_ = rbxti::create_env(this);
    tooling_ = false;
  }

  /**
   * Destroy a VM: detach it from SharedState's VM registry and delete it.
   * The saved call frame is cleared first so nothing dangles during
   * teardown.
   */
  void VM::discard(VM* vm) {
    vm->saved_call_frame_ = 0;
    vm->shared.remove_vm(vm);

    delete vm;
  }

  /**
   * Boot-time initialization: creates the ObjectMemory, publishes it to
   * SharedState, seeds the young-generation bounds and local slab, and
   * marks this VM as GC-dependent.
   */
  void VM::initialize() {
    VM::register_state(this);

    om = new ObjectMemory(this, shared.config);
    shared.om = om;

    young_start_ = shared.om->young_start();
    // NOTE(review): see the ctor — 'yound_end' spelling assumed to match
    // ObjectMemory's declaration.
    young_end_ = shared.om->yound_end();

    om->refill_slab(local_slab_);

    shared.set_initialized();

    // This seems like we should do this in VM(), ie, for every VM and
    // therefore every Thread object in the process. But in fact, because
    // we're using the GIL atm, we only do it once. When the GIL goes
    // away, this needs to be moved to VM().
    shared.gc_dependent();
  }

  /**
   * Bring the VM fully to life: learn type layouts, build the core
   * ontology, initialize the bytecode interpreter, and create the main
   * Thread object.
   */
  void VM::boot() {
    TypeInfo::auto_learn_fields(this);

    bootstrap_ontology();

    VMMethod::init(this);

    // Setup the main Thread, which is a reflect of the pthread_self()
    // when the VM boots.
    boot_threads();

    GlobalLock::debug_locking = shared.config.gil_debug;
  }

  /**
   * Publish JIT configuration to Ruby-land as Rubinius::JIT:
   * an Array of enabled options, false when the JIT is disabled, or nil
   * when LLVM support was not compiled in.
   */
  void VM::initialize_config() {
#ifdef ENABLE_LLVM
    if(!shared.config.jit_disabled) {
      Array* ary = Array::create(this, 3);
      ary->append(this, symbol("usage"));
      if(shared.config.jit_inline_generic) {
        ary->append(this, symbol("inline_generic"));
      }

      if(shared.config.jit_inline_blocks) {
        ary->append(this, symbol("inline_blocks"));
      }

      G(rubinius)->set_const(this, "JIT", ary);
    } else {
      G(rubinius)->set_const(this, "JIT", Qfalse);
    }
#else
    G(rubinius)->set_const(this, "JIT", Qnil);
#endif
  }

  // HACK so not thread safe or anything!
  static VM* __state = NULL;

  // Process-wide "bootstrap" VM pointer (not thread-safe, see above).
  VM* VM::current_state() {
    return __state;
  }

  void VM::register_state(VM *vm) {
    __state = vm;
  }

  // Thread-local holder for the VM running on the current OS thread.
  thread::ThreadData<VM*> _current_vm;

  VM* VM::current() {
    return _current_vm.get();
  }

  /// Bind +vm+ to the calling OS thread and publish it thread-locally.
  void VM::set_current(VM* vm) {
    vm->os_thread_ = pthread_self();
    _current_vm.set(vm);
  }

  /// Create the main Thread object for this VM and make the VM current.
  void VM::boot_threads() {
    thread.set(Thread::create(this, this, G(thread), pthread_self()), &globals().roots);
    thread->sleep(this, Qfalse);

    VM::set_current(this);
  }

  /**
   * Allocate a young object of +size+ bytes from the thread-local slab.
   * On slab exhaustion, refills the slab once and retries; if the refill
   * fails, delegates to ObjectMemory's slower allocation path (which
   * handles header init itself).
   */
  Object* VM::new_object_typed(Class* cls, size_t size, object_type type) {
    Object* obj = reinterpret_cast<Object*>(local_slab().allocate(size));

    if(unlikely(!obj)) {
      if(shared.om->refill_slab(local_slab())) {
        obj = reinterpret_cast<Object*>(local_slab().allocate(size));
      }

      // If refill_slab fails, obj will still be NULL.
      if(!obj) {
        return om->new_object_typed(cls, size, type);
      }
    }

    obj->init_header(cls, YoungObjectZone, type);
    obj->clear_fields(size);

    return obj;
  }

  /// Allocate directly in the mature generation, bypassing the slab.
  Object* VM::new_object_typed_mature(Class* cls, size_t bytes, object_type type) {
    return om->new_object_typed_mature(cls, bytes, type);
  }

  /// Allocate using the size/type recorded in +ti+.
  Object* VM::new_object_from_type(Class* cls, TypeInfo* ti) {
    return new_object_typed(cls, ti->instance_size, ti->type);
  }

  /**
   * Create a bare Class with superclass +sup+ (no name, no singleton
   * class attached). Instance type info is inherited from the superclass
   * or defaults to Object when +sup+ is nil.
   */
  Class* VM::new_basic_class(Class* sup) {
    Class *cls = om->new_object_enduring<Class>(G(klass));
    cls->init(shared.inc_class_count());

    if(sup->nil_p()) {
      cls->instance_type(this, Fixnum::from(ObjectType));
      cls->set_type_info(find_type(ObjectType));
    } else {
      cls->instance_type(this, sup->instance_type());

      // HACK test that this is always true
      cls->set_type_info(sup->type_info());
    }
    cls->superclass(this, sup);

    return cls;
  }

  /// New class under Object, superclass Object.
  Class* VM::new_class(const char* name) {
    return new_class(name, G(object), G(object));
  }

  /// New class under Object with an explicit superclass.
  Class* VM::new_class(const char* name, Class* super_class) {
    return new_class(name, super_class, G(object));
  }

  /**
   * Fully-wired class creation: basic class + name/scope setup + a
   * singleton class chained from the superclass's singleton class.
   */
  Class* VM::new_class(const char* name, Class* sup, Module* under) {
    Class* cls = new_basic_class(sup);
    cls->setup(this, name, under);

    // HACK test that we've got the MOP setup properly
    SingletonClass::attach(this, cls, sup->singleton_class(this));
    return cls;
  }

  /// New class with superclass Object, scoped under +under+.
  Class* VM::new_class_under(const char* name, Module* under) {
    return new_class(name, G(object), under);
  }

  /// Create and name a new Module scoped under +under+.
  Module* VM::new_module(const char* name, Module* under) {
    Module *mod = new_object<Module>(G(module));
    mod->setup(this, name, under);
    return mod;
  }

  /// Intern a C string in the shared symbol table.
  Symbol* VM::symbol(const char* str) {
    return shared.symbols.lookup(this, str);
  }

  /// Intern a String object in the shared symbol table.
  Symbol* VM::symbol(String* str) {
    return shared.symbols.lookup(this, str);
  }

  /**
   * Raise a TypeError unless +obj+ has object_type +type+. Fixnums need
   * the special immediate-value check since they are not references.
   */
  void type_assert(STATE, Object* obj, object_type type, const char* reason) {
    if((obj->reference_p() && obj->type_id() != type)
        || (type == FixnumType && !obj->fixnum_p())) {
      Exception::type_error(state, type, obj, reason);
    }
  }

  /// Raise the pre-allocated StackError with the current backtrace.
  void VM::raise_stack_error(CallFrame* call_frame) {
    G(stack_error)->locations(this, Location::from_call_stack(this, call_frame));
    thread_state()->raise_exception(G(stack_error));
  }

  /**
   * Derive the usable stack depth from RLIMIT_STACK: keep a safety
   * margin of 1/5 of the limit (capped at 1MB), then clamp the result
   * to cMaxStack. Leaves cStackDepthMax untouched if getrlimit fails.
   */
  void VM::init_stack_size() {
    struct rlimit rlim;
    if(getrlimit(RLIMIT_STACK, &rlim) == 0) {
      rlim_t space = rlim.rlim_cur/5;

      if(space > 1024*1024) space = 1024*1024;
      rlim_t adjusted = (rlim.rlim_cur - space);

      if(adjusted > cMaxStack) {
        cStackDepthMax = cMaxStack;
      } else {
        cStackDepthMax = adjusted;
      }
    }
  }

  /// Look up the TypeInfo for an object_type id.
  TypeInfo* VM::find_type(int type) {
    return om->type_info[type];
  }

  /// The Thread object currently scheduled on this VM.
  Thread *VM::current_thread() {
    return globals().current_thread.get();
  }

  /// Request both young and mature collections at the next safe point.
  void VM::run_gc_soon() {
    om->collect_young_now = true;
    om->collect_mature_now = true;
    interrupts.set_perform_gc();
  }

  /**
   * Unconditionally run a full GC cycle (young + mature + finalizers).
   * Stops all other threads for the duration via StopTheWorld.
   */
  void VM::collect(CallFrame* call_frame) {
    this->set_call_frame(call_frame);

    // Don't go any further unless we're allowed to GC.
    if(!om->can_gc()) return;

    // Stops all other threads, so we're only here by ourselves.
    StopTheWorld guard(this);

    GCData gc_data(this);

    om->collect_young(gc_data);
    om->collect_mature(gc_data);

    om->run_finalizers(this, call_frame);
  }

  /**
   * Run whichever collections have been requested (collect_young_now /
   * collect_mature_now), with optional timing output when gc_show is
   * configured and tooling hooks when profiling is active.
   */
  void VM::collect_maybe(CallFrame* call_frame) {
    this->set_call_frame(call_frame);

    // Don't go any further unless we're allowed to GC.
    if(!om->can_gc()) return;

    // Stops all other threads, so we're only here by ourselves.
    StopTheWorld guard(this);

    GCData gc_data(this);

    uint64_t start_time = 0;

    if(om->collect_young_now) {
      if(shared.config.gc_show) {
        start_time = get_current_time();
      }

      YoungCollectStats stats;

#ifdef RBX_PROFILER
      if(unlikely(tooling())) {
        tooling::GCEntry method(this, tooling::GCYoung);
        om->collect_young(gc_data, &stats);
      } else {
        om->collect_young(gc_data, &stats);
      }
#else
      om->collect_young(gc_data, &stats);
#endif

      if(shared.config.gc_show) {
        uint64_t fin_time = get_current_time();
        // get_current_time() presumably reports nanoseconds given the
        // /1000000 -> ms conversion; confirm against its definition.
        int diff = (fin_time - start_time) / 1000000;

        fprintf(stderr, "[GC %0.1f%% %d/%d %d %2dms]\n",
            stats.percentage_used, stats.promoted_objects,
            stats.excess_objects, stats.lifetime, diff);
      }
    }

    if(om->collect_mature_now) {
      int before_kb = 0;

      if(shared.config.gc_show) {
        start_time = get_current_time();
        before_kb = om->mature_bytes_allocated() / 1024;
      }

#ifdef RBX_PROFILER
      if(unlikely(tooling())) {
        tooling::GCEntry method(this, tooling::GCMature);
        om->collect_mature(gc_data);
      } else {
        om->collect_mature(gc_data);
      }
#else
      om->collect_mature(gc_data);
#endif

      if(shared.config.gc_show) {
        uint64_t fin_time = get_current_time();
        int diff = (fin_time - start_time) / 1000000;
        int kb = om->mature_bytes_allocated() / 1024;

        fprintf(stderr, "[Full GC %dkB => %dkB %2dms]\n", before_kb, kb, diff);
      }
    }

    // Count the finalizers toward running the mature gc. Not great,
    // but better than not seeing the time at all.
#ifdef RBX_PROFILER
    if(unlikely(tooling())) {
      tooling::GCEntry method(this, tooling::GCFinalizer);
      om->run_finalizers(this, call_frame);
    } else {
      om->run_finalizers(this, call_frame);
    }
#else
    om->run_finalizers(this, call_frame);
#endif
  }

  /// Define a constant directly on Object.
  void VM::set_const(const char* name, Object* val) {
    globals().object->set_const(this, (char*)name, val);
  }

  /// Define a constant on an arbitrary module.
  void VM::set_const(Module* mod, const char* name, Object* val) {
    mod->set_const(this, (char*)name, val);
  }

  // Unimplemented here: aborts if reached.
  void VM::print_backtrace() {
    abort();
  }

  /// Register a Waiter so wakeup() can interrupt a blocking operation.
  void VM::install_waiter(Waiter& waiter) {
    waiter_ = &waiter;
  }

  /// Request that wakeup() interrupt this VM via a SIGVTALRM signal.
  void VM::interrupt_with_signal() {
    interrupt_with_signal_ = true;
  }

  /**
   * Wake this VM from a blocking state: either by signalling its OS
   * thread or by running the installed Waiter. Returns false when there
   * was nothing to wake. Must stay async-signal-safe (see comment below).
   */
  bool VM::wakeup() {
    if(interrupt_with_signal_) {
      pthread_kill(os_thread_, SIGVTALRM);
      return true;
    } else {
      // Use a local here because waiter_ can get reset to NULL by another thread
      // We can't use a mutex here because this is called from inside a
      // signal handler.
      if(Waiter* w = waiter_) {
        w->run();
        return true;
      }

      return false;
    }
  }

  /// Clear both interruption mechanisms (signal flag and Waiter).
  void VM::clear_waiter() {
    interrupt_with_signal_ = false;
    waiter_ = NULL;
  }

  /**
   * Service pending asynchronous events (queued signals, raised
   * exceptions). Returns false when an exception is in flight — the
   * caller must begin unwinding. Attaches a backtrace to exceptions
   * that don't have one yet.
   */
  bool VM::process_async(CallFrame* call_frame) {
    check_local_interrupts = false;

    if(run_signals_) {
      shared.signal_handler()->deliver_signals(call_frame);
    }

    switch(thread_state_.raise_reason()) {
    case cException:
      {
        Exception* exc = thread_state_.current_exception();
        if(exc->locations()->nil_p() || exc->locations()->size() == 0) {
          exc->locations(this, Location::from_call_stack(this, call_frame));
        }

        return false;
      }
    case cNone:
      return true;

    default:
      return false;
    }
  }

  /// Queue +exc+ to be raised at this VM's next interrupt check.
  void VM::register_raise(Exception* exc) {
    thread_state_.raise_exception(exc);
    check_local_interrupts = true;
    get_attention();
  }

  /// Sanity check: abort loudly if we're unwinding with no exception set.
  void VM::check_exception(CallFrame* call_frame) {
    if(thread_state()->raise_reason() == cNone) {
      // NOTE(review): "propogating" is a typo ("propagating") in a
      // runtime message; left byte-identical in this doc-only pass.
      std::cout << "Exception propogating, but none registered!\n";
      call_frame->print_backtrace(this);
      rubinius::abort();
    }
  }

  /**
   * Periodic safe-point check: validates stack depth, services async
   * interrupts, and assists the debugger's single-step mode. Returns
   * false when execution must unwind (stack overflow or exception).
   */
  bool VM::check_interrupts(CallFrame* call_frame, void* end) {
    // First, we might be here because someone reset the stack_limit_ so that
    // we'd fall into here to check interrupts even if the stack is fine,
    //
    // So fix up the stack_limit_ if thats the case first.

    // If this is true, stack_limit_ was just changed to get our attention,
    // reset it now.
    if(stack_limit_ == stack_start_) {
      reset_stack_limit();
    } else {
      if(!check_stack(call_frame, end)) return false;
    }

    if(unlikely(check_local_interrupts)) {
      if(!process_async(call_frame)) return false;
    }

    // If the current thread is trying to step, debugger wise, then assist!
    if(thread_step()) {
      clear_thread_step();
      if(!Helpers::yield_debugger(this, call_frame, Qnil)) return false;
    }

    return true;
  }

  /**
   * Switch this VM onto +fib+'s stack: stack bookkeeping must be updated
   * before the fiber becomes current so overflow checks use the right
   * bounds.
   */
  void VM::set_current_fiber(Fiber* fib) {
    set_stack_start(fib->stack());
    set_stack_size(fib->stack_size());
    current_fiber.set(fib);
  }

};
/**
 * Bind +vm+ to the calling OS thread and publish it as the thread-local
 * current VM. The OS thread handle is recorded first so the VM is fully
 * bound before anyone can observe it via VM::current().
 */
void VM::set_current(VM* vm) {
  vm->os_thread_ = pthread_self();
  _current_vm.set(vm);
}
/**
 * Trampoline that runs a C extension method from Ruby dispatch.
 *
 * Validates arity, sets up a synthetic CallFrame plus a
 * NativeMethodFrame (so the GC and backtraces can see the native call),
 * converts receiver/block/method/module to C-API handles, and invokes
 * the function via the ArgumentHandler policy. Returns NULL when an
 * exception was raised (wrong arity, or a raise escaping the extension).
 */
Object* NativeMethod::executor_implementation(STATE, CallFrame* call_frame,
    Executable* exec, Module* mod, Arguments& args)
{
  NativeMethod* nm = as<NativeMethod>(exec);

  int arity = nm->arity()->to_int();

  // Fixed-arity methods must receive exactly that many arguments;
  // negative arities are the variadic conventions and skip this check.
  if(arity >= 0 && (size_t)arity != args.total()) {
    Exception* exc = Exception::make_argument_error(
        state, arity, args.total(), args.name());
    exc->locations(state, Location::from_call_stack(state, call_frame));
    state->raise_exception(exc);

    return NULL;
  }

  NativeMethodEnvironment* env = native_method_environment.get();

  // Optionally get the handles back to the proper state.
  if(state->shared().config.capi_global_flush) {
    capi::Handles* handles = state->shared().cached_handles();

    if(handles->size() > 0) {
      for(capi::Handles::Iterator i(*handles); i.more(); i.advance()) {
        i->update(env);
      }
    }
  }

  // Register the CallFrame, because we might GC below this.
  state->set_call_frame(call_frame);

  NativeMethodFrame nmf(env->current_native_frame());

  // Synthetic CallFrame representing the native call in Ruby backtraces;
  // dispatch_data links it back to the native frame.
  CallFrame cf;
  cf.previous = call_frame;
  cf.cm = 0;
  cf.scope = 0;
  cf.dispatch_data = (void*)&nmf;
  cf.flags = CallFrame::cNativeMethod;

  CallFrame* saved_frame = env->current_call_frame();
  env->set_current_call_frame(&cf);
  env->set_current_native_frame(&nmf);

  // Be sure to do this after installing nmf as the current
  // native frame.
  nmf.setup(
      env->get_handle(args.recv()),
      env->get_handle(args.block()),
      env->get_handle(exec),
      env->get_handle(mod));

  // We've got things setup (they can be GC'd properly), so we need to
  // wait before entering the extension code.
  ENTER_CAPI(state);

  Object* ret;
  ExceptionPoint ep(env);

  // NOTE(review): setjmp-style landing pad — jumped_to() is true on the
  // longjmp return path; confirm against ExceptionPoint's definition.
  PLACE_EXCEPTION_POINT(ep);

  if(unlikely(ep.jumped_to())) {
    ret = NULL;
  } else {
#ifdef RBX_PROFILER
    if(unlikely(state->vm()->tooling())) {
      tooling::MethodEntry method(state, exec, mod, args);
      ret = ArgumentHandler::invoke(state, nm, env, args);
    } else {
      ret = ArgumentHandler::invoke(state, nm, env, args);
    }
#else
    ret = ArgumentHandler::invoke(state, nm, env, args);
#endif
  }

  // Restore frames in reverse order, pop the exception point, then
  // leave the C-API region.
  env->set_current_call_frame(saved_frame);
  env->set_current_native_frame(nmf.previous());
  ep.pop(env);

  LEAVE_CAPI(state);

  // Handle any signals that occurred while the native method
  // was running.
  if(!state->check_async(call_frame)) return NULL;

  return ret;
}
namespace rubinius {
  /** Thread-local NativeMethodEnvironment instance. */
  thread::ThreadData<NativeMethodEnvironment*> native_method_environment;

  /* Class methods */

  /// Fetch the calling thread's NativeMethodEnvironment.
  NativeMethodEnvironment* NativeMethodEnvironment::get() {
    return native_method_environment.get();
  }

  /**
   * Frame teardown: push cached handle data back out, then drop the
   * reference this frame held on every handle it created or touched.
   */
  NativeMethodFrame::~NativeMethodFrame() {
    flush_cached_data();
    for(capi::HandleSet::iterator i = handles_.begin();
        i != handles_.end();
        i++) {
      capi::Handle* handle = *i;
      handle->deref();
    }
  }

  /**
   * Map +obj+ to a C-API VALUE handle, creating and registering a new
   * global handle on first use. The frame tracks each handle it refs in
   * handles_ so the destructor can deref them exactly once.
   */
  VALUE NativeMethodFrame::get_handle(STATE, Object* obj) {
    InflatedHeader* ih = state->om->inflate_header(obj);

    capi::Handle* handle = ih->handle();

    if(handle) {
      // ref() ONLY if it's not already in there!
      // otherwise the refcount is wrong and we leak handles.
      capi::HandleSet::iterator pos = handles_.find(handle);
      if(pos == handles_.end()) {
        handle->ref();
        handles_.insert(handle);
      }
    } else {
      handle = new capi::Handle(state, obj);
      ih->set_handle(handle);

      state->shared.global_handles()->add(handle);

      handle->ref();
      handles_.insert(handle);
    }

    return handle->as_value();
  }

  /// Convert a VALUE handle back to the Object it wraps.
  Object* NativeMethodFrame::get_object(VALUE val) {
    return capi::Handle::from(val)->object();
  }

  /// Write any C-side cached handle state back into the VM's objects.
  void NativeMethodFrame::flush_cached_data() {
    NativeMethodEnvironment* env = NativeMethodEnvironment::get();
    capi::Handles* handles = env->state()->shared.cached_handles();

    if(handles->size() > 0) {
      for(capi::Handles::Iterator i(*handles); i.more(); i.advance()) {
        i->flush(env);
      }
    }
  }

  /// Refresh the C-side cached handle state from the VM's objects.
  void NativeMethodFrame::update_cached_data() {
    NativeMethodEnvironment* env = NativeMethodEnvironment::get();
    capi::Handles* handles = env->state()->shared.cached_handles();

    if(handles->size() > 0) {
      for(capi::Handles::Iterator i(*handles); i.more(); i.advance()) {
        i->update(env);
      }
    }
  }

  /**
   * Convert any Object to a VALUE: references go through the current
   * native frame's handle table; immediates (Fixnum/Symbol) are encoded
   * directly in the VALUE; the singletons map to fixed handles.
   */
  VALUE NativeMethodEnvironment::get_handle(Object* obj) {
    if(obj->reference_p()) {
      return current_native_frame_->get_handle(state_, obj);
    } else if(obj->fixnum_p() || obj->symbol_p()) {
      return reinterpret_cast<VALUE>(obj);
    } else if(obj->nil_p()) {
      return cCApiHandleQnil;
    } else if(obj->false_p()) {
      return cCApiHandleQfalse;
    } else if(obj->true_p()) {
      return cCApiHandleQtrue;
    } else if(obj == Qundef) {
      return cCApiHandleQundef;
    }

    capi::capi_raise_runtime_error("NativeMethod handle requested for unknown object type");
    return 0; // keep compiler happy
  }

  // Unimplemented: aborts if an extension tries to delete a global.
  void NativeMethodEnvironment::delete_global(VALUE val) {
    abort();
  }

  /// The block passed to the currently-running native method.
  Object* NativeMethodEnvironment::block() {
    return current_block_.get();
  }

  /// Handle set of the current native frame.
  capi::HandleSet& NativeMethodEnvironment::handles() {
    return current_native_frame_->handles();
  }

  void NativeMethodEnvironment::flush_cached_data() {
    current_native_frame_->flush_cached_data();
  }

  void NativeMethodEnvironment::update_cached_data() {
    current_native_frame_->update_cached_data();
  }

  /// Create the NativeMethod class and the calling thread's environment.
  void NativeMethod::init(STATE) {
    state->globals.nmethod.set(state->new_class("NativeMethod", G(executable), G(rubinius)));
    state->globals.nmethod.get()->set_object_type(state, NativeMethodType);

    init_thread(state);
  }

  /// Install a fresh thread-local environment for native method calls.
  void NativeMethod::init_thread(STATE) {
    NativeMethodEnvironment* env = new NativeMethodEnvironment(state);
    native_method_environment.set(env);
  }

  NativeMethod* NativeMethod::allocate(STATE) {
    return create<GenericFunctor>(state);
  }

  /**
   * Trampoline for Dispatch-based invocation of a native method:
   * arity check, native frame + block bookkeeping, setjmp-style
   * exception point, then NativeMethod::call. Returns NULL on raise.
   */
  Object* NativeMethod::executor_implementation(STATE, CallFrame* call_frame, Dispatch& msg,
                                               Arguments& args)
  {
    NativeMethod* nm = as<NativeMethod>(msg.method);

    int arity = nm->arity()->to_int();
    // Fixed arities must match exactly; negative arities are variadic.
    if(arity >= 0 && (size_t)arity != args.total()) {
      Exception* exc = Exception::make_argument_error(
          state, arity, args.total(), msg.name);
      exc->locations(state, System::vm_backtrace(state, Fixnum::from(1), call_frame));
      state->thread_state()->raise_exception(exc);

      return NULL;
    }

    NativeMethodEnvironment* env = native_method_environment.get();

    // Save the previous frame/block state so it can be restored after
    // the extension returns (or longjmps).
    NativeMethodFrame nmf(env->current_native_frame());
    CallFrame* saved_frame = env->current_call_frame();
    Object* saved_block = env->block();

    env->set_current_call_frame(call_frame);
    env->set_current_native_frame(&nmf);
    env->set_current_block(args.block());

    Object* ret;
    ExceptionPoint ep(env);

    // NOTE(review): setjmp-style landing pad; jumped_to() is true on the
    // longjmp return path — confirm against ExceptionPoint's definition.
    PLACE_EXCEPTION_POINT(ep);

    if(unlikely(ep.jumped_to())) {
      ret = NULL;
    } else {
#ifdef RBX_PROFILER
      if(unlikely(state->shared.profiling())) {
        profiler::MethodEntry method(state, msg, args);
        ret = nm->call(state, env, args);
      } else {
        ret = nm->call(state, env, args);
      }
#else
      ret = nm->call(state, env, args);
#endif
    }

    // Restore in reverse order of installation.
    env->set_current_block(saved_block);
    env->set_current_call_frame(saved_frame);
    env->set_current_native_frame(nmf.previous());
    ep.pop(env);

    return ret;
  }

  /**
   * Resolve +name+ in the shared library at +path+ and wrap it as the
   * extension's INIT_FUNCTION NativeMethod under module Rubinius.
   */
  NativeMethod* NativeMethod::load_extension_entry_point(STATE, String* path, String* name) {
    void* func = NativeLibrary::find_symbol(state, name, path);

    NativeMethod* m = NativeMethod::create(state,
                                           path,
                                           state->globals.rubinius.get(),
                                           name->to_sym(state),
                                           reinterpret_cast<GenericFunctor>(func),
                                           Fixnum::from(INIT_FUNCTION)
                                          );
    return m;
  }

  /**
   * Arity -3:   VALUE func(VALUE argument_array);
   * Arity -2:   VALUE func(VALUE receiver, VALUE argument_array);
   * Arity -1:   VALUE func(int argument_count, VALUE*, VALUE receiver);
   * Otherwise:  VALUE func(VALUE receiver, VALUE arg1[, VALUE arg2, ...]);
   *
   * There is also a special-case arity, INIT_FUNCTION, which corresponds
   * to void (*)(void) and should never appear in user code.
   *
   * @note   Currently supports functions with up to receiver + 5
   *         (separate) arguments only! Anything beyond that should use
   *         one of the special arities instead.
   *
   * @todo   Check for inefficiencies.
   */
  /// Dispatch on the stored arity and invoke the wrapped C function,
  /// converting Objects to VALUE handles on the way in and the returned
  /// VALUE back to an Object on the way out.
  Object* NativeMethod::call(STATE, NativeMethodEnvironment* env, Arguments& args) {
    VALUE receiver = env->get_handle(args.recv());

    switch(arity()->to_int()) {
    case ARGS_IN_RUBY_ARRAY: {  /* Braces required to create objects in a switch */
      VALUE ary = env->get_handle(args.as_array(state));

      VALUE ret = functor_as<OneArgFunctor>()(ary);

      return env->get_object(ret);
    }

    case RECEIVER_PLUS_ARGS_IN_RUBY_ARRAY: {
      VALUE ary = env->get_handle(args.as_array(state));

      VALUE ret = functor_as<TwoArgFunctor>()(receiver, ary);

      return env->get_object(ret);
    }

    case ARG_COUNT_ARGS_IN_C_ARRAY_PLUS_RECEIVER: {
      // NOTE(review): this heap array leaks if get_handle or the functor
      // raises via longjmp past the delete[]; a scoped container would
      // fix it but needs an include not available in this doc-only pass.
      VALUE* ary = new VALUE[args.total()];

      for (std::size_t i = 0; i < args.total(); ++i) {
        ary[i] = env->get_handle(args.get_argument(i));
      }

      VALUE ret = functor_as<ArgcFunctor>()(args.total(), ary, receiver);

      delete[] ary;
      return env->get_object(ret);
    }

      /*
       *  Normal arg counts
       *
       *  Yes, it is ugly as f**k. It is intended as an encouragement
       *  to get rid of the concept of a separate VALUE and Object.
       */

    case 0: {
      OneArgFunctor functor = functor_as<OneArgFunctor>();

      VALUE ret = functor(receiver);

      return env->get_object(ret);
    }

    case 1: {
      TwoArgFunctor functor = functor_as<TwoArgFunctor>();

      VALUE a1 = env->get_handle(args.get_argument(0));

      VALUE ret = functor(receiver, a1);

      return env->get_object(ret);
    }

    case 2: {
      ThreeArgFunctor functor = functor_as<ThreeArgFunctor>();

      VALUE a1 = env->get_handle(args.get_argument(0));
      VALUE a2 = env->get_handle(args.get_argument(1));

      VALUE ret = functor(receiver, a1, a2);

      return env->get_object(ret);
    }

    case 3: {
      FourArgFunctor functor = functor_as<FourArgFunctor>();
      VALUE a1 = env->get_handle(args.get_argument(0));
      VALUE a2 = env->get_handle(args.get_argument(1));
      VALUE a3 = env->get_handle(args.get_argument(2));

      VALUE ret = functor(receiver, a1, a2, a3);

      return env->get_object(ret);
    }

    case 4: {
      FiveArgFunctor functor = functor_as<FiveArgFunctor>();
      VALUE a1 = env->get_handle(args.get_argument(0));
      VALUE a2 = env->get_handle(args.get_argument(1));
      VALUE a3 = env->get_handle(args.get_argument(2));
      VALUE a4 = env->get_handle(args.get_argument(3));

      VALUE ret = functor(receiver, a1, a2, a3, a4);

      return env->get_object(ret);
    }

    case 5: {
      SixArgFunctor functor = functor_as<SixArgFunctor>();
      VALUE a1 = env->get_handle(args.get_argument(0));
      VALUE a2 = env->get_handle(args.get_argument(1));
      VALUE a3 = env->get_handle(args.get_argument(2));
      VALUE a4 = env->get_handle(args.get_argument(3));
      VALUE a5 = env->get_handle(args.get_argument(4));

      VALUE ret = functor(receiver, a1, a2, a3, a4, a5);

      return env->get_object(ret);
    }

      /* Extension entry point, should never occur for user code. */
    case INIT_FUNCTION: {
      InitFunctor functor = functor_as<InitFunctor>();

      functor();

      return Qnil;
    }

    default:
      capi::capi_raise_runtime_error("unrecognized arity for NativeMethod call");
      return Qnil;
    }
  }

}