void Tuple::Info::show_simple(STATE, Object* self, int level) {
  Tuple* tup = as<Tuple>(self);
  native_int size = tup->num_fields();
  native_int stop = size < 6 ? size : 6;

  if(size == 0) {
    class_info(state, self, true);
    return;
  }

  class_info(state, self);
  std::cout << ": " << size << ">" << std::endl;
  ++level;
  for(native_int i = 0; i < stop; i++) {
    indent(level);
    Object* obj = tup->at(state, i);
    if(Tuple* t = try_as<Tuple>(obj)) {
      // Describe the nested tuple itself, not the enclosing one.
      class_info(state, obj);
      std::cout << ": " << t->num_fields() << ">" << std::endl;
    } else {
      obj->show_simple(state, level);
    }
  }
  if(tup->num_fields() > stop) ellipsis(level);
  close_body(level);
}
/** @todo Should we queue thread? Probably unnecessary. --rue */
void Thread::priority(STATE, Fixnum* new_priority) {
  /* This gets somewhat ugly to avoid clobbering existing lists. */
  if(new_priority->to_native() < 0) {
    Exception::argument_error(state, "Thread priority must be non-negative!");
  }

  Tuple* scheduled = state->globals.scheduled_threads.get();

  std::size_t desired = new_priority->to_ulong();
  std::size_t existing = scheduled->num_fields();

  if(desired >= existing) {
    Tuple* replacement = Tuple::create(state, (desired + 1));
    replacement->copy_from(state, scheduled,
                           Fixnum::from(0),
                           Fixnum::from(scheduled->num_fields()),
                           Fixnum::from(0));

    // Give every slot that is still nil a fresh List; the nil_p() check
    // leaves the copied priority lists untouched.
    for(std::size_t i = existing - 1; i <= desired; ++i) {
      if(replacement->at(state, i)->nil_p()) {
        replacement->put(state, i, List::create(state));
      }
    }

    state->globals.scheduled_threads.set(replacement);
    scheduled = replacement;
  }

  priority_ = new_priority;
}
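/* A minimal standalone sketch of the grow-and-backfill pattern above,
 * with std::vector standing in for Tuple and an empty sentinel standing
 * in for nil (hypothetical stand-ins, not Rubinius API): resizing copies
 * the existing slots, and only slots still "empty" get a fresh value,
 * so existing priority lists are never replaced. */
#include <cstddef>
#include <iostream>
#include <vector>

static const int kEmptySlot = -1; // stands in for nil

void grow_priorities(std::vector<int>& scheduled, std::size_t desired) {
  if(desired >= scheduled.size()) {
    scheduled.resize(desired + 1, kEmptySlot); // keep old slots, pad new ones
    for(std::size_t i = 0; i < scheduled.size(); ++i) {
      if(scheduled[i] == kEmptySlot) scheduled[i] = 0; // like List::create
    }
  }
}

int main() {
  std::vector<int> scheduled(2, 7); // two existing priority lists
  grow_priorities(scheduled, 5);
  std::cout << scheduled.size() << std::endl; // 6
  return 0;
}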
void Marshaller::set_iseq(InstructionSequence* iseq) {
  Tuple* ops = iseq->opcodes();
  stream << "i" << endl << ops->num_fields() << endl;

  for(size_t i = 0; i < ops->num_fields(); i++) {
    stream << as<Fixnum>(ops->at(state, i))->to_native() << endl;
  }
}
void VM::update_profile(STATE) {
  timer::StopWatch<timer::nanoseconds> timer(metrics().machine.profile_ns);

  metrics().machine.profiles++;
  profile_sample_count_++;

  CompiledCode* code = state->vm()->call_frame()->compiled_code;
  code->machine_code()->sample_count++;

  Tuple* profile = profile_.get();

  if(profile->nil_p()) {
    profile = Tuple::create(state, max_profile_entries_);
    profile_.set(profile);
  }

  ::qsort(reinterpret_cast<void*>(profile->field), profile->num_fields(),
          sizeof(intptr_t), profile_compare);

  for(native_int i = 0; i < profile->num_fields(); i++) {
    if(code == profile->at(i)) return;
  }

  CompiledCode* pcode = try_as<CompiledCode>(profile->at(0));
  if(!pcode
      || code->machine_code()->call_count > pcode->machine_code()->call_count) {
    profile->put(state, 0, code);
    min_profile_call_count_ = code->machine_code()->call_count;
  }
}
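/* profile_compare is not shown in this section. A sketch of what a qsort
 * comparator over an array of object pointers has to look like (an
 * assumption for illustration, not the actual Rubinius comparator):
 * qsort hands the comparator pointers *to the elements*, so each
 * argument is a pointer to a pointer. */
#include <cstdio>
#include <cstdlib>

struct Code { long call_count; };

static int compare_by_call_count(const void* a, const void* b) {
  const Code* ca = *static_cast<const Code* const*>(a);
  const Code* cb = *static_cast<const Code* const*>(b);
  if(ca->call_count < cb->call_count) return -1;
  if(ca->call_count > cb->call_count) return 1;
  return 0;
}

int main() {
  Code x = {5}, y = {2}, z = {9};
  Code* codes[] = {&x, &y, &z};
  ::qsort(codes, 3, sizeof(Code*), compare_by_call_count);
  std::printf("%ld %ld %ld\n", codes[0]->call_count,
              codes[1]->call_count, codes[2]->call_count); // 2 5 9
  return 0;
}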
// HACK todo test this!
void MarkSweepGC::clean_weakrefs() {
  if(!weak_refs) return;

  for(ObjectArray::iterator i = weak_refs->begin();
      i != weak_refs->end();
      i++) {
    // ATM, only a Tuple can be marked weak.
    Tuple* tup = as<Tuple>(*i);
    for(size_t ti = 0; ti < tup->num_fields(); ti++) {
      Object* obj = tup->at(object_memory->state, ti);

      if(!obj->reference_p()) continue;

      if(obj->young_object_p()) {
        if(!obj->marked_p()) {
          tup->field[ti] = Qnil;
        }
      } else {
        Entry* entry = find_entry(obj);
        if(!entry->marked_p()) {
          tup->field[ti] = Qnil;
        }
      }
    }
  }

  delete weak_refs;
  weak_refs = NULL;
}
void Tuple::Info::visit(Object* obj, ObjectVisitor& visit) {
  Tuple* tup = as<Tuple>(obj);

  for(size_t i = 0; i < tup->num_fields(); i++) {
    visit.call(tup->field[i]);
  }
}
/* We were in Ruby-land and we are heading to C-land. In Ruby-land, we
 * may have updated the existing Array elements, appended new elements,
 * or shifted off elements. We account for this when updating the C
 * structure contents.
 *
 * We are potentially writing into a C structure that exists and that
 * may have been changed in C-land. It is possible for C code to change
 * both the len and ptr values of an RArray. We DO NOT EVER encourage
 * doing this, but we must account for it. The C code may also merely
 * change the contents of the array pointed to by ptr. Updating that
 * array with the current elements in the Ruby Array is the purpose of
 * this code.
 */
void update_cached_rarray(NativeMethodEnvironment* env, Handle* handle) {
  if(handle->is_rarray()) {
    Array* array = c_as<Array>(handle->object());
    Tuple* tuple = array->tuple();
    RArray* rarray = handle->as_rarray(env);

    native_int size = tuple->num_fields();
    native_int start = array->start()->to_native();
    native_int num = 0;

    if(rarray->ptr != rarray->dmwmb) {
      // This is a very bad C extension. Assume len is valid
      // and do not change its value.
      num = rarray->len;
    } else {
      env->shared().capi_ds_lock().lock();

      if(rarray->aux.capa < size) {
        delete[] rarray->dmwmb;
        rarray->dmwmb = rarray->ptr = new VALUE[size];
        rarray->aux.capa = size;
      }
      num = rarray->aux.capa;
      rarray->len = array->size();

      env->shared().capi_ds_lock().unlock();
    }

    for(native_int i = 0, j = start; i < num && j < size; i++, j++) {
      rarray->ptr[i] = env->get_handle(tuple->at(j));
    }
  }
}
/* We were in C-land and now we are returning to Ruby-land. Since the C
 * program can freely assign to RArray.len and RArray.ptr, we account
 * for that when updating the Ruby Array with the C structure contents.
 *
 * Note that we must copy the total elements in the cached C array
 * regardless of the value of the len parameter because the C array
 * contents can be changed independently from the len parameter.
 *
 * See Handle::as_rarray below.
 */
void flush_cached_rarray(NativeMethodEnvironment* env, Handle* handle) {
  if(handle->is_rarray()) {
    Array* array = c_as<Array>(handle->object());
    Tuple* tuple = array->tuple();
    RArray* rarray = handle->as_rarray(env);

    native_int size = tuple->num_fields();
    native_int num = 0;

    if(rarray->ptr != rarray->dmwmb) {
      // This is a very bad C extension. Assume len is valid.
      num = rarray->len;
    } else {
      num = rarray->aux.capa;
    }

    if(num > size) {
      tuple = Tuple::create(env->state(), rarray->aux.capa);
      array->tuple(env->state(), tuple);
    }

    array->start(env->state(), Fixnum::from(0));
    array->total(env->state(), Fixnum::from(rarray->len));

    for(native_int i = 0; i < num; i++) {
      tuple->put(env->state(), i, env->get_object(rarray->ptr[i]));
    }
  }
}
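/* A standalone sketch of the flush direction above, with std::vector and
 * a plain struct standing in for Tuple and RArray (hypothetical stand-ins,
 * not the C-API types). The key point mirrors the comment: every cached
 * slot is copied back, while the reported len only sets the visible size. */
#include <cstddef>
#include <iostream>
#include <vector>

struct CachedArray {
  std::vector<long> cache; // stands in for rarray->ptr / aux.capa
  std::size_t len;         // stands in for rarray->len
};

void flush_cache(std::vector<long>& managed, std::size_t& total,
                 const CachedArray& c) {
  std::size_t num = c.cache.size();
  if(num > managed.size()) managed.resize(num); // like replacing the Tuple
  for(std::size_t i = 0; i < num; ++i) {
    managed[i] = c.cache[i]; // copy all cached slots, not just len of them
  }
  total = c.len; // the len reported by C code becomes the Array's total
}

int main() {
  std::vector<long> managed(2, 0);
  std::size_t total = 2;
  CachedArray c = { std::vector<long>(4, 7), 3 };
  flush_cache(managed, total, c);
  std::cout << managed.size() << " " << total << std::endl; // 4 3
  return 0;
}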
bool VM::find_and_activate_thread() {
  Tuple* scheduled = globals.scheduled_threads.get();

  for(std::size_t i = scheduled->num_fields() - 1; i > 0; i--) {
    List* list = as<List>(scheduled->at(this, i));

    Thread* thread = try_as<Thread>(list->shift(this));

    while(thread) {
      thread->queued(this, Qfalse);

      /** @todo Should probably try to prevent dead threads here.. */
      if(thread->alive() == Qfalse) {
        thread = try_as<Thread>(list->shift(this));
        continue;
      }

      if(thread->sleep() == Qtrue) {
        thread = try_as<Thread>(list->shift(this));
        continue;
      }

      activate_thread(thread);
      return true;
    }
  }

  return false;
}
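/* A standalone sketch of the highest-priority-first selection above:
 * walk the buckets from high priority down to 1, popping entries until
 * a live, awake one turns up (Runnable and the deque layout are
 * hypothetical stand-ins, not the VM's List/Thread types). */
#include <cstddef>
#include <deque>
#include <vector>

struct Runnable { bool alive; bool asleep; };

Runnable* pick_next(std::vector<std::deque<Runnable*> >& buckets) {
  for(std::size_t i = buckets.size(); i-- > 1;) { // bucket 0 is skipped, as above
    std::deque<Runnable*>& q = buckets[i];
    while(!q.empty()) {
      Runnable* r = q.front();
      q.pop_front();
      if(!r->alive || r->asleep) continue; // drop dead and sleeping entries
      return r;
    }
  }
  return NULL;
}

int main() {
  std::vector<std::deque<Runnable*> > buckets(3);
  Runnable a = {true, false};
  buckets[2].push_back(&a);
  return pick_next(buckets) == &a ? 0 : 1;
}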
void Tuple::Info::mark(Object* obj, ObjectMark& mark) {
  Tuple* tup = as<Tuple>(obj);

  for(native_int i = 0; i < tup->num_fields(); i++) {
    Object* tmp = mark.call(tup->field[i]);
    if(tmp && tmp != tup->field[i]) mark.set(obj, &tup->field[i], tmp);
  }
}
void test_pattern() {
  Fixnum* ten = Fixnum::from(10);
  Tuple* tuple = Tuple::pattern(state, Fixnum::from(5), ten);

  TS_ASSERT_EQUALS(5, tuple->num_fields());
  for(size_t i = 0; i < 5; i++) {
    TS_ASSERT_EQUALS(ten, tuple->at(state, i));
  }
}
void Tuple::Info::mark(Object* obj, memory::ObjectMark& mark) {
  Tuple* tup = as<Tuple>(obj);

  for(native_int i = 0; i < tup->num_fields(); i++) {
    if(Object* tmp = mark.call(tup->field[i])) {
      mark.set(obj, &tup->field[i], tmp);
    }
  }
}
void CompiledMethod::post_marshal(STATE) {
  formalize(state); // side-effect, populates backend_method_

  // Set the sender attribute of all SendSites in this method to this CM
  Tuple* lit = literals();
  for(std::size_t i = 0; i < lit->num_fields(); i++) {
    SendSite* ss = try_as<SendSite>(lit->at(state, i));
    if(ss != NULL) ss->sender(state, this);
  }
}
void Tuple::Info::mark(Object* obj, ObjectMark& mark) {
  Object* tmp;
  Tuple* tup = as<Tuple>(obj);

  for(size_t i = 0; i < tup->num_fields(); i++) {
    tmp = mark.call(tup->field[i]);
    if(tmp) mark.set(obj, &tup->field[i], tmp);
  }
}
void test_new_object() {
  ObjectMemory& om = *state->memory();

  Tuple* obj;

  obj = util_new_object(om);

  TS_ASSERT_EQUALS(obj->num_fields(), 3);
  TS_ASSERT_EQUALS(obj->zone(), YoungObjectZone);
}
void RTuple::Info::mark(Object* obj, memory::ObjectMark& mark) {
  Tuple* tup = as<Tuple>(obj);

  for(native_int i = 0; i < tup->num_fields(); i++) {
    if(Object* tmp = mark.call(
          MemoryHandle::object(reinterpret_cast<VALUE>(tup->field[i])))) {
      mark.set_value(obj, &tup->field[i], tmp);
    }
  }
}
bool CompiledMethod::is_rescue_target(STATE, int ip) {
  Tuple* table = exceptions();

  if(table->nil_p()) return false;

  for(size_t i = 0; i < table->num_fields(); i++) {
    Tuple* entry = as<Tuple>(table->at(state, i));
    if(as<Fixnum>(entry->at(state, 2))->to_native() == ip) return true;
  }

  return false;
}
void test_new_large_object() {
  ObjectMemory& om = *state->memory();

  Tuple* obj;
  om.large_object_threshold = 10;

  size_t start = om.young_->bytes_used();

  obj = util_new_object(om, 20);
  TS_ASSERT_EQUALS(obj->num_fields(), 20);
  TS_ASSERT_EQUALS(obj->zone(), MatureObjectZone);

  TS_ASSERT_EQUALS(om.young_->bytes_used(), start);
}
Object* System::vm_add_method(STATE, Symbol* name, CompiledMethod* method,
                              StaticScope* scope, Object* vis) {
  Module* mod = scope->for_method_definition();

  method->scope(state, scope);
  method->serial(state, Fixnum::from(0));
  mod->add_method(state, name, method);

  if(Class* cls = try_as<Class>(mod)) {
    if(!method->internalize(state)) {
      Exception::argument_error(state, "invalid bytecode method");
      return 0;
    }

    object_type type = (object_type)cls->instance_type()->to_native();
    TypeInfo* ti = state->om->type_info[type];
    if(ti) {
      method->specialize(state, ti);
    }
  }

  bool add_ivars = false;

  if(Class* cls = try_as<Class>(mod)) {
    add_ivars = !kind_of<SingletonClass>(cls) &&
                cls->type_info()->type == Object::type;
  } else {
    add_ivars = true;
  }

  if(add_ivars) {
    Array* ary = mod->seen_ivars();
    if(ary->nil_p()) {
      ary = Array::create(state, 5);
      mod->seen_ivars(state, ary);
    }

    Tuple* lits = method->literals();
    for(native_int i = 0; i < lits->num_fields(); i++) {
      if(Symbol* sym = try_as<Symbol>(lits->at(state, i))) {
        if(RTEST(sym->is_ivar_p(state))) {
          if(!ary->includes_p(state, sym)) ary->append(state, sym);
        }
      }
    }
  }

  vm_reset_method_cache(state, name);

  return method;
}
void test_new_object() {
  ObjectMemory& om = *state->om;

  Tuple* obj;

  int start = om.young.current->used();

  obj = util_new_object(om);

  TS_ASSERT_EQUALS(obj->num_fields(), 3U);
  TS_ASSERT_EQUALS(obj->zone, YoungObjectZone);

  TS_ASSERT(om.young.current->used() == start + obj->size_in_bytes(state));
  TS_ASSERT(om.young.heap_a.used() == start + obj->size_in_bytes(state));
}
/* For each type, there is an automatically generated version
 * of this function (called via virtual dispatch) that marks
 * all slots. */
void TypeInfo::auto_mark(Object* obj, ObjectMark& mark) {
  // HACK: should not inspect an object that stores bytes
  // for references. Evan said auto_mark is slated for
  // destruction also.
  if(obj->stores_bytes_p()) return;

  // HACK copied from Tuple.
  Object* tmp;
  Tuple* tup = static_cast<Tuple*>(obj);

  for(size_t i = 0; i < tup->num_fields(); i++) {
    tmp = tup->field[i];
    if(tmp->reference_p()) {
      tmp = mark.call(tmp);
      if(tmp) {
        tup->field[i] = tmp;
        mark.just_set(obj, tmp);
      }
    }
  }
}
String* String::transform(STATE, Tuple* tbl, Object* respect_kcode) {
  uint8_t invalid[5];

  if(tbl->num_fields() < 256) {
    return force_as<String>(Primitives::failure());
  }

  Object** tbl_ptr = tbl->field;

  kcode::table* kcode_tbl = 0;
  if(RTEST(respect_kcode)) {
    kcode_tbl = state->shared().kcode_table();
  } else {
    kcode_tbl = kcode::null_table();
  }

  // Pointers to iterate input bytes.
  uint8_t* in_p = byte_address();

  native_int str_size = size();
  native_int data_size = as<CharArray>(data_)->size();
  if(unlikely(str_size > data_size)) {
    str_size = data_size;
  }

  uint8_t* in_end = in_p + str_size;

  // Optimistic estimate that output size will be 1.25 x input.
  native_int out_chunk = str_size * 5 / 4;
  native_int out_size = out_chunk;
  uint8_t* output = (uint8_t*)malloc(out_size);

  uint8_t* out_p = output;
  uint8_t* out_end = out_p + out_size;

  while(in_p < in_end) {
    native_int len = 0;
    uint8_t byte = *in_p;
    uint8_t* cur_p = 0;

    if(kcode::mbchar_p(kcode_tbl, byte)) {
      len = kcode::mbclen(kcode_tbl, byte);
      native_int rem = in_end - in_p;

      // if the character length is greater than the remaining
      // bytes, we have a malformed character. Handled below.
      if(rem >= len) {
        cur_p = in_p;
        in_p += len;
      }
    } else if(String* str = try_as<String>(tbl_ptr[byte])) {
      cur_p = str->byte_address();
      len = str->size();
      in_p++;
    } else {
      // Named sub_tbl to avoid shadowing the tbl parameter.
      Tuple* sub_tbl = as<Tuple>(tbl_ptr[byte]);

      for(native_int i = 0; i < sub_tbl->num_fields(); i += 2) {
        String* key = as<String>(sub_tbl->at(i));

        native_int rem = in_end - in_p;
        native_int klen = key->size();
        if(rem < klen) continue;

        if(memcmp(in_p, key->byte_address(), klen) == 0) {
          String* str = as<String>(sub_tbl->at(i + 1));
          cur_p = str->byte_address();
          len = str->size();
          in_p += klen;
          break;
        }
      }
    }

    // We could not map this byte, so we add it to the output
    // in stringified octal notation (ie \nnn).
    if(!cur_p) {
      snprintf((char*)invalid, 5, "\\%03o", *((char*)in_p) & 0377);
      in_p++;
      cur_p = invalid;
      len = 4;
    }

    if(out_p + len > out_end) {
      native_int pos = out_p - output;
      out_size += (len > out_chunk ? len : out_chunk);
      output = (uint8_t*)realloc(output, out_size);
      out_p = output + pos;
      out_end = output + out_size;
    }

    switch(len) {
    case 1:
      *out_p++ = *cur_p;
      break;
    case 2:
      *out_p++ = *cur_p++;
      *out_p++ = *cur_p;
      break;
    case 3:
      *out_p++ = *cur_p++;
      *out_p++ = *cur_p++;
      *out_p++ = *cur_p;
      break;
    default:
      memcpy(out_p, cur_p, len);
      out_p += len;
      break;
    }
  }

  String* result = String::create(state,
                                  reinterpret_cast<const char*>(output),
                                  out_p - output);
  free(output);

  if(tainted_p(state)) result->taint(state);

  return result;
}
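/* A minimal standalone check of the "\nnn" fallback formatting above:
 * masking with 0377 keeps the value in 0..255 even where char is signed,
 * and "%03o" zero-pads to three octal digits. */
#include <cstdio>

int main() {
  char invalid[5];
  char byte = '\xff';
  std::snprintf(invalid, sizeof(invalid), "\\%03o", byte & 0377);
  std::printf("%s\n", invalid); // prints \377
  return 0;
}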
/*
 * Turns a CompiledMethod's InstructionSequence into a C array of opcodes.
 */
VMMethod::VMMethod(STATE, CompiledMethod* meth)
  : original(state, meth)
  , type(NULL)
{
  meth->set_executor(VMMethod::execute);

  total = meth->iseq()->opcodes()->num_fields();

  if(Tuple* tup = try_as<Tuple>(meth->literals())) {
    blocks.resize(tup->num_fields(), NULL);
  }

  opcodes = new opcode[total];

  Tuple* literals = meth->literals();
  if(literals->nil_p()) {
    sendsites = NULL;
  } else {
    sendsites = new TypedRoot<SendSite*>[literals->num_fields()];
  }

  Tuple* ops = meth->iseq()->opcodes();
  Object* val;
  for(size_t index = 0; index < total;) {
    val = ops->at(state, index);
    if(val->nil_p()) {
      opcodes[index++] = 0;
    } else {
      opcodes[index] = as<Fixnum>(val)->to_native();
      size_t width = InstructionSequence::instruction_width(opcodes[index]);

      switch(width) {
      case 2:
        opcodes[index + 1] = as<Fixnum>(ops->at(state, index + 1))->to_native();
        break;
      case 3:
        opcodes[index + 1] = as<Fixnum>(ops->at(state, index + 1))->to_native();
        opcodes[index + 2] = as<Fixnum>(ops->at(state, index + 2))->to_native();
        break;
      }

      switch(opcodes[index]) {
      case InstructionSequence::insn_send_method:
      case InstructionSequence::insn_send_stack:
      case InstructionSequence::insn_send_stack_with_block:
      case InstructionSequence::insn_send_stack_with_splat:
      case InstructionSequence::insn_send_super_stack_with_block:
      case InstructionSequence::insn_send_super_stack_with_splat: {
        // Braced so the declaration stays scoped to this case body.
        native_int which = opcodes[index + 1];
        sendsites[which].set(as<SendSite>(literals->at(state, which)),
                             &state->globals.roots);
        break;
      }
      }

      index += width;
    }
  }

  stack_size = meth->stack_size()->to_native();
  number_of_locals = meth->number_of_locals();

  total_args = meth->total_args()->to_native();
  required_args = meth->required_args()->to_native();
  if(meth->splat()->nil_p()) {
    splat_position = -1;
  } else {
    splat_position = as<Integer>(meth->splat())->to_native();
  }

  setup_argument_handler(meth);
}
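/* A standalone sketch of the width-driven walk above: each opcode
 * determines how many operand words follow it, so the loop index always
 * advances by the instruction's full width. The width table here is
 * hypothetical, not the real InstructionSequence encoding. */
#include <cstddef>
#include <iostream>
#include <vector>

static std::size_t instruction_width(long op) {
  switch(op) {
  case 2:  return 3; // opcode plus two operands
  case 1:  return 2; // opcode plus one operand
  default: return 1; // bare opcode
  }
}

int main() {
  std::vector<long> ops;
  long raw[] = {1, 42, 2, 7, 8, 0};
  ops.assign(raw, raw + 6);

  for(std::size_t index = 0; index < ops.size();) {
    std::size_t width = instruction_width(ops[index]);
    std::cout << "opcode " << ops[index] << " width " << width << std::endl;
    index += width; // skip the operands along with the opcode
  }
  return 0;
}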
void test_allocate() {
  Tuple* tuple = Tuple::allocate(state, Fixnum::from(2));

  TS_ASSERT_EQUALS(2U, tuple->num_fields());
}