// Records a profiling sample for the currently executing CompiledCode and
// keeps the fixed-size `profile_` tuple populated with the hottest methods
// seen so far.
void VM::update_profile(STATE) {
  // Time spent profiling is itself accounted to the profiler metrics.
  timer::StopWatch<timer::nanoseconds> timer(metrics().machine.profile_ns);

  metrics().machine.profiles++;
  profile_sample_count_++;

  CompiledCode* code = state->vm()->call_frame()->compiled_code;
  code->machine_code()->sample_count++;

  // Lazily create the profile tuple on the first sample.
  Tuple* profile = profile_.get();
  if(profile->nil_p()) {
    profile = Tuple::create(state, max_profile_entries_);
    profile_.set(profile);
  }

  // Sort the entries in place so the "coldest" entry ends up in slot 0.
  // NOTE(review): this sorts the raw `field` array with qsort, bypassing
  // any write barrier, and assumes profile_compare orders entries by
  // call_count -- confirm both against the GC and profile_compare.
  ::qsort(reinterpret_cast<void*>(profile->field), profile->num_fields(),
      sizeof(intptr_t), profile_compare);

  // If this method is already tracked, nothing more to do.
  for(native_int i = 0; i < profile->num_fields(); i++) {
    if(code == profile->at(i)) return;
  }

  // Evict the coldest entry (slot 0 after the sort) when the current method
  // is hotter, or when slot 0 does not hold a CompiledCode at all.
  CompiledCode* pcode = try_as<CompiledCode>(profile->at(0));
  if(!pcode ||
      (pcode && code->machine_code()->call_count > pcode->machine_code()->call_count)) {
    profile->put(state, 0, code);
    min_profile_call_count_ = code->machine_code()->call_count;
  }
}
// Computes pressure from conservative Euler variables via the ideal-gas
// relation p = (kappa - 1) * (E - |momentum|^2 / (2 rho)).
// Slot layout assumed: 0 = density, 1 = x-momentum, 2 = y-momentum,
// 3 = total energy -- TODO confirm against the callers.
static void calc_pressure_func(int n, Tuple<double*> scalars, double* result) {
  for (int i = 0; i < n; i++) {
    double rho = scalars.at(0)[i];
    double mx = scalars.at(1)[i];
    double my = scalars.at(2)[i];
    double energy = scalars.at(3)[i];
    result[i] = (num_flux.kappa - 1.) * (energy - (mx * mx + my * my) / (2 * rho));
  }
}
// Rehashes every entry into a freshly allocated bin table of `size` bins,
// then installs the new table and bin count.
void LookupTable::redistribute(STATE, size_t size) {
  size_t old_bins = bins_->to_native();
  Tuple* fresh = Tuple::create(state, size);

  for(size_t i = 0; i < old_bins; i++) {
    Tuple* entry = try_as<Tuple>(values_->at(state, i));

    // Walk the old collision chain, detaching each entry and re-homing it.
    while(entry) {
      Tuple* next_link = try_as<Tuple>(entry->at(state, 2));
      entry->put(state, 2, Qnil);

      size_t bin = find_bin(key_hash(entry->at(state, 0)), size);
      Tuple* head = try_as<Tuple>(fresh->at(state, bin));

      if(head) {
        entry_append(state, head, entry);
      } else {
        fresh->put(state, bin, entry);
      }

      entry = next_link;
    }
  }

  values(state, fresh);
  bins(state, Fixnum::from(size));
}
// Decodes the Fixnum-encoded instruction sequence of `original` into this
// MachineCode's native `opcodes` array, counting send-like and
// constant-access instructions so the corresponding caches can be sized.
void MachineCode::fill_opcodes(STATE, CompiledCode* original) {
  Tuple* ops = original->iseq()->opcodes();
  int sends = 0;
  int constants = 0;

  for(size_t index = 0; index < total;) {
    Object* val = ops->at(state, index);
    if(val->nil_p()) {
      // nil slot: emit opcode 0 and advance one word.
      opcodes[index++] = 0;
    } else {
      opcodes[index] = as<Fixnum>(val)->to_native();

      // Copy the instruction's operand words; `width` includes the opcode
      // word itself, so widths 2/3 mean one/two operands.
      size_t width = InstructionSequence::instruction_width(opcodes[index]);
      switch(width) {
      case 2:
        opcodes[index + 1] = as<Fixnum>(ops->at(state, index + 1))->to_native();
        break;
      case 3:
        opcodes[index + 1] = as<Fixnum>(ops->at(state, index + 1))->to_native();
        opcodes[index + 2] = as<Fixnum>(ops->at(state, index + 2))->to_native();
        break;
      }

      // Tally instructions that need a call site or a constant cache.
      switch(opcodes[index]) {
      case InstructionSequence::insn_send_method:
      case InstructionSequence::insn_send_stack:
      case InstructionSequence::insn_send_stack_with_block:
      case InstructionSequence::insn_send_stack_with_splat:
      case InstructionSequence::insn_send_super_stack_with_block:
      case InstructionSequence::insn_send_super_stack_with_splat:
      case InstructionSequence::insn_zsuper:
      case InstructionSequence::insn_meta_send_call:
      case InstructionSequence::insn_meta_send_op_plus:
      case InstructionSequence::insn_meta_send_op_minus:
      case InstructionSequence::insn_meta_send_op_equal:
      case InstructionSequence::insn_meta_send_op_tequal:
      case InstructionSequence::insn_meta_send_op_lt:
      case InstructionSequence::insn_meta_send_op_gt:
      case InstructionSequence::insn_meta_to_s:
      case InstructionSequence::insn_check_serial:
      case InstructionSequence::insn_check_serial_private:
      case InstructionSequence::insn_call_custom:
        sends++;
        break;
      case InstructionSequence::insn_push_const_fast:
      case InstructionSequence::insn_find_const_fast:
        constants++;
        break;
      }

      index += width;
    }
  }

  initialize_call_sites(state, original, sends);
  initialize_constant_caches(state, original, constants);
}
void test_copy_from_other_empty() { Tuple* tuple = Tuple::create(state, 0); Tuple* dest = new_tuple(); dest->copy_from(state, tuple, Fixnum::from(0), Fixnum::from(0), Fixnum::from(0)); TS_ASSERT_EQUALS(Fixnum::from(1), as<Fixnum>(dest->at(state, 0))); TS_ASSERT_EQUALS(Fixnum::from(4), as<Fixnum>(dest->at(state, 1))); TS_ASSERT_EQUALS(Fixnum::from(9), as<Fixnum>(dest->at(state, 2))); }
// Source function. void source_fn(int n, Tuple<scalar*> values, scalar* out) { for (int i = 0; i < n; i++) { out[i] = (nu[1][0] * Sf[1][0] * values.at(0)[i] + nu[1][1] * Sf[1][1] * values.at(1)[i] + nu[1][2] * Sf[1][2] * values.at(2)[i] + nu[1][3] * Sf[1][3] * values.at(3)[i]); } }
// Returns true when `ip` is the handler address (third field) of any entry
// in this method's exception table.
bool CompiledMethod::is_rescue_target(STATE, int ip) {
  Tuple* table = exceptions();
  if(table->nil_p()) return false;

  size_t count = table->num_fields();
  for(size_t idx = 0; idx < count; idx++) {
    Tuple* entry = as<Tuple>(table->at(state, idx));
    if(as<Fixnum>(entry->at(state, 2))->to_native() == ip) {
      return true;
    }
  }

  return false;
}
void test_delete_inplace() { Tuple *tuple = new_tuple(); tuple->put(state, 1, Qnil); Integer *count = tuple->delete_inplace(state, Fixnum::from(0), Fixnum::from(3), Qnil); TS_ASSERT_EQUALS(1, count->to_native()); TS_ASSERT_EQUALS(Fixnum::from(1), as<Fixnum>(tuple->at(state, 0))); TS_ASSERT_EQUALS(Fixnum::from(9), as<Fixnum>(tuple->at(state, 1))); TS_ASSERT_EQUALS(Qnil, tuple->at(state, 2)); }
// Two tuples can join iff they agree on every common attribute pair.
// Note: an empty attribute list yields false (matches existing behavior).
bool Relation::can_join(Tuple t1, Tuple t2, std::vector<std::pair<int, int>> common_attributes) {
  bool joinable = false;
  for (const auto& attr : common_attributes) {
    if (t1.at(attr.first) != t2.at(attr.second)) {
      return false;
    }
    joinable = true;
  }
  return joinable;
}
/** @todo Should we queue thread? Probably unnecessary. --rue */
// Sets this thread's priority, growing the global per-priority scheduler
// table (one List per priority level) when the new priority exceeds its
// current size. Raises an ArgumentError for negative priorities.
void Thread::priority(STATE, Fixnum* new_priority) {
  /* This gets somewhat ugly to avoid existing lists. */
  if(new_priority->to_native() < 0) {
    Exception::argument_error(state, "Thread priority must be non-negative!");
  }

  Tuple* scheduled = state->globals.scheduled_threads.get();

  std::size_t desired = new_priority->to_ulong();
  std::size_t existing = scheduled->num_fields();

  if(desired >= existing) {
    // Grow the table, preserving the existing priority lists.
    Tuple* replacement = Tuple::create(state, (desired + 1));
    replacement->copy_from(state, scheduled, Fixnum::from(0),
        Fixnum::from(scheduled->num_fields()), Fixnum::from(0));

    // BUG FIX: the loop used to start at `existing - 1`, which underflows
    // to SIZE_MAX when the old table is empty, skipping the loop entirely
    // and leaving nil slots where Lists are expected. Scanning every slot
    // is safe: the nil_p() check leaves populated slots alone.
    for(std::size_t i = 0; i <= desired; ++i) {
      if(replacement->at(state, i)->nil_p()) {
        replacement->put(state, i, List::create(state));
      }
    }

    state->globals.scheduled_threads.set(replacement);
    scheduled = replacement;
  }

  priority_ = new_priority;
}
/* We were in Ruby-land and we are heading to C-land. In Ruby-land, we
 * may have updated the existing Array elements, appended new elements,
 * or shifted off elements. We account for this when updating the C
 * structure contents.
 *
 * We are potentially writing into a C structure that exists and that
 * may have been changed in C-land. It is possible for C code to change
 * both the len and ptr values of an RArray. We DO NOT EVER encourage
 * doing this, but we must account for it. The C code may also merely
 * change the contents of the array pointed to by ptr. Updating that
 * array with the current elements in the Ruby Array is the purpose of
 * this code. */
void update_cached_rarray(NativeMethodEnvironment* env, Handle* handle) {
  if(handle->is_rarray()) {
    Array* array = c_as<Array>(handle->object());
    Tuple* tuple = array->tuple();
    RArray* rarray = handle->as_rarray(env);

    native_int size = tuple->num_fields();
    native_int start = array->start()->to_native();
    native_int num = 0;

    if(rarray->ptr != rarray->dmwmb) {
      // This is a very bad C extension. Assume len is valid
      // and do not change its value.
      num = rarray->len;
    } else {
      // We still own the backing store (dmwmb == ptr); resize it under the
      // C-API data-structure lock if the Ruby Array has outgrown it.
      env->shared().capi_ds_lock().lock();

      if(rarray->aux.capa < size) {
        delete[] rarray->dmwmb;
        rarray->dmwmb = rarray->ptr = new VALUE[size];
        rarray->aux.capa = size;
      }

      num = rarray->aux.capa;
      rarray->len = array->size();

      env->shared().capi_ds_lock().unlock();
    }

    // Re-publish the current Ruby elements (starting at the Array's
    // internal `start` offset) as C handles, bounded by both the C
    // capacity and the tuple size.
    for(native_int i = 0, j = start; i < num && j < size; i++, j++) {
      rarray->ptr[i] = env->get_handle(tuple->at(j));
    }
  }
}
// Returns the nth capture group of this match as a String, or cNil when the
// group index is out of range, the group did not participate in the match,
// or its recorded byte offsets are invalid.
Object* MatchData::nth_capture(STATE, native_int which) {
  // Reject negative indices too; `which` is signed.
  if(which < 0 || region_->num_fields() <= which) return cNil;

  Tuple* sub = try_as<Tuple>(region_->at(state, which));
  if(!sub) return cNil;

  Fixnum* beg = try_as<Fixnum>(sub->at(state, 0));
  Fixnum* fin = try_as<Fixnum>(sub->at(state, 1));

  // BUG FIX: beg/fin were previously dereferenced (to_native()) *before*
  // the !beg || !fin null check, crashing whenever a region entry held
  // something other than Fixnums (try_as returns NULL on type mismatch).
  if(!beg || !fin) return cNil;

  native_int b = beg->to_native();
  native_int f = fin->to_native();
  native_int max = source_->byte_size();

  if(f > max || b < 0) return cNil;

  const char* str = (char*)source_->byte_address();
  native_int sz = f - b;
  if(sz > max) sz = max;

  String* string = String::create(state, str + b, sz);
  string->encoding(state, source_->encoding());

  return string;
}
// Prints a short, depth-limited rendering of a Tuple: its class info, its
// size, and up to six elements (nested tuples shown by size only).
void Tuple::Info::show_simple(STATE, Object* self, int level) {
  Tuple* tup = as<Tuple>(self);
  native_int total = tup->num_fields();

  if(total == 0) {
    class_info(state, self, true);
    return;
  }

  native_int limit = total < 6 ? total : 6;

  class_info(state, self);
  std::cout << ": " << total << std::endl;
  ++level;

  for(native_int idx = 0; idx < limit; idx++) {
    indent(level);
    Object* element = tup->at(state, idx);
    Tuple* nested = try_as<Tuple>(element);
    if(nested) {
      // NOTE(review): prints `self`'s class info alongside the *nested*
      // tuple's size; possibly meant to pass the nested tuple -- confirm.
      class_info(state, self);
      std::cout << ": " << nested->num_fields() << ">" << std::endl;
    } else {
      element->show_simple(state, level);
    }
  }

  if(tup->num_fields() > limit) ellipsis(level);
  close_body(level);
}
bool VM::find_and_activate_thread() { Tuple* scheduled = globals.scheduled_threads.get(); for(std::size_t i = scheduled->num_fields() - 1; i > 0; i--) { List* list = as<List>(scheduled->at(this, i)); Thread* thread = try_as<Thread>(list->shift(this)); while(thread) { thread->queued(this, Qfalse); /** @todo Should probably try to prevent dead threads here.. */ if(thread->alive() == Qfalse) { thread = try_as<Thread>(list->shift(this)); continue; } if(thread->sleep() == Qtrue) { thread = try_as<Thread>(list->shift(this)); continue; } activate_thread(thread); return true; } } return false; }
/* lookuptable_find returns Qundef if there is not entry
 * referenced by 'key' in the LookupTable. This is useful
 * to distinguish x = {} from x = {:a => nil} and is used
 * in cpu.c in e.g. cpu_const_get_in_context. */
Object* LookupTable::find(STATE, Object* key) {
  Tuple* entry = find_entry(state, key);
  return entry ? entry->at(state, 1) : Qundef;
}
// HACK todo test this! void MarkSweepGC::clean_weakrefs() { if(!weak_refs) return; for(ObjectArray::iterator i = weak_refs->begin(); i != weak_refs->end(); i++) { // ATM, only a Tuple can be marked weak. Tuple* tup = as<Tuple>(*i); for(size_t ti = 0; ti < tup->num_fields(); ti++) { Object* obj = tup->at(object_memory->state, ti); if(!obj->reference_p()) continue; if(obj->young_object_p()) { if(!obj->marked_p()) { tup->field[ti] = Qnil; } } else { Entry *entry = find_entry(obj); if(!entry->marked_p()) { tup->field[ti] = Qnil; } } } } delete weak_refs; weak_refs = NULL; }
// Marshals an InstructionSequence: an "i" tag line, the opcode count, then
// one opcode value per line.
void Marshaller::set_iseq(InstructionSequence* iseq) {
  Tuple* ops = iseq->opcodes();
  size_t count = ops->num_fields();

  stream << "i" << endl << count << endl;

  for(size_t idx = 0; idx < count; idx++) {
    stream << as<Fixnum>(ops->at(state, idx))->to_native() << endl;
  }
}
/// Fission source function. inline void source_fn(int n, Tuple<scalar*> values, scalar* out) { for (int i = 0; i < n; i++) { out[i] = 0.0; for_each_group(g) out[i] += nu[1][g] * Sf[1][g] * values.at(g)[i]; } }
void CompiledMethod::post_marshal(STATE) { formalize(state); // side-effect, populates backend_method_ // Set the sender attribute of all SendSites in this method to this CM Tuple *lit = literals(); for(std::size_t i = 0; i < lit->num_fields(); i++) { SendSite *ss = try_as<SendSite>(lit->at(state, i)); if(ss != NULL) ss->sender(state, this); } }
// Looks up `key`; returns its value, or `return_on_failure` when absent.
Object* LookupTable::fetch(STATE, Object* key, Object* return_on_failure) {
  if(Tuple* entry = find_entry(state, key)) {
    return entry->at(state, 1);
  }
  return return_on_failure;
}
// Stores up to 10 solution MeshFunctions in the fixed-size sln[] array and
// runs the parameterless init(). Errors out beyond the 10-slot capacity.
void Filter::init(Tuple<MeshFunction*> solutions) {
  num = solutions.size();
  if(num > 10)
    error("Attempt to create an instance of Filter with more than 10 MeshFunctions.");

  for(int idx = 0; idx < num; idx++)
    sln[idx] = solutions.at(idx);

  this->init();
}
void test_pattern() { Fixnum* ten = Fixnum::from(10); Tuple* tuple = Tuple::pattern(state, Fixnum::from(5), ten); TS_ASSERT_EQUALS(5, tuple->num_fields()); for(size_t i = 0; i < 5; i++) { TS_ASSERT_EQUALS(ten, tuple->at(state, i)); } }
// Looks up `key`; sets *found accordingly and returns the value, or Qnil
// when the key is absent.
Object* LookupTable::fetch(STATE, Object* key, bool* found) {
  Tuple* entry = find_entry(state, key);
  if(!entry) {
    *found = false;
    return Qnil;
  }
  *found = true;
  return entry->at(state, 1);
}
// Walks every entry chain in `tbl` and collects action(entry) for each
// entry into a new Array, in bin order.
Array* LookupTable::collect(STATE, LookupTable* tbl,
                            Object* (*action)(STATE, Tuple*)) {
  Array* ary = Array::create(state, tbl->entries()->to_native());
  size_t num_bins = tbl->bins()->to_native();
  Tuple* values = tbl->values();

  size_t out = 0;
  for(size_t bin = 0; bin < num_bins; bin++) {
    Tuple* entry = try_as<Tuple>(values->at(state, bin));
    while(entry) {
      ary->set(state, out++, action(state, entry));
      entry = try_as<Tuple>(entry->at(state, 2));
    }
  }

  return ary;
}
// Removes `thread` from its priority list in the scheduler, if present,
// then re-checks pending events.
void VM::dequeue_thread(Thread* thread) {
  thread->queued(this, Qfalse);

  Tuple* scheduled = globals.scheduled_threads.get();

  /** @todo Could it be in more than one somehow? --rue */
  List* list = try_as<List>(scheduled->at(this, thread->priority()->to_native()));

  // BUG FIX: try_as returns NULL when the slot does not hold a List (e.g.
  // a nil slot for a priority level never populated); the result was
  // previously dereferenced unconditionally, crashing on such slots.
  if(list) {
    (void) list->remove(this, thread);
  }

  check_events();
}
// Builds a SimpleFilter over up to 10 MeshFunctions. `items` optionally
// selects which quantity to read from each solution; when omitted (empty),
// every slot defaults to H2D_FN_VAL. A non-empty `items` must match the
// number of solutions.
SimpleFilter::SimpleFilter(void (*filter_fn)(int n, Tuple<scalar*> values, scalar* result),
                           Tuple<MeshFunction*> solutions, Tuple<int> items)
  : filter_fn(filter_fn)
{
  num = solutions.size();
  if(num > 10)
    error("Attempt to create an instance of Filter with more than 10 MeshFunctions.");

  if(items.size() > 0 && items.size() != num)
    error("Attempt to create an instance of SimpleFilter with different supplied number of MeshFunctions than the number of types of data used from them.");

  bool have_items = items.size() > 0;
  for(int idx = 0; idx < num; idx++) {
    sln[idx] = solutions.at(idx);
    item[idx] = have_items ? items.at(idx) : H2D_FN_VAL;
  }

  this->init();
  init_components();
}
void test_tuple() { mar->sstream.str(std::string("p\n2\nI\n2\nI\n2f\n")); Object* obj = mar->unmarshal(); TS_ASSERT(kind_of<Tuple>(obj)); Tuple* tup = as<Tuple>(obj); TS_ASSERT_EQUALS(tup->at(state, 0), Fixnum::from(2)); TS_ASSERT_EQUALS(tup->at(state, 1), Fixnum::from(47)); }
// Appends `nxt` at the end of the collision chain starting at `top`
// (slot 2 of each entry is the `next` link). Returns `nxt`.
Object* LookupTable::entry_append(STATE, Tuple* top, Object* nxt) {
  Tuple* tail = top;
  for(Tuple* walk = try_as<Tuple>(top->at(state, 2)); walk;
      walk = try_as<Tuple>(walk->at(state, 2))) {
    tail = walk;
  }

  tail->put(state, 2, nxt);
  return nxt;
}
// Removes `key` from the table, returning its value, or Qnil when the key
// was not present. May shrink the bin table first when density is low.
Object* LookupTable::remove(STATE, Object* key) {
  hashval bin;
  Object* val;
  Tuple* entry;
  Tuple* lst;

  // Normalize the key to its canonical Symbol form.
  key_to_sym(key);
  size_t num_entries = entries_->to_native();
  size_t num_bins = bins_->to_native();

  // Shrink when sparse enough, but never below the minimum size.
  // NOTE: `num_bins >>= 1` deliberately updates num_bins in place so the
  // find_bin() call below uses the post-shrink bin count.
  if(min_density_p(num_entries, num_bins) &&
      (num_bins >> 1) >= LOOKUPTABLE_MIN_SIZE) {
    redistribute(state, num_bins >>= 1);
  }

  bin = find_bin(key_hash(key), num_bins);
  entry = try_as<Tuple>(values_->at(state, bin));

  lst = NULL;  // previous entry in the chain, for unlinking

  // Entry layout: [key, value, next]. Walk the collision chain.
  while(entry) {
    Object* link = entry->at(state, 2);

    if(entry->at(state, 0) == key) {
      val = entry->at(state, 1);
      if(lst) {
        lst->put(state, 2, link);        // unlink from the middle
      } else {
        values_->put(state, bin, link);  // unlink the chain head
      }
      entries(state, Fixnum::from(entries_->to_native() - 1));
      return val;
    }

    lst = entry;
    entry = try_as<Tuple>(link);
  }

  return Qnil;
}
// Returns the [key, value, next] entry tuple for `key`, or NULL when the
// key is not present.
Tuple* LookupTable::find_entry(STATE, Object* key) {
  key_to_sym(key);
  unsigned int bin = find_bin(key_hash(key), bins_->to_native());

  /* HACK: This should be fixed by not storing NULLs */
  Object* data = values_->at(state, bin);
  if(!data) return NULL;

  for(Tuple* entry = try_as<Tuple>(data); entry;
      entry = try_as<Tuple>(entry->at(state, 2))) {
    if(entry->at(state, 0) == key) return entry;
  }

  return NULL;
}