// Resolves a dot-separated option path (e.g. "paramsel.lambdas") by walking
// the nested option tables one component at a time.
// Throws gException if the key is empty or any component is not found.
const GurlsOption* GurlsOptionsList::getOpt(std::string key) const
{
    // An empty key cannot address any option.
    if(key.empty())
        throw gException(Exception_Parameter_Not_Definied_Yet + "( )");

    // Split the dotted path into its components.
    std::vector<std::string> names;
    boost::split(names, key, boost::is_any_of("."));

    // Descend through the option tree, one component per level.
    const GurlsOption* node = this;
    for(std::vector<std::string>::const_iterator part = names.begin(); part != names.end(); ++part)
    {
        ValueType* table = GurlsOptionsList::dynacast(node)->table;

        std::map<std::string, GurlsOption* >::iterator found = table->find(*part);
        if(found == table->end())
            throw gException(Exception_Parameter_Not_Definied_Yet + "( " + *part + " )");

        node = found->second;
    }

    return node;
}
// Residual functor for the IK least-squares solver: decodes the parameter
// vector into joint rotations, measures end-effector error against the goal,
// and adds soft quadratic penalties for joint-limit violations (plus an
// optional weighted pull toward a reference pose).
// x    - parameter vector being optimized
// fvec - output residual vector (first 3 entries: position error; the
//        trailing x.size() entries: per-parameter penalties)
// Returns 0, as required by the Eigen functor convention.
int operator()(const InputType &x, ValueType& fvec) const
{
    // Decode parameters into rotations and evaluate the kinematic chain.
    m_decoder.Decode(m_rots, x);
    Vector3 v = sik.endPosition(m_rots);
    v -= m_goal; // positional error relative to the goal

    fvec.setZero();
    fvec.head<3>() = Eigen::Vector3f::Map(&v.x);

    // Joint-limit penalty: quadratic cost for parameters outside [m_min, m_max].
    // limpanl is a live view into the tail of fvec, not a copy.
    auto limpanl = fvec.tail(x.size());
    for (int i = 0; i < x.size(); i++)
    {
        if (x[i] < m_min[i])
            limpanl[i] = m_limitPanalty*(x[i] - m_min[i])*(x[i] - m_min[i]);
        else if (x[i] > m_max[i])
            limpanl[i] = m_limitPanalty*(x[i] - m_max[i])*(x[i] - m_max[i]);
    }

    // Optional per-parameter pull toward the reference pose.
    if (m_useRef)
    {
        limpanl += m_refWeights *(x - m_ref);
    }

    return 0;
}
// Records `instr` as the profiled BeginSwitch instruction and seeds the
// switch-optimization state from its dynamic profile data for `profileId`.
void SwitchIRBuilder::SetProfiledInstruction(IR::Instr * instr, Js::ProfileId profileId)
{
    m_profiledSwitchInstr = instr;
    m_switchOptBuildBail = true;

    //don't optimize if the switch expression is not an Integer (obtained via dynamic profiling data of the BeginSwitch opcode)
    bool hasProfile = m_profiledSwitchInstr->IsProfiledInstr() && m_profiledSwitchInstr->m_func->HasProfileInfo();

    if (hasProfile)
    {
        // Stamp the profiled value type onto the instruction's field info.
        const ValueType valueType(m_profiledSwitchInstr->m_func->GetReadOnlyProfileInfo()->GetSwitchProfileInfo(profileId));
        instr->AsProfiledInstr()->u.FldInfo().valueType = valueType;

        // Cache whether the profile says the expression is likely a tagged int
        // or a string; these flags gate the respective switch optimizations.
        m_switchIntDynProfile = valueType.IsLikelyTaggedInt();
        m_switchStrDynProfile = valueType.IsLikelyString();

        if (PHASE_TESTTRACE1(Js::SwitchOptPhase))
        {
            char valueTypeStr[VALUE_TYPE_MAX_STRING_SIZE];
            valueType.ToString(valueTypeStr);
#if ENABLE_DEBUG_CONFIG_OPTIONS
            char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
#endif
            PHASE_PRINT_TESTTRACE1(Js::SwitchOptPhase, _u("Func %s, Switch %d: Expression Type : %S\n"),
                m_profiledSwitchInstr->m_func->GetDebugNumberSet(debugStringBuffer),
                m_profiledSwitchInstr->AsProfiledInstr()->u.profileId, valueTypeStr);
        }
    }
}
// Canonicalizes shift operations by constant folding:
//  - a constant-zero shiftee folds the whole expression to 0 (0 << n == 0);
//  - when both operands are constant, the shift is evaluated at compile time,
//    masking the shift amount to 5/6 bits as the JVM spec requires.
// NOTE(review): this snippet appears truncated here — the closing braces of
// the outer if/function are not visible in this view.
void Canonicalizer::do_ShiftOp (ShiftOp* x) {
  ValueType* t = x->x()->type();
  ValueType* t2 = x->y()->type();
  if (t->is_constant()) {
    switch (t->tag()) {
    // 0 shifted by anything is still 0.
    case intTag : if (t->as_IntConstant()->value() == 0) { set_constant(0); return; } break;
    case longTag : if (t->as_LongConstant()->value() == (jlong)0) { set_constant(jlong_cast(0)); return; } break;
    default : ShouldNotReachHere();
    }
    if (t2->is_constant()) {
      if (t->tag() == intTag) {
        int value = t->as_IntConstant()->value();
        // JVM semantics: only the low 5 bits of an int shift amount count.
        int shift = t2->as_IntConstant()->value() & 31;
        // Mask selecting the (32 - shift) low bits for the unsigned shift.
        jint mask = ~(~0 << (32 - shift));
        if (shift == 0) mask = ~0;
        switch (x->op()) {
        case Bytecodes::_ishl: set_constant(value << shift); return;
        case Bytecodes::_ishr: set_constant(value >> shift); return;
        case Bytecodes::_iushr: set_constant((value >> shift) & mask); return;
        }
      } else if (t->tag() == longTag) {
        jlong value = t->as_LongConstant()->value();
        // JVM semantics: only the low 6 bits of a long shift amount count.
        int shift = t2->as_IntConstant()->value() & 63;
        jlong mask = ~(~jlong_cast(0) << (64 - shift));
        if (shift == 0) mask = ~jlong_cast(0);
        switch (x->op()) {
        case Bytecodes::_lshl: set_constant(value << shift); return;
        case Bytecodes::_lshr: set_constant(value >> shift); return;
        case Bytecodes::_lushr: set_constant((value >> shift) & mask); return;
        }
      }
// Renders a constant-valued operand in a human-readable form on the
// printer's output stream: null, unloaded/loaded objects, methods,
// instances, arrays, and classes each get their own format; anything
// else prints "???".
void InstructionPrinter::print_object(Value obj) {
  ValueType* type = obj->type();

  if (type->as_ObjectConstant() != NULL) {
    ciObject* value = type->as_ObjectConstant()->value();
    if (value->is_null_object()) {
      output()->print("null");
    } else if (!value->is_loaded()) {
      output()->print("<unloaded object %p>",value);
    } else if (value->is_method()) {
      ciMethod* m = (ciMethod*)value;
      output()->print("<method %s.%s>", m->holder()->name()->as_utf8(), m->name()->as_utf8());
    } else {
      output()->print("<object %p>",value->encoding());
    }
    return;
  }

  if (type->as_InstanceConstant() != NULL) {
    output()->print("<instance %p>",type->as_InstanceConstant()->value()->encoding());
    return;
  }

  if (type->as_ArrayConstant() != NULL) {
    output()->print("<array %p>",type->as_ArrayConstant()->value()->encoding());
    return;
  }

  if (type->as_ClassConstant() != NULL) {
    ciInstanceKlass* klass = type->as_ClassConstant()->value();
    // Flag classes that are not loaded yet, then print the class itself.
    if (!klass->is_loaded()) {
      output()->print("<unloaded> ");
    }
    output()->print("class ");
    print_klass(klass);
    return;
  }

  // Unknown constant kind.
  output()->print("???");
}
// Prints a constant object operand to the tty in a human-readable form.
// Fix: pointer-sized values are now printed with %p — the previous "0x%x"
// format paired with a pointer argument truncates on LP64 targets and is
// undefined behavior through varargs; %p also matches the sibling printer
// that writes to output().
void InstructionPrinter::print_object(Value obj) {
  ValueType* type = obj->type();
  if (type->as_ObjectConstant() != NULL) {
    ciObject* value = type->as_ObjectConstant()->value();
    if (value->is_null_object()) {
      tty->print("null");
    } else if (!value->is_loaded()) {
      tty->print("<unloaded object %p>", value);
    } else {
      tty->print("<object %p>", value->encoding());
    }
  } else if (type->as_InstanceConstant() != NULL) {
    tty->print("<instance %p>", type->as_InstanceConstant()->value()->encoding());
  } else if (type->as_ArrayConstant() != NULL) {
    tty->print("<array %p>", type->as_ArrayConstant()->value()->encoding());
  } else if (type->as_ClassConstant() != NULL) {
    ciInstanceKlass* klass = type->as_ClassConstant()->value();
    if (!klass->is_loaded()) {
      tty->print("<unloaded class>");
    } else {
      tty->print("<class %p>", klass->encoding());
    }
  } else {
    // Unknown constant kind.
    tty->print("???");
  }
}
// Returns true when this range and `aOther` have a non-empty intersection
// of their exact-value sets. An empty exact-set means "unconstrained" and
// intersects everything.
// Idiom fix: use empty() instead of the !!size() / !size() double-negation.
bool NormalizedConstraintSet::StringRange::Intersects(const StringRange& aOther) const
{
  // Either side unconstrained => they trivially intersect.
  if (mExact.empty() || aOther.mExact.empty()) {
    return true;
  }
  ValueType intersection;
  set_intersection(mExact.begin(), mExact.end(),
                   aOther.mExact.begin(), aOther.mExact.end(),
                   std::inserter(intersection, intersection.begin()));
  return !intersection.empty();
}
void NormalizedConstraintSet::StringRange::Intersect(const StringRange& aOther) { if (!aOther.mExact.size()) { return; } ValueType intersection; set_intersection(mExact.begin(), mExact.end(), aOther.mExact.begin(), aOther.mExact.end(), std::inserter(intersection, intersection.begin())); mExact = intersection; }
// Parses a whitespace-separated list of integers from `buf` and stores each
// as a byte in value_. Returns 0 on success, 1 if any token fails to parse.
// Fix: the previous `while (!is.eof())` read loop returned 1 (failure) for
// otherwise-valid input with trailing whitespace, because the final
// extraction attempt failed before eof was observed. Driving the loop by the
// extraction itself avoids that; success is "extraction stopped because the
// whole buffer was consumed".
int DataValue::read(const std::string& buf)
{
    std::istringstream is(buf);
    ValueType val;
    int tmp = 0;
    // Read as int (not byte) so numeric tokens are parsed, then narrow.
    while (is >> tmp) {
        val.push_back(static_cast<byte>(tmp));
    }
    // eof => the entire buffer parsed cleanly; any other stop is a bad token.
    if (!is.eof()) return 1;
    value_.swap(val);
    return 0;
}
bool NormalizedConstraintSet::StringRange::Merge(const StringRange& aOther) { if (!Intersects(aOther)) { return false; } Intersect(aOther); ValueType unioned; set_union(mIdeal.begin(), mIdeal.end(), aOther.mIdeal.begin(), aOther.mIdeal.end(), std::inserter(unioned, unioned.begin())); mIdeal = unioned; return true; }
// Looks up the profiled ValueType recorded for `symId` in the loop body's
// symId-to-ValueType map. Returns true and writes *valueType when a
// non-empty entry exists; false otherwise.
bool JITTimeWorkItem::TryGetValueType(uint symId, ValueType * valueType) const
{
    Assert(IsLoopBody());

    const uint constCount = m_jitBody.GetConstCount();
    // Syms below the const count have no entry in the map.
    if (symId < constCount)
    {
        return false;
    }

    const uint index = symId - constCount;
    if (index >= m_workItemData->symIdToValueTypeMapCount)
    {
        return false;
    }

    ValueType stored = ((ValueType*)m_workItemData->symIdToValueTypeMap)[index];
    // A zero raw value marks an empty slot.
    if (stored.GetRawData() == 0)
    {
        return false;
    }

    *valueType = stored;
    return true;
}
// Returns the bound-check offset to use for a SIMD128 load/store so the
// check covers every lane the access touches, not just the first element.
// arrValueType - value type of the array being accessed
// instr        - the (possibly SIMD) load/store instruction
// oldOffset    - offset of the existing bound check (defaults to -1)
int GlobOpt::GetBoundCheckOffsetForSimd(ValueType arrValueType, const IR::Instr *instr, const int oldOffset /* = -1 */)
{
#ifdef ENABLE_SIMDJS
    // Only SIMD128 loads/stores need a widened check.
    if (!(Js::IsSimd128LoadStore(instr->m_opcode)))
    {
        return oldOffset;
    }

    if (!arrValueType.IsTypedArray())
    {
        // no need to adjust for other array types, we will not type-spec (see Simd128DoTypeSpecLoadStore)
        return oldOffset;
    }

    Assert(instr->dataWidth == 4 || instr->dataWidth == 8 || instr->dataWidth == 12 || instr->dataWidth == 16);

    // Number of array elements spanned by this access. We round up since
    // dataWidth may span a partial lane (e.g. dataWidth = 12, bpe = 8 bytes).
    int numOfElems = Lowerer::SimdGetElementCountFromBytes(arrValueType, instr->dataWidth);

    // we want to make bound checks more conservative. We compute how many extra elements we need to add to the bound check
    // e.g. if original bound check is value <= Length + offset, and dataWidth is 16 bytes on Float32 array, then we need room for 4 elements. The bound check guarantees room for 1 element.
    // Hence, we need to ensure 3 more: value <= Length + offset - 3
    int offsetBias = -(numOfElems - 1);
    // we should always make an existing bound-check more conservative.
    Assert(offsetBias <= 0);
    return oldOffset + offsetBias;
#else
    return oldOffset;
#endif
}
// Canonicalizes shifts:
//  - a constant-zero shiftee folds to the constant 0 (0 << n == 0);
//  - a constant-zero shift amount folds to the unshifted operand (x << 0 == x).
void Canonicalizer::do_ShiftOp (ShiftOp* x) {
  ValueType* t = x->x()->type();
  if (t->is_constant()) {
    switch (t->tag()) {
    // NOTE(review): each `return` below is unconditional — a constant but
    // non-zero shiftee also returns here, skipping the shift-amount check
    // further down. Confirm this early-out is intended.
    case intTag : if (t->as_IntConstant()->value() == 0) set_constant(0); return;
    case longTag : if (t->as_LongConstant()->value() == (jlong)0) set_constant(jlong_cast(0)); return;
    default : ShouldNotReachHere();
    }
  }
  ValueType* t2 = x->y()->type();
  if (t2->is_constant()) {
    switch (t2->tag()) {
    // Shift amount of 0: the result is just the (unshifted) operand.
    case intTag : if (t2->as_IntConstant()->value() == 0) set_canonical(x->x()); return;
    default : ShouldNotReachHere();
    }
  }
}
// Prints a constant according to its tag: plain int, long with an "L"
// suffix, float, double with a "D" suffix, object (delegated to
// print_object), or the bci for address constants. Unknown tags print "???".
void InstructionPrinter::do_Constant(Constant* x) {
  ValueType* t = x->type();
  switch (t->tag()) {
    case intTag : output()->print("%d" , t->as_IntConstant ()->value()); break;
    // Long format specifier is platform-dependent, hence the os:: helper.
    case longTag : output()->print(os::jlong_format_specifier(), t->as_LongConstant()->value()); output()->print("L"); break;
    case floatTag : output()->print("%g" , t->as_FloatConstant ()->value()); break;
    case doubleTag : output()->print("%gD" , t->as_DoubleConstant()->value()); break;
    case objectTag : print_object(x); break;
    case addressTag: output()->print("bci:%d", t->as_AddressConstant()->value()); break;
    default : output()->print("???"); break;
  }
}
// Maps a profiled ValueType onto the IR type used for type-specialization.
// Only Float, Int, Simd128Float32x4 and Simd128Int32x4 are legal inputs.
IRType GlobOpt::GetIRTypeFromValueType(const ValueType &valueType)
{
    if (valueType.IsFloat())
    {
        return TyFloat64;
    }
    if (valueType.IsInt())
    {
        return TyInt32;
    }
    if (valueType.IsSimd128Float32x4())
    {
        return TySimd128F4;
    }
    // Only remaining legal possibility.
    Assert(valueType.IsSimd128Int32x4());
    return TySimd128I4;
}
// Dumps all reflected fields of `t` to stdout as JSON via the visitor API.
void print_fields( const ValueType& t )
{
    t.visit( to_json_visitor(std::cout) );
    // Legacy iterator-based dump kept for reference; superseded by the
    // visitor call above.
    /*
    slog( "printing fields: %1%", t.size() );
    boost::reflect::const_iterator itr = t.begin();
    while( itr != t.end() ) {
        slog( "%3% %1% = %2%", itr.key(), itr.value().as<std::string>(), itr.value().type() );
        ++itr;
    }
    */
}
// Chooses the bailout kind that guards a type-specialized use of a value
// with the given profiled ValueType.
IR::BailOutKind GlobOpt::GetBailOutKindFromValueType(const ValueType &valueType)
{
    if (valueType.IsFloat())
    {
        // if required valueType is Float, then allow coercion from any primitive except String.
        return IR::BailOutPrimitiveButString;
    }
    if (valueType.IsInt())
    {
        return IR::BailOutIntOnly;
    }
    if (valueType.IsSimd128Float32x4())
    {
        return IR::BailOutSimd128F4Only;
    }
    // Only remaining legal possibility.
    Assert(valueType.IsSimd128Int32x4());
    return IR::BailOutSimd128I4Only;
}
// Constant-folds negation: when the operand is a constant, replaces the
// NegateOp with the negated constant of the same tag.
void Canonicalizer::do_NegateOp(NegateOp* x) {
  ValueType* type = x->x()->type();
  if (!type->is_constant()) {
    // Non-constant operand: nothing to canonicalize.
    return;
  }
  switch (type->tag()) {
    case intTag   : set_constant(-type->as_IntConstant   ()->value()); return;
    case longTag  : set_constant(-type->as_LongConstant  ()->value()); return;
    case floatTag : set_constant(-type->as_FloatConstant ()->value()); return;
    case doubleTag: set_constant(-type->as_DoubleConstant()->value()); return;
    default       : ShouldNotReachHere();
  }
}
int Gear_OscInput::configuredOscHandler(const char *path, const char *types, lo_arg **argv, int argc, void *data, void *user_data) { Gear_OscInput *gearOscInput = (Gear_OscInput*)user_data; std::cout << "Osc message received : " << std::endl; std::cout << "path: " << path << std::endl; OscMessageType message; ListType list; message.setPath(std::string(path)); for (int i=0; i<argc; i++) { std::cout << "arg " << i << " " << types[i] << std::endl; if (types[i]==LO_FLOAT) { ValueType *valuet = new ValueType(); valuet->setValue((float)argv[i]->f); list.push_back(valuet); } else if (types[i]==LO_INT32) { ValueType *valuet = new ValueType(); valuet->setValue((float)argv[i]->i32); list.push_back(valuet); } else if (types[i]==LO_DOUBLE) { ValueType *valuet = new ValueType(); valuet->setValue((float)argv[i]->d); list.push_back(valuet); } else if (types[i]==LO_STRING) { StringType *stringt = new StringType(); stringt->setValue(argv[i]->s); list.push_back(stringt); } } message.setArgs(list); ScopedLock scopedLock(gearOscInput->_mutex); gearOscInput->_messages.push_back(message); return 0; }
bool GlobOpt::Simd128CanTypeSpecOpnd(const ValueType opndType, ValueType expectedType) { if (!opndType.IsSimd128() && !expectedType.IsSimd128()) { // Non-Simd types can be coerced or we bailout by a FromVar. return true; } // Simd type if (opndType.HasBeenNull() || opndType.HasBeenUndefined()) { return false; } if ( (opndType.IsLikelyObject() && opndType.ToDefiniteObject() == expectedType) || (opndType.IsLikelyObject() && opndType.GetObjectType() == ObjectType::Object) ) { return true; } return false; }
bool GlobOpt::Simd128DoTypeSpecLoadStore(IR::Instr *instr, const Value *src1Val, const Value *src2Val, const Value *dstVal, const ThreadContext::SimdFuncSignature *simdFuncSignature) { IR::Opnd *baseOpnd = nullptr, *indexOpnd = nullptr, *valueOpnd = nullptr; IR::Opnd *src, *dst; bool doTypeSpec = true; // value = Ld [arr + index] // [arr + index] = St value src = instr->GetSrc1(); dst = instr->GetDst(); Assert(dst && src && !instr->GetSrc2()); if (Js::IsSimd128Load(instr->m_opcode)) { Assert(src->IsIndirOpnd()); baseOpnd = instr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd(); indexOpnd = instr->GetSrc1()->AsIndirOpnd()->GetIndexOpnd(); valueOpnd = instr->GetDst(); } else if (Js::IsSimd128Store(instr->m_opcode)) { Assert(dst->IsIndirOpnd()); baseOpnd = instr->GetDst()->AsIndirOpnd()->GetBaseOpnd(); indexOpnd = instr->GetDst()->AsIndirOpnd()->GetIndexOpnd(); valueOpnd = instr->GetSrc1(); // St(arr, index, value). Make sure value can be Simd128 type-spec'd doTypeSpec = doTypeSpec && Simd128CanTypeSpecOpnd(FindValue(valueOpnd->AsRegOpnd()->m_sym)->GetValueInfo()->Type(), simdFuncSignature->args[2]); } else { Assert(UNREACHED); } // array and index operands should have been type-specialized in OptArraySrc: ValueTypes should be definite at this point. If not, don't type-spec. // We can be in a loop prepass, where opnd ValueInfo is not set yet. Get the ValueInfo from the Value Table instead. 
ValueType baseOpndType = FindValue(baseOpnd->AsRegOpnd()->m_sym)->GetValueInfo()->Type(); if (IsLoopPrePass()) { doTypeSpec = doTypeSpec && (baseOpndType.IsObject() && baseOpndType.IsTypedArray()); // indexOpnd might be missing if loading from [0] if (indexOpnd != nullptr) { ValueType indexOpndType = FindValue(indexOpnd->AsRegOpnd()->m_sym)->GetValueInfo()->Type(); doTypeSpec = doTypeSpec && indexOpndType.IsLikelyInt(); } } else { doTypeSpec = doTypeSpec && (baseOpndType.IsObject() && baseOpndType.IsTypedArray()); if (indexOpnd != nullptr) { ValueType indexOpndType = FindValue(indexOpnd->AsRegOpnd()->m_sym)->GetValueInfo()->Type(); doTypeSpec = doTypeSpec && indexOpndType.IsInt(); } } return doTypeSpec; }
// Serializes `value` into `archive` by delegating to the value's own
// Serialize member (intrusive serialization hook).
void Serialize(ArchiveType &archive, ValueType &value)
{
    value.Serialize(archive);
}
// Returns whether a value with these bounds and the given (likely-int)
// ValueType must carry IntBounded value info: anything less definite than
// a real int always does; a definite int defers to the parameterless check.
bool IntBounds::RequiresIntBoundedValueInfo(const ValueType valueType) const
{
    Assert(valueType.IsLikelyInt());
    return valueType.IsInt() ? RequiresIntBoundedValueInfo() : true;
}
// Returns element i of the sequence `x`, or a default-constructed element
// when i is out of range (so callers never index past the end).
// Enabled only for types modeling the is_sequence trait.
typename boost::enable_if<is_sequence<ValueType>, typename element_type<ValueType>::type>::type
slice_value(ValueType const& x, unsigned i)
{
    typedef typename element_type<ValueType>::type element_t;
    if (i < x.size())
    {
        return x[i];
    }
    // Out-of-range: yield a default value rather than undefined behavior.
    return element_t();
}
// Decides whether `functionInfo` (called from `inliner` at `callSiteId`)
// should be inlined. Returns the inlinee's FunctionBody for script
// functions, `functionInfo` itself for inlinable built-ins, or nullptr when
// inlining is rejected. Each rejection path traces its reason.
Js::FunctionInfo *InliningDecider::Inline(Js::FunctionBody *const inliner, Js::FunctionInfo* functionInfo,
    bool isConstructorCall, bool isPolymorphicCall, uint16 constantArgInfo, Js::ProfileId callSiteId, uint recursiveInlineDepth, bool allowRecursiveInlining)
{
#if defined(DBG_DUMP) || defined(ENABLE_DEBUG_CONFIG_OPTIONS)
    char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
    char16 debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
#endif
    Js::FunctionProxy * proxy = functionInfo->GetFunctionProxy();
    if (proxy && proxy->IsFunctionBody())
    {
        // --- Script-function candidate ---
        if (isLoopBody && PHASE_OFF(Js::InlineInJitLoopBodyPhase, this->topFunc))
        {
            INLINE_TESTTRACE_VERBOSE(_u("INLINING: Skip Inline: Jit loop body: %s (%s)\n"), this->topFunc->GetDisplayName(),
                this->topFunc->GetDebugNumberSet(debugStringBuffer));
            return nullptr;
        }

        // Note: disable inline for debugger, as we can't bailout at return from function.
        // Alternative can be generate this bailout as part of inline, which can be done later as perf improvement.
        const auto inlinee = proxy->GetFunctionBody();
        Assert(this->jitMode == ExecutionMode::FullJit);
        if (PHASE_OFF(Js::InlinePhase, inlinee) ||
            PHASE_OFF(Js::GlobOptPhase, inlinee) ||
            !ContinueInliningUserDefinedFunctions(this->bytecodeInlinedCount) ||
            this->isInDebugMode)
        {
            return nullptr;
        }

        if (functionInfo->IsDeferred() || inlinee->GetByteCode() == nullptr)
        {
            // Deferred-parsed bodies have no bytecode to inline yet.
            INLINE_TESTTRACE(_u("INLINING: Skip Inline: No bytecode\tInlinee: %s (%s)\tCaller: %s (%s)\n"),
                inlinee->GetDisplayName(), inlinee->GetDebugNumberSet(debugStringBuffer), inliner->GetDisplayName(),
                inliner->GetDebugNumberSet(debugStringBuffer2));
            return nullptr;
        }

        if (inlinee->GetHasTry())
        {
            INLINE_TESTTRACE(_u("INLINING: Skip Inline: Has try\tInlinee: %s (%s)\tCaller: %s (%s)\n"),
                inlinee->GetDisplayName(), inlinee->GetDebugNumberSet(debugStringBuffer), inliner->GetDisplayName(),
                inliner->GetDebugNumberSet(debugStringBuffer2));
            return nullptr;
        }

        // This is a hard limit as the argOuts array is statically sized.
        if (inlinee->GetInParamsCount() > Js::InlineeCallInfo::MaxInlineeArgoutCount)
        {
            INLINE_TESTTRACE(_u("INLINING: Skip Inline: Params count greater then MaxInlineeArgoutCount\tInlinee: %s (%s)\tParamcount: %d\tMaxInlineeArgoutCount: %d\tCaller: %s (%s)\n"),
                inlinee->GetDisplayName(), inlinee->GetDebugNumberSet(debugStringBuffer), inlinee->GetInParamsCount(),
                Js::InlineeCallInfo::MaxInlineeArgoutCount, inliner->GetDisplayName(), inliner->GetDebugNumberSet(debugStringBuffer2));
            return nullptr;
        }

        if (inlinee->GetInParamsCount() == 0)
        {
            // Inline candidate has no params, not even a this pointer. This can only be the global function,
            // which we shouldn't inline.
            INLINE_TESTTRACE(_u("INLINING: Skip Inline: Params count is zero!\tInlinee: %s (%s)\tParamcount: %d\tCaller: %s (%s)\n"),
                inlinee->GetDisplayName(), inlinee->GetDebugNumberSet(debugStringBuffer), inlinee->GetInParamsCount(),
                inliner->GetDisplayName(), inliner->GetDebugNumberSet(debugStringBuffer2));
            return nullptr;
        }

        if (inlinee->GetDontInline())
        {
            INLINE_TESTTRACE(_u("INLINING: Skip Inline: Do not inline\tInlinee: %s (%s)\tCaller: %s (%s)\n"),
                inlinee->GetDisplayName(), inlinee->GetDebugNumberSet(debugStringBuffer), inliner->GetDisplayName(),
                inliner->GetDebugNumberSet(debugStringBuffer2));
            return nullptr;
        }

        // Do not inline a call to a class constructor if it isn't part of a new expression since the call will throw a TypeError anyway.
        if (inlinee->IsClassConstructor() && !isConstructorCall)
        {
            INLINE_TESTTRACE(_u("INLINING: Skip Inline: Class constructor without new keyword\tInlinee: %s (%s)\tCaller: %s (%s)\n"),
                inlinee->GetDisplayName(), inlinee->GetDebugNumberSet(debugStringBuffer), inliner->GetDisplayName(),
                inliner->GetDebugNumberSet(debugStringBuffer2));
            return nullptr;
        }

        // Size/recursion/heuristic checks live in DeciderInlineIntoInliner.
        if (!DeciderInlineIntoInliner(inlinee, inliner, isConstructorCall, isPolymorphicCall, constantArgInfo, recursiveInlineDepth, allowRecursiveInlining))
        {
            return nullptr;
        }

#if defined(ENABLE_DEBUG_CONFIG_OPTIONS)
        TraceInlining(inliner, inlinee->GetDisplayName(), inlinee->GetDebugNumberSet(debugStringBuffer), inlinee->GetByteCodeCount(),
            this->topFunc, this->bytecodeInlinedCount, inlinee, callSiteId, this->isLoopBody);
#endif

        // Accepted: account for the inlined bytecode budget.
        this->bytecodeInlinedCount += inlinee->GetByteCodeCount();
        return inlinee;
    }

    // --- Built-in candidate ---
    Js::OpCode builtInInlineCandidateOpCode;
    ValueType builtInReturnType;
    GetBuiltInInfo(functionInfo, &builtInInlineCandidateOpCode, &builtInReturnType);

    // Neither an inline opcode nor a known return type: nothing to gain.
    if(builtInInlineCandidateOpCode == 0 && builtInReturnType.IsUninitialized())
    {
        return nullptr;
    }

    Assert(this->jitMode == ExecutionMode::FullJit);
    if (builtInInlineCandidateOpCode != 0 &&
        (
            PHASE_OFF(Js::InlinePhase, inliner) ||
            PHASE_OFF(Js::GlobOptPhase, inliner) ||
            isConstructorCall
        ))
    {
        return nullptr;
    }

    // Note: for built-ins at this time we don't have enough data (the instr) to decide whether it's going to be inlined.
    return functionInfo;
}
// Canonicalizes If instructions: constants are moved to the right operand;
// constant-vs-constant comparisons fold to a Goto; comparisons of a
// CompareOp result against an int constant fold to a direct If on the
// compared values (or a Goto when all successors coincide).
void Canonicalizer::do_If(If* x) {
  // move const to right
  if (x->x()->type()->is_constant()) x->swap_operands();
  // simplify
  const Value l = x->x(); ValueType* lt = l->type();
  const Value r = x->y(); ValueType* rt = r->type();

  if (lt->is_constant() && rt->is_constant()) {
    // pattern: If (lc cond rc) => simplify to: Goto
    Goto* g = NULL;
    switch (lt->tag()) {
      case intTag:
        g = new Goto(x->sux_for(is_true(lt->as_IntConstant ()->value(), x->cond(), rt->as_IntConstant ()->value())), x->is_safepoint());
        break;
      case longTag:
        g = new Goto(x->sux_for(is_true(lt->as_LongConstant()->value(), x->cond(), rt->as_LongConstant()->value())), x->is_safepoint());
        break;
      // other cases not implemented (must be extremely careful with floats & doubles!)
    }
    if (g != NULL) {
      // If this If is a safepoint then the debug information should come from the state_before of the If.
      g->set_state_before(x->state_before());
      set_canonical(g);
    }
  } else if (rt->as_IntConstant() != NULL) {
    // pattern: If (l cond rc) => investigate further
    const jint rc = rt->as_IntConstant()->value();
    if (l->as_CompareOp() != NULL) {
      // pattern: If ((a cmp b) cond rc) => simplify to: If (x cond y) or: Goto
      CompareOp* cmp = l->as_CompareOp();
      bool unordered_is_less = cmp->op() == Bytecodes::_fcmpl || cmp->op() == Bytecodes::_dcmpl;
      // The CompareOp yields -1/0/+1; compute which successor each outcome takes.
      BlockBegin* lss_sux = x->sux_for(is_true(-1, x->cond(), rc)); // successor for a < b
      BlockBegin* eql_sux = x->sux_for(is_true( 0, x->cond(), rc)); // successor for a = b
      BlockBegin* gtr_sux = x->sux_for(is_true(+1, x->cond(), rc)); // successor for a > b
      BlockBegin* nan_sux = unordered_is_less ? lss_sux : gtr_sux ; // successor for unordered
      // Note: At this point all successors (lss_sux, eql_sux, gtr_sux, nan_sux) are
      //       equal to x->tsux() or x->fsux(). Furthermore, nan_sux equals either
      //       lss_sux or gtr_sux.
      if (lss_sux == eql_sux && eql_sux == gtr_sux) {
        // all successors identical => simplify to: Goto
        set_canonical(new Goto(lss_sux, x->is_safepoint()));
      } else {
        // two successors differ and two successors are the same => simplify to: If (x cmp y)
        // determine new condition & successors
        If::Condition cond;
        BlockBegin* tsux = NULL;
        BlockBegin* fsux = NULL;
        if (lss_sux == eql_sux) { cond = If::leq; tsux = lss_sux; fsux = gtr_sux; }
        else if (lss_sux == gtr_sux) { cond = If::neq; tsux = lss_sux; fsux = eql_sux; }
        else if (eql_sux == gtr_sux) { cond = If::geq; tsux = eql_sux; fsux = lss_sux; }
        else { ShouldNotReachHere(); }
        set_canonical(new If(cmp->x(), cond, nan_sux == tsux, cmp->y(), tsux, fsux, cmp->state_before(), x->is_safepoint()));
        set_bci(cmp->bci());
      }
    } else if (l->as_InstanceOf() != NULL) {
      // NOTE: Code permanently disabled for now since it leaves the old InstanceOf
      //       instruction in the graph (it is pinned). Need to fix this at some point.
      //       The early return below makes everything after it dead code.
      return;
      // pattern: If ((obj instanceof klass) cond rc) => simplify to: IfInstanceOf or: Goto
      InstanceOf* inst = l->as_InstanceOf();
      BlockBegin* is_inst_sux = x->sux_for(is_true(1, x->cond(), rc)); // successor for instanceof == 1
      BlockBegin* no_inst_sux = x->sux_for(is_true(0, x->cond(), rc)); // successor for instanceof == 0
      if (is_inst_sux == no_inst_sux && inst->is_loaded()) {
        // both successors identical and klass is loaded => simplify to: Goto
        set_canonical(new Goto(is_inst_sux, x->is_safepoint()));
      } else {
        // successors differ => simplify to: IfInstanceOf
        set_canonical(new IfInstanceOf(inst->klass(), inst->obj(), true, inst->bci(), is_inst_sux, no_inst_sux));
      }
    }
  }
}
// Sets this property's value from a smart pointer by forwarding the raw
// pointer to SetSmartPointer (which manages the reference itself).
void mitk::SmartPointerProperty::SetValue(const ValueType & value)
{
    this->SetSmartPointer(value.GetPointer());
}
// Formats `value` according to `format` by delegating to ValueType::ToFormat;
// the format-string semantics are defined by the ValueType implementation.
String FormatValue(const ValueType& value, const String& format)
{
    return value.ToFormat(format);
}
/* Handles all Simd128 type-spec of an instr, if possible.
   Returns true when the instruction was handled — either type-specialized,
   or (outside a loop prepass) replaced by a BailOnNoSimdTypeSpec bailout
   when type-spec was not possible. Returns false when the instruction is
   not a SIMD candidate at all. */
bool GlobOpt::TypeSpecializeSimd128(IR::Instr *instr, Value **pSrc1Val, Value **pSrc2Val, Value **pDstVal)
{
    if (this->GetIsAsmJSFunc() || SIMD128_TYPE_SPEC_FLAG == false)
    {
        // no type-spec for ASMJS code or flag is off.
        return false;
    }

    // Special-cased helper opcodes that carry SIMD values.
    switch (instr->m_opcode)
    {
    case Js::OpCode::ArgOut_A_InlineBuiltIn:
        if (instr->GetSrc1()->IsRegOpnd())
        {
            StackSym *sym = instr->GetSrc1()->AsRegOpnd()->m_sym;
            if (IsSimd128TypeSpecialized(sym, this->currentBlock))
            {
                ValueType valueType = (*pSrc1Val)->GetValueInfo()->Type();
                Assert(valueType.IsSimd128());
                ToTypeSpecUse(instr, instr->GetSrc1(), this->currentBlock, *pSrc1Val, nullptr, GetIRTypeFromValueType(valueType), GetBailOutKindFromValueType(valueType));
                return true;
            }
        }
        return false;

    case Js::OpCode::Ld_A:
        if (instr->GetSrc1()->IsRegOpnd())
        {
            StackSym *sym = instr->GetSrc1()->AsRegOpnd()->m_sym;
            IRType type = TyIllegal;
            if (IsSimd128F4TypeSpecialized(sym, this->currentBlock))
            {
                type = TySimd128F4;
            }
            else if (IsSimd128I4TypeSpecialized(sym, this->currentBlock))
            {
                type = TySimd128I4;
            }
            else
            {
                // Source is not SIMD-type-specialized: nothing to propagate.
                return false;
            }
            ToTypeSpecUse(instr, instr->GetSrc1(), this->currentBlock, *pSrc1Val, nullptr, type, IR::BailOutSimd128F4Only /*not used for Ld_A*/);
            TypeSpecializeSimd128Dst(type, instr, *pSrc1Val, *pSrc1Val, pDstVal);
            return true;
        }
        return false;

    case Js::OpCode::ExtendArg_A:
        if (Simd128DoTypeSpec(instr, *pSrc1Val, *pSrc2Val, *pDstVal))
        {
            Assert(instr->m_opcode == Js::OpCode::ExtendArg_A);
            Assert(instr->GetDst()->GetType() == TyVar);
            ValueType valueType = instr->GetDst()->GetValueType();

            // Type-spec src1 only based on dst type. Dst type is set by the inliner based on func signature.
            ToTypeSpecUse(instr, instr->GetSrc1(), this->currentBlock, *pSrc1Val, nullptr, GetIRTypeFromValueType(valueType), GetBailOutKindFromValueType(valueType), true /*lossy*/);
            ToVarRegOpnd(instr->GetDst()->AsRegOpnd(), this->currentBlock);
            return true;
        }
        return false;
    }

    if (!Js::IsSimd128Opcode(instr->m_opcode))
    {
        return false;
    }

    // Simd instr
    if (Simd128DoTypeSpec(instr, *pSrc1Val, *pSrc2Val, *pDstVal))
    {
        ThreadContext::SimdFuncSignature simdFuncSignature;
        instr->m_func->GetScriptContext()->GetThreadContext()->GetSimdFuncSignatureFromOpcode(instr->m_opcode, simdFuncSignature);
        // type-spec logic

        // special handling for load/store
        // OptArraySrc will type-spec the array and the index. We type-spec the value here.
        if (Js::IsSimd128Load(instr->m_opcode))
        {
            TypeSpecializeSimd128Dst(GetIRTypeFromValueType(simdFuncSignature.returnType), instr, nullptr, *pSrc1Val, pDstVal);
            Simd128SetIndirOpndType(instr->GetSrc1()->AsIndirOpnd(), instr->m_opcode);
            return true;
        }
        if (Js::IsSimd128Store(instr->m_opcode))
        {
            ToTypeSpecUse(instr, instr->GetSrc1(), this->currentBlock, *pSrc1Val, nullptr, GetIRTypeFromValueType(simdFuncSignature.args[2]), GetBailOutKindFromValueType(simdFuncSignature.args[2]));
            Simd128SetIndirOpndType(instr->GetDst()->AsIndirOpnd(), instr->m_opcode);
            return true;
        }

        // For ops with ExtendArg, all sources are already type-specialized, so
        // only the dst needs type-spec; for <=2 args, type-spec the sources too.
        if (simdFuncSignature.argCount <= 2)
        {
            Assert(instr->GetSrc1());
            ToTypeSpecUse(instr, instr->GetSrc1(), this->currentBlock, *pSrc1Val, nullptr, GetIRTypeFromValueType(simdFuncSignature.args[0]), GetBailOutKindFromValueType(simdFuncSignature.args[0]));
            if (instr->GetSrc2())
            {
                ToTypeSpecUse(instr, instr->GetSrc2(), this->currentBlock, *pSrc2Val, nullptr, GetIRTypeFromValueType(simdFuncSignature.args[1]), GetBailOutKindFromValueType(simdFuncSignature.args[1]));
            }
        }
        if (instr->GetDst())
        {
            TypeSpecializeSimd128Dst(GetIRTypeFromValueType(simdFuncSignature.returnType), instr, nullptr, *pSrc1Val, pDstVal);
        }
        return true;
    }
    else
    {
        // We didn't type-spec
        if (!IsLoopPrePass())
        {
            // Emit bailout if not loop prepass.
            // The inliner inserts bytecodeUses of original args after the instruction. Bailout is safe.
            IR::Instr * bailoutInstr = IR::BailOutInstr::New(Js::OpCode::BailOnNoSimdTypeSpec, IR::BailOutNoSimdTypeSpec, instr, instr->m_func);
            bailoutInstr->SetByteCodeOffset(instr);
            instr->InsertAfter(bailoutInstr);

            // Neuter the original instruction; the bailout takes over.
            instr->m_opcode = Js::OpCode::Nop;
            if (instr->GetSrc1())
            {
                instr->FreeSrc1();
                if (instr->GetSrc2())
                {
                    instr->FreeSrc2();
                }
            }
            if (instr->GetDst())
            {
                instr->FreeDst();
            }

            if (this->byteCodeUses)
            {
                // All inlined SIMD ops have jitOptimizedReg srcs
                Assert(this->byteCodeUses->IsEmpty());
                JitAdelete(this->alloc, this->byteCodeUses);
                this->byteCodeUses = nullptr;
            }
            RemoveCodeAfterNoFallthroughInstr(bailoutInstr);
            return true;
        }
    }
    return false;
}
bool GlobOpt::Simd128DoTypeSpec(IR::Instr *instr, const Value *src1Val, const Value *src2Val, const Value *dstVal) { bool doTypeSpec = true; // TODO: Some operations require additional opnd constraints (e.g. shuffle/swizzle). if (Js::IsSimd128Opcode(instr->m_opcode)) { ThreadContext::SimdFuncSignature simdFuncSignature; instr->m_func->GetScriptContext()->GetThreadContext()->GetSimdFuncSignatureFromOpcode(instr->m_opcode, simdFuncSignature); if (!simdFuncSignature.valid) { // not implemented yet. return false; } // special handling for Load/Store if (Js::IsSimd128Load(instr->m_opcode) || Js::IsSimd128Store(instr->m_opcode)) { return Simd128DoTypeSpecLoadStore(instr, src1Val, src2Val, dstVal, &simdFuncSignature); } const uint argCount = simdFuncSignature.argCount; switch (argCount) { case 2: Assert(src2Val); doTypeSpec = doTypeSpec && Simd128CanTypeSpecOpnd(src2Val->GetValueInfo()->Type(), simdFuncSignature.args[1]) && Simd128ValidateIfLaneIndex(instr, instr->GetSrc2(), 1); // fall-through case 1: Assert(src1Val); doTypeSpec = doTypeSpec && Simd128CanTypeSpecOpnd(src1Val->GetValueInfo()->Type(), simdFuncSignature.args[0]) && Simd128ValidateIfLaneIndex(instr, instr->GetSrc1(), 0); break; default: { // extended args Assert(argCount > 2); // Check if all args have been type specialized. int arg = argCount - 1; IR::Instr * eaInstr = GetExtendedArg(instr); while (arg>=0) { Assert(eaInstr); Assert(eaInstr->m_opcode == Js::OpCode::ExtendArg_A); ValueType expectedType = simdFuncSignature.args[arg]; IR::Opnd * opnd = eaInstr->GetSrc1(); StackSym * sym = opnd->GetStackSym(); // In Forward Prepass: Check liveness through liveness bits, not IR type, since in prepass no actual type-spec happens. // In the Forward Pass: Check IRType since Sym can be null, because of const prop. 
if (expectedType.IsSimd128Float32x4()) { if (sym && !IsSimd128F4TypeSpecialized(sym, ¤tBlock->globOptData) || !sym && opnd->GetType() != TySimd128F4) { return false; } } else if (expectedType.IsSimd128Int32x4()) { if (sym && !IsSimd128I4TypeSpecialized(sym, ¤tBlock->globOptData) || !sym && opnd->GetType() != TySimd128I4) { return false; } } else if (expectedType.IsFloat()) { if (sym && !IsFloat64TypeSpecialized(sym, ¤tBlock->globOptData) || !sym&& opnd->GetType() != TyFloat64) { return false; } } else if (expectedType.IsInt()) { if ((sym && !IsInt32TypeSpecialized(sym, ¤tBlock->globOptData) && !currentBlock->globOptData.liveLossyInt32Syms->Test(sym->m_id)) || !sym && opnd->GetType() != TyInt32) { return false; } // Extra check if arg is a lane index if (!Simd128ValidateIfLaneIndex(instr, opnd, arg)) { return false; } } else { Assert(UNREACHED); } eaInstr = GetExtendedArg(eaInstr); arg--; } // all args are type-spec'd doTypeSpec = true; } } } else { Assert(instr->m_opcode == Js::OpCode::ExtendArg_A); // For ExtendArg, the expected type is encoded in the dst(link) operand. doTypeSpec = doTypeSpec && Simd128CanTypeSpecOpnd(src1Val->GetValueInfo()->Type(), instr->GetDst()->GetValueType()); } return doTypeSpec; }