void IRTranslator::translateAsyncSuspend(const NormalizedInstruction& i) {
  if (m_hhbcTrans.resumed()) {
    HHIR_EMIT(AsyncSuspendR, i.nextSk().offset());
  } else {
    HHIR_EMIT(AsyncSuspendE, i.nextSk().offset(), i.imm[0].u_IVA);
  }
}
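/*
 * Note on the HHIR_EMIT / HHIR_UNIMPLEMENTED(_WHEN) macros used throughout
 * these translate* helpers: they are defined elsewhere in this translation
 * unit.  A minimal sketch of the assumed shape (illustrative only, not the
 * authoritative definition):
 *
 *   #define HHIR_EMIT(op, ...)                \
 *     do {                                    \
 *       m_hhbcTrans.emit ## op(__VA_ARGS__);  \
 *       return;                               \
 *     } while (0)
 *
 * Because each HHIR_EMIT expansion ends with a return, the switch cases
 * below (e.g. translateBinaryArithOp, translateLtGtOp) need no break
 * statements, and code after an exhaustive switch is genuinely unreachable.
 * HHIR_UNIMPLEMENTED and HHIR_UNIMPLEMENTED_WHEN similarly bail out of the
 * translation (punting to the interpreter) when the named case is not
 * supported.
 */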
void IRTranslator::translateInstr(const NormalizedInstruction& ni) {
  auto& ht = m_hhbcTrans;
  ht.setBcOff(ni.source.offset(),
              ni.endsRegion && !m_hhbcTrans.isInlining());
  FTRACE(1, "\n{:-^60}\n",
         folly::format("Translating {}: {} with stack:\n{}",
                       ni.offset(), ni.toString(), ht.showStack()));
  // When profiling, we disable type predictions to avoid side exits
  assert(IMPLIES(mcg->tx().mode() == TransKind::Profile, !ni.outputPredicted));

  ht.emitRB(RBTypeBytecodeStart, ni.source, 2);
  ht.emitIncStat(Stats::Instr_TC, 1, false);

  auto pc = reinterpret_cast<const Op*>(ni.pc());
  for (auto i = 0, num = instrNumPops(pc); i < num; ++i) {
    auto const type = flavorToType(instrInputFlavor(pc, i));
    if (type != Type::Gen) m_hhbcTrans.assertTypeStack(i, type);
  }

  if (RuntimeOption::EvalHHIRGenerateAsserts >= 2) {
    ht.emitDbgAssertRetAddr();
  }

  if (isAlwaysNop(ni.op())) {
    // Do nothing
  } else if (instrMustInterp(ni) || ni.interp) {
    interpretInstr(ni);
  } else {
    translateInstrWork(ni);
  }
}
void IRTranslator::translateBranchOp(const NormalizedInstruction& i) {
  auto const op = i.op();
  assert(op == OpJmpZ || op == OpJmpNZ);

  Offset takenOffset    = i.offset() + i.imm[0].u_BA;
  Offset fallthruOffset = i.offset() + instrLen((Op*)(i.pc()));
  auto jmpFlags = instrJmpFlags(i);

  if (i.nextOffset == takenOffset) {
    always_assert(RuntimeOption::EvalJitPGORegionSelector == "hottrace");
    // invert the branch
    if (op == OpJmpZ) {
      HHIR_EMIT(JmpNZ, fallthruOffset, jmpFlags);
    } else {
      HHIR_EMIT(JmpZ, fallthruOffset, jmpFlags);
    }
    return;
  }

  if (op == OpJmpZ) {
    HHIR_EMIT(JmpZ,  takenOffset, jmpFlags);
  } else {
    HHIR_EMIT(JmpNZ, takenOffset, jmpFlags);
  }
}
void IRTranslator::translateInstr(const NormalizedInstruction& ni) {
  auto& ht = m_hhbcTrans;
  ht.setBcOff(ni.source.offset(),
              ni.breaksTracelet && !m_hhbcTrans.isInlining());
  FTRACE(1, "\n{:-^60}\n",
         folly::format("Translating {}: {} with stack:\n{}",
                       ni.offset(), ni.toString(), ht.showStack()));
  // When profiling, we disable type predictions to avoid side exits
  assert(IMPLIES(JIT::tx->mode() == TransKind::Profile, !ni.outputPredicted));

  if (ni.guardedThis) {
    // Task #2067635: This should really generate an AssertThis
    ht.setThisAvailable();
  }

  ht.emitRB(RBTypeBytecodeStart, ni.source, 2);

  auto pc = reinterpret_cast<const Op*>(ni.pc());
  for (auto i = 0, num = instrNumPops(pc); i < num; ++i) {
    auto const type = flavorToType(instrInputFlavor(pc, i));
    if (type != Type::Gen) m_hhbcTrans.assertTypeStack(i, type);
  }

  if (RuntimeOption::EvalHHIRGenerateAsserts >= 2) {
    ht.emitDbgAssertRetAddr();
  }

  if (instrMustInterp(ni) || ni.interp) {
    interpretInstr(ni);
  } else {
    translateInstrWork(ni);
  }

  passPredictedAndInferredTypes(ni);
}
void IRTranslator::translateBranchOp(const NormalizedInstruction& i) {
  auto const op = i.op();
  assert(op == OpJmpZ || op == OpJmpNZ);

  Offset takenOffset    = i.offset() + i.imm[0].u_BA;
  Offset fallthruOffset = i.offset() + instrLen((Op*)(i.pc()));
  assert(i.breaksTracelet ||
         i.nextOffset == takenOffset ||
         i.nextOffset == fallthruOffset);
  assert(!i.includeBothPaths || !i.breaksTracelet);

  if (i.breaksTracelet || i.nextOffset == fallthruOffset) {
    if (op == OpJmpZ) {
      HHIR_EMIT(JmpZ,  takenOffset, fallthruOffset, i.includeBothPaths);
    } else {
      HHIR_EMIT(JmpNZ, takenOffset, fallthruOffset, i.includeBothPaths);
    }
    return;
  }

  assert(i.nextOffset == takenOffset);
  // invert the branch
  if (op == OpJmpZ) {
    HHIR_EMIT(JmpNZ, fallthruOffset, takenOffset, i.includeBothPaths);
  } else {
    HHIR_EMIT(JmpZ,  fallthruOffset, takenOffset, i.includeBothPaths);
  }
}
void ActRecState::pushFunc(const NormalizedInstruction& inst) {
  assertx(isFPush(inst.op()));
  const Unit& unit = *inst.unit();
  const Func* func = nullptr;

  if (inst.op() == OpFPushFuncD || inst.op() == OpFPushFuncU) {
    Id funcId = inst.imm[1].u_SA;
    auto const& nep = unit.lookupNamedEntityPairId(funcId);
    func = Unit::lookupFunc(nep.second);
  } else if (inst.op() == OpFPushCtorD) {
    Id clsId = inst.imm[1].u_SA;
    auto const& nep = unit.lookupNamedEntityPairId(clsId);
    auto const cls = Unit::lookupClass(nep.second);
    auto const scopeFunc = knownFunc();
    auto const ctx = scopeFunc ? scopeFunc->cls() : nullptr;
    func = lookupImmutableCtor(cls, ctx);
  }

  if (func) func->validate();
  if (func && func->isNameBindingImmutable(&unit)) {
    pushFuncD(func);
    return;
  }
  pushDynFunc();
}
void IRTranslator::translateFCallArray(const NormalizedInstruction& i) {
  const Offset pcOffset = i.offset();
  SrcKey next = i.nextSk();
  const Offset after = next.offset();

  HHIR_EMIT(FCallArray, pcOffset, after,
            jit::callDestroysLocals(i, m_hhbcTrans.curFunc()));
}
/*
 * This function returns the offset of instruction i's branch target.
 * This is normally the offset corresponding to the branch being
 * taken.  However, if i does not break a trace and it's followed in
 * the trace by the instruction at the taken branch, then this
 * function returns the offset of i's fall-through instruction.  In
 * that case, the invertCond output argument is set to true;
 * otherwise it's set to false.
 */
static Offset getBranchTarget(const NormalizedInstruction& i,
                              bool& invertCond) {
  assert(instrJumpOffset((Op*)(i.pc())) != nullptr);
  Offset targetOffset = i.offset() + i.imm[1].u_BA;
  invertCond = false;

  if (!i.endsRegion && i.nextOffset == targetOffset) {
    invertCond = true;
    Offset fallthruOffset = i.offset() + instrLen((Op*)i.pc());
    targetOffset = fallthruOffset;
  }

  return targetOffset;
}
void ActRecState::pushFunc(const NormalizedInstruction& inst) {
  assertx(isFPush(inst.op()));
  if (inst.op() == OpFPushFuncD || inst.op() == OpFPushFuncU) {
    const Unit& unit = *inst.unit();
    Id funcId = inst.imm[1].u_SA;
    auto const& nep = unit.lookupNamedEntityPairId(funcId);
    auto const func = Unit::lookupFunc(nep.second);
    if (func) func->validate();
    if (func && func->isNameBindingImmutable(&unit)) {
      pushFuncD(func);
      return;
    }
  }
  pushDynFunc();
}
void IRTranslator::translateMIterInit(const NormalizedInstruction& i) {
  HHIR_EMIT(MIterInit,
            i.imm[0].u_IVA,
            i.offset() + i.imm[1].u_BA,
            i.imm[2].u_IVA);
}
void IRTranslator::translateMIterNext(const NormalizedInstruction& i) {
  HHIR_EMIT(MIterNext,
            i.imm[0].u_IVA,
            i.offset() + i.imm[1].u_BA,
            i.imm[2].u_IVA,
            instrJmpFlags(i));
}
void IRTranslator::translateSameOp(const NormalizedInstruction& i) {
  auto const op = i.op();
  assert(op == Op::Same || op == Op::NSame);
  if (op == Op::Same) {
    HHIR_EMIT(Same);
  } else {
    HHIR_EMIT(NSame);
  }
}
void IRTranslator::translateEqOp(const NormalizedInstruction& i) {
  auto const op = i.op();
  assert(op == Op::Eq || op == Op::Neq);
  if (op == Op::Eq) {
    HHIR_EMIT(Eq);
  } else {
    HHIR_EMIT(Neq);
  }
}
void IRTranslator::translateUnaryBooleanOp(const NormalizedInstruction& i) {
  auto const op = i.op();
  assert(op == OpCastBool || op == OpEmptyL);
  if (op == OpCastBool) {
    HHIR_EMIT(CastBool);
  } else {
    HHIR_EMIT(EmptyL, i.imm[0].u_LA);
  }
}
void IRTranslator::translateFPassCOp(const NormalizedInstruction& i) {
  auto const op = i.op();
  always_assert(op == OpFPassCW || op == OpFPassCE);

  // These cases might have to raise a warning or an error
  HHIR_UNIMPLEMENTED_WHEN(i.preppedByRef, FPassCW_FPassCE_byref);
  // Nothing to do otherwise.
}
void IRTranslator::translateAssignToLocalOp(const NormalizedInstruction& ni) {
  auto const op = ni.op();
  assert(op == OpSetL || op == OpBindL);

  if (op == OpSetL) {
    HHIR_EMIT(SetL, ni.imm[0].u_LA);
  } else {
    HHIR_EMIT(BindL, ni.imm[0].u_LA);
  }
}
void IRTranslator::translateFPassCOp(const NormalizedInstruction& i) {
  auto const op = i.op();
  if (i.noOp) return;

  if (i.preppedByRef && (op == OpFPassCW || op == OpFPassCE)) {
    // These cases might have to raise a warning or an error
    HHIR_UNIMPLEMENTED(FPassCW_FPassCE_byref);
  } else {
    HHIR_EMIT(FPassCOp);
  }
}
void IRTranslator::translateFCall(const NormalizedInstruction& i) {
  auto const numArgs = i.imm[0].u_IVA;

  const PC after = m_hhbcTrans.curUnit()->at(i.nextSk().offset());
  const Func* srcFunc = m_hhbcTrans.curFunc();
  Offset returnBcOffset =
    srcFunc->unit()->offsetOf(after - srcFunc->base());

  HHIR_EMIT(FCall, numArgs, returnBcOffset, i.funcd,
            jit::callDestroysLocals(i, m_hhbcTrans.curFunc()));
}
void IRTranslator::translateBinaryArithOp(const NormalizedInstruction& i) {
  switch (i.op()) {
    case Op::Add:    HHIR_EMIT(Add);
    case Op::Sub:    HHIR_EMIT(Sub);
    case Op::Mul:    HHIR_EMIT(Mul);
    case Op::AddO:   HHIR_EMIT(AddO);
    case Op::SubO:   HHIR_EMIT(SubO);
    case Op::MulO:   HHIR_EMIT(MulO);
    case Op::BitAnd: HHIR_EMIT(BitAnd);
    case Op::BitOr:  HHIR_EMIT(BitOr);
    case Op::BitXor: HHIR_EMIT(BitXor);
    default: break;
  }
  not_reached();
}
void IRTranslator::translateFCall(const NormalizedInstruction& i) {
  auto const numArgs = i.imm[0].u_IVA;

  const PC after = m_hhbcTrans.curUnit()->at(i.nextSk().offset());
  const Func* srcFunc = m_hhbcTrans.curFunc();
  Offset returnBcOffset =
    srcFunc->unit()->offsetOf(after - srcFunc->base());

  /*
   * If we have a calleeTrace, we're going to see if we should inline
   * the call.
   */
  if (i.calleeTrace) {
    if (!i.calleeTrace->m_inliningFailed) {
      assert(shouldIRInline(m_hhbcTrans.curFunc(), i.funcd, *i.calleeTrace));

      m_hhbcTrans.beginInlining(numArgs, i.funcd, returnBcOffset);
      static const bool shapeStats = Stats::enabledAny() &&
                                     getenv("HHVM_STATS_INLINESHAPE");
      if (shapeStats) {
        m_hhbcTrans.profileInlineFunctionShape(traceletShape(*i.calleeTrace));
      }

      for (auto* ni = i.calleeTrace->m_instrStream.first;
           ni; ni = ni->next) {
        if (isAlwaysNop(ni->op())) {
          // This might not be necessary---but for now it's preserving
          // side effects of the call to readMetaData that used to
          // exist here.
          ni->noOp = true;
        }
        translateInstr(*ni);
      }
      return;
    }

    static const auto enabled = Stats::enabledAny() &&
                                getenv("HHVM_STATS_FAILEDINL");
    if (enabled) {
      m_hhbcTrans.profileFunctionEntry("FailedCandidate");
      m_hhbcTrans.profileFailedInlShape(traceletShape(*i.calleeTrace));
    }
  }

  HHIR_EMIT(FCall, numArgs, returnBcOffset, i.funcd,
            JIT::callDestroysLocals(i, m_hhbcTrans.curFunc()));
}
void
IRTranslator::passPredictedAndInferredTypes(const NormalizedInstruction& i) {
  if (!i.outStack || i.breaksTracelet) return;
  auto const jitType = Type(i.outStack->rtt);

  m_hhbcTrans.setBcOff(i.next->offset(), false);

  if (RuntimeOption::EvalHHIRRelaxGuards) {
    if (i.outputPredicted) {
      if (i.outputPredictionStatic && jitType.notCounted()) {
        // If the prediction is from static analysis it really means jitType |
        // InitNull. When jitType is an uncounted type, we know that the value
        // will always be an uncounted type, so we assert that fact before
        // doing the real check. This allows us to relax the CheckType away
        // while still eliminating some refcounting operations.
        m_hhbcTrans.assertTypeStack(0, Type::Uncounted);
      }
      m_hhbcTrans.checkTypeTopOfStack(jitType, i.next->offset());
    }
    return;
  }

  NormalizedInstruction::OutputUse u = i.getOutputUsage(i.outStack);

  if (u == NormalizedInstruction::OutputUse::Inferred) {
    TRACE(1, "irPassPredictedAndInferredTypes: output inferred as %s\n",
          jitType.toString().c_str());
    m_hhbcTrans.assertTypeStack(0, jitType);
  } else if (u == NormalizedInstruction::OutputUse::Used && i.outputPredicted) {
    // If the value was predicted statically by the front-end, it
    // means that it's either the predicted type or null.  In this
    // case, if the predicted value is not ref-counted and it's simply
    // going to be popped, then pass the information as an assertion
    // that the type is not ref-counted.  This avoids both generating a
    // type check and dec-refing the value.
    if (i.outputPredictionStatic && isPop(i.next->op()) &&
        !jitType.maybeCounted()) {
      TRACE(1, "irPassPredictedAndInferredTypes: output inferred as %s\n",
            jitType.toString().c_str());
      m_hhbcTrans.assertTypeStack(0, JIT::Type::Uncounted);
    } else {
      TRACE(1, "irPassPredictedAndInferredTypes: output predicted as %s\n",
            jitType.toString().c_str());
      m_hhbcTrans.checkTypeTopOfStack(jitType, i.next->offset());
    }
  }
}
void translateInstr(
  IRGS& irgs,
  const NormalizedInstruction& ni,
  bool checkOuterTypeOnly,
  bool firstInst
) {
  irgen::prepareForNextHHBC(
    irgs,
    &ni,
    ni.source,
    ni.endsRegion && !irgen::isInlining(irgs)
  );

  const Func* builtinFunc = nullptr;
  if (ni.op() == OpFCallBuiltin) {
    auto str = ni.m_unit->lookupLitstrId(ni.imm[2].u_SA);
    builtinFunc = Unit::lookupFunc(str);
  }
  auto pc = ni.pc();
  for (auto i = 0, num = instrNumPops(pc); i < num; ++i) {
    auto const type =
      !builtinFunc ? flavorToType(instrInputFlavor(pc, i)) :
      builtinFunc->byRef(num - i - 1) ? TGen : TCell;
    // TODO(#5706706): want to use assertTypeLocation, but Location::Stack
    // is a little unsure of itself.
    irgen::assertTypeStack(irgs, BCSPOffset{i}, type);
  }

  FTRACE(1, "\nTranslating {}: {} with state:\n{}\n",
         ni.offset(), ni, show(irgs));

  irgen::ringbufferEntry(irgs, Trace::RBTypeBytecodeStart, ni.source, 2);
  irgen::emitIncStat(irgs, Stats::Instr_TC, 1);
  if (Stats::enableInstrCount()) {
    irgen::emitIncStat(irgs, Stats::opToTranslStat(ni.op()), 1);
  }
  if (Trace::moduleEnabledRelease(Trace::llvm_count, 1) ||
      RuntimeOption::EvalJitLLVMCounters) {
    irgen::gen(irgs, CountBytecode);
  }

  if (isAlwaysNop(ni.op())) return;
  if (ni.interp || RuntimeOption::EvalJitAlwaysInterpOne) {
    irgen::interpOne(irgs, ni);
    return;
  }

  translateDispatch(irgs, ni);

  FTRACE(3, "\nTranslated {}: {} with state:\n{}\n",
         ni.offset(), ni, show(irgs));
}
void IRTranslator::translateInstrWork(const NormalizedInstruction& i) {
  auto const op = i.op();

  switch (op) {
#define CASE(iNm)                               \
    case Op::iNm:                               \
      translate ## iNm(i);                      \
      break;
#define TRANSLATE(name, inst) translate ## name(inst); break;
    INSTRS
    PSEUDOINSTR_DISPATCH(TRANSLATE)
#undef TRANSLATE
#undef CASE
    default:
      not_reached();
  }
}
void IRTranslator::translateFCall(const NormalizedInstruction& i) {
  auto const numArgs = i.imm[0].u_IVA;

  const PC after = m_hhbcTrans.curUnit()->at(i.nextSk().offset());
  const Func* srcFunc = m_hhbcTrans.curFunc();
  Offset returnBcOffset =
    srcFunc->unit()->offsetOf(after - srcFunc->base());

  /*
   * If we have a calleeTrace, we're going to see if we should inline
   * the call.
   */
  if (i.calleeTrace) {
    if (!i.calleeTrace->m_inliningFailed) {
      assert(shouldIRInline(m_hhbcTrans.curFunc(), i.funcd, *i.calleeTrace));

      m_hhbcTrans.beginInlining(numArgs, i.funcd, returnBcOffset);
      static const bool shapeStats = Stats::enabledAny() &&
                                     getenv("HHVM_STATS_INLINESHAPE");
      if (shapeStats) {
        m_hhbcTrans.profileInlineFunctionShape(traceletShape(*i.calleeTrace));
      }

      Unit::MetaHandle metaHand;
      for (auto* ni = i.calleeTrace->m_instrStream.first;
           ni; ni = ni->next) {
        readMetaData(metaHand, *ni, m_hhbcTrans, false, MetaMode::Legacy);
        translateInstr(*ni);
      }
      return;
    }

    static const auto enabled = Stats::enabledAny() &&
                                getenv("HHVM_STATS_FAILEDINL");
    if (enabled) {
      m_hhbcTrans.profileFunctionEntry("FailedCandidate");
      m_hhbcTrans.profileFailedInlShape(traceletShape(*i.calleeTrace));
    }
  }

  HHIR_EMIT(FCall, numArgs, returnBcOffset, i.funcd,
            JIT::callDestroysLocals(i, m_hhbcTrans.curFunc()));
}
void IRTranslator::translateLtGtOp(const NormalizedInstruction& i) {
  auto const op = i.op();
  assert(op == Op::Lt || op == Op::Lte || op == Op::Gt || op == Op::Gte);

  auto leftType  = m_hhbcTrans.topType(1, DataTypeGeneric);
  auto rightType = m_hhbcTrans.topType(0, DataTypeGeneric);
  bool ok = equivDataTypes(leftType.toDataType(), rightType.toDataType()) &&
            leftType.subtypeOfAny(Type::Null, Type::Bool, Type::Int);

  HHIR_UNIMPLEMENTED_WHEN(!ok, LtGtOp);
  switch (op) {
    case Op::Lt  : HHIR_EMIT(Lt);
    case Op::Lte : HHIR_EMIT(Lte);
    case Op::Gt  : HHIR_EMIT(Gt);
    case Op::Gte : HHIR_EMIT(Gte);
    default      : HHIR_UNIMPLEMENTED(LtGtOp);
  }
}
void IRTranslator::translateInstrWork(const NormalizedInstruction& i) {
  auto const op = i.op();

  switch (op) {
#define CASE(iNm) case Op::iNm: return unpack ## iNm(nullptr, i);
    REGULAR_INSTRS
#undef CASE

#define CASE(nm) case Op::nm: return translate ## nm(i);
#define TRANSLATE(name, inst) return translate ## name(i);
    IRREGULAR_INSTRS
    PSEUDOINSTR_DISPATCH(TRANSLATE)
#undef TRANSLATE
#undef CASE

#define CASE(op) case Op::op:
    INTERP_ONE_INSTRS
#undef CASE
      always_assert(false);
  }
}
void IRTranslator::translateLtGtOp(const NormalizedInstruction& i) {
  auto const op = i.op();
  assert(op == Op::Lt || op == Op::Lte || op == Op::Gt || op == Op::Gte);

  auto leftType  = m_hhbcTrans.topType(1, DataTypeGeneric);
  auto rightType = m_hhbcTrans.topType(0, DataTypeGeneric);
  if (!leftType.isKnownDataType() || !rightType.isKnownDataType()) {
    HHIR_UNIMPLEMENTED(LtGtOp-UnknownInput);
  }
  bool ok =
    leftType.subtypeOfAny(Type::Null, Type::Bool, Type::Int, Type::Dbl) &&
    rightType.subtypeOfAny(Type::Null, Type::Bool, Type::Int, Type::Dbl);

  HHIR_UNIMPLEMENTED_WHEN(!ok, LtGtOp);
  switch (op) {
    case Op::Lt  : HHIR_EMIT(Lt);
    case Op::Lte : HHIR_EMIT(Lte);
    case Op::Gt  : HHIR_EMIT(Gt);
    case Op::Gte : HHIR_EMIT(Gte);
    default      : HHIR_UNIMPLEMENTED(LtGtOp);
  }
}
/*
 * Get location metadata for the inputs of `ni'.
 */
InputInfoVec getInputs(NormalizedInstruction& ni, FPInvOffset bcSPOff) {
  InputInfoVec inputs;
  if (isAlwaysNop(ni.op())) return inputs;

  always_assert_flog(
    instrInfo.count(ni.op()),
    "Invalid opcode in getInputsImpl: {}\n",
    opcodeToName(ni.op())
  );
  UNUSED auto const sk = ni.source;

  auto const& info = instrInfo[ni.op()];
  auto const flags = info.in;
  auto stackOff = bcSPOff;

  if (flags & FStack) {
    stackOff -= ni.imm[0].u_IVA; // arguments consumed
    stackOff -= kNumActRecCells; // ActRec is torn down as well
  }
  if (flags & FuncdRef) inputs.needsRefCheck = true;
  if (flags & IgnoreInnerType) ni.ignoreInnerType = true;

  if (flags & Stack1) {
    SKTRACE(1, sk, "getInputs: Stack1 %d\n", stackOff.offset);
    inputs.emplace_back(Location::Stack { stackOff-- });

    if (flags & DontGuardStack1) inputs.back().dontGuard = true;

    if (flags & Stack2) {
      SKTRACE(1, sk, "getInputs: Stack2 %d\n", stackOff.offset);
      inputs.emplace_back(Location::Stack { stackOff-- });

      if (flags & Stack3) {
        SKTRACE(1, sk, "getInputs: Stack3 %d\n", stackOff.offset);
        inputs.emplace_back(Location::Stack { stackOff-- });
      }
    }
  }
  if (flags & StackI) {
    inputs.emplace_back(Location::Stack {
      BCSPRelOffset{ni.imm[0].u_IVA}.to<FPInvOffset>(bcSPOff)
    });
  }
  if (flags & StackN) {
    int numArgs = (ni.op() == Op::NewPackedArray ||
                   ni.op() == Op::NewVecArray ||
                   ni.op() == Op::ConcatN)
      ? ni.imm[0].u_IVA
      : ni.immVec.numStackValues();

    SKTRACE(1, sk, "getInputs: StackN %d %d\n", stackOff.offset, numArgs);
    for (int i = 0; i < numArgs; i++) {
      inputs.emplace_back(Location::Stack { stackOff-- });
      inputs.back().dontGuard = true;
      inputs.back().dontBreak = true;
    }
  }
  if (flags & BStackN) {
    int numArgs = ni.imm[0].u_IVA;

    SKTRACE(1, sk, "getInputs: BStackN %d %d\n", stackOff.offset, numArgs);
    for (int i = 0; i < numArgs; i++) {
      inputs.emplace_back(Location::Stack { stackOff-- });
    }
  }

  if (flags & Local) {
    // (Almost) all instructions that take a Local have the local's index as
    // their first immediate.
    auto const loc = ni.imm[localImmIdx(ni.op())].u_IVA;
    SKTRACE(1, sk, "getInputs: local %d\n", loc);
    inputs.emplace_back(Location::Local { uint32_t(loc) });
  }
  if (flags & AllLocals) ni.ignoreInnerType = true;

  if (flags & MKey) {
    auto mk = ni.imm[memberKeyImmIdx(ni.op())].u_KA;
    switch (mk.mcode) {
      case MEL: case MPL:
        inputs.emplace_back(Location::Local { uint32_t(mk.iva) });
        break;
      case MEC: case MPC:
        inputs.emplace_back(Location::Stack {
          BCSPRelOffset{int32_t(mk.iva)}.to<FPInvOffset>(bcSPOff)
        });
        break;
      case MW:
      case MEI:
      case MET:
      case MPT:
      case MQT:
        // The inputs vector is only used for deciding when to break the
        // tracelet, which can never happen for these cases.
        break;
    }
  }

  SKTRACE(1, sk, "stack args: virtual sfo now %d\n", stackOff.offset);
  TRACE(1, "%s\n", Trace::prettyNode("Inputs", inputs).c_str());

  if ((flags & DontGuardAny) || dontGuardAnyInputs(ni.op())) {
    for (auto& info : inputs) info.dontGuard = true;
  }
  return inputs;
}
static void translateDispatch(irgen::IRGS& irgs,
                              const NormalizedInstruction& ni) {
#define O(nm, imms, ...) case Op::nm: irgen::emit##nm(irgs imms); return;
  switch (ni.op()) { OPCODES }
#undef O
}
void IRTranslator::interpretInstr(const NormalizedInstruction& i) {
  FTRACE(5, "HHIR: BC Instr {}\n", i.toString());
  m_hhbcTrans.emitInterpOne(i);
}