CCallHelpers::Jump CheckSpecial::generate(Inst& inst, CCallHelpers& jit, GenerationContext& context)
{
    CCallHelpers::Jump fail = hiddenBranch(inst).generate(jit, context);
    ASSERT(fail.isSet());

    Value* value = inst.origin;

    Vector<ValueRep> reps;
    if (isCheckMath(value->opcode())) {
        if (value->opcode() == CheckMul)
            reps.append(ValueRep());
        else if (value->opcode() == CheckSub && value->child(0)->isInt(0))
            reps.append(ValueRep::constant(0));
        else
            reps.append(repForArg(*context.code, inst.args[3]));
        reps.append(repForArg(*context.code, inst.args[2]));
    } else {
        ASSERT(value->opcode() == Check);
        reps.append(ValueRep::constant(1));
    }

    appendRepsImpl(context, m_numCheckArgs + 1, inst, reps);

    context.latePaths.append(
        createSharedTask<GenerationContext::LatePathFunction>(
            [=] (CCallHelpers& jit, GenerationContext&) {
                fail.link(&jit);

                Stackmap* stackmap = value->stackmap();
                ASSERT(stackmap);

                Stackmap::GenerationParams params;
                params.value = value;
                params.stackmap = stackmap;
                params.reps = reps;
                params.usedRegisters = stackmap->m_usedRegisters;

                stackmap->m_generator->run(jit, params);
            }));

    return CCallHelpers::Jump(); // As far as Air thinks, we are not a terminal.
}
CCallHelpers::Jump CheckSpecial::generate(Inst& inst, CCallHelpers& jit, GenerationContext& context)
{
    CCallHelpers::Jump fail = hiddenBranch(inst).generate(jit, context);
    ASSERT(fail.isSet());

    StackmapValue* value = inst.origin->as<StackmapValue>();
    ASSERT(value);

    Vector<ValueRep> reps = repsImpl(context, numB3Args(inst), m_numCheckArgs + 1, inst);

    // Set aside the args that are relevant to undoing the operation. This is because we don't want
    // to capture all of inst in the closure below.
    Vector<Arg, 3> args;
    for (unsigned i = 0; i < m_numCheckArgs; ++i)
        args.append(inst.args[1 + i]);

    context.latePaths.append(
        createSharedTask<GenerationContext::LatePathFunction>(
            [=] (CCallHelpers& jit, GenerationContext& context) {
                fail.link(&jit);

                // If necessary, undo the operation.
                switch (m_checkKind.opcode) {
                case BranchAdd32:
                    if ((m_numCheckArgs == 4 && args[1] == args[2] && args[2] == args[3])
                        || (m_numCheckArgs == 3 && args[1] == args[2])) {
                        // This is ugly, but that's fine - we won't have to do this very often.
                        ASSERT(args[1].isGPR());
                        GPRReg valueGPR = args[1].gpr();
                        GPRReg scratchGPR = CCallHelpers::selectScratchGPR(valueGPR);
                        jit.pushToSave(scratchGPR);
                        jit.setCarry(scratchGPR);
                        jit.lshift32(CCallHelpers::TrustedImm32(31), scratchGPR);
                        jit.urshift32(CCallHelpers::TrustedImm32(1), valueGPR);
                        jit.or32(scratchGPR, valueGPR);
                        jit.popToRestore(scratchGPR);
                        break;
                    }
                    if (m_numCheckArgs == 4) {
                        if (args[1] == args[3])
                            Inst(Sub32, nullptr, args[2], args[3]).generate(jit, context);
                        else if (args[2] == args[3])
                            Inst(Sub32, nullptr, args[1], args[3]).generate(jit, context);
                    } else if (m_numCheckArgs == 3)
                        Inst(Sub32, nullptr, args[1], args[2]).generate(jit, context);
                    break;
                case BranchAdd64:
                    if ((m_numCheckArgs == 4 && args[1] == args[2] && args[2] == args[3])
                        || (m_numCheckArgs == 3 && args[1] == args[2])) {
                        // This is ugly, but that's fine - we won't have to do this very often.
                        ASSERT(args[1].isGPR());
                        GPRReg valueGPR = args[1].gpr();
                        GPRReg scratchGPR = CCallHelpers::selectScratchGPR(valueGPR);
                        jit.pushToSave(scratchGPR);
                        jit.setCarry(scratchGPR);
                        jit.lshift64(CCallHelpers::TrustedImm32(63), scratchGPR);
                        jit.urshift64(CCallHelpers::TrustedImm32(1), valueGPR);
                        jit.or64(scratchGPR, valueGPR);
                        jit.popToRestore(scratchGPR);
                        break;
                    }
                    if (m_numCheckArgs == 4) {
                        if (args[1] == args[3])
                            Inst(Sub64, nullptr, args[2], args[3]).generate(jit, context);
                        else if (args[2] == args[3])
                            Inst(Sub64, nullptr, args[1], args[3]).generate(jit, context);
                    } else if (m_numCheckArgs == 3)
                        Inst(Sub64, nullptr, args[1], args[2]).generate(jit, context);
                    break;
                case BranchSub32:
                    Inst(Add32, nullptr, args[1], args[2]).generate(jit, context);
                    break;
                case BranchSub64:
                    Inst(Add64, nullptr, args[1], args[2]).generate(jit, context);
                    break;
                case BranchNeg32:
                    Inst(Neg32, nullptr, args[1]).generate(jit, context);
                    break;
                case BranchNeg64:
                    Inst(Neg64, nullptr, args[1]).generate(jit, context);
                    break;
                default:
                    break;
                }

                value->m_generator->run(jit, StackmapGenerationParams(value, reps, context));
            }));

    return CCallHelpers::Jump(); // As far as Air thinks, we are not a terminal.
}
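// A minimal sketch of the client side of the late path above, assuming the B3 API of this
// era (CheckValue / setGenerator / StackmapGenerationParams, as exercised in testb3). The
// lambda passed to setGenerator() becomes the m_generator that run() invokes after the undo
// sequence; the function and variable names here are illustrative, not from this file.
static void exampleCheckAddClient(Procedure& proc)
{
    BasicBlock* root = proc.addBlock();
    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
    CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
    checkAdd->setGenerator(
        [] (CCallHelpers& jit, const StackmapGenerationParams&) {
            // This runs on the slow path that fail.link(&jit) binds above, after the
            // check's effects have been undone.
            jit.breakpoint();
        });
    root->appendNew<ControlValue>(proc, Return, Origin(), checkAdd);
}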
void generate(Code& code, CCallHelpers& jit)
{
    TimingScope timingScope("Air::generate");

    // We don't expect the incoming code to have predecessors computed.
    code.resetReachability();

    if (shouldValidateIR())
        validate(code);

    // If we're doing super verbose dumping, the phase scope of any phase will already do a dump.
    if (shouldDumpIR() && !shouldDumpIRAtEachPhase()) {
        dataLog("Initial air:\n");
        dataLog(code);
    }

    // This is where we run our optimizations and transformations.
    // FIXME: Add Air optimizations.
    // https://bugs.webkit.org/show_bug.cgi?id=150456

    eliminateDeadCode(code);

    // This is where we would have a real register allocator. Then, we could use spillEverything()
    // in place of the register allocator only for testing.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=150457
    spillEverything(code);

    // Prior to this point the prologue and epilogue are implicit. This makes them explicit. It
    // also does things like identify which callee-saves we're using and saves them.
    handleCalleeSaves(code);

    // This turns all Stack and CallArg Args into Addr args that use the frame pointer. It does
    // this by first-fit allocating stack slots. It should be pretty darn close to optimal, so we
    // shouldn't have to worry about this very much.
    allocateStack(code);

    // If we coalesced moves then we can unbreak critical edges. This is the main reason for this
    // phase.
    simplifyCFG(code);

    // FIXME: We should really have a code layout optimization here.
    // https://bugs.webkit.org/show_bug.cgi?id=150478

    reportUsedRegisters(code);

    if (shouldValidateIR())
        validate(code);

    // Do a final dump of Air. Note that we have to do this even if we are doing per-phase dumping,
    // since the final generation is not a phase.
    if (shouldDumpIR()) {
        dataLog("Air after ", code.lastPhaseName(), ", before generation:\n");
        dataLog(code);
    }

    TimingScope codeGenTimingScope("Air::generate backend");

    // And now, we generate code.
    jit.emitFunctionPrologue();
    jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), MacroAssembler::stackPointerRegister);

    GenerationContext context;
    context.code = &code;
    IndexMap<BasicBlock, CCallHelpers::Label> blockLabels(code.size());
    IndexMap<BasicBlock, CCallHelpers::JumpList> blockJumps(code.size());

    auto link = [&] (CCallHelpers::Jump jump, BasicBlock* target) {
        if (blockLabels[target].isSet()) {
            jump.linkTo(blockLabels[target], &jit);
            return;
        }

        blockJumps[target].append(jump);
    };

    for (BasicBlock* block : code) {
        blockJumps[block].link(&jit);
        // Bind this block's label so that backward branches to it can link immediately.
        blockLabels[block] = jit.label();
        ASSERT(block->size() >= 1);
        for (unsigned i = 0; i < block->size() - 1; ++i) {
            CCallHelpers::Jump jump = block->at(i).generate(jit, context);
            ASSERT_UNUSED(jump, !jump.isSet());
        }

        if (block->last().opcode == Jump
            && block->successorBlock(0) == code.findNextBlock(block))
            continue;

        if (block->last().opcode == Ret) {
            // We currently don't represent the full prologue/epilogue in Air, so we need to
            // have this override.
            jit.emitFunctionEpilogue();
            jit.ret();
            continue;
        }

        CCallHelpers::Jump jump = block->last().generate(jit, context);
        switch (block->numSuccessors()) {
        case 0:
            ASSERT(!jump.isSet());
            break;
        case 1:
            link(jump, block->successorBlock(0));
            break;
        case 2:
            link(jump, block->successorBlock(0));
            if (block->successorBlock(1) != code.findNextBlock(block))
                link(jit.jump(), block->successorBlock(1));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    for (auto& latePath : context.latePaths)
        latePath->run(jit, context);
}
void generate(Code& code, CCallHelpers& jit)
{
    TimingScope timingScope("Air::generate");

    DisallowMacroScratchRegisterUsage disallowScratch(jit);

    // And now, we generate code.
    jit.emitFunctionPrologue();
    if (code.frameSize())
        jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), MacroAssembler::stackPointerRegister);

    GenerationContext context;
    context.code = &code;
    IndexMap<BasicBlock, CCallHelpers::Label> blockLabels(code.size());
    IndexMap<BasicBlock, CCallHelpers::JumpList> blockJumps(code.size());

    auto link = [&] (CCallHelpers::Jump jump, BasicBlock* target) {
        if (blockLabels[target].isSet()) {
            jump.linkTo(blockLabels[target], &jit);
            return;
        }

        blockJumps[target].append(jump);
    };

    for (BasicBlock* block : code) {
        blockJumps[block].link(&jit);
        blockLabels[block] = jit.label();
        ASSERT(block->size() >= 1);
        for (unsigned i = 0; i < block->size() - 1; ++i) {
            CCallHelpers::Jump jump = block->at(i).generate(jit, context);
            ASSERT_UNUSED(jump, !jump.isSet());
        }

        if (block->last().opcode == Jump
            && block->successorBlock(0) == code.findNextBlock(block))
            continue;

        if (block->last().opcode == Ret) {
            // We currently don't represent the full prologue/epilogue in Air, so we need to
            // have this override.
            if (code.frameSize())
                jit.emitFunctionEpilogue();
            else
                jit.emitFunctionEpilogueWithEmptyFrame();
            jit.ret();
            continue;
        }

        CCallHelpers::Jump jump = block->last().generate(jit, context);
        switch (block->numSuccessors()) {
        case 0:
            ASSERT(!jump.isSet());
            break;
        case 1:
            link(jump, block->successorBlock(0));
            break;
        case 2:
            link(jump, block->successorBlock(0));
            if (block->successorBlock(1) != code.findNextBlock(block))
                link(jit.jump(), block->successorBlock(1));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    for (auto& latePath : context.latePaths)
        latePath->run(jit, context);
}
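// A self-contained sketch (plain C++, not WebKit code) of the one-pass linking scheme used
// above: a branch to a block that has already been emitted is resolved immediately through
// blockLabels, while a branch to a block we have not reached yet is queued in blockJumps
// and patched once the target block's label is bound. All offsets here are made up.
#include <cstdio>
#include <optional>
#include <vector>

struct Jump { int target; int resolvedOffset; };

int main()
{
    // Three blocks, each "emitting" 4 bytes of body plus one 4-byte jump:
    // block 0 -> 2 (forward), block 1 -> 2 (forward), block 2 -> 0 (backward).
    const int successor[3] = { 2, 2, 0 };
    std::optional<int> blockLabels[3]; // offset of each block, once bound
    std::vector<int> blockJumps[3];    // jumps waiting on each block's label
    std::vector<Jump> jumps;

    int offset = 0;
    for (int block = 0; block < 3; ++block) {
        for (int jumpIndex : blockJumps[block]) // patch queued forward jumps
            jumps[jumpIndex].resolvedOffset = offset;
        blockLabels[block] = offset;            // bind this block's label
        offset += 4;                            // the block body

        jumps.push_back({ successor[block], -1 });
        if (blockLabels[successor[block]])      // backward branch: resolve now
            jumps.back().resolvedOffset = *blockLabels[successor[block]];
        else                                    // forward branch: queue it
            blockJumps[successor[block]].push_back(int(jumps.size()) - 1);
        offset += 4;                            // the jump itself
    }

    for (const Jump& jump : jumps)
        std::printf("jump to block %d lands at offset %d\n", jump.target, jump.resolvedOffset);
    return 0;
}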