// Emits a block exit where the branch destination PC is in EAX at runtime.
// Stores the PC to mips_->pc, charges the downcount, then jumps to the
// dispatcher. With fast memory disabled, the target is range-checked first
// so a corrupt branch target sets CORE_ERROR instead of crashing the host.
void Jit::WriteExitDestInEAX() {
	// TODO: Some wasted potential, dispatcher will always read this back into EAX.
	MOV(32, M(&mips_->pc), R(EAX));
	WriteDowncount();
	// Validate the jump to avoid a crash?
	if (!g_Config.bFastMemory) {
		// Cheap range check: kernel base <= EAX < user end.
		// NOTE(review): CC_L/CC_GE are signed compares; for addresses the
		// unsigned CC_B/CC_AE would be safer — confirm against newer revisions.
		CMP(32, R(EAX), Imm32(PSP_GetKernelMemoryBase()));
		FixupBranch tooLow = J_CC(CC_L);
		CMP(32, R(EAX), Imm32(PSP_GetUserMemoryEnd()));
		FixupBranch tooHigh = J_CC(CC_GE);
		JMP(asm_.dispatcher, true);

		// Out of the fast range: ask Memory::GetPointer whether the address
		// maps anywhere at all before giving up.
		SetJumpTarget(tooLow);
		SetJumpTarget(tooHigh);
		ABI_CallFunctionA(thunks.ProtectFunction((void *) Memory::GetPointer, 1), R(EAX));
		CMP(32, R(EAX), Imm32(0));
		J_CC(CC_NE, asm_.dispatcher, true);  // Non-null pointer: dispatch normally.

		// Unmapped target: flag the error and let the core-state-checking
		// dispatcher handle shutdown.
		// TODO: "Ignore" this so other threads can continue?
		if (g_Config.bIgnoreBadMemAccess)
			MOV(32, M((void*)&coreState), Imm32(CORE_ERROR));
		JMP(asm_.dispatcherCheckCoreState, true);
	}
	else
		JMP(asm_.dispatcher, true);
}
void Jit::WriteExitDestInEAX() { // TODO: Some wasted potential, dispatcher will always read this back into EAX. MOV(32, M(&mips_->pc), R(EAX)); // If we need to verify coreState and rewind, we may not jump yet. if (js.afterOp & (JitState::AFTER_CORE_STATE | JitState::AFTER_REWIND_PC_BAD_STATE)) { // CORE_RUNNING is <= CORE_NEXTFRAME. CMP(32, M((void*)&coreState), Imm32(CORE_NEXTFRAME)); FixupBranch skipCheck = J_CC(CC_LE); MOV(32, M(&mips_->pc), Imm32(js.compilerPC)); WriteSyscallExit(); SetJumpTarget(skipCheck); js.afterOp = JitState::AFTER_NONE; } WriteDowncount(); // Validate the jump to avoid a crash? if (!g_Config.bFastMemory) { CMP(32, R(EAX), Imm32(PSP_GetKernelMemoryBase())); FixupBranch tooLow = J_CC(CC_B); CMP(32, R(EAX), Imm32(PSP_GetUserMemoryEnd())); FixupBranch tooHigh = J_CC(CC_AE); // Need to set neg flag again if necessary. SUB(32, M(¤tMIPS->downcount), Imm32(0)); JMP(asm_.dispatcher, true); SetJumpTarget(tooLow); SetJumpTarget(tooHigh); CallProtectedFunction((void *) Memory::GetPointer, R(EAX)); CMP(32, R(EAX), Imm32(0)); FixupBranch skip = J_CC(CC_NE); // TODO: "Ignore" this so other threads can continue? if (g_Config.bIgnoreBadMemAccess) CallProtectedFunction((void *) Core_UpdateState, Imm32(CORE_ERROR)); SUB(32, M(¤tMIPS->downcount), Imm32(0)); JMP(asm_.dispatcherCheckCoreState, true); SetJumpTarget(skip); SUB(32, M(¤tMIPS->downcount), Imm32(0)); J_CC(CC_NE, asm_.dispatcher, true); } else JMP(asm_.dispatcher, true); }
// Emits a check for a CPU breakpoint at addr. If one is set, the generated
// code saves flags, flushes regs, and calls JitBreakpoint(); a non-zero
// return means the (possibly conditional) breakpoint fired, so the block
// exits through the core-state-checking dispatcher. Returns true if any
// breakpoint code was emitted at compile time.
bool Jit::CheckJitBreakpoint(u32 addr, int downcountOffset) {
	if (CBreakPoints::IsAddressBreakPoint(addr)) {
		SAVE_FLAGS;
		FlushAll();
		MOV(32, M(&mips_->pc), Imm32(js.compilerPC));
		ABI_CallFunction(&JitBreakpoint);

		// If 0, the conditional breakpoint wasn't taken.
		CMP(32, R(EAX), Imm32(0));
		FixupBranch skip = J_CC(CC_Z);
		WriteDowncount(downcountOffset);
		// Just to fix the stack.
		LOAD_FLAGS;
		JMP(asm_.dispatcherCheckCoreState, true);
		SetJumpTarget(skip);
		LOAD_FLAGS;

		return true;
	}
	return false;
}
// Uses value in IR to determine course of action // Returns false if errors bool interpreter() { bool success = true; //While no error flag and no timer interrupt while (success && timer_interrupt < QUANTUM) { machine.IR = main_memory[MMU(machine.PC)]; machine.PC++; // Increment Program Counter unsigned short int op = getOpcode(machine.IR); switch (op) { case 0: success = LOD(); break; case 1: success = STO(); break; case 2: success = ADD(); break; case 3: success = SUB(); break; case 4: success = ADR(); break; case 5: success = SUR(); break; case 6: success = AND(); break; case 7: success = IOR(); break; case 8: success = NOT(); break; case 9: success = JMP(); break; case 10: success = JEQ(); break; case 11: success = JGT(); break; case 12: success = JLT(); break; case 13: success = CMP(); break; case 14: success = CLR(); break; case 15: return HLT(); break; //Quit early on HLT default: success = false; break; } usleep(1000000); // Sleep 1 second to allow easier instruction tracing (*sysclock)++; timer_interrupt++; } timer_interrupt = 0; return success; }
//Execute cycle void execute(FILE *ofp) { fprintf(ofp, "%d\t%s\t%d\t%d\t", pc-1, opcodes[ir.op], ir.l, ir.m); switch( ir.op ) { case 1 : LIT(ir.l, ir.m); break; case 2 : OPR(ir.l, ir.m); break; case 3 : LOD(ir.l, ir.m); break; case 4 : STO(ir.l, ir.m); break; case 5 : CAL(ir.l, ir.m); break; case 6 : INC(ir.l, ir.m); break; case 7 : JMP(ir.l, ir.m); break; case 8 : JPC(ir.l, ir.m); break; case 9 : SIO(ir.l, ir.m); break; case 10 : SIO(ir.l, ir.m); break; case 11 : SIO(ir.l, ir.m); break; default : break; } fprintf(ofp, "%d\t%d\t%d\t", pc, bp, sp); int i = 1; int countAR = 0; for(i = 1; i <= sp; i++) { if(countAR < numAR && ar[countAR] < i) { countAR++; fprintf(ofp, " |"); } fprintf(ofp, " %d", stack[i]); } fprintf(ofp, "\n"); }
// Emits the exit path used after a syscall: charge the downcount, run
// memcheck cleanup if this op flagged it, then jump to the dispatcher
// variant that re-validates coreState before continuing.
void Jit::WriteSyscallExit() {
	WriteDowncount();
	if (js.afterOp & JitState::AFTER_MEMCHECK_CLEANUP) {
		ABI_CallFunction(&JitMemCheckCleanup);
	}
	JMP(asm_.dispatcherCheckCoreState, true);
}
// Check condition flag register; jump if appropriate bool JLT() { if (machine.CR == LST) JMP(); #ifdef DEBUG printDebug("JLT"); #endif return true; }
// Check condition flag register; jump if appropriate bool JGT() { if (machine.CR == GRT) JMP(); #ifdef DEBUG printDebug("JGT"); #endif return true; }
// Check condition flag register; jump if appropriate bool JEQ() { if (machine.CR == EQL) JMP(); #ifdef DEBUG printDebug("JEQ"); #endif return true; }
// Regression test for setjmp-family signal-mask semantics. Blocks SIGABRT,
// longjmps into the saved context, and sends itself SIGABRT: the handler
// must run iff the jump restored the signal mask. `expectsignal` encodes
// whether the mask should have been saved (SET/JMP are macros selecting
// setjmp/_setjmp/sigsetjmp per the TEST_* build flag).
int main(int argc, char *argv[]) {
	struct sigaction sa;
	BUF jb;
	sigset_t ss;
	int i, x;

	i = getpid();

#ifdef TEST_SETJMP
	expectsignal = 0;    // setjmp saves the mask: the signal stays blocked -> no delivery expected... actually handler runs after unblock-by-jmp; 0 = delivery expected.
#endif
#ifdef TEST_U_SETJMP
	expectsignal = 1;    // _setjmp does not save the mask: SIGABRT stays blocked.
#endif
#ifdef TEST_SIGSETJMP
	// sigsetjmp's save behavior is chosen on the command line.
	if (argc != 2 || (strcmp(argv[1], "save") && strcmp(argv[1], "nosave"))) {
		fprintf(stderr, "usage: %s [save|nosave]\n", argv[0]);
		exit(1);
	}
	expectsignal = (strcmp(argv[1], "save") != 0);
#endif

	// Install the SIGABRT handler (it longjmp-exits on delivery).
	sa.sa_handler = aborthandler;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = 0;
	if (sigaction(SIGABRT, &sa, NULL) == -1)
		err(1, "sigaction failed");
	// Block SIGABRT before saving the context.
	if (sigemptyset(&ss) == -1)
		err(1, "sigemptyset failed");
	if (sigaddset(&ss, SIGABRT) == -1)
		err(1, "sigaddset failed");
	if (sigprocmask(SIG_BLOCK, &ss, NULL) == -1)
		err(1, "sigprocmask (1) failed");

	// Second return (x != 0) happens after JMP below; the pid is the jump value.
	x = SET(jb, !expectsignal);
	if (x != 0) {
		if (x != i)
			errx(1, "setjmp returned wrong value");
		kill(i, SIGABRT);
		// If the mask was NOT restored, SIGABRT is still unblocked here and the
		// handler fires inside kill(); reaching the next line is then a failure.
		if (expectsignal)
			errx(1, "kill(SIGABRT) failed");
		else
			exit(0);
	}

	// Unblock, then jump back to the saved context with the pid as the value.
	if (sigprocmask(SIG_UNBLOCK, &ss, NULL) == -1)
		err(1, "sigprocmask (2) failed");
	JMP(jb, i);

	errx(1, "jmp failed");
}
// Emits a block exit to a known destination PC. Charges the downcount,
// records the exit in the block for later linking, and either jumps
// straight into an already-compiled target block or falls back to the
// dispatcher with mips_->pc set.
void Jit::WriteExit(u32 destination, int exit_num) {
	WriteDowncount();

	//If nobody has taken care of this yet (this can be removed when all branches are done)
	JitBlock *b = js.curBlock;
	b->exitAddress[exit_num] = destination;
	b->exitPtrs[exit_num] = GetWritableCodePtr();

	// Link opportunity!
	int block = blocks.GetBlockNumberFromStartAddress(destination);
	if (block >= 0 && jo.enableBlocklink) {
		// It exists! Joy of joy! Jump directly to the target's checked entry.
		JMP(blocks.GetBlock(block)->checkedEntry, true);
		b->linkStatus[exit_num] = true;
	} else {
		// No blocklinking.
		MOV(32, M(&mips_->pc), Imm32(destination));
		JMP(asm_.dispatcher, true);
	}
}
// Builds an instruction block that executes `body` `count` times, using ECX
// as the loop counter (clobbered). The jumps are rel8, so the repeated body
// must be short enough for an 8-bit displacement.
// NOTE(review): Count/Body are presumably template parameters declared just
// above this definition — confirm in the full file.
constexpr auto do_x_times(Count count, Body... body) {
	return block(
		MOV(ecx, count),
		"start"_label,
		CMP(ecx, 0_d),
		JE("done"_rel8),      // counter exhausted -> leave the loop
		body...,
		DEC(ecx),
		JMP("start"_rel8),
		"done"_label);
}
// Emits a block exit to a known destination PC. If a preceding op may have
// changed coreState, first emits a check that rewinds the PC and exits via
// the syscall path on a bad state. Then charges the downcount, records the
// exit for linking, and jumps to the target block or the dispatcher.
void Jit::WriteExit(u32 destination, int exit_num) {
	_dbg_assert_msg_(JIT, exit_num < MAX_JIT_BLOCK_EXITS, "Expected a valid exit_num");

	if (!Memory::IsValidAddress(destination)) {
		ERROR_LOG_REPORT(JIT, "Trying to write block exit to illegal destination %08x: pc = %08x", destination, currentMIPS->pc);
	}
	// If we need to verify coreState and rewind, we may not jump yet.
	if (js.afterOp & (JitState::AFTER_CORE_STATE | JitState::AFTER_REWIND_PC_BAD_STATE)) {
		// CORE_RUNNING is <= CORE_NEXTFRAME.
		CMP(32, M((void*)&coreState), Imm32(CORE_NEXTFRAME));
		FixupBranch skipCheck = J_CC(CC_LE);
		// Bad state: rewind the PC to this op and take the syscall exit.
		MOV(32, M(&mips_->pc), Imm32(js.compilerPC));
		WriteSyscallExit();
		SetJumpTarget(skipCheck);

		js.afterOp = JitState::AFTER_NONE;
	}

	WriteDowncount();

	//If nobody has taken care of this yet (this can be removed when all branches are done)
	JitBlock *b = js.curBlock;
	b->exitAddress[exit_num] = destination;
	b->exitPtrs[exit_num] = GetWritableCodePtr();

	// Link opportunity!
	int block = blocks.GetBlockNumberFromStartAddress(destination);
	if (block >= 0 && jo.enableBlocklink) {
		// It exists! Joy of joy! Jump directly to the target's checked entry.
		JMP(blocks.GetBlock(block)->checkedEntry, true);
		b->linkStatus[exit_num] = true;
	} else {
		// No blocklinking.
		MOV(32, M(&mips_->pc), Imm32(destination));
		JMP(asm_.dispatcher, true);
	}
}
// Emits a breakpoint trap at addr if one is set: flushes regs, calls
// JitBreakpoint(), and exits through the core-state-checking dispatcher.
// Returns true when trap code was emitted.
// NOTE(review): unlike the flag-saving variant, this ignores JitBreakpoint's
// return value, so even a non-firing conditional breakpoint exits the block
// here — confirm whether that is intended for this revision.
bool Jit::CheckJitBreakpoint(u32 addr, int downcountOffset) {
	if (CBreakPoints::IsAddressBreakPoint(addr)) {
		FlushAll();
		MOV(32, M(&mips_->pc), Imm32(js.compilerPC));
		CALL((void *)&JitBreakpoint);

		WriteDowncount(downcountOffset);
		JMP(asm_.dispatcherCheckCoreState, true);

		return true;
	}
	return false;
}
// Compiles one MIPS basic block starting at em_address into b. Emits a
// checked entry (downcount test for linked-block entry), then translates
// instructions until the compiler state says stop. Returns the unchecked
// (normal) entry point.
const u8 *Jit::DoJit(u32 em_address, JitBlock *b) {
	// Reset per-block compiler state.
	js.cancel = false;
	js.blockStart = js.compilerPC = mips_->pc;
	js.downcountAmount = 0;
	js.curBlock = b;
	js.compiling = true;
	js.inDelaySlot = false;
	js.PrefixStart();

	// We add a check before the block, used when entering from a linked block.
	b->checkedEntry = GetCodePtr();
	// Downcount flag check. The last block decremented downcounter, and the flag should still be available.
	FixupBranch skip = J_CC(CC_NBE);
	MOV(32, M(&mips_->pc), Imm32(js.blockStart));
	JMP(asm_.outerLoop, true);  // downcount hit zero - go advance.
	SetJumpTarget(skip);

	b->normalEntry = GetCodePtr();

	// TODO: this needs work
	MIPSAnalyst::AnalysisResults analysis; // = MIPSAnalyst::Analyze(em_address);

	gpr.Start(mips_, analysis);
	fpr.Start(mips_, analysis);

	js.numInstructions = 0;
	while (js.compiling) {
		// Jit breakpoints are quite fast, so let's do them in release too.
		CheckJitBreakpoint(js.compilerPC, 0);

		u32 inst = Memory::Read_Instruction(js.compilerPC);
		js.downcountAmount += MIPSGetInstructionCycleEstimate(inst);

		MIPSCompileOp(inst);

		js.compilerPC += 4;
		js.numInstructions++;
	}

	b->codeSize = (u32)(GetCodePtr() - b->normalEntry);
	NOP();
	AlignCode4();
	b->originalSize = js.numInstructions;
	return b->normalEntry;
}
// Emits the exit sequence for a guard: jump straight into the side exit's
// target fragment when it is already compiled, otherwise jump to the shared
// epilogue and remember the jump site so it can be patched later. O0 is
// loaded with the GuardRecord* (or 0) as the trace's return value.
void Assembler::nFragExit(LInsp guard) {
	SideExit* exit = guard->record()->exit;
	Fragment *frag = exit->target;
	GuardRecord *lr;
	if (frag && frag->fragEntry) {
		// Target fragment is compiled: branch directly, no record needed.
		JMP(frag->fragEntry);
		lr = 0;
	} else {
		// Target doesn't exist yet. Emit jump to epilog, and set up to patch later.
		if (!_epilogue)
			_epilogue = genEpilogue();
		lr = guard->record();
		JMP_long((intptr_t)_epilogue);
		lr->jmp = _nIns;  // remember the instruction to patch once the target compiles
	}
	// return value is GuardRecord*
	SET32(int(lr), O0);
}
// Opcode handler: thin wrapper invoking JMP with operand 3
// (presumably the addressing-mode/variant selector — confirm at JMP's definition).
INLINE void jmp3(void) { JMP(3); }
// Machine-generated program (emitted by the E_RT compiler/runtime — do not
// hand-edit). A register-machine instruction stream encoded as C macro calls
// plus labels: integer regs R###, float regs F###, LDI/STI etc. operate on a
// runtime memory array. Comments below are reviewer annotations; semantics of
// the opcodes live in the E_RT runtime — confirm there.
int main(int argc, char *argv[]) {
	E_RT_init(argc, argv);
	JMP(begin);

// Subroutine: pops args from the R000 stack, runs a float accumulation loop.
Label0:
	R005=ADD(R000,2); STI(R001, R000); R000=SUB(R000,1); MOVI(R000, R001);
	LDI(R005, R008); R005=ADD(R005,1); LDF(R005, F001); R005=ADD(R005,1);
	MOVIF(R008, F003); MOVIF(R008, F006); F005=FADD(F006,F001); MOVF(F005, F008);
	R010=ADD(R008,1); MOVI(R010, R011); MOVI(R010, R012); MOVIF(R008, F010); MOVF(F005, F011);
while_1_start:
	JMPC(GT(1000, R008), while_1_begin);
	JMP(while_1_end);
while_1_begin:
	MOVIF(R008, F013); MOVIF(R008, F014); F008=FADD(F014,F001);
	F001=FMUL(F001,2.0); R008=ADD(R008,100);
	JMP(while_1_start);
while_1_end:
	PRTF(F008); PRTS(R007); MOVI(R008, R002);
	JMP(Label1);
// Return sequence: restore caller state and jump through the saved address.
Label1:
	MOVI(R001, R000); R000=ADD(R000,1); LDI(R000, R001); R000=ADD(R000,1);
	LDI(R000, R004); R000=ADD(R000,2);
	JMPI(R004);
// Event handler for input 'a': read values, push a call frame, call Label0.
eventLabel_a:
	INI(R010); INF(F013);
	STI(R010, R000); R000=SUB(R000,1);
	STF(F013, R000); R000=SUB(R000,1);
	STF(F013, R000); R000=SUB(R000,1);
	STI(R010, R000); R000=SUB(R000,1);
	MOVL(Label2, R004); STI(R004, R000); R000=SUB(R000,1);  // push return address
	JMP(Label0);
Label2:
	MOVI(R002, R006); R000=ADD(R000,1); LDF(R000, F013); R000=ADD(R000,1); LDI(R000, R010);
	JMP(EventMStart);
// Program entry: init stack pointer and constants, then event loop.
begin:
	MOVI(10000, R000); MOVI(0, R006); MOVS("\n", R007);
	IN(R010); IN(R010); IN(R010);
// Main event loop: read a char; <= '@' (64) ends the program, 'a' (97)
// dispatches to its handler, anything else is ignored.
EventMStart:
	IN(R010);
	JMPC(GT(64, R010), EventMOut);
	JMPC(EQ(97, R010), eventLabel_a);
	JMP(EventMStart);
EventMOut:
	PRTS("\nDone\n");
	E_RT_exit();
	return 0;
}
// Compiles one MIPS basic block starting at em_address into b. Emits a
// checked entry (downcount test for linked-block entry), translates ops,
// and after any op that may change coreState emits a verification that
// exits (with a rewound or advanced PC) on a bad state. Returns the
// unchecked (normal) entry point.
const u8 *Jit::DoJit(u32 em_address, JitBlock *b) {
	// Reset per-block compiler state.
	js.cancel = false;
	js.blockStart = js.compilerPC = mips_->pc;
	js.nextExit = 0;
	js.downcountAmount = 0;
	js.curBlock = b;
	js.compiling = true;
	js.inDelaySlot = false;
	js.afterOp = JitState::AFTER_NONE;
	js.PrefixStart();

	// We add a check before the block, used when entering from a linked block.
	b->checkedEntry = GetCodePtr();
	// Downcount flag check. The last block decremented downcounter, and the flag should still be available.
	FixupBranch skip = J_CC(CC_NBE);
	MOV(32, M(&mips_->pc), Imm32(js.blockStart));
	JMP(asm_.outerLoop, true);  // downcount hit zero - go advance.
	SetJumpTarget(skip);

	b->normalEntry = GetCodePtr();
	MIPSAnalyst::AnalysisResults analysis = MIPSAnalyst::Analyze(em_address);

	gpr.Start(mips_, analysis);
	fpr.Start(mips_, analysis);

	js.numInstructions = 0;
	while (js.compiling) {
		// Jit breakpoints are quite fast, so let's do them in release too.
		CheckJitBreakpoint(js.compilerPC, 0);

		MIPSOpcode inst = Memory::Read_Opcode_JIT(js.compilerPC);
		js.downcountAmount += MIPSGetInstructionCycleEstimate(inst);

		MIPSCompileOp(inst);

		if (js.afterOp & JitState::AFTER_CORE_STATE) {
			// TODO: Save/restore?
			FlushAll();

			// If we're rewinding, CORE_NEXTFRAME should not cause a rewind.
			// It doesn't really matter either way if we're not rewinding.
			// CORE_RUNNING is <= CORE_NEXTFRAME.
			CMP(32, M(&coreState), Imm32(CORE_NEXTFRAME));
			FixupBranch skipCheck = J_CC(CC_LE);
			if (js.afterOp & JitState::AFTER_REWIND_PC_BAD_STATE)
				MOV(32, M(&mips_->pc), Imm32(js.compilerPC));     // redo this op on resume
			else
				MOV(32, M(&mips_->pc), Imm32(js.compilerPC + 4)); // resume after this op
			WriteSyscallExit();
			SetJumpTarget(skipCheck);

			js.afterOp = JitState::AFTER_NONE;
		}
		if (js.afterOp & JitState::AFTER_MEMCHECK_CLEANUP) {
			js.afterOp &= ~JitState::AFTER_MEMCHECK_CLEANUP;
		}

		js.compilerPC += 4;
		js.numInstructions++;

		// Safety check, in case we get a bunch of really large jit ops without a lot of branching.
		if (GetSpaceLeft() < 0x800) {
			FlushAll();
			WriteExit(js.compilerPC, js.nextExit++);
			js.compiling = false;
		}
	}

	b->codeSize = (u32)(GetCodePtr() - b->normalEntry);
	NOP();
	AlignCode4();
	b->originalSize = js.numInstructions;
	return b->normalEntry;
}
//------------------------------ generate_exception_blob ---------------------------
// creates exception blob at the end
// Using exception blob, this code is jumped from a compiled method.
// (see emit_exception_handler in sparc.ad file)
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state
// (i.e. callee save registers) unwind the frame and jump to the
// exception handler for the nmethod if there is no Java level handler
// for the nmethod.
//
// This code is entered with a jmp.
//
// Arguments:
//   O0: exception oop
//   O1: exception pc
//
// Results:
//   O0: exception oop
//   O1: exception pc in caller or ???
//   destination: exception handler of caller
//
// Note: the exception pc MUST be at a call (precise debug information)
//
void OptoRuntime::generate_exception_blob() {
  // allocate space for code
  ResourceMark rm;
  int pad = VerifyThread ? 256 : 0;// Extra slop space for more verify code

  // setup code generation tools
  // Measured 8/7/03 at 256 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 528 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("exception_blob", 600+pad, 512);
  MacroAssembler* masm     = new MacroAssembler(&buffer);

  int framesize_in_bytes = __ total_frame_size_in_bytes(0);
  int framesize_in_words = framesize_in_bytes / wordSize;
  int framesize_in_slots = framesize_in_bytes / sizeof(jint);

  Label L;

  int start = __ offset();

  __ verify_thread();
  // Stash the incoming exception oop/pc in the thread so the runtime can see them.
  __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
  __ st_ptr(Oissuing_pc, G2_thread, JavaThread::exception_pc_offset());

  // This call does all the hard work. It checks if an exception catch
  // exists in the method.
  // If so, it returns the handler address.
  // If the nmethod has been deoptimized and it had a handler the handler
  // address is the deopt blob unpack_with_exception entry.
  //
  // If no handler exists it prepares for stack-unwinding, restoring the callee-save
  // registers of the frame being removed.
  //
  __ save_frame(0);

  __ mov(G2_thread, O0);
  __ set_last_Java_frame(SP, noreg);
  __ save_thread(L7_thread_cache);

  // This call can block at exit and nmethod can be deoptimized at that
  // point. If the nmethod had a catch point we would jump to the
  // now deoptimized catch point and fall thru the vanilla deopt
  // path and lose the exception
  // Sure would be simpler if this call didn't block!
  __ call(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C), relocInfo::runtime_call_type);
  __ delayed()->mov(L7_thread_cache, O0);

  // Set an oopmap for the call site. This oopmap will only be used if we
  // are unwinding the stack. Hence, all locations will be dead.
  // Callee-saved registers will be the same as the frame above (i.e.,
  // handle_exception_stub), since they were restored when we got the
  // exception.

  OopMapSet *oop_maps = new OopMapSet();
  oop_maps->add_gc_map( __ offset()-start, new OopMap(framesize_in_slots, 0));

  __ bind(L);
  __ restore_thread(L7_thread_cache);
  __ reset_last_Java_frame();

  __ mov(O0, G3_scratch);             // Move handler address to temp
  __ restore();

  // Restore SP from L7 if the exception PC is a MethodHandle call site.
  __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), O7);
  __ tst(O7);
  __ movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);

  // G3_scratch contains handler address
  // Since this may be the deopt blob we must set O7 to look like we returned
  // from the original pc that threw the exception
  __ ld_ptr(G2_thread, JavaThread::exception_pc_offset(), O7);
  __ sub(O7, frame::pc_return_offset, O7);


  assert(Assembler::is_simm13(in_bytes(JavaThread::exception_oop_offset())), "exception offset overflows simm13, following ld instruction cannot be in delay slot");
  __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception); // O0
#ifdef ASSERT
  __ st_ptr(G0, G2_thread, JavaThread::exception_handler_pc_offset());
  __ st_ptr(G0, G2_thread, JavaThread::exception_pc_offset());
#endif
  __ JMP(G3_scratch, 0);
  // Clear the exception oop so GC no longer processes it as a root.
  __ delayed()->st_ptr(G0, G2_thread, JavaThread::exception_oop_offset());

  // -------------
  // make sure all code is generated
  masm->flush();

  _exception_blob = ExceptionBlob::create(&buffer, oop_maps, framesize_in_words);
}
// Emits the exit path used after a syscall: charge the downcount, then jump
// to the dispatcher variant that re-validates coreState before continuing.
void Jit::WriteSyscallExit() {
	WriteDowncount();
	JMP(asm_.dispatcherCheckCoreState, true);
}
// Used by compiler only; may use only caller saved, non-argument registers
// NOTE: %%%% if any change is made to this stub make sure that the function
//       pd_code_size_limit is changed to ensure the correct size for VtableStub
//
// Emits a megamorphic vtable dispatch stub: load the receiver's klass,
// fetch the method at vtable_index, and tail-jump to its compiled entry.
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  const int sparc_code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new(sparc_code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), sparc_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), G5, G3_scratch);
  }
#endif /* PRODUCT */

  assert(VtableStub::receiver_location() == O0->as_VMReg(), "receiver expected in O0");

  // get receiver klass (this load is also the implicit null check point)
  address npe_addr = __ pc();
  __ load_klass(O0, G3_scratch);

  // set Method* (in case of interpreted method), and destination address
#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // check offset vs vtable length
    __ ld(G3_scratch, in_bytes(Klass::vtable_length_offset()), G5);
    __ cmp_and_br_short(G5, vtable_index*vtableEntry::size(), Assembler::greaterUnsigned, Assembler::pt, L);
    __ set(vtable_index, O2);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), O0, O2);
    __ bind(L);
  }
#endif

  __ lookup_virtual_method(G3_scratch, vtable_index, G5_method);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ br_notnull_short(G5_method, Assembler::pt, L);
    __ stop("Vtable entry is ZERO");
    __ bind(L);
  }
#endif

  address ame_addr = __ pc();  // if the vtable entry is null, the method is abstract
  // NOTE: for vtable dispatches, the vtable entry will never be null.

  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);

  // jump to target (either compiled code or c2iadapter)
  __ JMP(G3_scratch, 0);
  // load Method* (in case we call c2iadapter)
  __ delayed()->nop();

  masm->flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("vtable #%d at " PTR_FORMAT "[%d] left over: %d",
                  vtable_index, p2i(s->entry_point()),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 2*BytesPerInstWord;  // 32-bit offset is this much larger than a 13-bit one
  assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for sethi;add");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
// NOTE: %%%% if any change is made to this stub make sure that the function
//       pd_code_size_limit is changed to ensure the correct size for VtableStub
//
// Emits a megamorphic interface (itable) dispatch stub: scan the receiver's
// itable for the interface, fetch the method at itable_index, and tail-jump
// to its compiled entry; throws IncompatibleClassChangeError on a miss.
VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  const int sparc_code_length = VtableStub::pd_code_size_limit(false);
  VtableStub* s = new(sparc_code_length) VtableStub(false, itable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), sparc_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

  Register G3_Klass = G3_scratch;
  Register G5_interface = G5;  // Passed in as an argument
  Label search;

  // Entry arguments:
  //  G5_interface: Interface
  //  O0:           Receiver
  assert(VtableStub::receiver_location() == O0->as_VMReg(), "receiver expected in O0");

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();
  __ load_klass(O0, G3_Klass);

  // Push a new window to get some temp registers.  This chops the head of all
  // my 64-bit %o registers in the LION build, but this is OK because no longs
  // are passed in the %o registers.  Instead, longs are passed in G1 and G4
  // and so those registers are not available here.
  __ save(SP,-frame::register_save_words*wordSize,SP);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), L0, L1);
  }
#endif /* PRODUCT */

  Label throw_icce;

  Register L5_method = L5;
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             G3_Klass, G5_interface, itable_index,
                             // outputs: method, scan temp. reg
                             L5_method, L2, L3,
                             throw_icce);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L01;
    __ br_notnull_short(L5_method, Assembler::pt, L01);
    __ stop("Method* is null");
    __ bind(L01);
  }
#endif

  // If the following load is through a NULL pointer, we'll take an OS
  // exception that should translate into an AbstractMethodError.  We need the
  // window count to be correct at that time.
  __ restore(L5_method, 0, G5_method);
  // Restore registers *before* the AME point.

  address ame_addr = __ pc();   // if the vtable entry is null, the method is abstract
  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);

  // G5_method:  Method*
  // O0:         Receiver
  // G3_scratch: entry point
  __ JMP(G3_scratch, 0);
  __ delayed()->nop();

  __ bind(throw_icce);
  AddressLiteral icce(StubRoutines::throw_IncompatibleClassChangeError_entry());
  __ jump_to(icce, G3_scratch);
  // Restore the register window before jumping to the ICCE thrower.
  __ delayed()->restore();

  masm->flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("itable #%d at " PTR_FORMAT "[%d] left over: %d",
                  itable_index, p2i(s->entry_point()),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 2*BytesPerInstWord;  // 32-bit offset is this much larger than a 13-bit one
  assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for sethi;add");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
// Machine-generated program (emitted by the E_RT compiler/runtime — do not
// hand-edit). Encodes a pair of DFAs as transition tables in runtime memory
// (bases 10999 and 11383, 128 ints per state) and scans input characters
// against both; opcode semantics live in the E_RT runtime — confirm there.
int main(int argc, char *argv[]) {
	E_RT_init(argc, argv);
	JMP(main);
main:
	MOVI(0, R999);
	JMP(ML1);
// Accept action for DFA #1.
RL3:
	MOVI(2, R901); MOVI(10000, R777); MOVS("NAVDEEP", R777);
	PRTS(R777); PRTS("a:b\n");
	JMP(L4);
// Accept action for DFA #2.
RL5:
	MOVI(2, R904); MOVI(10000, R777); MOVS("NAVDEEP", R777);
	PRTS(R777); PRTS("a OR b:c\n");
	JMP(L6);
// Build transition tables: entries for columns 'a'..'d' (97..100) per state.
ML1:
	R900=MUL(128,0); R900=ADD(10999,R900);
	R001=ADD(R900,97); STI(1, R001); R001=ADD(R900,98); STI(-1, R001);
	R001=ADD(R900,99); STI(-1, R001); R001=ADD(R900,100); STI(-1, R001);
	R900=MUL(128,1); R900=ADD(10999,R900);
	R001=ADD(R900,97); STI(-1, R001); R001=ADD(R900,98); STI(2, R001);
	R001=ADD(R900,99); STI(-1, R001); R001=ADD(R900,100); STI(-1, R001);
	R900=MUL(128,2); R900=ADD(10999,R900);
	R001=ADD(R900,97); STI(-1, R001); R001=ADD(R900,98); STI(-1, R001);
	R001=ADD(R900,99); STI(-1, R001); R001=ADD(R900,100); STI(-1, R001);
	MOVI(2, R902); MOVI(0, R901);   // R902 = accepting state, R901 = current state (DFA #1)
	R903=MUL(128,0); R903=ADD(11383,R903);
	R001=ADD(R903,97); STI(1, R001); R001=ADD(R903,98); STI(1, R001);
	R001=ADD(R903,99); STI(-1, R001); R001=ADD(R903,100); STI(-1, R001);
	R903=MUL(128,1); R903=ADD(11383,R903);
	R001=ADD(R903,97); STI(-1, R001); R001=ADD(R903,98); STI(-1, R001);
	R001=ADD(R903,99); STI(2, R001); R001=ADD(R903,100); STI(-1, R001);
	R903=MUL(128,2); R903=ADD(11383,R903);
	R001=ADD(R903,97); STI(-1, R001); R001=ADD(R903,98); STI(-1, R001);
	R001=ADD(R903,99); STI(-1, R001); R001=ADD(R903,100); STI(-1, R001);
	MOVI(2, R905); MOVI(0, R904);   // R905 = accepting state, R904 = current state (DFA #2)
// Scan loop: read a char; negative input ends the scan.
L2:
	IN(R000);
	JMPC(GT(0, R000), L0);
	// Step DFA #1 unless it is already dead (state < 0).
	JMPC(GT(0, R901), L4);
	R001=MUL(128,R901); R001=ADD(10999,R001); R001=ADD(R000,R001);
	LDI(R001, R002);
	JMPC(EQ(R002, R902), RL3);
	MOVI(R002, R901);
L4:
	// Step DFA #2 unless it is already dead (state < 0).
	JMPC(GT(0, R904), L6);
	R001=MUL(128,R904); R001=ADD(11383,R001); R001=ADD(R000,R001);
	LDI(R001, R002);
	JMPC(EQ(R002, R905), RL5);
	MOVI(R002, R904);
L6:
	JMP(L2);
L0:
	MOVI(31000, R000); MOVS("The End", R000); PRTS(R000);
	E_RT_exit();
	return 0;
}
// Opcode handler: thin wrapper invoking JMP with operand 0
// (presumably the addressing-mode/variant selector — confirm at JMP's definition).
INLINE void jmp0(void) { JMP(0); }
/* Builds the ESIL (Evaluable Strings Intermediate Language) expression for
 * one 8051 instruction into op->esil, switching on the opcode byte buf[0].
 * The emit/emitf/h/j/k helpers and XR/XW/XI/BIT_R/... fragments are macros
 * defined alongside this function.
 *
 * Fixes in this revision:
 *  - 0xF2/0xF3 (movx @Ri,a) was missing its break and fell through into the
 *    0xF4 (cpl a) case, appending a bogus "A ^= 255" to the store's ESIL.
 *  - 0x40 (jc) / 0x50 (jnc) had their conditions swapped: per the 8051 ISA
 *    JC branches when the carry is SET, JNC when it is clear (compare the
 *    correct jz/jnz pair at 0x60/0x70).
 *  - 0xB4..0xBF (cjne) emitted malformed ESIL "2,+pc,+=" (missing comma);
 *    corrected to "2,+,pc,+=" matching the jb/jbc cases.
 */
static void analop_esil(RAnal *a, RAnalOp *op, ut64 addr, const ut8 *buf, const char *buf_asm) {
	r_strbuf_init (&op->esil);
	r_strbuf_set (&op->esil, "");

	switch (buf[0]) {
	// Irregulars sorted by lower nibble
	case 0x00: /* nop */ emit(","); break;
	case 0x10: /* jbc */ k(BIT_R "&,?{,%2$d,1,<<,255,^,%1$d,&=[1],%3$hhd,3,+,pc,+=,}"); break;
	case 0x20: /* jb */ k(BIT_R "&,?{,%3$hhd,3,+,pc,+=,}"); break;
	case 0x30: /* jnb */ k(BIT_R "&,!,?{,%3$hhd,3,+,pc,+=,}"); break;
	case 0x40: /* jc */ emitf("C,?{,%hhd,2,+,pc,+=,}", buf[1]); break;     // branch when carry set
	case 0x50: /* jnc */ emitf("C,!,?{,%hhd,2,+,pc,+=,}", buf[1]); break;  // branch when carry clear
	case 0x60: /* jz */ emitf("A,!,?{,%hhd,2,+,pc,+=,}", buf[1]); break;
	case 0x70: /* jnz */ emitf("A,?{,%hhd,2,+,pc,+=,}", buf[1]); break;
	case 0x80: /* sjmp */ j(ESX_L1 JMP("2")); break;
	case 0x90: /* mov */ emitf("%d,dptr,=", (buf[1]<<8) + buf[2]); break;
	case 0xA0: /* orl */ k(BIT_R "C,|="); break;
	case 0xB0: /* anl */ k(BIT_R "C,&="); break;
	case 0xC0: /* push */ h(XR(IB1) PUSH1); break;
	case 0xD0: /* pop */ h(POP1 XW(IB1)); break;
	case 0xE0: /* movx */ /* TODO */ break;
	case 0xF0: /* movx */ /* TODO */ break;
	// acall pushes the return address, then shares ajmp's pc computation.
	case 0x11: case 0x31: case 0x51: case 0x71:
	case 0x91: case 0xB1: case 0xD1: case 0xF1:
		emit(CALL("2"));
		// fall through
	case 0x01: case 0x21: case 0x41: case 0x61:
	case 0x81: case 0xA1: case 0xC1: case 0xE1:
		// ajmp: new pc = same 2K page, bits 10..8 from the opcode, low byte from buf[1].
		emitf("0x%x,pc,=",
			(addr & 0xF800) | ((((unsigned short)buf[0])<<3) & 0x0700) | buf[1]);
		break;
	case 0x02: /* ljmp */ emitf("%d,pc,=", (unsigned int)((buf[1]<<8)+buf[2])); break;
	case 0x12: /* lcall */ emitf(CALL("3")",%d,pc,=", (unsigned int)((buf[1]<<8)+buf[2])); break;
	case 0x22: /* ret */ emitf(POP2 "pc,="); break;
	case 0x32: /* reti */ /* TODO */ break;
	case 0x72: /* orl */ /* TODO */ break;
	case 0x82: /* anl */ /* TODO */ break;
	case 0x92: /* mov */ /* TODO */ break;
	case 0xA2: /* mov */ /* TODO */ break;
	case 0xB2: /* cpl */ k("%2$d,1,<<,%1$d,^=[1]"); break;
	case 0xC2: /* clr */ /* TODO */ break;
	case 0x03: /* rr */ emit("1,A,0x101,*,>>,A,="); break;
	case 0x13: /* rrc */ /* TODO */ break;
	case 0x23: /* rl */ emit("7,A,0x101,*,>>,A,="); break;
	case 0x33: /* rlc */ /* TODO */ break;
	case 0x73: /* jmp */ emit("dptr,A,+,pc,="); break;
	case 0x83: /* movc */ emit("A,dptr,+,[1],A,="); break;
	case 0x93: /* movc */ emit("A,pc,+,[1],A,="); break;
	case 0xA3: /* inc */ h(XI(IB1, "++")); break;
	case 0xB3: /* cpl */ emit("1," XI(C, "^")); break;
	case 0xC3: /* clr */ emit("0,C,="); break;
	// Regulars sorted by upper nibble
	OP_GROUP_UNARY_4(0x00, "++")
	OP_GROUP_UNARY_4(0x10, "--")
	OP_GROUP_INPLACE_LHS_4(0x20, A, "+")
	case 0x34: h (XR(L1) "C,+," XI(A, "+")) break;
	case 0x35: h (XR(IB1) "C,+," XI(A, "+")) break;
	case 0x36: case 0x37: j (XR(R0I) "C,+," XI(A, "+")) break;
	case 0x38: case 0x39: case 0x3A: case 0x3B:
	case 0x3C: case 0x3D: case 0x3E: case 0x3F:
		h (XR(R0) "C,+," XI(A, "+")) break;
	OP_GROUP_INPLACE_LHS_4(0x40, A, "|")
	OP_GROUP_INPLACE_LHS_4(0x50, A, "&")
	OP_GROUP_INPLACE_LHS_4(0x60, A, "^")
	case 0x74: h (XR(L1) XW(A)) break;
	case 0x75: h (XR(L2) XW(IB1)) break;
	case 0x76: case 0x77: j (XR(L1) XW(R0I)) break;
	case 0x78: case 0x79: case 0x7A: case 0x7B:
	case 0x7C: case 0x7D: case 0x7E: case 0x7F:
		h (XR(L1) XW(R0)) break;
	case 0x84: /* div */ emit("B,!,OV,=,0,A,B,A,/=,A,B,*,-,-,B,=,0,C,="); break;
	case 0x85: /* mov */ h(IRAM_BASE ",%2$d,+,[1]," IRAM_BASE ",%2$d,+,=[1]"); break;
	case 0x86: case 0x87: j (XR(R0I) XW(IB1)) break;
	case 0x88: case 0x89: case 0x8A: case 0x8B:
	case 0x8C: case 0x8D: case 0x8E: case 0x8F:
		h (XR(R0) XW(IB1)) break;
	OP_GROUP_INPLACE_LHS_4(0x90, A, ".")
	case 0xA4: /* mul */ emit("8,A,B,*,DUP,>>,DUP,!,!,OV,=,B,=,A,=,0,C,="); break;
	case 0xA5: /* ??? */ emit("0,TRAP"); break;
	case 0xA6: case 0xA7: j (XR(IB1) XW(R0I)) break;
	case 0xA8: case 0xA9: case 0xAA: case 0xAB:
	case 0xAC: case 0xAD: case 0xAE: case 0xAF:
		h (XR(IB1) XW(R0)) break;
	// cjne: compare, branch when operands differ.
	case 0xB4: h (XR(L1) XR(A) "!=,?{,%3$hhd,2,+,pc,+=,}") break;
	case 0xB5: h (XR(IB1) XR(A) "!=,?{,%3$hhd,2,+,pc,+=,}") break;
	case 0xB6: case 0xB7: j (XR(L1) XR(R0I) "!=,?{,%3$hhd,2,+,pc,+=,}") break;
	case 0xB8: case 0xB9: case 0xBA: case 0xBB:
	case 0xBC: case 0xBD: case 0xBE: case 0xBF:
		h (XR(L1) XR(R0) "!=,?{,%3$hhd,2,+,pc,+=,}") break;
	case 0xC4: /* swap */ emit("4,A,0x101,*,>>,A,="); break;
	case 0xC5: /* xch */ /* TODO */ break;
	case 0xC6: case 0xC7: /* xch */ /* TODO */ break;
	case 0xC8: case 0xC9: case 0xCA: case 0xCB:
	case 0xCC: case 0xCD: case 0xCE: case 0xCF:
		/* xch */ h (XR(A) XR(R0) XW(A) "," XW(R0)); break;
	case 0xD2: /* setb */ /* TODO */ break;
	case 0xD3: /* setb */ /* TODO */ break;
	case 0xD4: /* da */ emit("A,--="); break; // TODO(review): not a real decimal adjust
	case 0xD5: /* djnz */ h(XI(R0I, "--") "," XR(R0I) CJMP(L2, "2")); break;
	case 0xD6: /* xchd */ /* TODO */ break;
	case 0xD7: /* xchd */ /* TODO */ break;
	case 0xD8: case 0xD9: case 0xDA: case 0xDB:
	case 0xDC: case 0xDD: case 0xDE: case 0xDF:
		/* djnz */ h(XI(R0, "--") "," XR(R0) CJMP(L1, "2")); break;
	case 0xE2: case 0xE3: /* movx */ j(XRAM_BASE "r%0$d,+,[1]," XW(A)); break;
	case 0xE4: /* clr */ emit("0,A,="); break;
	case 0xE5: /* mov */ h (XR(IB1) XW(A)) break;
	case 0xE6: case 0xE7: /* mov */ j (XR(R0I) XW(A)) break;
	case 0xE8: case 0xE9: case 0xEA: case 0xEB:
	case 0xEC: case 0xED: case 0xEE: case 0xEF:
		/* mov */ h (XR(R0) XW(A)) break;
	case 0xF2: case 0xF3: /* movx */ j(XR(A) XRAM_BASE "r%0$d,+,=[1]"); break; // FIX: was falling through into cpl
	case 0xF4: /* cpl */ h ("255" XI(A, "^")) break;
	case 0xF5: /* mov */ h (XR(A) XW(IB1)) break;
	case 0xF6: case 0xF7: /* mov */ j (XR(A) XW(R0I)) break;
	case 0xF8: case 0xF9: case 0xFA: case 0xFB:
	case 0xFC: case 0xFD: case 0xFE: case 0xFF:
		/* mov */ h (XR(A) XW(R0)) break;
	default: break;
	}
}
// Opcode handler: thin wrapper invoking JMP with operand 2
// (presumably the addressing-mode/variant selector — confirm at JMP's definition).
INLINE void jmp2(void) { JMP(2); }
// Opcode handler: thin wrapper invoking JMP with operand 1
// (presumably the addressing-mode/variant selector — confirm at JMP's definition).
INLINE void jmp1(void) { JMP(1); }
void MicroSequencer::decode() { INT32 immed4 = readBits(4); INT32 nextInstruction = readBits(4, TRUE); switch (nextInstruction) { case 0x0: if (immed4 == 0) RTS(); else SETPAGE(immed4); break; case 0x8: SETMODE(immed4); break; case 0x4: LOAD_4(immed4); break; case 0xC: LOAD_C(immed4); break; case 0x2: LOAD_2(immed4); break; case 0xA: SETMSB_A(immed4); break; case 0x6: SETMSB_6(immed4); break; case 0xE: LOAD_E(immed4); break; case 0x1: LOADALL(immed4); break; case 0x9: DELTA_9(immed4); break; case 0x5: SETMSB_5(immed4); break; case 0xD: DELTA_D(immed4); break; case 0x3: SETMSB_3(immed4); break; case 0xB: JSR(immed4); break; case 0x7: JMP(immed4); break; case 0xF: PAUSE(immed4); break; /* case 0x0: if (immed4 == 0) RTS(); else SETPAGE(immed4); break; case 0x1: SETMODE(immed4); break; case 0x2: LOAD_4(immed4); break; case 0x3: LOAD_C(immed4); break; case 0x4: LOAD_2(immed4); break; case 0x5: SETMSB_A(immed4); break; case 0x6: SETMSB_6(immed4); break; case 0x7: LOAD_E(immed4); break; case 0x8: LOADALL(immed4); break; case 0x9: DELTA_9(immed4); break; case 0xA: SETMSB_5(immed4); break; case 0xB: DELTA_D(immed4); break; case 0xC: SETMSB_3(immed4); break; case 0xD: JSR(immed4); break; case 0xE: JMP(immed4); break; case 0xF: PAUSE(immed4); break; */ } }
/*
 * Function that does the real stuff
 *
 * Two-pass JIT translator: compiles the BPF program 'prog' into native
 * amd64 machine code and returns it as a directly callable function.
 *
 * prog - BPF instruction array to compile
 * nins - number of instructions in prog
 * mem  - scratch words backing the BPF M[] store (BPF_LD/BPF_ST|BPF_MEM)
 *
 * Returns NULL when an allocation fails or (kernel build) an unknown
 * opcode is found; the userland build aborts on an unknown opcode.
 *
 * Pass 1 emits only instruction lengths (emit_length) to fill the jump
 * reference table; pass 2 emits the actual bytes (emit_code).  The JAb/
 * JBb/JAEb/JNEb byte displacements below are hand counted against the
 * emitted encodings: do NOT reorder or resize the emitted instructions
 * without recomputing them.
 */
bpf_filter_func
bpf_jit_compile(struct bpf_insn *prog, u_int nins, int *mem)
{
	struct bpf_insn *ins;
	u_int i, pass;
	bpf_bin_stream stream;

	/*
	 * NOTE: do not modify the name of this variable, as it's used by
	 * the macros to emit code.
	 */
	emit_func emitm;

	/* Allocate the reference table for the jumps */
#ifdef _KERNEL
	stream.refs = (u_int *)malloc((nins + 1) * sizeof(u_int),
	    M_BPFJIT, M_NOWAIT);
#else
	stream.refs = (u_int *)malloc((nins + 1) * sizeof(u_int));
#endif
	if (stream.refs == NULL)
		return (NULL);

	/* Reset the reference table */
	for (i = 0; i < nins + 1; i++)
		stream.refs[i] = 0;

	stream.cur_ip = 0;
	stream.bpf_pc = 0;

	/*
	 * the first pass will emit the lengths of the instructions
	 * to create the reference table
	 */
	emitm = emit_length;

	pass = 0;
	for (;;) {
		ins = prog;

		/*
		 * create the procedure header: preserve RBX in R8 and move
		 * the incoming arguments into their working registers.
		 * NOTE(review): register roles (packet buffer in RBX, wirelen
		 * in R9D, buflen in EDI) are inferred from the per-opcode
		 * code below — confirm against the emitter macros.
		 */
		MOVrq2(RBX, R8);
		MOVrq(RDI, RBX);
		MOVrd2(ESI, R9D);
		MOVrd(EDX, EDI);

		for (i = 0; i < nins; i++) {
			stream.bpf_pc++;

			switch (ins->code) {
			default:
				/*
				 * Unknown opcode.  In the kernel, fail the
				 * compilation; free the reference table first
				 * so this early return does not leak it.
				 * stream.ibuf is not allocated yet: a bad
				 * opcode is always caught during the first
				 * (length) pass, before ibuf is malloc'd.
				 */
#ifdef _KERNEL
				free(stream.refs, M_BPFJIT);
				return (NULL);
#else
				abort();
#endif

			case BPF_RET|BPF_K:
				/* accept k bytes: return the constant */
				MOVid(ins->k, EAX);
				MOVrq3(R8, RBX);	/* restore callee-saved RBX */
				RET();
				break;

			case BPF_RET|BPF_A:
				/* return the accumulator (already in EAX) */
				MOVrq3(R8, RBX);
				RET();
				break;

			case BPF_LD|BPF_W|BPF_ABS:
				/* A = 32-bit word at pkt[k], bounds-checked */
				MOVid(ins->k, ESI);
				CMPrd(EDI, ESI);
				JAb(12);
				MOVrd(EDI, ECX);
				SUBrd(ESI, ECX);
				CMPid(sizeof(int32_t), ECX);
				JAEb(6);
				ZEROrd(EAX);		/* out of bounds -> return 0 */
				MOVrq3(R8, RBX);
				RET();
				MOVobd(RBX, RSI, EAX);
				BSWAP(EAX);		/* network -> host byte order */
				break;

			case BPF_LD|BPF_H|BPF_ABS:
				/* A = 16-bit halfword at pkt[k], bounds-checked */
				ZEROrd(EAX);
				MOVid(ins->k, ESI);
				CMPrd(EDI, ESI);
				JAb(12);
				MOVrd(EDI, ECX);
				SUBrd(ESI, ECX);
				CMPid(sizeof(int16_t), ECX);
				JAEb(4);
				MOVrq3(R8, RBX);	/* out of bounds -> return 0 */
				RET();
				MOVobw(RBX, RSI, AX);
				SWAP_AX();		/* network -> host byte order */
				break;

			case BPF_LD|BPF_B|BPF_ABS:
				/* A = byte at pkt[k], bounds-checked */
				ZEROrd(EAX);
				MOVid(ins->k, ESI);
				CMPrd(EDI, ESI);
				JBb(4);
				MOVrq3(R8, RBX);	/* out of bounds -> return 0 */
				RET();
				MOVobb(RBX, RSI, AL);
				break;

			case BPF_LD|BPF_W|BPF_LEN:
				/* A = wirelen */
				MOVrd3(R9D, EAX);
				break;

			case BPF_LDX|BPF_W|BPF_LEN:
				/* X = wirelen */
				MOVrd3(R9D, EDX);
				break;

			case BPF_LD|BPF_W|BPF_IND:
				/* A = 32-bit word at pkt[X + k], bounds-checked */
				CMPrd(EDI, EDX);
				JAb(27);
				MOVid(ins->k, ESI);
				MOVrd(EDI, ECX);
				SUBrd(EDX, ECX);
				CMPrd(ESI, ECX);
				JBb(14);
				ADDrd(EDX, ESI);
				MOVrd(EDI, ECX);
				SUBrd(ESI, ECX);
				CMPid(sizeof(int32_t), ECX);
				JAEb(6);
				ZEROrd(EAX);		/* out of bounds -> return 0 */
				MOVrq3(R8, RBX);
				RET();
				MOVobd(RBX, RSI, EAX);
				BSWAP(EAX);
				break;

			case BPF_LD|BPF_H|BPF_IND:
				/* A = 16-bit halfword at pkt[X + k], bounds-checked */
				ZEROrd(EAX);
				CMPrd(EDI, EDX);
				JAb(27);
				MOVid(ins->k, ESI);
				MOVrd(EDI, ECX);
				SUBrd(EDX, ECX);
				CMPrd(ESI, ECX);
				JBb(14);
				ADDrd(EDX, ESI);
				MOVrd(EDI, ECX);
				SUBrd(ESI, ECX);
				CMPid(sizeof(int16_t), ECX);
				JAEb(4);
				MOVrq3(R8, RBX);	/* out of bounds -> return 0 */
				RET();
				MOVobw(RBX, RSI, AX);
				SWAP_AX();
				break;

			case BPF_LD|BPF_B|BPF_IND:
				/* A = byte at pkt[X + k], bounds-checked */
				ZEROrd(EAX);
				CMPrd(EDI, EDX);
				JAEb(13);
				MOVid(ins->k, ESI);
				MOVrd(EDI, ECX);
				SUBrd(EDX, ECX);
				CMPrd(ESI, ECX);
				JAb(4);
				MOVrq3(R8, RBX);	/* out of bounds -> return 0 */
				RET();
				ADDrd(EDX, ESI);
				MOVobb(RBX, RSI, AL);
				break;

			case BPF_LDX|BPF_MSH|BPF_B:
				/* X = 4 * (pkt[k] & 0xf): IP header length */
				MOVid(ins->k, ESI);
				CMPrd(EDI, ESI);
				JBb(6);
				ZEROrd(EAX);		/* out of bounds -> return 0 */
				MOVrq3(R8, RBX);
				RET();
				ZEROrd(EDX);
				MOVobb(RBX, RSI, DL);
				ANDib(0x0f, DL);
				SHLib(2, EDX);
				break;

			case BPF_LD|BPF_IMM:
				/* A = k */
				MOVid(ins->k, EAX);
				break;

			case BPF_LDX|BPF_IMM:
				/* X = k */
				MOVid(ins->k, EDX);
				break;

			case BPF_LD|BPF_MEM:
				/* A = M[k] */
				MOViq((uintptr_t)mem, RCX);
				MOVid(ins->k * 4, ESI);
				MOVobd(RCX, RSI, EAX);
				break;

			case BPF_LDX|BPF_MEM:
				/* X = M[k] */
				MOViq((uintptr_t)mem, RCX);
				MOVid(ins->k * 4, ESI);
				MOVobd(RCX, RSI, EDX);
				break;

			case BPF_ST:
				/*
				 * M[k] = A
				 *
				 * XXX this command and the following could
				 * be optimized if the previous instruction
				 * was already of this type
				 */
				MOViq((uintptr_t)mem, RCX);
				MOVid(ins->k * 4, ESI);
				MOVomd(EAX, RCX, RSI);
				break;

			case BPF_STX:
				/* M[k] = X */
				MOViq((uintptr_t)mem, RCX);
				MOVid(ins->k * 4, ESI);
				MOVomd(EDX, RCX, RSI);
				break;

			case BPF_JMP|BPF_JA:
				/* unconditional jump, offset from the refs table */
				JMP(stream.refs[stream.bpf_pc + ins->k] -
				    stream.refs[stream.bpf_pc]);
				break;

			/*
			 * Conditional jumps: JCC emits both the taken and the
			 * fall-through branch using the refs table.  A test
			 * whose both targets are the next instruction is a
			 * no-op and emits nothing.
			 */
			case BPF_JMP|BPF_JGT|BPF_K:
				if (ins->jt == 0 && ins->jf == 0)
					break;
				CMPid(ins->k, EAX);
				JCC(JA, JBE);
				break;

			case BPF_JMP|BPF_JGE|BPF_K:
				if (ins->jt == 0 && ins->jf == 0)
					break;
				CMPid(ins->k, EAX);
				JCC(JAE, JB);
				break;

			case BPF_JMP|BPF_JEQ|BPF_K:
				if (ins->jt == 0 && ins->jf == 0)
					break;
				CMPid(ins->k, EAX);
				JCC(JE, JNE);
				break;

			case BPF_JMP|BPF_JSET|BPF_K:
				if (ins->jt == 0 && ins->jf == 0)
					break;
				TESTid(ins->k, EAX);
				JCC(JNE, JE);
				break;

			case BPF_JMP|BPF_JGT|BPF_X:
				if (ins->jt == 0 && ins->jf == 0)
					break;
				CMPrd(EDX, EAX);
				JCC(JA, JBE);
				break;

			case BPF_JMP|BPF_JGE|BPF_X:
				if (ins->jt == 0 && ins->jf == 0)
					break;
				CMPrd(EDX, EAX);
				JCC(JAE, JB);
				break;

			case BPF_JMP|BPF_JEQ|BPF_X:
				if (ins->jt == 0 && ins->jf == 0)
					break;
				CMPrd(EDX, EAX);
				JCC(JE, JNE);
				break;

			case BPF_JMP|BPF_JSET|BPF_X:
				if (ins->jt == 0 && ins->jf == 0)
					break;
				TESTrd(EDX, EAX);
				JCC(JNE, JE);
				break;

			case BPF_ALU|BPF_ADD|BPF_X:
				/* A += X */
				ADDrd(EDX, EAX);
				break;

			case BPF_ALU|BPF_SUB|BPF_X:
				/* A -= X */
				SUBrd(EDX, EAX);
				break;

			case BPF_ALU|BPF_MUL|BPF_X:
				/* A *= X; MUL clobbers EDX, so save X in ECX */
				MOVrd(EDX, ECX);
				MULrd(EDX);
				MOVrd(ECX, EDX);
				break;

			case BPF_ALU|BPF_DIV|BPF_X:
				/* A /= X; division by zero returns 0 */
				TESTrd(EDX, EDX);
				JNEb(6);
				ZEROrd(EAX);
				MOVrq3(R8, RBX);
				RET();
				MOVrd(EDX, ECX);
				ZEROrd(EDX);
				DIVrd(ECX);
				MOVrd(ECX, EDX);
				break;

			case BPF_ALU|BPF_AND|BPF_X:
				/* A &= X */
				ANDrd(EDX, EAX);
				break;

			case BPF_ALU|BPF_OR|BPF_X:
				/* A |= X */
				ORrd(EDX, EAX);
				break;

			case BPF_ALU|BPF_LSH|BPF_X:
				/* A <<= X */
				MOVrd(EDX, ECX);
				SHL_CLrb(EAX);
				break;

			case BPF_ALU|BPF_RSH|BPF_X:
				/* A >>= X */
				MOVrd(EDX, ECX);
				SHR_CLrb(EAX);
				break;

			case BPF_ALU|BPF_ADD|BPF_K:
				/* A += k */
				ADD_EAXi(ins->k);
				break;

			case BPF_ALU|BPF_SUB|BPF_K:
				/* A -= k */
				SUB_EAXi(ins->k);
				break;

			case BPF_ALU|BPF_MUL|BPF_K:
				/* A *= k; MUL clobbers EDX, so save X in ECX */
				MOVrd(EDX, ECX);
				MOVid(ins->k, EDX);
				MULrd(EDX);
				MOVrd(ECX, EDX);
				break;

			case BPF_ALU|BPF_DIV|BPF_K:
				/* A /= k (k != 0 is the verifier's job) */
				MOVrd(EDX, ECX);
				ZEROrd(EDX);
				MOVid(ins->k, ESI);
				DIVrd(ESI);
				MOVrd(ECX, EDX);
				break;

			case BPF_ALU|BPF_AND|BPF_K:
				/* A &= k */
				ANDid(ins->k, EAX);
				break;

			case BPF_ALU|BPF_OR|BPF_K:
				/* A |= k */
				ORid(ins->k, EAX);
				break;

			case BPF_ALU|BPF_LSH|BPF_K:
				/* A <<= k */
				SHLib((ins->k) & 0xff, EAX);
				break;

			case BPF_ALU|BPF_RSH|BPF_K:
				/* A >>= k */
				SHRib((ins->k) & 0xff, EAX);
				break;

			case BPF_ALU|BPF_NEG:
				/* A = -A */
				NEGd(EAX);
				break;

			case BPF_MISC|BPF_TAX:
				/* X = A */
				MOVrd(EAX, EDX);
				break;

			case BPF_MISC|BPF_TXA:
				/* A = X */
				MOVrd(EDX, EAX);
				break;
			}
			ins++;
		}

		pass++;
		if (pass == 2)
			break;

		/* lengths are known: allocate the code buffer for pass 2 */
#ifdef _KERNEL
		stream.ibuf = (char *)malloc(stream.cur_ip, M_BPFJIT, M_NOWAIT);
		if (stream.ibuf == NULL) {
			free(stream.refs, M_BPFJIT);
			return (NULL);
		}
#else
		stream.ibuf = (char *)malloc(stream.cur_ip);
		if (stream.ibuf == NULL) {
			free(stream.refs);
			return (NULL);
		}
#endif

		/*
		 * modify the reference table to contain the offsets and
		 * not the lengths of the instructions
		 */
		for (i = 1; i < nins + 1; i++)
			stream.refs[i] += stream.refs[i - 1];

		/* Reset the counters */
		stream.cur_ip = 0;
		stream.bpf_pc = 0;

		/* the second pass creates the actual code */
		emitm = emit_code;
	}

	/*
	 * the reference table is needed only during compilation,
	 * now we can free it
	 */
#ifdef _KERNEL
	free(stream.refs, M_BPFJIT);
#else
	free(stream.refs);
#endif

	return ((bpf_filter_func)stream.ibuf);
}