/*
** Reports whether the instruction sitting in the delay slot (PC + 4)
** interacts with the branch at PC, i.e. whether the recompiler must
** keep the pair together.  Returns TRUE when the branch is affected.
*/
BOOL DelaySlotAffectBranch(DWORD PC) {
	OPCODE Branch, Delay;
	OPCODE_INFO infoBranch, infoDelay;

	/* A nop in the delay slot can never interfere with the branch. */
	if (IsOpcodeNop(PC + 4) == TRUE) {
		return FALSE;
	}

	RSP_LW_IMEM(PC, &Branch.Hex);
	RSP_LW_IMEM(PC + 4, &Delay.Hex);

	memset(&infoBranch, 0, sizeof(infoBranch));
	memset(&infoDelay, 0, sizeof(infoDelay));

	GetInstructionInfo(PC, &Branch, &infoBranch);
	GetInstructionInfo(PC + 4, &Delay, &infoDelay);

	/* A COP0 move-from in the delay slot is always treated as affecting
	** the branch. */
	if ((infoDelay.flags & COPO_MF_Instruction) == COPO_MF_Instruction) {
		return TRUE;
	}

	/* Vector instructions are classed separately and reported as not
	** touching the branch. */
	if ((infoDelay.flags & Instruction_Mask) == VEC_Instruction) {
		return FALSE;
	}

	/* The branch depends on the delay slot when either register it
	** compares is written by the delay instruction. */
	if (infoBranch.SourceReg0 == infoDelay.DestReg ||
	    infoBranch.SourceReg1 == infoDelay.DestReg) {
		return TRUE;
	}
	return FALSE;
}
/*!	Resolves the real target of a call made through position-independent
	code.

	If the called function is position-independent, the call will actually
	have taken us to its corresponding PLT slot.  In that case, disassemble
	the jump found at \a instructionAddress to determine where the actual
	function address is stored, and read it from target memory.

	\param instructionAddress The address of the (presumed) PLT jump.
	\param state The CPU state; its instruction pointer is temporarily
		adjusted and always restored before returning.
	\param _targetAddress On success, set to the resolved function address.
	\return \c B_OK on success, an error code otherwise.
*/
status_t ArchitectureX8664::ResolvePICFunctionAddress(
	target_addr_t instructionAddress, CpuState* state,
	target_addr_t& _targetAddress)
{
	target_addr_t previousIP = state->InstructionPointer();

	InstructionInfo info;
	if (GetInstructionInfo(instructionAddress, info, state) != B_OK)
		return B_BAD_VALUE;

	// x86-64 is likely to use a RIP-relative jump here.  As such, set our
	// instruction pointer to the address after this instruction (where it
	// would be during actual execution), and recalculate the target
	// address of the jump.
	state->SetInstructionPointer(info.Address() + info.Size());
	status_t result = GetInstructionInfo(info.Address(), info, state);
	// Restore the caller's instruction pointer even on failure.
	state->SetInstructionPointer(previousIP);
	if (result != B_OK)
		return result;

	// Zero-initialize so the bytes beyond fAddressSize are defined if the
	// target's address size is smaller than target_addr_t.
	target_addr_t subroutineAddress = 0;
	ssize_t bytesRead = fTeamMemory->ReadMemory(info.TargetAddress(),
		&subroutineAddress, fAddressSize);
	if (bytesRead != fAddressSize)
		return B_BAD_VALUE;

	_targetAddress = subroutineAddress;
	return B_OK;
}
int32_t DelaySlotAffectBranch(uint32_t PC) { RSPOPCODE Branch, Delay; OPCODE_INFO infoBranch, infoDelay; if (IsOpcodeNop(PC + 4) == 1) { return 0; } RSP_LW_IMEM(PC, &Branch.Hex); RSP_LW_IMEM(PC + 4, &Delay.Hex); memset(&infoDelay, 0, sizeof(infoDelay)); memset(&infoBranch, 0, sizeof(infoBranch)); GetInstructionInfo(PC, &Branch, &infoBranch); GetInstructionInfo(PC + 4, &Delay, &infoDelay); if ((infoDelay.flags & Instruction_Mask) == VEC_Instruction) { return 0; } if (infoBranch.SourceReg0 == infoDelay.DestReg) { return 1; } if (infoBranch.SourceReg1 == infoDelay.DestReg) { return 1; } return 0; }
// Renders a human-readable listing of a compiled shader program into
// 'output'.  The first two program words are a header (input count and
// entry address); the rest is a stream of opcodes followed by their
// parameter words.  Returns false when the program is malformed
// (too short, unknown opcode, or truncated parameter list) or when the
// shader recorded compile errors.
bool swsl::Disassembler::Disassemble(const swsl::Shader &shader, mtlString &output)
{
	output.Free();
	output.Reserve(4096);

	// Fix: the two-word header must be present before it is read below;
	// previously an empty/truncated program was indexed out of bounds.
	if (shader.m_program.GetSize() < 2) {
		output.Append("<<Program too short. Abort.>>");
		return false;
	}

	mtlString num;

	output.Append("0 inputs ");
	num.FromInt(shader.m_program[0].u_addr);
	output.Append(num);
	output.Append("\n");

	output.Append("1 entry ");
	num.FromInt(shader.m_program[1].u_addr);
	output.Append(num);
	output.Append("\n");

	for (int iptr = 2; iptr < shader.m_program.GetSize(); ) {

		// Instruction address, left-aligned in a 6-column field.
		num.FromInt(iptr);
		output.Append(num);
		for (int j = 0; j < (6 - num.GetSize()); ++j) {
			output.Append(' ');
		}

		const InstructionInfo *instr = GetInstructionInfo((swsl::InstructionSet)shader.m_program[iptr++].instr);
		if (instr == NULL) {
			output.Append("<<Unknown instruction. Abort.>>");
			return false;
		}

		// Mnemonic, left-aligned in a 12-column field.
		output.Append(instr->name);
		for (int i = 0; i < (12 - instr->name.GetSize()); ++i) {
			output.Append(' ');
		}

		// All of the instruction's parameter words must lie inside the
		// program.
		if (iptr + instr->params - 1 >= shader.m_program.GetSize()) {
			output.Append("<<Parameter corruption. Abort.>>");
			return false;
		}

		for (int i = 0; i < instr->params; ++i) {
			if (i == instr->params - 1 && SWSL_INSTR_IMM_PARAM2(instr->instr)) {
				// The final parameter of this opcode is a float immediate.
				num.FromFloat(shader.m_program[iptr++].fl_imm);
				output.Append(num);
			} else {
				num.FromInt(shader.m_program[iptr++].u_addr);
				output.Append(num);
			}
			for (int j = 0; j < (6 - num.GetSize()); ++j) {
				output.Append(' ');
			}
		}
		output.Append("\n");
	}
	return shader.GetErrorCount() == 0;
}
/*!	Builds a Statement covering the single instruction at \a address.
	On success \a _statement receives a newly allocated
	ContiguousStatement (caller takes ownership) with no source location.
*/
status_t ArchitectureX8664::GetStatement(FunctionDebugInfo* function,
	target_addr_t address, Statement*& _statement)
{
	// TODO: This is not architecture dependent anymore!

	// Look up the instruction at the given address.
	InstructionInfo info;
	status_t result = GetInstructionInfo(address, info, NULL);
	if (result != B_OK)
		return result;

	// Wrap the instruction's address range in a statement.
	ContiguousStatement* statement = new(std::nothrow) ContiguousStatement(
		SourceLocation(-1),
		TargetAddressRange(info.Address(), info.Size()));
	if (statement == NULL)
		return B_NO_MEMORY;

	_statement = statement;
	return B_OK;
}
/*
** Decides whether the two adjacent RSP instructions Top (at PC - 4) and
** Bottom (at PC) may be swapped by the reorderer.  Returns TRUE when the
** swap is considered safe/desirable, FALSE otherwise.
*/
BOOL CompareInstructions(DWORD PC, OPCODE * Top, OPCODE * Bottom) {
	OPCODE_INFO info0, info1;
	DWORD InstructionType;

	GetInstructionInfo(PC - 4, Top, &info0);
	GetInstructionInfo(PC, Bottom, &info1);

#ifdef COMPARE_INSTRUCTIONS_VERBOSE
	CPU_Message("Comparing %s (%X)", RSPOpcodeName ( Top->Hex, PC - 4 ), PC - 4);
	CPU_Message("to %s (%X)", RSPOpcodeName ( Bottom->Hex, PC), PC);
#endif

	/* usually branches and such */
	if ((info0.flags & InvalidOpcode) != 0) return FALSE;
	if ((info1.flags & InvalidOpcode) != 0) return FALSE;

	if ((info0.flags & Flag_Instruction) != 0 && (info1.flags & Flag_Instruction) != 0) return FALSE;

	/* Pack both 2-bit instruction classes into one selector:
	** bits 3..2 = Top's class, bits 1..0 = Bottom's class. */
	InstructionType = (info0.flags & Instruction_Mask) << 2;
	InstructionType |= info1.flags & Instruction_Mask;
	InstructionType &= 0x0F; /* Paranoia */

	/* 4 bit range, 16 possible combinations */
	switch (InstructionType) {
	/*
	** Detect noop instruction, 7 cases, (see flags)
	*/
	case 0x01:
	case 0x02:
	case 0x03:
		/* First is a noop */
		return TRUE;

	case 0x00: /* Both ??? */
	/*
	** BUG FIX: these labels were 0x10/0x20/0x30, which can never match
	** after the `&= 0x0F` above (the first class is shifted by 2, not 4),
	** so "second is a noop" fell through to the default case and emitted
	** a spurious warning.  The reachable encodings are 0x04/0x08/0x0C.
	*/
	case 0x04:
	case 0x08:
	case 0x0C: /* Second is a noop */
		return FALSE;

	case 0x06: /* GPR than Vector - 01,10 */
		if ((info0.flags & MemOperation_Mask) != 0 && (info1.flags & MemOperation_Mask) != 0) {
			/* TODO: We have a vector & GPR memory operation */
			return FALSE;
		} else if ((info1.flags & MemOperation_Mask) != 0) {
			/* We have a vector memory operation */
			return (info1.IndexReg == info0.DestReg) ? FALSE : TRUE;
		}
		/* We could have memory or normal gpr instruction here
		** paired with some kind of vector operation
		*/
		return TRUE;

	case 0x0A: /* Vector than Vector - 10,10 */
		/*
		** Check for Vector Store than Vector multiply (VMULF)
		**
		** This basically gives preferences to putting stores
		** as close to the finish of an operation as possible
		*/
		if ((info0.flags & Store_Operation) != 0
			&& (info1.flags & Accum_Operation) != 0
			&& !(info1.flags & VEC_Accumulate)) {
			return FALSE;
		}
		/*
		** Look for loads and than some kind of vector operation
		** that does no accumulating, there is no reason to reorder
		*/
		if ((info0.flags & Load_Operation) != 0
			&& (info1.flags & Accum_Operation) != 0
			&& !(info1.flags & VEC_Accumulate)) {
			return FALSE;
		}

		if ((info0.flags & MemOperation_Mask) != 0 && (info1.flags & MemOperation_Mask) != 0) {
			/*
			** TODO: This is a bitch, its best to leave it alone
			**/
			return FALSE;
		} else if ((info1.flags & MemOperation_Mask) != 0) {
			/* Remember stored reg & loaded reg are the same */
			if (info0.DestReg == info1.DestReg) { return FALSE; }
			if (info1.flags & Load_Operation) {
				if (info0.SourceReg0 == info1.DestReg) { return FALSE; }
				if (info0.SourceReg1 == info1.DestReg) { return FALSE; }
			} else if (info1.flags & Store_Operation) {
				/* It can store source regs */
				return TRUE;
			}
			return TRUE;
		} else if ((info0.flags & MemOperation_Mask) != 0) {
			/* Remember stored reg & loaded reg are the same */
			if (info0.DestReg == info1.DestReg) { return FALSE; }
			if (info0.flags & Load_Operation) {
				if (info1.SourceReg0 == info0.DestReg) { return FALSE; }
				if (info1.SourceReg1 == info0.DestReg) { return FALSE; }
			} else if (info0.flags & Store_Operation) {
				/* It can store source regs */
				return TRUE;
			}
			return TRUE;
		} else if ((info0.flags & VEC_Accumulate) != 0) {
			/*
			** Example:
			** VMACF
			** VMUDH or VMADH or VADD
			*/
			return FALSE;
		} else if ((info1.flags & VEC_Accumulate) != 0) {
			/*
			** Example:
			** VMULF
			** VMADH
			*/
			return FALSE;
		} else {
			/*
			** Example:
			** VMULF or VADDC
			** VADD or VMUDH
			*/
			return FALSE;
		}
		break;

	case 0x09: /* Vector than GPR - 10,01 */
		/**********
		** this is where the bias comes into play, otherwise
		** we can sit here all day swapping these 2 types
		***********/
		return FALSE;

	case 0x05: /* GPR than GPR - 01,01 */
	case 0x07: /* GPR than Cop2 - 01, 11 */
	case 0x0D: /* Cop2 than GPR - 11, 01 */
	case 0x0F: /* Cop2 than Cop2 - 11, 11 */
		return FALSE;

	case 0x0B: /* Vector than Cop2 - 10, 11 */
		if (info1.flags & Load_Operation) {
			/* Move To Cop2 (dest) from GPR (source) */
			if (info1.DestReg == info0.DestReg) { return FALSE; }
			if (info1.DestReg == info0.SourceReg0) { return FALSE; }
			if (info1.DestReg == info0.SourceReg1) { return FALSE; }
		} else if (info1.flags & Store_Operation) {
			/* Move From Cop2 (source) to GPR (dest) */
			if (info1.SourceReg0 == info0.DestReg) { return FALSE; }
			if (info1.SourceReg0 == info0.SourceReg0) { return FALSE; }
			if (info1.SourceReg0 == info0.SourceReg1) { return FALSE; }
		} else {
			CompilerWarning("ReOrder: Unhandled Vector than Cop2");
		}
		// we want vectors on top
		return FALSE;

	case 0x0E: /* Cop2 than Vector - 11, 10 */
		if (info0.flags & Load_Operation) {
			/* Move To Cop2 (dest) from GPR (source) */
			if (info0.DestReg == info1.DestReg) { return FALSE; }
			if (info0.DestReg == info1.SourceReg0) { return FALSE; }
			if (info0.DestReg == info1.SourceReg1) { return FALSE; }
		} else if (info0.flags & Store_Operation) {
			/* Move From Cop2 (source) to GPR (dest) */
			if (info0.SourceReg0 == info1.DestReg) { return FALSE; }
			if (info0.SourceReg0 == info1.SourceReg0) { return FALSE; }
			if (info0.SourceReg0 == info1.SourceReg1) { return FALSE; }
			if (info0.DestReg == info1.SourceReg0) { return FALSE; }
		} else {
			CompilerWarning("ReOrder: Unhandled Cop2 than Vector");
		}
		// we want this at the top
		return TRUE;

	default:
		CompilerWarning("ReOrder: Unhandled instruction type: %i", InstructionType);
	}
	return FALSE;
}