// Menu handler: toggles an individual console log source on/off based on the
// checked state of the corresponding menu item.
void ConsoleLogFrame::OnToggleSource( wxCommandEvent& evt )
{
	evt.Skip();

	wxMenuBar* menubar = GetMenuBar();
	if (!menubar) return;

	// The Devel writer is a standalone toggle, handled apart from the source table.
	if (evt.GetId() == MenuId_LogSource_Devel)
	{
		if (wxMenuItem* item = menubar->FindItem(evt.GetId()))
			DevConWriterEnabled = item->IsChecked();
		return;
	}

	// All other IDs index into the ConLogSources table.
	uint srcid = evt.GetId() - MenuId_LogSource_Start;

	if (!pxAssertDev( ArraySize(ConLogSources) > srcid, "Invalid source log index (out of bounds)" ))
		return;
	if (!pxAssertDev( ConLogSources[srcid] != NULL, "Invalid source log index (NULL pointer [separator])" ))
		return;

	if (wxMenuItem* item = menubar->FindItem(evt.GetId()))
	{
		pxAssertDev( item->IsCheckable(), "Uncheckable log source menu item? Seems fishy!" );
		ConLogSources[srcid]->Enabled = item->IsChecked();
	}
}
// ------------------------------------------------------------------------ // Writes a jump at the current x86Ptr, which targets a pre-established target address. // (usually a backwards jump) // // slideForward - used internally by xSmartJump to indicate that the jump target is going // to slide forward in the event of an 8 bit displacement. // __emitinline void xJccKnownTarget(JccComparisonType comparison, const void *target, bool slideForward) { // Calculate the potential j8 displacement first, assuming an instruction length of 2: sptr displacement8 = (sptr)target - (sptr)(xGetPtr() + 2); const int slideVal = slideForward ? ((comparison == Jcc_Unconditional) ? 3 : 4) : 0; displacement8 -= slideVal; if (slideForward) { pxAssertDev(displacement8 >= 0, "Used slideForward on a backward jump; nothing to slide!"); } if (is_s8(displacement8)) xJcc8(comparison, displacement8); else { // Perform a 32 bit jump instead. :( s32 *bah = xJcc32(comparison); sptr distance = (sptr)target - (sptr)xGetPtr(); #ifdef __x86_64__ // This assert won't physically happen on x86 targets pxAssertDev(distance >= -0x80000000LL && distance < 0x80000000LL, "Jump target is too far away, needs an indirect register"); #endif *bah = (s32)distance; } }
// Back-patches the displacement operand of a previously-emitted forward jump so
// that it lands on the current emitter position (xGetPtr()).  BasePtr points just
// past the jump instruction, so the operand lives at BasePtr[-opsize..-1].
void xForwardJumpBase::_setTarget(uint opsize) const
{
    pxAssertDev(BasePtr != NULL, "");
    sptr displacement = (sptr)xGetPtr() - (sptr)BasePtr;

    if (opsize == 1) {
        // Short jump: displacement must fit in a signed byte.
        pxAssertDev(is_s8(displacement), "Emitter Error: Invalid short jump displacement.");
        BasePtr[-1] = (s8)displacement;
    } else {
        // full displacement, no sanity checks needed :D
        ((s32 *)BasePtr)[-1] = displacement;
    }
}
// Handles a 128-bit write to the VIF1 FIFO: queues the quadword into the VIF1
// transfer pipeline and updates GIF/VIF status flags accordingly.
void __fastcall WriteFIFO_VIF1(const mem128_t *value)
{
    VIF_LOG("WriteFIFO/VIF1 <- %ls", value->ToString().c_str());

    // Diagnostics: the emulated program writing while FDR is set, or while VIF1
    // is stalled/interrupted, is suspicious behavior worth logging.
    if (vif1Regs.stat.FDR)
        DevCon.Warning("writing to fifo when fdr is set!");
    if (vif1Regs.stat.test(VIF1_STAT_INT | VIF1_STAT_VSS | VIF1_STAT_VIS | VIF1_STAT_VFS) )
        DevCon.Warning("writing to vif1 fifo when stalled");

    vif1ch.qwc += 1;
    if(vif1.irqoffset != 0 && vif1.vifstalled == true)
        DevCon.Warning("Offset on VIF1 FIFO start!");

    // Push the quadword through VIF1 as 4 x 32-bit words.
    bool ret = VIF1transfer((u32*)value, 4);

    // If PATH2 just finished, release the GIF arbitration path and kick PATH1
    // if it has queued work.
    if(GSTransferStatus.PTH2 == STOPPED_MODE && gifRegs.stat.APATH == GIF_APATH2)
    {
        if(gifRegs.stat.DIR == 0)gifRegs.stat.OPH = false;
        gifRegs.stat.APATH = GIF_APATH_IDLE;
        if(gifRegs.stat.P1Q) gsPath1Interrupt();
    }

    // Update the VIF parser status: waiting if a command is pending and the
    // FIFO drained, idle otherwise.
    if (vif1.cmd)
    {
        if(vif1.done == true && vif1ch.qwc == 0) vif1Regs.stat.VPS = VPS_WAITING;
    }
    else
    {
        vif1Regs.stat.VPS = VPS_IDLE;
    }

    pxAssertDev( ret, "vif stall code not implemented" );
}
// Assigns the preferred base address for this reserve.  Only legal while no
// pages are reserved; otherwise the request is rejected (dev assertion) and
// the object is left untouched.  Returns *this for call chaining.
VirtualMemoryReserve& VirtualMemoryReserve::SetBaseAddr( uptr newaddr )
{
	if (pxAssertDev(!m_pages_reserved, "Invalid object state: you must release the virtual memory reserve prior to changing its base address!"))
		m_baseptr = (void*)newaddr;

	return *this;
}
// Handles a 128-bit write to the VIF1 FIFO: queues the quadword into the VIF1
// transfer pipeline, updates the VIF parser status, and releases the GIF PATH2
// arbitration slot when its transfer has completed.
void __fastcall WriteFIFO_VIF1(const mem128_t *value)
{
    VIF_LOG("WriteFIFO/VIF1 <- %ls", value->ToString().c_str());

    // Diagnostics: writes while FDR is set or while VIF1 is stalled/interrupted
    // indicate suspicious behavior from the emulated program.
    if (vif1Regs.stat.FDR)
    {
        DevCon.Warning("writing to fifo when fdr is set!");
    }
    if (vif1Regs.stat.test(VIF1_STAT_INT | VIF1_STAT_VSS | VIF1_STAT_VIS | VIF1_STAT_VFS) )
    {
        DevCon.Warning("writing to vif1 fifo when stalled");
    }
    if (vif1.irqoffset.value != 0 && vif1.vifstalled.enabled == true)
    {
        DevCon.Warning("Offset on VIF1 FIFO start!");
    }

    vif1ch.qwc += 1;

    // Push the quadword through VIF1 as 4 x 32-bit words.
    bool ret = VIF1transfer((u32*)value, 4);

    // Parser status: waiting if a command is pending and the FIFO drained,
    // idle when no command is active.
    if (vif1.cmd)
    {
        if (vif1.done && !vif1ch.qwc)
            vif1Regs.stat.VPS = VPS_WAITING;
    }
    else
        vif1Regs.stat.VPS = VPS_IDLE;

    // PATH2 finished: clear the active path/output flags and let PATH1/PATH3
    // arbitration proceed.
    if( gifRegs.stat.APATH == 2 && gifUnit.gifPath[1].isDone())
    {
        gifRegs.stat.APATH = 0;
        gifRegs.stat.OPH = 0;
        vif1Regs.stat.VGW = false; //Let vif continue if it's stuck on a flush

        if(gifUnit.checkPaths(1,0,1)) gifUnit.Execute(false, true);
    }

    pxAssertDev( ret, "vif stall code not implemented" );
}
void MemProtect( void* baseaddr, size_t size, PageProtectionMode mode, bool allowExecution ) { pxAssertDev( ((size & (__pagesize-1)) == 0), wxsFormat( L"Memory block size must be a multiple of the target platform's page size.\n" L"\tPage Size: 0x%04x (%d), Block Size: 0x%04x (%d)", __pagesize, __pagesize, size, size ) ); DWORD winmode = 0; switch( mode ) { case Protect_NoAccess: winmode = ( allowExecution ) ? PAGE_EXECUTE : PAGE_NOACCESS; break; case Protect_ReadOnly: winmode = ( allowExecution ) ? PAGE_EXECUTE_READ : PAGE_READONLY; break; case Protect_ReadWrite: winmode = ( allowExecution ) ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; break; } DWORD OldProtect; // enjoy my uselessness, yo! VirtualProtect( baseaddr, size, winmode, &OldProtect ); }
// Reads one block (sector) from the iso image into dst, applying CD MSF header
// fixups for CD-type images.
//
// NOTE(review): the bounds check uses '>' -- if m_blocks is a block *count*
// (valid indices 0..m_blocks-1) then lsn == m_blocks is also out of range and
// the check should be '>='.  Verify against the reader implementations before
// changing.
void isoFile::ReadBlock(u8* dst, uint lsn)
{
    if (lsn > m_blocks)
    {
        FastFormatUnicode msg;
        msg.Write("isoFile error: Block index is past the end of file! (%u > %u).", lsn, m_blocks);

        pxAssertDev(false, msg);
        Console.Error(msg);

        // [TODO] : Throw exception?
        //  Typically an error like this is bad; indicating an invalid dump or corrupted
        //  iso file.

        return;
    }

    // Block-dump images use a dedicated reader; everything else goes through the
    // plain block reader.
    if (m_flags == ISOFLAGS_BLOCKDUMP_V2)
        _ReadBlockD(dst, lsn);
    else
        _ReadBlock(dst, lsn);

    if (m_type == ISOTYPE_CD)
    {
        // Patch the CD sector header: minutes/seconds/frames address plus mode byte.
        lsn_to_msf(dst + 12, lsn);
        dst[15] = 2;
    }
}
// Returns: // The previous suspension state; true if the thread was running or false if it was // closed, not running, or paused. // void SysThreadBase::Pause() { if( IsSelf() || !IsRunning() ) return; // shortcut ExecMode check to avoid deadlocking on redundant calls to Suspend issued // from Resume or OnResumeReady code. if( (m_ExecMode == ExecMode_Closed) || (m_ExecMode == ExecMode_Paused) ) return; { ScopedLock locker( m_ExecModeMutex ); // Check again -- status could have changed since above. if( (m_ExecMode == ExecMode_Closed) || (m_ExecMode == ExecMode_Paused) ) return; if( m_ExecMode == ExecMode_Opened ) m_ExecMode = ExecMode_Pausing; pxAssertDev( m_ExecMode == ExecMode_Pausing, "ExecMode should be nothing other than Pausing..." ); OnPause(); m_sem_event.Post(); } m_RunningLock.Wait(); }
// Handles a 128-bit write to the VIF1 FIFO: queues the quadword into the VIF1
// transfer pipeline and updates the VIF parser status.
void __fastcall WriteFIFO_VIF1(const mem128_t *value)
{
    VIF_LOG("WriteFIFO/VIF1 <- %ls", value->ToString().c_str());

    // Diagnostics: writes while FDR is set or while VIF1 is stalled/interrupted
    // indicate suspicious behavior from the emulated program.
    if (vif1Regs.stat.FDR)
    {
        DevCon.Warning("writing to fifo when fdr is set!");
    }
    if (vif1Regs.stat.test(VIF1_STAT_INT | VIF1_STAT_VSS | VIF1_STAT_VIS | VIF1_STAT_VFS) )
    {
        DevCon.Warning("writing to vif1 fifo when stalled");
    }
    if (vif1.irqoffset != 0 && vif1.vifstalled == true)
    {
        DevCon.Warning("Offset on VIF1 FIFO start!");
    }

    vif1ch.qwc += 1;

    // Push the quadword through VIF1 as 4 x 32-bit words.
    bool ret = VIF1transfer((u32*)value, 4);

    // Parser status: waiting if a command is pending and the FIFO drained,
    // idle when no command is active.
    if (vif1.cmd)
    {
        if (vif1.done && !vif1ch.qwc)
            vif1Regs.stat.VPS = VPS_WAITING;
    }
    else
        vif1Regs.stat.VPS = VPS_IDLE;

    pxAssertDev( ret, "vif stall code not implemented" );
}
// Adds a register reference to this address modifier, folding it into the
// Base/Index/Factor triple.  At most two distinct registers (one base, one
// scaled index) are permitted; a repeated register bumps the scale factor.
xAddressVoid& xAddressVoid::Add( const xAddressReg& src )
{
	if( src == Index )
	{
		Factor++;
	}
	else if( src == Base )
	{
		// Compound the existing register reference into the Index/Scale pair.
		// (Note: src cannot equal Index here -- that case is handled by the first
		//  branch above -- so the old redundant re-check of src == Index was dead
		//  code and has been removed.)
		Base = xEmptyReg;

		pxAssertDev( Index.IsEmpty(), "x86Emitter: Only one scaled index register is allowed in an address modifier." );
		Index = src;
		Factor = 2;
	}
	else if( Base.IsEmpty() )
		Base = src;
	else if( Index.IsEmpty() )
		Index = src;
	else
		pxAssumeDev( false, L"x86Emitter: address modifiers cannot have more than two index registers." ); // oops, only 2 regs allowed per ModRm!

	return *this;
}
// Pauses the core thread and, on devel builds, sanity-checks that plugin
// open/close status was not altered while the core was paused.
void SysExecEvent_CoreThreadPause::InvokeEvent()
{
#ifdef PCSX2_DEVBUILD
	// Snapshot plugin status before pausing so we can verify it afterward.
	bool CorePluginsAreOpen = GetCorePlugins().AreOpen();
#endif

	// The pause/resume sequence is identical for all builds; only the devel-mode
	// status check differs.  (The previous revision duplicated these three lines
	// in both halves of the #ifdef.)
	ScopedCoreThreadPause paused_core;
	_post_and_wait(paused_core);

#ifdef PCSX2_DEVBUILD
	// All plugins should be initialized and opened upon resuming from
	// a paused state.  If the thread that paused us changed plugin status, it should
	// have used Close instead.
	if( CorePluginsAreOpen )
	{
		CorePluginsAreOpen = GetCorePlugins().AreOpen();
		pxAssertDev( CorePluginsAreOpen, "Invalid plugin close/shutdown detected during paused CoreThread; please Stop/Suspend the core instead." );
	}
#endif

	paused_core.AllowResume();
}
void SysCoreThread::UploadStateCopy( const VmStateBuffer& copy ) { if( !pxAssertDev( IsPaused(), "CoreThread is not paused; new VM state cannot be uploaded." ) ) return; memLoadingState loadme( copy ); loadme.FreezeAll(); m_resetVirtualMachine = false; }
// Suspends emulation and closes the emulation state (including plugins) at the next PS2 vsync, // and returns control to the calling thread; or does nothing if the core is already suspended. // // Parameters: // isNonblocking - if set to true then the function will not block for emulation suspension. // Defaults to false if parameter is not specified. Performing non-blocking suspension // is mostly useful for starting certain non-Emu related gui activities (improves gui // responsiveness). // // Returns: // The previous suspension state; true if the thread was running or false if it was // suspended. // // Exceptions: // CancelEvent - thrown if the thread is already in a Paused or Closing state. Because // actions that pause emulation typically rely on plugins remaining loaded/active, // Suspension must cancel itself forcefully or risk crashing whatever other action is // in progress. // void SysThreadBase::Suspend( bool isBlocking ) { if (!pxAssertDev(!IsSelf(),"Suspend/Resume are not allowed from this thread.")) return; if (!IsRunning()) return; // shortcut ExecMode check to avoid deadlocking on redundant calls to Suspend issued // from Resume or OnResumeReady code. if( m_ExecMode == ExecMode_Closed ) return; { ScopedLock locker( m_ExecModeMutex ); switch( m_ExecMode ) { // FIXME what to do for this case // case ExecMode_NoThreadYet: // Check again -- status could have changed since above. case ExecMode_Closed: return; case ExecMode_Pausing: case ExecMode_Paused: if( !isBlocking ) throw Exception::CancelEvent( L"Cannot suspend in non-blocking fashion: Another thread is pausing the VM state." ); m_ExecMode = ExecMode_Closing; m_sem_Resume.Post(); m_sem_ChangingExecMode.Wait(); break; case ExecMode_Opened: m_ExecMode = ExecMode_Closing; break; case ExecMode_Closing: break; } pxAssertDev( m_ExecMode == ExecMode_Closing, "ExecMode should be nothing other than Closing..." ); m_sem_event.Post(); } if( isBlocking ) m_RunningLock.Wait(); }
// Sets the block size via pages (pages are defined by the __pagesize global, which is // typically 4096). // // This method must be called prior to accessing or modifying the array contents. Calls to // a modified buffer will be ignored (and generate an assertion in dev/debug modes). SpatialArrayReserve& SpatialArrayReserve::SetBlockSizeInPages( uint pages ) { if (pxAssertDev(!m_pages_commited, "Invalid object state: Block size can only be changed prior to accessing or modifying the reserved buffer contents.")) { m_blocksize = pages; m_numblocks = (m_pages_reserved + m_blocksize - 1) / m_blocksize; m_blockbits.Alloc( _calcBlockBitArrayLength() ); } return *this; }
void Threading::pxThread::_platform_specific_OnStartInThread() { // OpenThread Note: Vista and Win7 need only THREAD_QUERY_LIMITED_INFORMATION (XP and 2k need more), // however we own our process threads, so shouldn't matter in any case... m_native_id = (uptr)GetCurrentThreadId(); m_native_handle = (uptr)OpenThread( THREAD_QUERY_INFORMATION, false, (DWORD)m_native_id ); pxAssertDev( m_native_handle, wxNullChar ); }
// Emits a ModRM byte in Disp32 form (no base/index register) followed by the
// absolute 32-bit address as the displacement operand.
void EmitSibMagic( uint regfield, const void* address )
{
    ModRM( 0, regfield, ModRm_UseDisp32 );

    // SIB encoding only supports 32bit offsets, even on x86_64
    // We must make sure that the displacement is within the 32bit range
    // Else we will fail out in a spectacular fashion
    sptr displacement = (sptr)address;
    pxAssertDev(displacement >= -0x80000000LL && displacement < 0x80000000LL, "SIB target is too far away, needs an indirect register");

    xWrite<s32>( (s32)displacement );
}
// Prepares synchronization state for a fresh thread launch, then delegates to
// the parent implementation.  Only valid from the initial (no thread) state.
void SysThreadBase::OnStart()
{
	if (!pxAssertDev( m_ExecMode == ExecMode_NoThreadYet, "SysSustainableThread:Start(): Invalid execution mode" ))
		return;

	// Clear any stale signals left over from a previous run.
	m_sem_Resume.Reset();
	m_sem_ChangingExecMode.Reset();

	// FrankenMutex presumably re-initializes mutexes that may have been left in a
	// bad state by a canceled thread -- see its definition for specifics.
	FrankenMutex( m_ExecModeMutex );
	FrankenMutex( m_RunningLock );

	_parent::OnStart();
}
//////////////////////////////////////////////////////////////////////////////////////////
// Conditionally generates Sib encoding information!
//
// regfield - register field to be written to the ModRm.  This is either a register specifier
//   or an opcode extension.  In either case, the instruction determines the value for us.
//
void EmitSibMagic(uint regfield, const xIndirectVoid &info)
{
    // 3 bits also on x86_64 (so max is 8)
    // We might need to mask it on x86_64
    pxAssertDev(regfield < 8, "Invalid x86 register identifier.");

    // Displacement operand size: 0 = none, 1 = s8, 2 = s32.
    int displacement_size = (info.Displacement == 0) ? 0 : ((info.IsByteSizeDisp()) ? 1 : 2);

    // A register-less address must carry a full 32-bit displacement.
    pxAssert(!info.Base.IsEmpty() || !info.Index.IsEmpty() || displacement_size == 2);

    if (!NeedsSibMagic(info)) {
        // Use ModRm-only encoding, with the rm field holding an index/base register, if
        // one has been specified.  If neither register is specified then use Disp32 form,
        // which is encoded as "EBP w/o displacement" (which is why EBP must always be
        // encoded *with* a displacement of 0, if it would otherwise not have one).

        if (info.Index.IsEmpty()) {
            EmitSibMagic(regfield, (void *)info.Displacement);
            return;
        } else {
            if (info.Index == ebp && displacement_size == 0)
                displacement_size = 1; // forces [ebp] to be encoded as [ebp+0]!

            ModRM(displacement_size, regfield, info.Index.Id);
        }
    } else {
        // In order to encode "just" index*scale (and no base), we have to encode
        // it as a special [index*scale + displacement] form, which is done by
        // specifying EBP as the base register and setting the displacement field
        // to zero. (same as ModRm w/o SIB form above, basically, except the
        // ModRm_UseDisp flag is specified in the SIB instead of the ModRM field).

        if (info.Base.IsEmpty()) {
            ModRM(0, regfield, ModRm_UseSib);
            SibSB(info.Scale, info.Index.Id, ModRm_UseDisp32);
            xWrite<s32>(info.Displacement);
            return;
        } else {
            if (info.Base == ebp && displacement_size == 0)
                displacement_size = 1; // forces [ebp] to be encoded as [ebp+0]!

            ModRM(displacement_size, regfield, ModRm_UseSib);
            SibSB(info.Scale, info.Index.Id, info.Base.Id);
        }
    }

    // Emit any pending displacement bytes, using the operand size chosen above.
    if (displacement_size != 0) {
        if (displacement_size == 1)
            xWrite<s8>(info.Displacement);
        else
            xWrite<s32>(info.Displacement);
    }
}
// Applies a full suite of new settings, which will automatically facilitate the necessary // resets of the core and components (including plugins, if needed). The scope of resetting // is determined by comparing the current settings against the new settings, so that only // real differences are applied. void SysCoreThread::ApplySettings( const Pcsx2Config& src ) { if( src == EmuConfig ) return; if( !pxAssertDev( IsPaused(), "CoreThread is not paused; settings cannot be applied." ) ) return; m_resetRecompilers = ( src.Cpu != EmuConfig.Cpu ) || ( src.Gamefixes != EmuConfig.Gamefixes ) || ( src.Speedhacks != EmuConfig.Speedhacks ); m_resetProfilers = ( src.Profiler != EmuConfig.Profiler ); m_resetVsyncTimers = ( src.GS != EmuConfig.GS ); const_cast<Pcsx2Config&>(EmuConfig) = src; }
// Launches the underlying thread and signals it to begin processing events.
void SysThreadBase::Start()
{
	_parent::Start();

	// NOTE(review): Sleep(1) is a heuristic yield, not a synchronization primitive;
	// the assertion below could in principle race with the new thread's startup
	// sequence.  A semaphore-based handshake would be more robust -- confirm intent.
	Sleep( 1 );

	pxAssertDev( (m_ExecMode == ExecMode_Closing) || (m_ExecMode == ExecMode_Closed),
		"Unexpected thread status during SysThread startup."
	);

	m_sem_event.Post();
}
// Sanity-checks that the compiled page size matches the OS page size, and that
// the given block size is page-aligned.  Dev/debug assertion helper only.
static __ri void PageSizeAssertionTest( size_t size )
{
	// FIX: the first format string previously concatenated a narrow ("...") literal
	// with a wide (L"...") literal, which is ill-formed / compiler-dependent string
	// literal concatenation.  All segments are now wide, matching the assert below.
	pxAssertMsg( (__pagesize == getpagesize()), pxsFmt(
		L"Internal system error: Operating system pagesize does not match compiled pagesize.\n\t"
		L"\tOS Page Size: 0x%x (%d), Compiled Page Size: 0x%x (%u)",
		getpagesize(), getpagesize(), __pagesize, __pagesize )
	);

	pxAssertDev( (size & (__pagesize-1)) == 0, pxsFmt(
		L"Memory block size must be a multiple of the target platform's page size.\n"
		L"\tPage Size: 0x%x (%u), Block Size: 0x%x (%u)",
		__pagesize, __pagesize, size, size )
	);
}
// Synchronously reads a single block at the given sector number into dst.
// Returns the reader's result code, or -1 if the sector is out of range.
int InputIsoFile::ReadSync(u8* dst, uint lsn)
{
	// Reject reads past the end of the image.
	if (lsn > m_blocks)
	{
		FastFormatUnicode msg;
		msg.Write("isoFile error: Block index is past the end of file! (%u > %u).", lsn, m_blocks);

		pxAssertDev(false, msg);
		Console.Error(msg.c_str());
		return -1;
	}

	return m_reader->ReadSync(dst + m_blockofs, lsn, 1);
}
// Resumes the core execution state, or does nothing if the core is already running.  If
// settings were changed, resets will be performed as needed and emulation state resumed from
// memory savestates.
//
// Note that this is considered a non-blocking action.  Most times the state is safely resumed
// on return, but in the case of re-entrant or nested message handling the function may return
// before the thread has resumed.  If you need explicit behavior tied to the completion of the
// Resume, you'll need to bind callbacks to either OnResumeReady or OnResumeInThread.
//
// Exceptions:
//   PluginInitError     - thrown if a plugin fails init (init is performed on the current thread
//                         on the first time the thread is resumed from it's initial idle state)
//   ThreadCreationError - Insufficient system resources to create thread.
//
void SysThreadBase::Resume()
{
	if( IsSelf() ) return;
	if( m_ExecMode == ExecMode_Opened ) return;

	ScopedLock locker( m_ExecModeMutex );

	// Implementation Note:
	// The entire state coming out of a Wait is indeterminate because of user input
	// and pending messages being handled.  So after each call we do some seemingly redundant
	// sanity checks against m_ExecMode/m_Running status, and if something doesn't feel
	// right, we should abort; the user may have canceled the action before it even finished.

	switch( m_ExecMode )
	{
		case ExecMode_Opened: return;

		case ExecMode_NoThreadYet:
		{
			// First resume: spin up the thread itself.
			Start();
			if( !m_running || (m_ExecMode == ExecMode_NoThreadYet) )
				throw Exception::ThreadCreationError(this);
			if( m_ExecMode == ExecMode_Opened ) return;
		}
		// fall through...  (a freshly-started thread proceeds into the suspended-state
		// wait below, same as a Closing/Pausing one)

		case ExecMode_Closing:
		case ExecMode_Pausing:
			// we need to make sure and wait for the emuThread to enter a fully suspended
			// state before continuing...

			m_RunningLock.Wait();
			if( !m_running ) return;
			if( (m_ExecMode != ExecMode_Closed) && (m_ExecMode != ExecMode_Paused) ) return;
			if( !GetCorePlugins().AreLoaded() ) return;
		break;

		case ExecMode_Paused:
		case ExecMode_Closed:
		break;
	}

	pxAssertDev( (m_ExecMode == ExecMode_Closed) || (m_ExecMode == ExecMode_Paused),
		"SysThreadBase is not in a closed/paused state?  wtf!" );

	// Transition to Opened and wake the thread from its suspended wait.
	OnResumeReady();
	m_ExecMode = ExecMode_Opened;
	m_sem_Resume.Post();
}
// Notes: // * This method should be called if the object is already in an released (unreserved) state. // Subsequent calls will be ignored, and the existing reserve will be returned. // // Parameters: // size - size of the reserve, in bytes. (optional) // If not specified (or zero), then the default size specified in the constructor for the // object instance is used. // // upper_bounds - criteria that must be met for the allocation to be valid. // If the OS refuses to allocate the memory below the specified address, the // object will fail to initialize and an exception will be thrown. void* VirtualMemoryReserve::Reserve( size_t size, uptr base, uptr upper_bounds ) { if (!pxAssertDev( m_baseptr == NULL, "(VirtualMemoryReserve) Invalid object state; object has already been reserved." )) return m_baseptr; if (!size) size = m_defsize; if (!size) return NULL; m_pages_reserved = (size + __pagesize-4) / __pagesize; uptr reserved_bytes = m_pages_reserved * __pagesize; m_baseptr = (void*)HostSys::MmapReserve(base, reserved_bytes); if (!m_baseptr || (upper_bounds != 0 && (((uptr)m_baseptr + reserved_bytes) > upper_bounds))) { DevCon.Warning( L"%s: host memory @ %s -> %s is unavailable; attempting to map elsewhere...", m_name.c_str(), pxsPtr(base), pxsPtr(base + size) ); SafeSysMunmap(m_baseptr, reserved_bytes); if (base) { // Let's try again at an OS-picked memory area, and then hope it meets needed // boundschecking criteria below. m_baseptr = HostSys::MmapReserve( 0, reserved_bytes ); } } if ((upper_bounds != 0) && (((uptr)m_baseptr + reserved_bytes) > upper_bounds)) { SafeSysMunmap(m_baseptr, reserved_bytes); // returns null, caller should throw an exception or handle appropriately. 
} if (!m_baseptr) return NULL; FastFormatUnicode mbkb; uint mbytes = reserved_bytes / _1mb; if (mbytes) mbkb.Write( "[%umb]", mbytes ); else mbkb.Write( "[%ukb]", reserved_bytes / 1024 ); DevCon.WriteLn( Color_Gray, L"%-32s @ %s -> %s %s", m_name.c_str(), pxsPtr(m_baseptr), pxsPtr((uptr)m_baseptr+reserved_bytes), mbkb.c_str()); return m_baseptr; }
////////////////////////////////////////////////////////////////////////// // WriteFIFO Pages // void __fastcall WriteFIFO_VIF0(const mem128_t *value) { VIF_LOG("WriteFIFO/VIF0 <- %ls", value->ToString().c_str()); vif0ch.qwc += 1; if(vif0.irqoffset.value != 0 && vif0.vifstalled.enabled == true) DevCon.Warning("Offset on VIF0 FIFO start!"); bool ret = VIF0transfer((u32*)value, 4); if (vif0.cmd) { if(vif0.done && vif0ch.qwc == 0) vif0Regs.stat.VPS = VPS_WAITING; } else { vif0Regs.stat.VPS = VPS_IDLE; } pxAssertDev( ret, "vif stall code not implemented" ); }
// Attempts to destroy the underlying pthread mutex, gracefully handling the case
// where it is still locked (EBUSY): self-held recursive locks are released and
// retried, and locks held by other threads are waited on with a timeout.
void Threading::Mutex::Detach()
{
	// First destroy attempt.  Any result other than EBUSY (i.e. success, or an
	// unexpected error) means there is nothing more we can do here.
	if( EBUSY != pthread_mutex_destroy(&m_mutex) ) return;

	if( IsRecursive() )
	{
		// Sanity check: Recursive locks could be held by our own thread, which would
		// be considered an assertion failure, but can also be handled gracefully.
		// (note: if the mutex is locked recursively more than twice then this assert won't
		//  detect it)

		Release();
		Release();		// in case of double recursion.
		int result = pthread_mutex_destroy( &m_mutex );
		// pxAssertDev returns its condition: true here means the retry succeeded
		// (or failed for a reason other than busy), so we're done.
		if( pxAssertDev( result != EBUSY, "Detachment of a recursively-locked mutex (self-locked!)." ) ) return;
	}

	// Another thread still holds the lock: wait (with timeout) for it to be
	// released before destroying, else report the likely deadlock.
	if( Wait(def_detach_timeout) )
		pthread_mutex_destroy( &m_mutex );
	else
		Console.Error( "(Thread Log) Mutex cleanup failed due to possible deadlock.");
}
void InputIsoFile::BeginRead2(uint lsn) { if (lsn > m_blocks) { FastFormatUnicode msg; msg.Write("isoFile error: Block index is past the end of file! (%u > %u).", lsn, m_blocks); pxAssertDev(false, msg); Console.Error(msg.c_str()); // [TODO] : Throw exception? // Typically an error like this is bad; indicating an invalid dump or corrupted // iso file. m_current_lsn = -1; return; } m_current_lsn = lsn; if(lsn >= m_read_lsn && lsn < (m_read_lsn+m_read_count)) { // Already buffered return; } m_read_lsn = lsn; m_read_count = 1; if(ReadUnit > 1) { //m_read_lsn = lsn - (lsn % ReadUnit); m_read_count = std::min(ReadUnit, m_blocks - m_read_lsn); } m_reader->BeginRead(m_readbuffer, m_read_lsn, m_read_count); m_read_inprogress = true; }
// Emits the opcode bytes for a forward jump whose destination is not yet known,
// leaving the displacement operand unwritten (space is reserved via xAdvancePtr)
// to be back-patched later by _setTarget.
xForwardJumpBase::xForwardJumpBase(uint opsize, JccComparisonType cctype)
{
    pxAssert(opsize == 1 || opsize == 4);
    pxAssertDev(cctype != Jcc_Unknown, "Invalid ForwardJump conditional type.");

    // BasePtr records the address just past the end of the full instruction,
    // which is the reference point for the relative displacement.
    BasePtr = (s8 *)xGetPtr() +
              ((opsize == 1) ? 2 :                       // j8's are always 2 bytes.
               ((cctype == Jcc_Unconditional) ? 5 : 6)); // j32's are either 5 or 6 bytes

    if (opsize == 1)
        // Short form: 0xEB (jmp rel8) or 0x70+cc (jcc rel8).
        xWrite8((cctype == Jcc_Unconditional) ? 0xeb : (0x70 | cctype));
    else {
        if (cctype == Jcc_Unconditional)
            // Near form: 0xE9 (jmp rel32).
            xWrite8(0xe9);
        else {
            // Near conditional form: 0x0F 0x80+cc (jcc rel32).
            xWrite8(0x0f);
            xWrite8(0x80 | cctype);
        }
    }

    // Reserve space for the (not yet known) displacement operand.
    xAdvancePtr(opsize);
}
// ------------------------------------------------------------------------ // Writes a jump at the current x86Ptr, which targets a pre-established target address. // (usually a backwards jump) // // slideForward - used internally by xSmartJump to indicate that the jump target is going // to slide forward in the event of an 8 bit displacement. // __emitinline void xJccKnownTarget( JccComparisonType comparison, const void* target, bool slideForward ) { // Calculate the potential j8 displacement first, assuming an instruction length of 2: sptr displacement8 = (sptr)target - (sptr)(xGetPtr() + 2); const int slideVal = slideForward ? ((comparison == Jcc_Unconditional) ? 3 : 4) : 0; displacement8 -= slideVal; if( slideForward ) { pxAssertDev( displacement8 >= 0, "Used slideForward on a backward jump; nothing to slide!" ); } if( is_s8( displacement8 ) ) xJcc8( comparison, displacement8 ); else { // Perform a 32 bit jump instead. :( s32* bah = xJcc32( comparison ); *bah = (s32)target - (s32)xGetPtr(); } }