Example #1
duint LinearAnalysis::FindFunctionEnd(duint start, duint maxaddr)
{
    //disassemble first instruction for some heuristics
    if(_cp.Disassemble(start, TranslateAddress(start), MAX_DISASM_BUFFER))
    {
        //JMP [123456] ; import
        if(_cp.InGroup(CS_GRP_JUMP) && _cp.x86().operands[0].type == X86_OP_MEM)
            return 0;
    }

    //linear search with some trickery
    duint end = 0;
    duint jumpback = 0;
    for(duint addr = start, fardest = 0; addr < maxaddr;)
    {
        if(_cp.Disassemble(addr, TranslateAddress(addr), MAX_DISASM_BUFFER))
        {
            if(addr + _cp.Size() > maxaddr)  //we went past the maximum allowed address
                break;

            const cs_x86_op & operand = _cp.x86().operands[0];
            if((_cp.InGroup(CS_GRP_JUMP) || _cp.IsLoop()) && operand.type == X86_OP_IMM)   //jump
            {
                duint dest = (duint)operand.imm;

                if(dest >= maxaddr)   //jump across function boundaries
                {
                    //currently unused
                }
                else if(dest > addr && dest > fardest)   //save the farthest JXX destination forward
                {
                    fardest = dest;
                }
                else if(end && dest < end && (_cp.GetId() == X86_INS_JMP || _cp.GetId() == X86_INS_LOOP)) //save the last JMP backwards
                {
                    jumpback = addr;
                }
            }
            else if(_cp.InGroup(CS_GRP_RET))   //possible function end?
            {
                end = addr;
                if(fardest < addr)  //we stop if the farthest JXX destination forward is before this RET
                    break;
            }

            addr += _cp.Size();
        }
        else
            addr++;
    }
    return end < jumpback ? jumpback : end;
}
Example #2
BYTE CMemoryController::GetByte(WORD address, BOOL okHaltMode)
{
    WORD offset;
    int addrtype = TranslateAddress(address, okHaltMode, FALSE, &offset);

    switch (addrtype)
    {
    case ADDRTYPE_RAM0:
    case ADDRTYPE_RAM1:
    case ADDRTYPE_RAM2:
        return m_pBoard->GetRAMByte((addrtype & ADDRTYPE_MASK_RAM), offset);
    case ADDRTYPE_RAM12:
        if ((offset & 1) == 0)
            return m_pBoard->GetRAMByte(1, offset / 2);
        else
            return m_pBoard->GetRAMByte(2, offset / 2);

    case ADDRTYPE_ROMCART1:
        return m_pBoard->GetROMCartByte(1, offset);
    case ADDRTYPE_ROMCART2:
        return m_pBoard->GetROMCartByte(2, offset);

    case ADDRTYPE_ROM:
        return m_pBoard->GetROMByte(offset);
    case ADDRTYPE_IO:
        //TODO: What to do if okExec == TRUE ?
        return GetPortByte(address);
    case ADDRTYPE_DENY:
        //TODO: Exception processing
        return 0;
    }

    ASSERT(FALSE);  // If we get here, addrtype has an invalid value
    return 0;
}
Example #3
// Read word from memory for debugger
WORD CMemoryController::GetWordView(WORD address, BOOL okHaltMode, BOOL okExec, BOOL* pValid)
{
    WORD offset;
    int addrtype = TranslateAddress(address, okHaltMode, okExec, &offset, TRUE);

    switch (addrtype)
    {
    case ADDRTYPE_RAM0:
    case ADDRTYPE_RAM1:
    case ADDRTYPE_RAM2:
        *pValid = TRUE;
        return m_pBoard->GetRAMWord((addrtype & ADDRTYPE_MASK_RAM), offset);
    case ADDRTYPE_RAM12:
        *pValid = TRUE;
        return MAKEWORD(
            m_pBoard->GetRAMByte(1, offset / 2),
            m_pBoard->GetRAMByte(2, offset / 2));
    case ADDRTYPE_ROMCART1:
        *pValid = TRUE;
        return m_pBoard->GetROMCartWord(1, offset);
    case ADDRTYPE_ROMCART2:
        *pValid = TRUE;
        return m_pBoard->GetROMCartWord(2, offset);
    case ADDRTYPE_ROM:
        *pValid = TRUE;
        return m_pBoard->GetROMWord(offset);
    case ADDRTYPE_IO:
        *pValid = FALSE;  // I/O port, not memory
        return 0;
    case ADDRTYPE_DENY:
        *pValid = TRUE;  // This memory is inaccessible for reading
        return 0;
    }

    ASSERT(FALSE);  // If we get here, addrtype has an invalid value
    return 0;
}
Example #4
void CMemoryController::SetByte(WORD address, BOOL okHaltMode, BYTE byte)
{
    WORD offset;
    int addrtype = TranslateAddress(address, okHaltMode, FALSE, &offset);

    switch (addrtype)
    {
    case ADDRTYPE_RAM0:
    case ADDRTYPE_RAM1:
    case ADDRTYPE_RAM2:
        m_pBoard->SetRAMByte((addrtype & ADDRTYPE_MASK_RAM), offset, byte);
        return;
    case ADDRTYPE_RAM12:
        if ((offset & 1) == 0)
            m_pBoard->SetRAMByte(1, offset / 2, byte);
        else
            m_pBoard->SetRAMByte(2, offset / 2, byte);
        return;
    
    case ADDRTYPE_ROMCART1:
    case ADDRTYPE_ROMCART2:
    case ADDRTYPE_ROM:
        // Nothing to do: writing to ROM
        return;
    case ADDRTYPE_IO:
        SetPortByte(address, byte);
        return;
    case ADDRTYPE_DENY:
        //TODO: Exception processing
        return;
    }

    ASSERT(FALSE);  // If we get here, addrtype has an invalid value
}
Example #5
void ControlFlowAnalysis::BasicBlockStarts()
{
    _blockStarts.insert(_base);
    bool bSkipFilling = false;
    for(uint i = 0; i < _size;)
    {
        uint addr = _base + i;
        if(_cp.Disassemble(addr, TranslateAddress(addr), MAX_DISASM_BUFFER))
        {
            if(bSkipFilling)   //handle filling skip mode
            {
                if(!_cp.IsFilling())   //do nothing until the filling stopped
                {
                    bSkipFilling = false;
                    _blockStarts.insert(addr);
                }
            }
            else if(_cp.InGroup(CS_GRP_RET) || _cp.GetId() == X86_INS_INT3)  //RET/INT3 break control flow
            {
                bSkipFilling = true; //skip INT3/NOP/whatever filling bytes (those are not part of the control flow)
            }
            else if(_cp.InGroup(CS_GRP_JUMP) || _cp.IsLoop())   //branches
            {
                uint dest1 = GetReferenceOperand();
                uint dest2 = 0;
                if(_cp.GetId() != X86_INS_JMP)    //unconditional jump
                    dest2 = addr + _cp.Size();

                if(!dest1 && !dest2)  //TODO: better code for this (make sure absolutely no filling is inserted)
                    bSkipFilling = true;
                if(dest1)
                    _blockStarts.insert(dest1);
                if(dest2)
                    _blockStarts.insert(dest2);
            }
            else if(_cp.InGroup(CS_GRP_CALL))
            {
                uint dest1 = GetReferenceOperand();
                if(dest1)
                {
                    _blockStarts.insert(dest1);
                    _functionStarts.insert(dest1);
                }
            }
            else
            {
                uint dest1 = GetReferenceOperand();
                if(dest1)
                    _blockStarts.insert(dest1);
            }
            i += _cp.Size();
        }
        else
            i++;
    }
}
Example #6
inline void ReadFromHardware(T &_var, const u32 em_address, const u32 effective_address, Memory::XCheckTLBFlag flag)
{
	// TODO: Figure out the fastest order of tests for both read and write (they are probably different).
	if ((em_address & 0xC8000000) == 0xC8000000)
	{
		if (em_address < 0xcc000000)
			_var = EFB_Read(em_address);
		else
			mmio_mapping->Read(em_address, &_var);
	}
	else if (((em_address & 0xF0000000) == 0x80000000) ||
		((em_address & 0xF0000000) == 0xC0000000) ||
		((em_address & 0xF0000000) == 0x00000000))
	{
		_var = bswap((*(const T*)&m_pRAM[em_address & RAM_MASK]));
	}
	else if (m_pEXRAM && (((em_address & 0xF0000000) == 0x90000000) ||
		((em_address & 0xF0000000) == 0xD0000000) ||
		((em_address & 0xF0000000) == 0x10000000)))
	{
		_var = bswap((*(const T*)&m_pEXRAM[em_address & EXRAM_MASK]));
	}
	else if ((em_address >= 0xE0000000) && (em_address < (0xE0000000+L1_CACHE_SIZE)))
	{
		_var = bswap((*(const T*)&m_pL1Cache[em_address & L1_CACHE_MASK]));
	}
	else if ((bFakeVMEM && ((em_address &0xF0000000) == 0x70000000)) ||
		(bFakeVMEM && ((em_address &0xF0000000) == 0x40000000)))
	{
		// fake VMEM
		_var = bswap((*(const T*)&m_pFakeVMEM[em_address & FAKEVMEM_MASK]));
	}
	else
	{
		// MMU
		u32 tlb_addr = TranslateAddress(em_address, flag);
		if (tlb_addr == 0)
		{
			if (flag == FLAG_READ)
			{
				GenerateDSIException(em_address, false);
			}
		}
		else
		{
			_var = bswap((*(const T*)&m_pRAM[tlb_addr & RAM_MASK]));
		}
	}
}
Example #7
void LinearAnalysis::PopulateReferences()
{
    //linear immediate reference scan (call <addr>, push <addr>, mov [somewhere], <addr>)
    for(duint i = 0; i < _size;)
    {
        duint addr = _base + i;
        if(_cp.Disassemble(addr, TranslateAddress(addr), MAX_DISASM_BUFFER))
        {
            duint ref = GetReferenceOperand();
            if(ref)
                _functions.push_back({ ref, 0 });
            i += _cp.Size();
        }
        else
            i++;
    }
    SortCleanup();
}
Example #8
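// Creates a TCP socket and binds it to the address/port held by this object;
// on any failure the error text is recorded in strErrorMessage_ and false is returned.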
bool SocketObj::Bind() {
  Close();
  sockFD_ = socket(AF_INET, SOCK_STREAM, 0);
  if (sockFD_ == -1) {
    strErrorMessage_ = "can't bind, because sockFD_ = -1";
    return false;
  }
  struct sockaddr_in sAddr;
  memset(&sAddr, 0, sizeof(sAddr));
  sAddr.sin_addr.s_addr = TranslateAddress();
  sAddr.sin_family = AF_INET;
  sAddr.sin_port = htons(iPort_);
  if (bind(sockFD_, (struct sockaddr*)&sAddr, sizeof(sAddr)) != 0) {
    Close();
    strErrorMessage_ = "bind != 0";
    return false;
  } 
  return true;
}
Example #9
void LinearAnalysis::AnalyseFunctions()
{
    for(size_t i = 0; i < _functions.size(); i++)
    {
        FunctionInfo & function = _functions[i];
        if(function.end)  //skip already-analysed functions
            continue;
        duint maxaddr = _base + _size;
        if(i < _functions.size() - 1)
            maxaddr = _functions[i + 1].start;

        duint end = FindFunctionEnd(function.start, maxaddr);
        if(end)
        {
            if(_cp.Disassemble(end, TranslateAddress(end), MAX_DISASM_BUFFER))
                function.end = end + _cp.Size() - 1;
            else
                function.end = end;
        }
    }
}
Example #10
void ControlFlowAnalysis::BasicBlocks()
{
    for(auto i = _blockStarts.begin(); i != _blockStarts.end(); ++i)
    {
        uint start = *i;
        if(!IsValidAddress(start))
            continue;
        uint nextStart = _base + _size;
        auto next = std::next(i);
        if(next != _blockStarts.end())
            nextStart = *next;
        for(uint addr = start, prevaddr = 0; addr < _base + _size;)
        {
            prevaddr = addr;
            if(_cp.Disassemble(addr, TranslateAddress(addr), MAX_DISASM_BUFFER))
            {
                if(_cp.InGroup(CS_GRP_RET) || _cp.GetId() == X86_INS_INT3)
                {
                    insertBlock(BasicBlock(start, addr, 0, 0)); //leaf block
                    break;
                }
                else if(_cp.InGroup(CS_GRP_JUMP) || _cp.IsLoop())
                {
                    uint dest1 = GetReferenceOperand();
                    uint dest2 = _cp.GetId() != X86_INS_JMP ? addr + _cp.Size() : 0;
                    insertBlock(BasicBlock(start, addr, dest1, dest2));
                    insertParent(dest1, start);
                    insertParent(dest2, start);
                    break;
                }
                addr += _cp.Size();
            }
            else
                addr++;
            if(addr == nextStart)   //special case handling overlapping blocks
            {
                insertBlock(BasicBlock(start, prevaddr, 0, nextStart));
                insertParent(nextStart, start);
                break;
            }
        }
    }
    _blockStarts.clear();

#ifdef _WIN64
    int count = 0;
    EnumerateFunctionRuntimeEntries64([&](PRUNTIME_FUNCTION Function)
    {
        const uint funcAddr = _moduleBase + Function->BeginAddress;
        const uint funcEnd = _moduleBase + Function->EndAddress;

        // If within limits...
        if(funcAddr >= _base && funcAddr < _base + _size)
            _functionStarts.insert(funcAddr);
        count++;
        return true;
    });
    dprintf("%u functions from the exception directory...\n", count);
#endif // _WIN64

    dprintf("%u basic blocks, %u function starts detected...\n", _blocks.size(), _functionStarts.size());
}
Example #11
BOOL
CPCIDisk::SetupDMA(
    PSG_BUF pSgBuf,
    DWORD dwSgCount,
    BOOL fRead
    )
{
    DWORD dwAlignMask = m_dwDMAAlign - 1;
    DWORD dwPageMask = UserKInfo[KINX_PAGESIZE] - 1;

    DWORD iPage = 0, iPFN, iBuffer;
    BOOL fUnalign = FALSE;

    DMA_ADAPTER_OBJECT Adapter;

    Adapter.ObjectSize = sizeof(DMA_ADAPTER_OBJECT);
    Adapter.InterfaceType = (INTERFACE_TYPE)m_pPort->m_pController->m_dwi.dwInterfaceType;
    Adapter.BusNumber = m_pPort->m_pController->m_dwi.dwBusNumber;

    DEBUGMSG(ZONE_DMA, (_T(
        "Atapi!CPCIDisk::SetupDMA> Request(%s), SgCount(%d)\r\n"
        ), fRead ? (_T("Read")) : (_T("Write")), dwSgCount));

#ifndef ST202T_SATA
    // disable bus master
    WriteBMCommand(0);
#endif

    if (!m_pPRD) {
        m_pPRD = (PDMATable)HalAllocateCommonBuffer(&Adapter,
            UserKInfo[KINX_PAGESIZE], &m_pPRDPhys, FALSE);
        if (!m_pPRD) {
            goto ExitFailure;
        }
    }

    // m_pPhysList tracks pages used for DMA buffers when the scatter/gather
    // buffer is unaligned
    if (!m_pPhysList) {
        m_pPhysList = (PPhysTable)VirtualAlloc(m_pStartMemory, UserKInfo[KINX_PAGESIZE], MEM_COMMIT, PAGE_READWRITE);
        if (!m_pPhysList) {
            goto ExitFailure;
        }
        // allocate the minimum number of fixed pages
        for (DWORD i = 0; i < MIN_PHYS_PAGES; i++) {
            PHYSICAL_ADDRESS PhysicalAddress = {0};
            m_pPhysList[i].pVirtualAddress = (LPBYTE)HalAllocateCommonBuffer(&Adapter,
                UserKInfo[KINX_PAGESIZE], &PhysicalAddress, FALSE);
            m_pPhysList[i].pPhysicalAddress = (LPBYTE)PhysicalAddress.QuadPart;
            if (!m_pPhysList[i].pVirtualAddress) {
                goto ExitFailure;
            }
        }
    }
    m_dwPhysCount = 0;

    // m_pSGCopy tracks the mapping between scatter/gather buffers and DMA
    // buffers when the scatter/gather buffer is unaligned and we are reading,
    // so we can copy the read data back to the scatter/gather buffer; when the
    // scatter/gather buffer is aligned, m_pSGCopy tracks the scatter/gather
    // buffers of a particular DMA transfer, so we can unlock the buffers at
    // completion

    if (!m_pSGCopy) {
        m_pSGCopy = (PSGCopyTable)VirtualAlloc(
            m_pStartMemory + UserKInfo[KINX_PAGESIZE],
            UserKInfo[KINX_PAGESIZE],
            MEM_COMMIT,
            PAGE_READWRITE);
        if (!m_pSGCopy) {
            goto ExitFailure;
        }
    }
    m_dwSGCount = 0;

    if (!m_pPFNs) {
        m_pPFNs = (PDWORD)VirtualAlloc(
            m_pStartMemory + 2*UserKInfo[KINX_PAGESIZE],
            UserKInfo[KINX_PAGESIZE],
            MEM_COMMIT,
            PAGE_READWRITE);
        if (!m_pPFNs) {
            goto ExitFailure;
        }
    }

    // determine whether any buffer address or buffer length is unaligned
    for (iBuffer = 0; iBuffer < dwSgCount; iBuffer++) {
        if (
            ((DWORD)pSgBuf[iBuffer].sb_buf & dwAlignMask) ||
            ((DWORD)pSgBuf[iBuffer].sb_len & dwAlignMask)
        ) {
            fUnalign = TRUE;
            break;
        }
    }

    if (fUnalign) {

        DWORD dwCurPageOffset = 0;

        for (iBuffer = 0; iBuffer < dwSgCount; iBuffer++) {

            LPBYTE pBuffer = (LPBYTE)pSgBuf[iBuffer].sb_buf;

            DWORD dwBufferLeft = pSgBuf[iBuffer].sb_len;
            while (dwBufferLeft) {

                DWORD dwBytesInCurPage = UserKInfo[KINX_PAGESIZE] - dwCurPageOffset;
                DWORD dwBytesToTransfer = (dwBufferLeft > dwBytesInCurPage) ? dwBytesInCurPage : dwBufferLeft;

                // allocate a new page, if necessary
                if ((dwCurPageOffset == 0) && (m_dwPhysCount >= MIN_PHYS_PAGES)) {
                    PHYSICAL_ADDRESS PhysicalAddress = {0};
                    m_pPhysList[m_dwPhysCount].pVirtualAddress = (LPBYTE)HalAllocateCommonBuffer(
                        &Adapter, UserKInfo[KINX_PAGESIZE], &PhysicalAddress, FALSE);
                    m_pPhysList[m_dwPhysCount].pPhysicalAddress = (LPBYTE)PhysicalAddress.QuadPart;
                    if (!m_pPhysList[m_dwPhysCount].pVirtualAddress) {
                        goto ExitFailure;
                    }
                }

                if (fRead) {

                    // prepare a scatter/gather copy entry on read, so we can
                    // copy data from the DMA buffer to the scatter/gather
                    // buffer after this DMA transfer is complete

                    m_pSGCopy[m_dwSGCount].pSrcAddress = m_pPhysList[m_dwPhysCount].pVirtualAddress + dwCurPageOffset;
                    m_pSGCopy[m_dwSGCount].pDstAddress = pBuffer;
                    m_pSGCopy[m_dwSGCount].dwSize = dwBytesToTransfer;
                    m_dwSGCount++;

                }
                else {
                    memcpy(m_pPhysList[m_dwPhysCount].pVirtualAddress + dwCurPageOffset, pBuffer, dwBytesToTransfer);
                }

                // if this buffer is larger than the space remaining on the page,
                // then finish processing this page by resetting dwCurPageOffset to 0

                if (dwBufferLeft >= dwBytesInCurPage) {
                    dwCurPageOffset = 0;
                }
                else {
                    dwCurPageOffset += dwBytesToTransfer;
                }

                // have we finished a page? (i.e., offset was reset or this is the last buffer)
                if ((dwCurPageOffset == 0) || (iBuffer == (dwSgCount - 1))) {
                    // add this to the PRD table
                    m_pPRD[m_dwPhysCount].physAddr = (DWORD)m_pPhysList[m_dwPhysCount].pPhysicalAddress;
                    m_pPRD[m_dwPhysCount].size = dwCurPageOffset ? (USHORT)dwCurPageOffset : (USHORT)UserKInfo[KINX_PAGESIZE];
                    m_pPRD[m_dwPhysCount].EOTpad = 0;
                    m_dwPhysCount++;
                }

                // update transfer
                dwBufferLeft -= dwBytesToTransfer;
                pBuffer += dwBytesToTransfer;
           }
        }

        m_pPRD[m_dwPhysCount - 1].EOTpad = 0x8000;

    }
    else {

        DWORD dwTotalBytes = 0;

        for (iBuffer = 0; iBuffer < dwSgCount; iBuffer++) {

            LPBYTE pBuffer = (LPBYTE)pSgBuf[iBuffer].sb_buf;

            // determine the number of bytes remaining to be placed in PRD
            dwTotalBytes = pSgBuf[iBuffer].sb_len;
            if (!LockPages (
                pBuffer,
                dwTotalBytes,
                m_pPFNs,
                fRead ? LOCKFLAG_WRITE : LOCKFLAG_READ)
            ) {
                goto ExitFailure;
            }

            // add a scatter/gather copy entry for the area we lock, so that
            // we can unlock it when we are finished
            m_pSGCopy[m_dwSGCount].pSrcAddress = pBuffer;
            m_pSGCopy[m_dwSGCount].pDstAddress = 0;
            m_pSGCopy[m_dwSGCount].dwSize = dwTotalBytes;
            m_dwSGCount++;

            iPFN = 0;
            while (dwTotalBytes) {

                DWORD dwBytesToTransfer = UserKInfo[KINX_PAGESIZE];

                if ((DWORD)pBuffer & dwPageMask) {
                    // the buffer is not page aligned; transfer only up to the
                    // next page boundary
                    dwBytesToTransfer = UserKInfo[KINX_PAGESIZE] - ((DWORD)pBuffer & dwPageMask);
                }

                if (dwTotalBytes < dwBytesToTransfer) {
                    // use what remains
                    dwBytesToTransfer = dwTotalBytes;
                }

                m_pPRD[iPage].physAddr = (m_pPFNs[iPFN] << UserKInfo[KINX_PFN_SHIFT]) + ((DWORD)pBuffer & dwPageMask);

                if (!TranslateAddress(&m_pPRD[iPage].physAddr)) {
                    goto ExitFailure;
                }

                m_pPRD[iPage].size = (USHORT)dwBytesToTransfer;
                m_pPRD[iPage].EOTpad = 0;

                iPage++;
                iPFN++;

                // update transfer
                pBuffer += dwBytesToTransfer;
                dwTotalBytes -= dwBytesToTransfer;
            }
        }

        m_dwPhysCount = 0;
        m_pPRD[iPage-1].EOTpad = 0x8000;
    }

    return TRUE;

ExitFailure:

    DEBUGCHK(0);

    // clean up
    // FreeDMABuffers();

    return FALSE;
}
Example #12
inline void ReadFromHardware(T &_var, const u32 em_address, const u32 effective_address, Memory::XCheckTLBFlag flag)
{
	// TODO: Figure out the fastest order of tests for both read and write (they are probably different).
	if ((em_address & 0xC8000000) == 0xC8000000)
	{
		if (em_address < 0xcc000000)
			_var = EFB_Read(em_address);
		else
			_var = mmio_mapping->Read<T>(em_address);
	}
	else if (((em_address & 0xF0000000) == 0x80000000) ||
		((em_address & 0xF0000000) == 0xC0000000) ||
		((em_address & 0xF0000000) == 0x00000000))
	{
		_var = bswap((*(const T*)&m_pRAM[em_address & RAM_MASK]));
	}
	else if (m_pEXRAM && (((em_address & 0xF0000000) == 0x90000000) ||
		((em_address & 0xF0000000) == 0xD0000000) ||
		((em_address & 0xF0000000) == 0x10000000)))
	{
		_var = bswap((*(const T*)&m_pEXRAM[em_address & EXRAM_MASK]));
	}
	else if ((em_address >= 0xE0000000) && (em_address < (0xE0000000+L1_CACHE_SIZE)))
	{
		_var = bswap((*(const T*)&m_pL1Cache[em_address & L1_CACHE_MASK]));
	}
	else if ((bFakeVMEM && ((em_address &0xF0000000) == 0x70000000)) ||
		(bFakeVMEM && ((em_address &0xF0000000) == 0x40000000)))
	{
		// fake VMEM
		_var = bswap((*(const T*)&m_pFakeVMEM[em_address & FAKEVMEM_MASK]));
	}
	else
	{
		// MMU
		// Handle loads that cross page boundaries (ewwww)
		if (sizeof(T) > 1 && (em_address & (HW_PAGE_SIZE - 1)) > HW_PAGE_SIZE - sizeof(T))
		{
			_var = 0;
			// This could be unaligned down to the byte level... hopefully this is rare, so doing it this
			// way isn't too terrible.
			// TODO: floats on non-word-aligned boundaries should technically cause alignment exceptions.
			// Note that "word" means 32-bit, so paired singles or doubles might still be 32-bit aligned!
			u32 tlb_addr = TranslateAddress(em_address, flag);
			for (u32 addr = em_address; addr < em_address + sizeof(T); addr++, tlb_addr++)
			{
				// Start of the new page... translate the address again!
				if (!(addr & (HW_PAGE_SIZE-1)))
					tlb_addr = TranslateAddress(addr, flag);
				// Important: we need to generate the DSI on the first access that caused the fault, NOT
				// the address of the start of the load.
				if (tlb_addr == 0)
				{
					if (flag == FLAG_READ)
					{
						GenerateDSIException(addr, false);
						break;
					}
				}
				else
				{
					_var <<= 8;
					_var |= m_pRAM[tlb_addr & RAM_MASK];
				}
			}
		}
		else
		{
			u32 tlb_addr = TranslateAddress(em_address, flag);
			if (tlb_addr == 0)
			{
				if (flag == FLAG_READ)
				{
					GenerateDSIException(em_address, false);
				}
			}
			else
			{
				_var = bswap((*(const T*)&m_pRAM[tlb_addr & RAM_MASK]));
			}
		}
	}
}
Example #13
int KBasicDecoder::Decode(ATOM typ, const void * pValue, char * szBuffer, int nBufferSize)
{
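	// The raw 32-bit argument is interpreted according to the atom type and
	// formatted into szBuffer; each recognized type returns 4 (presumably the
	// argument size in bytes), while unrecognized types fall through to return 0.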
	unsigned data = * (unsigned *) pValue;

//	strcpy(szBuffer, m_pAtomTable->GetAtomName(typ));
//	szBuffer += strlen(szBuffer);

    if ( typ==atom_char )
    {
		* szBuffer++ = '\'';
        * szBuffer++ = (char) data;
        * szBuffer++ = '\'';
        * szBuffer++ = 0;
        
		return 4;
	}

	if ( typ==atom_BYTE )
	{
		wsprintf(szBuffer, "%d", data & 0xFF);
		return 4;
	}

	if ( typ==atom_short )
	{
		data &= 0xFFFF;

		if ( data & 0x8000 )
			data |= 0xFFFF0000;

		wsprintf(szBuffer, "%d", data);

		return 4;
	}

	if ( typ==atom_WORD )
	{
		wsprintf(szBuffer, "%d", data & 0xFFFF);
		
		return 4;
	}

	if ( (typ==atom_long) || (typ==atom_int) )
	{
		wsprintf(szBuffer, "%d", data);
	
		return 4;
	}

	if ( (typ==atom_DWORD) || (typ==atom_UINT) )
	{
		wsprintf(szBuffer, "%u", data);

		return 4;
	}

	if ( typ==atom_D )
	{
		wsprintf(szBuffer, "0x%x", data);

		return 4;
	}

	if ( (typ==atom_LPVOID) || (typ==atom_this) )
	{
		if ( data==0 )
			strcpy(szBuffer, "NULL");
		else
			wsprintf(szBuffer, "0x%x", data);

		return 4;
	}

	if ( typ==atom_COLORREF )
	{
		if ( data==0 )
			strcpy(szBuffer, "BLACK");
		else if ( data==0xFFFFFF )
			strcpy(szBuffer, "WHITE");
		else
			wsprintf(szBuffer, "%06x", data);

		return 4;
	}

	if ( typ==atom_LPSTR )
	{
		if ( data==0 )
			strcpy(szBuffer, "NULL");
		else
		{
			* szBuffer='"';
			strcpy(szBuffer+1, (const char *) data);
			strcat(szBuffer, "\"");
		}

		return 4;
	}
	
	if ( typ==atom_BOOL )
	{
		if ( data==0 )
			strcpy(szBuffer, "FALSE");
		else if ( data==1 )
			strcpy(szBuffer, "TRUE");
		else
			wsprintf(szBuffer, "%d", data);

		return 4;
	}

	if ( (typ==atom_HDC)    || (typ==atom_HGDIOBJ)  || (typ==atom_HPEN) ||
		 (typ==atom_HBRUSH) || (typ==atom_HPALETTE) || (typ==atom_HRGN) ||
		 (typ==atom_HFONT) )
	{         
		wsprintf(szBuffer, "%x", data);
        
		return 4;
	}

	if ( typ==atom_returnaddr )
	{
		TranslateAddress(data, szBuffer);

		return 4;
	}

	// unhandled
	return 0;
}
Example #14
void LinearPass::AnalysisWorker(duint Start, duint End, BBlockArray* Blocks)
{
    Capstone disasm;

    duint blockBegin = Start;        // BBlock starting virtual address
    duint blockEnd = 0;              // BBlock ending virtual address

    bool blockPrevPad = false;       // Indicator if the last instruction was padding
    BasicBlock* lastBlock = nullptr; // Avoid an expensive call to std::vector::back()

    int insnCount = 0;               // Temporary number of instructions counted for a block

    for(duint i = Start; i < End;)
    {
        if(!disasm.Disassemble(i, TranslateAddress(i), int(End - i)))
        {
            // Skip instructions that can't be determined
            i++;
            continue;
        }

        // Increment counters
        i += disasm.Size();
        blockEnd = i;
        insnCount++;

        // The basic block ends here if it is a branch
        bool call = disasm.InGroup(CS_GRP_CALL);    // CALL
        bool jmp = disasm.InGroup(CS_GRP_JUMP);     // JUMP
        bool ret = disasm.InGroup(CS_GRP_RET);      // RETURN
        bool padding = disasm.IsFilling();          // INSTRUCTION PADDING

        if(padding)
        {
            // Padding instructions are treated differently: they are all created
            // as their own separate block for more analysis later.
            duint realBlockEnd = blockEnd - disasm.Size();

            if((realBlockEnd - blockBegin) > 0)
            {
                // The next line terminates the BBlock before the INT instruction.
                // Early termination, faked as an indirect JMP. Rare case.
                lastBlock = CreateBlockWorker(Blocks, blockBegin, realBlockEnd, false, false, false, false);
                lastBlock->SetFlag(BASIC_BLOCK_FLAG_PREPAD);

                blockBegin = realBlockEnd;
                lastBlock->InstrCount = insnCount;
                insnCount = 0;
            }
        }

        if(call || jmp || ret || padding)
        {
            // Was this a padding instruction?
            if(padding && blockPrevPad)
            {
                // Append it to the previous block
                lastBlock->VirtualEnd = blockEnd;
            }
            else
            {
                // Otherwise use the default route: create a new entry
                auto block = lastBlock = CreateBlockWorker(Blocks, blockBegin, blockEnd, call, jmp, ret, padding);

                // Counters
                lastBlock->InstrCount = insnCount;
                insnCount = 0;

                if(!padding)
                {
                    // Check if absolute jump, regardless of operand
                    if(disasm.GetId() == X86_INS_JMP)
                        block->SetFlag(BASIC_BLOCK_FLAG_ABSJMP);

                    // Figure out the operand type(s)
                    const auto & operand = disasm.x86().operands[0];

                    if(operand.type == X86_OP_IMM)
                    {
                        // Branch target immediate
                        block->Target = (duint)operand.imm;
                    }
                    else
                    {
                        // Indirects (no operand, register, or memory)
                        block->SetFlag(BASIC_BLOCK_FLAG_INDIRECT);

                        if(operand.type == X86_OP_MEM &&
                                operand.mem.base == X86_REG_RIP &&
                                operand.mem.index == X86_REG_INVALID &&
                                operand.mem.scale == 1)
                        {
                            /*
                            block->SetFlag(BASIC_BLOCK_FLAG_INDIRPTR);
                            block->Target = (duint)operand.mem.disp;
                            */
                        }
                    }
                }
            }

            // Reset the loop variables
            blockBegin = i;
            blockPrevPad = padding;
        }
    }
}