// Finish a DMA transfer for the current scatter/gather operation.
//
// For each tracked S/G region: entries with a non-NULL pDstAddress are
// bounce buffers for unaligned regions, whose contents are copied back to
// the caller's buffer; entries with a NULL pDstAddress were page-locked in
// place and are unlocked here.  Finally, every dynamically allocated
// physical page beyond the fixed first MIN_PHYS_PAGES pages is released.
//
// @pSgBuf / @dwSgCount / @fRead describe the completed request; this
// implementation operates on the saved member state (m_pSGCopy,
// m_pPhysList) rather than these parameters.
//
// Always returns TRUE.
BOOL CPCIDisk::CompleteDMA(PSG_BUF pSgBuf, DWORD dwSgCount, BOOL fRead)
{
    DWORD dwIdx;

    for (dwIdx = 0; dwIdx < m_dwSGCount; dwIdx++) {
        if (m_pSGCopy[dwIdx].pDstAddress) {
            // Unaligned region that was double-buffered; copy the data
            // back into the original scatter/gather buffer.
            memcpy(
                m_pSGCopy[dwIdx].pDstAddress,
                m_pSGCopy[dwIdx].pSrcAddress,
                m_pSGCopy[dwIdx].dwSize);
        }
        else {
            // Region was locked in place for the transfer; release it.
            UnlockPages(m_pSGCopy[dwIdx].pSrcAddress, m_pSGCopy[dwIdx].dwSize);
        }
    }

    // Free all but the first MIN_PHYS_PAGES pages; those are permanent.
    for (dwIdx = MIN_PHYS_PAGES; dwIdx < m_dwPhysCount; dwIdx++) {
        FreePhysMem(m_pPhysList[dwIdx].pVirtualAddress);
    }

    return TRUE;
}
// Abort an in-flight DMA transfer.
//
// Stops the bus-master engine, unlocks every S/G region that was
// page-locked for the transfer (entries with a NULL pDstAddress; entries
// with a destination address are bounce buffers and hold no page lock),
// and frees all dynamically allocated physical pages beyond the fixed
// first MIN_PHYS_PAGES pages.  No bounce-buffer copy-back is performed
// since the transfer did not complete.
//
// Always returns FALSE.
BOOL CPCIDisk::AbortDMA( )
{
    DWORD dwIdx;

    // Halt the bus-master engine before touching any buffers.
    WriteBMCommand(0);

    for (dwIdx = 0; dwIdx < m_dwSGCount; dwIdx++) {
        if (!m_pSGCopy[dwIdx].pDstAddress) {
            // This region was locked in place; release it.
            UnlockPages(m_pSGCopy[dwIdx].pSrcAddress, m_pSGCopy[dwIdx].dwSize);
        }
    }

    // Free all but the first MIN_PHYS_PAGES pages; those are permanent.
    for (dwIdx = MIN_PHYS_PAGES; dwIdx < m_dwPhysCount; dwIdx++) {
        FreePhysMem(m_pPhysList[dwIdx].pVirtualAddress);
    }

    return FALSE;
}
// // Perform the actual DMA copy // WARNING!! This function assumes the physical memory is contiguous. // No check is performed for performance improvement. // Buffers passed-in come from CMEM and DISPLAY drivers. // DWORD SdmaPx::Copy( LPVOID pSource, LPVOID pDestination, DWORD dwClientIdx ) { DWORD dwRet = 0; DWORD dwCause, dwStatus; DWORD paSrc, paDst; BYTE ffCached = 0; DmaConfigInfo_t dmaSettings = m_SdmaPxClient[dwClientIdx].GetConfig(); DWORD dwDataLength = m_SdmaPxClient[dwClientIdx].GetDataLength(); DWORD dwElementCount = m_SdmaPxClient[dwClientIdx].GetElementCount(); DWORD dwFrameCount = m_SdmaPxClient[dwClientIdx].GetFrameCount(); ASSERTMSG(L"Client must be configured before Copy I/O control is called !!!\n", m_SdmaPxClient[dwClientIdx].IsClientConfigured()); // Configure SDMA for specific client // Need to call this first for length // memset() on DmaConfigInfo_t not needed as done in the SdmaPxClient::Init() method. // m_SdmaPxClient[dwClientIdx].GetConfig(&dmaSettings, &dwDataLength); if(DmaConfigure(m_hDmaChannel, &dmaSettings, 0, &m_dmaInfo) != TRUE) { ERRORMSG(ZONE_ERROR, (TEXT("ERROR! Unable to configure DMA for client\r\n"))); goto cleanUp; } // flush cache if necessary if (ISUNCACHEDADDRESS(pSource) == FALSE) { ffCached |= SOURCE_CACHED; } if (ISUNCACHEDADDRESS(pDestination) == FALSE) { ffCached |= DESTINATION_CACHED; } if (ffCached & (SOURCE_CACHED | DESTINATION_CACHED)) { FlushCache(pSource, pDestination, dwDataLength, ffCached); } // Retrieve base physical address of buffer to be copied and READ lock the pages on the length. if(LockPages( (LPVOID)pSource, (DWORD)dwDataLength, m_rgPFNsrc, LOCKFLAG_READ) == FALSE) { ERRORMSG(ZONE_ERROR, (TEXT("LockPages call \"src\" failed. (error code=%d)\r\n"), GetLastError())); goto cleanUp; } // Not necessary to do the page shift on ARM platform as always 0. (ref. 
MSDN) // paSrc = (m_rgPFNsrc[0] << m_pageShift) + ((DWORD)pSource & m_pageMask); paSrc = m_rgPFNsrc[0] + ((DWORD)pSource & m_pageMask); // Retrieve base physical address of destination buffer and WRITE lock the pages on the length. if(LockPages( (LPVOID)pDestination, dwDataLength, m_rgPFNdst, LOCKFLAG_WRITE) == FALSE) { ERRORMSG(ZONE_ERROR, (TEXT("LockPages \"dest\" call failed. (error code=%d)\r\n"), GetLastError())); goto cleanUp; } // Not necessary to do the page shift on ARM platform as always 0. (ref. MSDN) // paDst = (m_rgPFNdst[0] << UserKInfo[KINX_PFN_SHIFT]) + ((DWORD)pDestination & m_pageMask); paDst = m_rgPFNdst[0] + ((DWORD)pDestination & m_pageMask); // Configure Dest and Src buffers for DMA. DmaSetSrcBuffer(&m_dmaInfo, (UINT8 *)pSource, paSrc); DmaSetDstBuffer(&m_dmaInfo, (UINT8 *)pDestination, paDst); // Watch out parameters in DmaSetElemenAndFrameCount. For e.g., shift right element count by 2 if you have bytes in input and you use 32bits element size. DmaSetElementAndFrameCount(&m_dmaInfo, dwElementCount, (UINT16)(dwFrameCount)); // start dma DmaStart(&m_dmaInfo); // wait until we hit the end of buffer // Wait for dma interrupt... dwCause = WaitForSingleObject(m_hEvent, DMA_IRQ_TIMEOUT); switch(dwCause) { case WAIT_OBJECT_0: { // Verify cause of interrupt was because we hit the end of block dwStatus = DmaGetStatus(&m_dmaInfo); if ((dwStatus & (dmaSettings.interrupts)) == 0) { ERRORMSG(ZONE_ERROR, (TEXT("Unexpected cause of interrupt\r\n"))); break; } DmaClearStatus(&m_dmaInfo, dwStatus); if (DmaInterruptDone(m_hDmaChannel) == FALSE) { ERRORMSG(ZONE_ERROR, (TEXT("ERROR! Unable to get status for dma interrupt\r\n"))); break; } // Do the "good" client job DmaStop(&m_dmaInfo); break; } default: RETAILMSG(ZONE_ERROR, (TEXT("ERROR! didn't receive DMA interrupt\r\n"))); break; } #if DEBUG_VERIFY_SDMA_COPY // // Beware!! Bring a lot of overhead and can lead to bad display rendering. 
// NKDbgPrintfW(L"verify memory\r\n"); if (memcmp(pSource, pDestination, dwDataLength) != 0) { NKDbgPrintfW(L"ERROR! memory doesn't match up\r\n"); DebugBreak(); goto cleanUp; } #endif dwRet = dwDataLength; // everything went fine obviously... cleanUp: UnlockPages((LPVOID)pSource, dwDataLength); UnlockPages((LPVOID)pDestination, dwDataLength); return dwRet; }