// Per-frame (VBlank-time) driver for the scanline effect: re-arms the HBlank
// DMA for the coming frame, or tears the effect down once a stop is pending.
void ScanlineEffect_InitHBlankDmaTransfer(void)
{
    switch (gScanlineEffect.state)
    {
    case 0:
        // Effect inactive; nothing to do.
        break;
    case 3:
        // Stop was requested: halt the HBlank DMA and flag the wave task to end.
        gScanlineEffect.state = 0;
        DmaStop(0);
        sShouldStopWaveTask = TRUE;
        break;
    default:
        DmaStop(0);
        // Re-arm DMA to copy to the dest register on each HBlank of the next
        // frame. HBlank DMA transfers do not occur during VBlank, so the first
        // transfer lands on the HBlank after the first scanline.
        DmaSet(0, gScanlineEffect.dmaSrcBuffers[gScanlineEffect.srcBuffer], gScanlineEffect.dmaDest, gScanlineEffect.dmaControl);
        // The first scanline's register value must be written by hand.
        gScanlineEffect.setFirstScanlineReg();
        // Flip to the other source buffer for the next frame.
        gScanlineEffect.srcBuffer ^= 1;
        break;
    }
}
// Halts the scanline effect: resets the state machine, stops the HBlank DMA,
// and destroys the wave task if one is still running (0xFF = no task).
void ScanlineEffect_Stop(void)
{
    gScanlineEffect.state = 0;
    DmaStop(0);

    if (gScanlineEffect.waveTaskId == 0xFF)
        return;

    DestroyTask(gScanlineEffect.waveTaskId);
    gScanlineEffect.waveTaskId = 0xFF;
}
// Starts (or stops) a block DMA transfer on the given SPU2 core.
//
// chan    - channel; low bit selects the core.
// mode    - low 2 bits select the direction (write / read / stop /
//           write-from); SD_TRANS_LOOP requests a looping transfer.
// iopaddr - IOP-side buffer address.
// size    - transfer size in bytes (halved when looping).
// ...     - for SD_TRANS_WRITE_FROM only: the start address (u8 *).
//
// Returns 0 on success, -1 on failure; for SD_TRANS_STOP, returns the
// result of DmaStop().
int sceSdBlockTrans(s16 chan, u16 mode, u8 *iopaddr, u32 size, ...)
{
	int core = chan & 1;
	int xfer_size = size;
	u8 *src_start;
	va_list ap;

	switch (mode & 3)
	{
		case SD_TRANS_WRITE:
			TransIntrData[core].mode = 0x100 | core;
			if (mode & SD_TRANS_LOOP)
			{
				// Looping transfers run as two half-size buffers.
				TransIntrData[core].mode |= SD_TRANS_LOOP << 8;
				xfer_size /= 2;
			}
			if (BlockTransWrite(iopaddr, xfer_size, core) >= 0)
				return 0;
			break;
		case SD_TRANS_READ:
			TransIntrData[core].mode = 0x300 | core;
			if (mode & SD_TRANS_LOOP)
			{
				TransIntrData[core].mode |= SD_TRANS_LOOP << 8;
				xfer_size /= 2;
			}
			if (BlockTransRead(iopaddr, xfer_size, chan, mode) >= 0)
				return 0;
			break;
		case SD_TRANS_STOP:
			return DmaStop(core);
		case SD_TRANS_WRITE_FROM:
			// Variadic argument carries the start address for this mode.
			va_start(ap, size);
			src_start = va_arg(ap, u8*);
			va_end(ap);
			TransIntrData[core].mode = 0x100 | core;
			if (mode & SD_TRANS_LOOP)
			{
				TransIntrData[core].mode |= SD_TRANS_LOOP << 8;
				xfer_size /= 2;
			}
			if (BlockTransWriteFrom(iopaddr, xfer_size, core, mode, src_start) >= 0)
				return 0;
			break;
	}
	return -1;
}
// // Perform the actual DMA copy // WARNING!! This function assumes the physical memory is contiguous. // No check is performed for performance improvement. // Buffers passed-in come from CMEM and DISPLAY drivers. // DWORD SdmaPx::Copy( LPVOID pSource, LPVOID pDestination, DWORD dwClientIdx ) { DWORD dwRet = 0; DWORD dwCause, dwStatus; DWORD paSrc, paDst; BYTE ffCached = 0; DmaConfigInfo_t dmaSettings = m_SdmaPxClient[dwClientIdx].GetConfig(); DWORD dwDataLength = m_SdmaPxClient[dwClientIdx].GetDataLength(); DWORD dwElementCount = m_SdmaPxClient[dwClientIdx].GetElementCount(); DWORD dwFrameCount = m_SdmaPxClient[dwClientIdx].GetFrameCount(); ASSERTMSG(L"Client must be configured before Copy I/O control is called !!!\n", m_SdmaPxClient[dwClientIdx].IsClientConfigured()); // Configure SDMA for specific client // Need to call this first for length // memset() on DmaConfigInfo_t not needed as done in the SdmaPxClient::Init() method. // m_SdmaPxClient[dwClientIdx].GetConfig(&dmaSettings, &dwDataLength); if(DmaConfigure(m_hDmaChannel, &dmaSettings, 0, &m_dmaInfo) != TRUE) { ERRORMSG(ZONE_ERROR, (TEXT("ERROR! Unable to configure DMA for client\r\n"))); goto cleanUp; } // flush cache if necessary if (ISUNCACHEDADDRESS(pSource) == FALSE) { ffCached |= SOURCE_CACHED; } if (ISUNCACHEDADDRESS(pDestination) == FALSE) { ffCached |= DESTINATION_CACHED; } if (ffCached & (SOURCE_CACHED | DESTINATION_CACHED)) { FlushCache(pSource, pDestination, dwDataLength, ffCached); } // Retrieve base physical address of buffer to be copied and READ lock the pages on the length. if(LockPages( (LPVOID)pSource, (DWORD)dwDataLength, m_rgPFNsrc, LOCKFLAG_READ) == FALSE) { ERRORMSG(ZONE_ERROR, (TEXT("LockPages call \"src\" failed. (error code=%d)\r\n"), GetLastError())); goto cleanUp; } // Not necessary to do the page shift on ARM platform as always 0. (ref. 
MSDN) // paSrc = (m_rgPFNsrc[0] << m_pageShift) + ((DWORD)pSource & m_pageMask); paSrc = m_rgPFNsrc[0] + ((DWORD)pSource & m_pageMask); // Retrieve base physical address of destination buffer and WRITE lock the pages on the length. if(LockPages( (LPVOID)pDestination, dwDataLength, m_rgPFNdst, LOCKFLAG_WRITE) == FALSE) { ERRORMSG(ZONE_ERROR, (TEXT("LockPages \"dest\" call failed. (error code=%d)\r\n"), GetLastError())); goto cleanUp; } // Not necessary to do the page shift on ARM platform as always 0. (ref. MSDN) // paDst = (m_rgPFNdst[0] << UserKInfo[KINX_PFN_SHIFT]) + ((DWORD)pDestination & m_pageMask); paDst = m_rgPFNdst[0] + ((DWORD)pDestination & m_pageMask); // Configure Dest and Src buffers for DMA. DmaSetSrcBuffer(&m_dmaInfo, (UINT8 *)pSource, paSrc); DmaSetDstBuffer(&m_dmaInfo, (UINT8 *)pDestination, paDst); // Watch out parameters in DmaSetElemenAndFrameCount. For e.g., shift right element count by 2 if you have bytes in input and you use 32bits element size. DmaSetElementAndFrameCount(&m_dmaInfo, dwElementCount, (UINT16)(dwFrameCount)); // start dma DmaStart(&m_dmaInfo); // wait until we hit the end of buffer // Wait for dma interrupt... dwCause = WaitForSingleObject(m_hEvent, DMA_IRQ_TIMEOUT); switch(dwCause) { case WAIT_OBJECT_0: { // Verify cause of interrupt was because we hit the end of block dwStatus = DmaGetStatus(&m_dmaInfo); if ((dwStatus & (dmaSettings.interrupts)) == 0) { ERRORMSG(ZONE_ERROR, (TEXT("Unexpected cause of interrupt\r\n"))); break; } DmaClearStatus(&m_dmaInfo, dwStatus); if (DmaInterruptDone(m_hDmaChannel) == FALSE) { ERRORMSG(ZONE_ERROR, (TEXT("ERROR! Unable to get status for dma interrupt\r\n"))); break; } // Do the "good" client job DmaStop(&m_dmaInfo); break; } default: RETAILMSG(ZONE_ERROR, (TEXT("ERROR! didn't receive DMA interrupt\r\n"))); break; } #if DEBUG_VERIFY_SDMA_COPY // // Beware!! Bring a lot of overhead and can lead to bad display rendering. 
// NKDbgPrintfW(L"verify memory\r\n"); if (memcmp(pSource, pDestination, dwDataLength) != 0) { NKDbgPrintfW(L"ERROR! memory doesn't match up\r\n"); DebugBreak(); goto cleanUp; } #endif dwRet = dwDataLength; // everything went fine obviously... cleanUp: UnlockPages((LPVOID)pSource, dwDataLength); UnlockPages((LPVOID)pDestination, dwDataLength); return dwRet; }