/* ******************************************************************************* ** ** \brief Locks the IDE access. ** ******************************************************************************* */ void GD_IDE_LockDevice( void ) { #undef __FUNCTION__ #define __FUNCTION__ "GD_IDE_LockDevice" GBOOL loop = GTRUE; DEBUG_Printf( 9, ( "enter: %s\n", __FUNCTION__ ) ); DEBUG_Printf( 6, ( "locking IDE device\n" ) ); while( loop ) { ///////////////////// RTOS_EnterCritical(); ///////////////////// if( !gd_ide_device_in_use ) { gd_ide_device_in_use = GTRUE; loop = GFALSE; RTOS_SchedulerLock(); } ///////////////////// RTOS_LeaveCritical(); ///////////////////// } DEBUG_Printf( 9, ( "leave: %s\n", __FUNCTION__ ) ); }
/* ******************************************************************************* ** ** \brief Release an IDE handle structure ** ** This function releases a given IDE handle structure and makes it available ** for later allocation calls. ** ** \param ide The gd_ide_handle_t structure to release ** ** \return ** - GFALSE in case of error, if ide does not point to a valid, in-use handle ** - GTRUE if ok ** ******************************************************************************* */ GBOOL GD_IDE_HandleRelease( GD_IDE_HANDLE_T* ide ) { #undef __FUNCTION__ #define __FUNCTION__ "GD_IDE_HandleRelease" U32 sreg; DEBUG_Printf( 9, ( "enter: %s\n", __FUNCTION__ ) ); DEBUG_Printf( 7, ( "releasing IDE handle\n" ) ); if( !ide || ide->id != GD_IDE_HANDLE_ID || !ide->inuse ) { DEBUG_Printf( 9, ( "leave: %s, result=%d\n", __FUNCTION__, GFALSE ) ); return( GFALSE ); } GD_EnterCritical( sreg ); ide->id = GD_IDE_HANDLE_ID; ide->inuse = 0; ide->use48bit = GFALSE; ide->master = GFALSE; ide->packet = GFALSE; ide->pioMode = 0; ide->dmaMode = 0; ide->sleepEnabled = GFALSE; GD_LeaveCritical( sreg ); DEBUG_Printf( 9, ( "leave: %s, result=%d\n", __FUNCTION__, GTRUE ) ); return( GTRUE ); }
/* ******************************************************************************* ** ** \brief Stop AUDIO data capturing ** ** This function stops the AUDIO capturing by setting the autoRestart ** flag to GFALSE; the AUDIO engine actually stops at the next finish ** callback. ** ** \param dmaRequest The DMA request data structure to be used ** ** \return ** - GD_ERR_INVALID_HANDLE in case of error, the given handle is not a valid ** GD_DMA_REQUEST_S handle ** - GD_OK if ok ** ******************************************************************************* */ GERR GD_DMA_AUDIO_CaptureStop( GD_DMA_REQUEST_S* dmaRequest ) { #undef __FUNCTION__ #define __FUNCTION__ "GD_DMA_AUDIO_CaptureStop" GERR result = GD_ERR_INVALID_HANDLE; U32 sreg; GD_DMA_REQUEST_S* requestP; DEBUG_Printf( 9, ( "enter: %s\n", __FUNCTION__ ) ); GD_EnterCritical( sreg ); if( !dmaRequest ) { // only channel 0 supports the auto-restart feature requestP = gd_dma_channel[0]; if( requestP && requestP->feature == GD_DMA_FEATURE_AUDIO_CAPTURE ) dmaRequest = requestP; } if( dmaRequest ) { DEBUG_Printf( 7, ( "disable the auto-restart of the AUDIO unit\n" ) ); result = GD_DMA_RequestSetAutoRestart( dmaRequest, GFALSE ); } GD_LeaveCritical( sreg ); DEBUG_Printf( 9, ( "leave: %s, result=0x%08x\n", __FUNCTION__, result ) ); return( result ); }
/* ******************************************************************************* ** ** \brief Prepare the AUDIO capture unit ** ** This function is used internally by the DMA driver to prepare the AUDIO ** capture unit; it enables the AUDIO unit and sets all required ** specific configuration parameters. ** ** \param dmaRequest The DMA request data structure to be used ** ******************************************************************************* */ static void GD_DMA_AUDIO_Prepare( GD_DMA_REQUEST_S* dmaRequest ) { #undef __FUNCTION__ #define __FUNCTION__ "GD_DMA_AUDIO_Prepare" GD_DMA_AUDIO_PARAMS_S* prepare_params; DEBUG_Printf( 9, ( "enter: %s\n", __FUNCTION__ ) ); prepare_params = (GD_DMA_AUDIO_PARAMS_S*)(dmaRequest->optData); GH_DMA_set_AudioConfig( 0 ); GH_DMA_set_AudioConfig_EXTERNAL_INPUT_SELECT( prepare_params->useExternalInput ); GH_DMA_set_AudioConfig_END_POSITION( prepare_params->endPosition ); GH_DMA_set_AudioConfig_DATA_INVERT( prepare_params->invertData ); GH_DMA_set_AudioConfig_BIT_CLOCK_INVERT( prepare_params->invertBitClock ); GH_DMA_set_AudioConfig_WORD_CLOCK_INVERT( prepare_params->invertWordClock ); GH_DMA_set_AudioConfig_LEFT_CHANNEL_ENABLE( prepare_params->enableLeftChannel ); GH_DMA_set_AudioConfig_RIGHT_CHANNEL_ENABLE( prepare_params->enableRightChannel ); GH_DMA_set_AudioConfig_NUMBER_OF_BITS( prepare_params->capture20BitMode ); GH_DMA_set_AudioConfig_MSB_SWITCH( prepare_params->msbFirst ); GH_DMA_set_AudioConfig_ENABLE( 1 ); DEBUG_Printf( 9, ( "leave: %s\n", __FUNCTION__ ) ); return; }
/* ******************************************************************************* ** ** \brief Internal block buffer report function ** ** This function prints the contents of the given block buffer (typically one ** 512 byte sector) as 32-bit words, four words per line. ** ** \param data32 Block buffer pointer ** \param bytes Number of bytes to print ** \param title Title string to print up front ** ******************************************************************************* */ void GD_IDE_PrintBlockBuffer( U32* data32, U32 bytes, char* title ) { U8 index; DEBUG_Printf( 8, ( "%s:\n", title ) ); for( index=0; bytes > 0; index++ ) { DEBUG_Printf( 8, ( " 0x%08X 0x%08X 0x%08X 0x%08X\n", *(data32+0), *(data32+1), *(data32+2), *(data32+3) ) ); data32 += 4; if( bytes >= 16 ) bytes -= 16; else bytes = 0; } }
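/*
*******************************************************************************
**
** \note Illustrative sketch only, not part of the original sources: how the
** block buffer report helper above might be used to dump a single 512 byte
** sector at debug level 8. The buffer name 'sector_buffer' is chosen for
** the example only; any buffer of 32-bit words works.
**
** \code
** static U32 sector_buffer[512 / sizeof(U32)];
**
** void MAIN_DumpSectorExample( void )
** {
**     // ... fill sector_buffer, e.g. by an IDE sector read ...
**     GD_IDE_PrintBlockBuffer( sector_buffer, 512, "sector dump" );
** }
** \endcode
**
*******************************************************************************
*/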
// CVC
void set_cvc_frm_id(int layer_id, int frm_id) { unsigned long int PhysicalAddress = CVC_BASE ; int map_len = 0xF00; int fd = open( "/dev/mem", O_RDWR); unsigned char* virtual_addr; ///////// Configure CVC ///////////// virtual_addr = (unsigned char*)mmap(NULL, map_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)PhysicalAddress); if(virtual_addr == MAP_FAILED) { perror("Mapping memory for CVC failed.\n"); close(fd); return; } DEBUG_Printf ("CVC_BASE mapping successful :\n0x%x to 0x%x, size = %d\n ", (int )PhysicalAddress, (int)virtual_addr, map_len ); // a sanity check could be done here to verify the CVC is still configured as intended if (layer_id == 0) { REG_WRITE(virtual_addr,CVC_L0_CTRL,0x3); REG_WRITE(virtual_addr,CVC_VBUFF_SEL, (0x1 << 10 /* layer 0 */) | (frm_id & 0x3) << 0 /*layer 0 */ ); } else if(layer_id == 1) { REG_WRITE(virtual_addr,CVC_L1_CTRL,0x3); REG_WRITE(virtual_addr,CVC_VBUFF_SEL, (0x1 << 11 /* layer 1 */) | (frm_id & 0x3) << 2 /*layer 1 */ ); } else { perror("not supported"); } munmap((void *)virtual_addr, map_len); close(fd); }
/* ******************************************************************************* ** ** \brief Finalize DMA AUDIO data capture ** ** This function is used internally by the DMA driver to finalize the AUDIO ** unit after DMA data capture; it simply stops the AUDIO capture unit. ** ** \param dmaRequest The DMA request data structure to be used ** ******************************************************************************* */ static void GD_DMA_AUDIO_Finalize( GD_DMA_REQUEST_S* dmaRequest ) { #undef __FUNCTION__ #define __FUNCTION__ "GD_DMA_AUDIO_Finalize" DEBUG_Printf( 9, ( "enter: %s\n", __FUNCTION__ ) ); if( dmaRequest->autoRestart == GFALSE ) { // stop the AUDIO capture unit DEBUG_Printf( 7, ( "stop AUDIO unit\n" ) ); GH_DMA_set_AudioConfig_ENABLE( 0 ); } DEBUG_Printf( 9, ( "leave: %s\n", __FUNCTION__ ) ); }
void set_cvc_circular_mode(int layer_id) { unsigned long int PhysicalAddress = CVC_BASE ; int map_len = 0xF00; int fd = open( "/dev/mem", O_RDWR); unsigned char* cvc_base = (unsigned char*)mmap(NULL, map_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)PhysicalAddress); //check if it worked if(cvc_base == MAP_FAILED) { perror("Mapping memory for absolute memory access failed.\n"); close(fd); return; } DEBUG_Printf ("CVC_BASE mapping successful :\n0x%x to 0x%x, size = %d\n ", (int )PhysicalAddress, (int)cvc_base, map_len ); if(layer_id ==0) { REG_WRITE(cvc_base, CVC_L0_CTRL ,0x00000007); } else if(layer_id==1) { REG_WRITE(cvc_base, CVC_L1_CTRL ,0x00000007); } else { perror("Not Supported"); } munmap((void *)cvc_base, map_len); close(fd); }
/*! ******************************************************************************* ** ** \brief Add a request to an available DMA channel ** ** This function tries to add the given request to an available DMA channel. ** Internally it is checked whether an instance of the same feature is already ** being served by the DMA controller. The variable gd_dma_feature_allow contains ** the max. number of instances that can be served in parallel; for most ** features only one instance is allowed to execute at a time. ** If the given request can be moved from the DMA fifo to the DMA channel ** list the request status will be set to GD_DMA_REQUEST_STATUS_CHANNELED. ** ** \param requestP The DMA request to be moved from the fifo to the channel list ** ** \return ** - 0 in case of error if there is either no channel available or there ** is already a request of the same feature in use ** - non-0 if ok, the pointer to given request structure ** ** \note the search for an available DMA channel starts at index 5 down to 0 ** to keep channel#0 untouched as long as possible, as this channel ** is required to handle the special auto-restart feature needed by ** the DMA features #GD_DMA_FEATURE_CCIR656_CAPTURE and ** #GD_DMA_FEATURE_AUDIO_CAPTURE ******************************************************************************* */ GD_DMA_REQUEST_S* GD_DMA_ChannelAddRequest( GD_DMA_REQUEST_S* requestP ) { #undef __FUNCTION__ #define __FUNCTION__ "GD_DMA_ChannelAddRequest" GD_DMA_REQUEST_S* result = 0; int channel; DEBUG_Printf( 9, ( "enter: %s\n", __FUNCTION__ ) ); if( requestP->autoRestart == GTRUE ) { channel = 0; if( gd_dma_channel[channel] == 0 ) { if( GD_DMA_ChannelCheckFeatureUsage( requestP->feature ) ) { gd_dma_channel[channel] = requestP; gd_dma_channel_usage++; result = requestP; requestP->status = GD_DMA_REQUEST_STATUS_CHANNELED; requestP->channel = channel; } } } else { for( channel=GD_DMA_CHANNEL_MAX-1; channel >= 0; channel-- ) { if( gd_dma_channel[channel] == 0 ) { if( GD_DMA_ChannelCheckFeatureUsage( requestP->feature ) ) { gd_dma_channel[channel] = requestP; gd_dma_channel_usage++; result = requestP; requestP->status = GD_DMA_REQUEST_STATUS_CHANNELED; requestP->channel = channel; } break; } } } DEBUG_Printf( 9, ( "leave: %s, result=0x%08x\n", __FUNCTION__, result ) ); return( result ); }
/*! ******************************************************************************* ** ** \brief Removes the given request from the DMA channel list ** ** This function removes the given request from the DMA channel list. ** ** \param requestP The DMA request to be removed from the DMA channel list ** ** \return ** - 0 in case of error, if the given request is currently not on the DMA ** channel list ** - non-0 if ok, the pointer to the removed request structure ** ******************************************************************************* */ GD_DMA_REQUEST_S* GD_DMA_ChannelRemoveRequest( GD_DMA_REQUEST_S* requestP ) { #undef __FUNCTION__ #define __FUNCTION__ "GD_DMA_ChannelRemoveRequest" GD_DMA_REQUEST_S* result = 0; DEBUG_Printf( 9, ( "enter: %s\n", __FUNCTION__ ) ); if( gd_dma_channel[requestP->channel] == requestP ) { result = requestP; gd_dma_channel[requestP->channel] = 0; gd_dma_channel_usage--; } DEBUG_Printf( 9, ( "leave: %s, result=0x%08x\n", __FUNCTION__, result ) ); return( result ); }
/*! ******************************************************************************* ** ** \brief Internal function to copy leftover bytes ** ** This function copies the remaining bytes (max. 3) that were not covered by ** the long word based DMA copy call; it simply uses the CPU to copy these ** bytes. ** ** \param dmaRequest The DMA request to be accessed ** ******************************************************************************* */ static void GD_DMA_SDRAM_Finalize( GD_DMA_REQUEST_S* dmaRequest ) { #undef __FUNCTION__ #define __FUNCTION__ "GD_DMA_SDRAM_Finalize" U32 bytes = (U32)(dmaRequest->channelConfig.chLength); U32 count = (U32)(dmaRequest->optData); U8* source = (U8*)(dmaRequest->channelConfig.chReadAddr_OFFSET_ADDR) + bytes; U8* target = (U8*)(dmaRequest->channelConfig.chWriteAddr_OFFSET_ADDR) + bytes; U8* finish = (U8*)(dmaRequest->channelConfig.chReadAddr_OFFSET_ADDR) + count; DEBUG_Printf( 9, ( "enter: %s\n", __FUNCTION__ ) ); // check for non-aligned copy byte count // copy leftover bytes by hand while( source < finish ) *target++ = *source++; DEBUG_Printf( 9, ( "leave: %s\n", __FUNCTION__ ) ); }
/*! ******************************************************************************* ** ** \brief Finalize a DMA channel request ** ** This function finalizes a DMA request after the DMA hardware has signaled ** either the end condition or an error condition. Basically, there are just ** two jobs to be done by this function, first disable the DMA channel which ** was used to serve the given request, and second, call the callback function ** postFinishCallback() if set. ** ** \note This function will be called from within the DMA interrupt ** service routine, so the callback code is executed within interrupt ** context ** ** \param requestP The DMA request to be finalized ** ******************************************************************************* */ void GD_DMA_ChannelFinalizeRequest( GD_DMA_REQUEST_S* requestP ) { #undef __FUNCTION__ #define __FUNCTION__ "GD_DMA_ChannelFinalizeRequest" DEBUG_Printf( 9, ( "enter: %s\n", __FUNCTION__ ) ); if( requestP->channel >= GD_DMA_CHANNEL_MAX ) return; // // disable the DMA channel configuration // GH_DMA_set_ChConfig( requestP->channel, 0 ); // // execute the post-finish callback function to disable // various other registers from DMA channel usage // if( requestP->postFinishCallback ) { DEBUG_Printf( 7, ( "calling postFinishCallback\n" ) ); requestP->postFinishCallback( requestP ); } // // disable the DMA channel configuration // //GH_DMA_set_ChConfig( requestP->channel, 0 ); // // execute the user specific finish callback function // if( requestP->optFinishCallback ) { DEBUG_Printf( 7, ( "calling optFinishCallback\n" ) ); requestP->optFinishCallback( requestP->result, requestP->channelConfig.chLength ); } DEBUG_Printf( 9, ( "leave: %s\n", __FUNCTION__ ) ); }
/////////////// Time base //////////////////// void init_timebase(int res_id) { unsigned long int PhysicalAddress = TIMEBASE_BASE; int map_len = 0x100; int hActive, hTotal, hSync_start, hSync_end; int vActive, vTotal, vSync_start, vSync_end; int vBlankh_start, vBlankh_end; // vBlank offset in cycles (should be equal to hActive) int vSyncH_start, vSyncH_end; // vSync offset in cycles. int fd = open("/dev/mem", O_RDWR); unsigned char* timebase_base = (unsigned char*) mmap(NULL, map_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t) PhysicalAddress); //check if it worked if (timebase_base == MAP_FAILED) { perror("Mapping memory for VTC access failed.\n"); close(fd); return; } DEBUG_Printf ("Time_BASE mapping successful :\n0x%x to 0x%x, size = %d\n ", (int )PhysicalAddress, (int)timebase_base, map_len ); // Calculating timing parameters hActive = gVideoParam[res_id][E_HActive]; hSync_start = hActive + gVideoParam[res_id][E_HFP]; hSync_end = hSync_start + gVideoParam[res_id][E_HSyncLen]; hTotal = gVideoParam[res_id][E_HTotal]; vActive = gVideoParam[res_id][E_VActive]; vSync_start = vActive + gVideoParam[res_id][E_VFP] - 1; // one line is compensated with the vSyncH offset cycles vSync_end = vSync_start + gVideoParam[res_id][E_VSyncLen]; vTotal = gVideoParam[res_id][E_VTotal]; vBlankh_start = hActive; // after active lines vBlankh_end = hActive; vSyncH_start = hActive + gVideoParam[res_id][E_HFP]; vSyncH_end = vSyncH_start; REG_WRITE(timebase_base, TIME_BASE_ACT_SIZE, (vActive << SHIFT_16)| hActive); REG_WRITE(timebase_base, TIME_BASE_ENCODE , 0x00000002); REG_WRITE(timebase_base, TIME_BASE_POL , 0x0000003F); REG_WRITE(timebase_base, TIME_BASE_HSIZE , hTotal); REG_WRITE(timebase_base, TIME_BASE_VSIZE , vTotal); REG_WRITE(timebase_base, TIME_BASE_HSYNC , (hSync_end << SHIFT_16) | hSync_start); REG_WRITE(timebase_base, TIME_BASE_VBLANKH , (vBlankh_end << SHIFT_16) | vBlankh_start); REG_WRITE(timebase_base, TIME_BASE_VSYNC , (vSync_end << SHIFT_16) | vSync_start); REG_WRITE(timebase_base, TIME_BASE_VSYNCH , (vSyncH_end << SHIFT_16) | vSyncH_start); REG_WRITE(timebase_base, TIME_BASE_CR , 0x03F5EF06); // Control register, has to be written last. munmap((void *) timebase_base, map_len); close(fd); }
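/*
 * Worked example (illustrative only, assuming a standard CEA-861 1080p60
 * entry in gVideoParam[]: hActive=1920, hFP=88, hSyncLen=44, hTotal=2200,
 * vActive=1080, vFP=4, vSyncLen=5, vTotal=1125):
 *
 *   hSync_start   = 1920 + 88    = 2008
 *   hSync_end     = 2008 + 44    = 2052
 *   vSync_start   = 1080 + 4 - 1 = 1083   (one line compensated via the vSyncH offset)
 *   vSync_end     = 1083 + 5     = 1088
 *   vBlankh_start = vBlankh_end  = 1920
 *   vSyncH_start  = vSyncH_end   = 2008
 *
 * so TIME_BASE_HSYNC receives (2052 << 16) | 2008 and TIME_BASE_VSYNC
 * receives (1088 << 16) | 1083.
 */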
/* ******************************************************************************* ** ** \brief Allocate a new IDE handle structure ** ** This function allocates a new IDE handle structure from the handle array ** "gd_ide_handle_array[]" ** ** \return ** - 0 in case of error if there are no more handles available ** - non-0 if ok, the pointer to the allocated handle structure ** ******************************************************************************* */ GD_IDE_HANDLE_T* GD_IDE_HandleAllocate( void ) { #undef __FUNCTION__ #define __FUNCTION__ "GD_IDE_HandleAllocate" int index; U32 sreg; GD_IDE_HANDLE_T* ide = &(gd_ide_handle_array[0]); DEBUG_Printf( 9, ( "enter: %s\n", __FUNCTION__ ) ); DEBUG_Printf( 7, ( "allocating IDE handle\n" ) ); GD_EnterCritical( sreg ); for( index=0; index < GD_IDE_MAX_NUM; index++, ide++ ) { if( ide->inuse == GFALSE ) { ide->id = GD_IDE_HANDLE_ID; ide->inuse = 1; ide->use48bit = GFALSE; ide->master = GFALSE; ide->packet = GFALSE; ide->pioMode = 2; ide->dmaMode = 0; ide->sleepEnabled = GFALSE; DEBUG_Printf( 9, ( "leave: %s, result=%d\n", __FUNCTION__, ide ) ); GD_LeaveCritical( sreg ); return( ide ); } } GD_LeaveCritical( sreg ); DEBUG_Printf( 9, ( "leave: %s, result=%d\n", __FUNCTION__, 0 ) ); return( 0 ); }
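/*
*******************************************************************************
**
** \note Illustrative sketch only, not from the original sources: the typical
** lifecycle of an IDE handle using the allocate/release pair above. The
** function name MAIN_IdeHandleExample is chosen for the example.
**
** \code
** void MAIN_IdeHandleExample( void )
** {
**     GD_IDE_HANDLE_T* ide = GD_IDE_HandleAllocate();
**
**     if( !ide )
**         return;                      // no free handle available
**
**     // ... use the handle with the GD_IDE_* API ...
**
**     GD_IDE_HandleRelease( ide );     // make the handle available again
** }
** \endcode
**
*******************************************************************************
*/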
/*! ******************************************************************************* ** ** \brief Check whether the given feature may be served by another DMA channel ** ** This function counts the instances of the given feature which are currently ** served by the DMA channel list; this count is required to check whether it ** is allowed to add a request for the given feature to the DMA channel list ** or not. ** ** \param feature The feature to get the current usage count for ** ** \return ** - GFALSE if the allowed number of instances of the feature is already in use ** - GTRUE if a request for the feature may be added to the DMA channel list ** ******************************************************************************* */ GBOOL GD_DMA_ChannelCheckFeatureUsage( U16 feature ) { #undef __FUNCTION__ #define __FUNCTION__ "GD_DMA_ChannelCheckFeatureUsage" GBOOL result = GTRUE; U8 channel; U8 index; U8 usage; U32 sreg; DEBUG_Printf( 9, ( "enter: %s\n", __FUNCTION__ ) ); GD_EnterCritical( sreg ); for( index=0; index < GD_DMA_FEATURE_COUNT; index++ ) { usage = 0; if( ( ( feature >> index ) & 0x1 ) == 0x1 ) { for( channel=0; channel < GD_DMA_CHANNEL_MAX; channel++ ) { if( gd_dma_channel[channel] && ( ( ( gd_dma_channel[channel]->feature >> index ) & 0x1 ) == 0x1 ) ) usage++; } if( usage >= gd_dma_feature_allow[index] ) { result = GFALSE; break; } } } GD_LeaveCritical( sreg ); DEBUG_Printf( 9, ( "leave: %s, result=%d\n", __FUNCTION__, result ) ); return( result ); }
/* ******************************************************************************* ** ** \brief Unlocks the IDE access. ** ******************************************************************************* */ void GD_IDE_UnlockDevice( void ) { #undef __FUNCTION__ #define __FUNCTION__ "GD_IDE_UnlockDevice" DEBUG_Printf( 9, ( "enter: %s\n", __FUNCTION__ ) ); DEBUG_Printf( 6, ( "unlocking IDE device\n" ) ); ///////////////////// RTOS_EnterCritical(); ///////////////////// gd_ide_device_in_use = GFALSE; RTOS_SchedulerUnlock(); ///////////////////// RTOS_LeaveCritical(); ///////////////////// DEBUG_Printf( 9, ( "leave: %s\n", __FUNCTION__ ) ); }
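/*
*******************************************************************************
**
** \note Illustrative sketch only, not from the original sources:
** GD_IDE_LockDevice() and GD_IDE_UnlockDevice() are meant to bracket a
** sequence of IDE accesses so that only one caller owns the device at a
** time; the lock call busy-waits until the device becomes free. The
** function name MAIN_IdeTransferExample is chosen for the example.
**
** \code
** void MAIN_IdeTransferExample( void )
** {
**     GD_IDE_LockDevice();             // blocks until the device is free
**
**     // ... issue IDE commands, transfer data ...
**
**     GD_IDE_UnlockDevice();           // release the device for other callers
** }
** \endcode
**
*******************************************************************************
*/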
/*! ******************************************************************************* ** ** \brief Calls the change callback function ** ** This function performs the 'change' callback if required; it simply calls ** the callback function optChangeCallback() if set. ** ** \param requestP The DMA request to be accessed ** ** \note This function will be called from within the DMA interrupt ** service routine, so the callback code is executed within interrupt ** context ** ** \note The bytes field of the optional callback parameter will be used ** to pass the last hit address ** ******************************************************************************* */ void GD_DMA_ChannelChangeCallback( GD_DMA_REQUEST_S* requestP ) { #undef __FUNCTION__ #define __FUNCTION__ "GD_DMA_ChannelChangeCallback" U32 address = 0; DEBUG_Printf( 9, ( "enter: %s\n", __FUNCTION__ ) ); // // execute the opt-change callback function to disable // various other registers from DMA channel usage // if( requestP->optChangeCallback ) { DEBUG_Printf( 7, ( "calling optChangeCallback\n" ) ); if( requestP->feature == GD_DMA_FEATURE_SEARCH_REPLACE ) address = GD_DMA_SrGetHitAddress( requestP ); requestP->optChangeCallback( requestP->result, address ); } DEBUG_Printf( 9, ( "leave: %s\n", __FUNCTION__ ) ); }
/*! ******************************************************************************* ** ** \brief Capture AUDIO data using DMA ** ** This function starts the AUDIO capture unit; it captures everything ** specified within the captureParams block into the memory referenced ** by the targetBuffer pointer. ** ** \param targetBuffer Memory address to write the captured data into ** \param bufferBytes Size of the capture data buffer in bytes ** \param captureParams Capture specific parameters describing the AUDIO ** capture configuration ** \param finishCallback Function to be called when transfer has completed ** ** \return ** - non-NULL if successful, a pointer to the generated DMA request buffer ** - NULL in case of error, if there are not enough request buffers available ** to handle the audio capture feature. ** ** \note The buffer address must be long word aligned as the DMA controller ** cannot handle byte addresses at this point. ** \note The bytes parameter in the optional finish callback will be used to ** pass the number of bytes captured. ** \note The finish callback function should have the following signature: ** void finishCallback(GERR status, U32 bytes); ** passing NULL here disables the finish callback functionality ******************************************************************************* */ GD_DMA_REQUEST_S* GD_DMA_AUDIO_Capture( U8* targetBuffer, U32 bufferBytes, GD_DMA_AUDIO_PARAMS_S* captureParams, GD_DMA_OPT_CALLBACK_F finishCallback ) { #undef __FUNCTION__ #define __FUNCTION__ "GD_DMA_AUDIO_Capture" GD_DMA_REQUEST_S* dmaRequest = 0; U32 bytes = 0; U16 dmaFeature = GD_DMA_FEATURE_AUDIO_CAPTURE | GD_DMA_FEATURE_SDRAM_COPY; DEBUG_Printf( 9, ( "enter: %s\n", __FUNCTION__ ) ); if( bytes <= bufferBytes ) { dmaRequest = GD_DMA_RequestAllocate( dmaFeature, GTRUE ); if( dmaRequest ) { gd_dma_audio_prepare.useExternalInput = captureParams->useExternalInput; gd_dma_audio_prepare.endPosition = captureParams->endPosition; gd_dma_audio_prepare.invertData = captureParams->invertData; gd_dma_audio_prepare.invertBitClock = captureParams->invertBitClock; gd_dma_audio_prepare.invertWordClock = captureParams->invertWordClock; gd_dma_audio_prepare.enableLeftChannel = captureParams->enableLeftChannel; gd_dma_audio_prepare.enableRightChannel = captureParams->enableRightChannel; gd_dma_audio_prepare.capture20BitMode = captureParams->capture20BitMode; gd_dma_audio_prepare.msbFirst = captureParams->msbFirst; DEBUG_Printf( 7, ( "preparing DMA request block for AUDIO capture\n" ) ); dmaRequest->preEnableCallback = GD_DMA_AUDIO_Prepare; dmaRequest->postEnableCallback = 0; dmaRequest->postFinishCallback = GD_DMA_AUDIO_Finalize; dmaRequest->optHandle = 0; dmaRequest->optData = (void*)&gd_dma_audio_prepare; dmaRequest->optFinishCallback = finishCallback; dmaRequest->optChangeCallback = 0; dmaRequest->channelConfig.chConfig = 0; dmaRequest->channelConfig.chLength = bufferBytes; dmaRequest->channelConfig.chLLAddr = 0;
dmaRequest->channelConfig.chReadAddr_PERIPHERAL_ADDR = 0x5; dmaRequest->channelConfig.chReadAddr_OFFSET_ADDR = (U32)0; dmaRequest->channelConfig.chReadLine_LINES = 0x3FF; dmaRequest->channelConfig.chReadInc_LINE_LENGTH = 0xFFF; dmaRequest->channelConfig.chReadInc_LINE_INCREMENT = 0; dmaRequest->channelConfig.chReadLoopAddr = 0; dmaRequest->channelConfig.chWriteAddr_PERIPHERAL_ADDR = 0x0; dmaRequest->channelConfig.chWriteAddr_OFFSET_ADDR = (U32)targetBuffer; dmaRequest->channelConfig.chWriteLine_LINES = 0x3FF; dmaRequest->channelConfig.chWriteInc_LINE_LENGTH = 0xFFF; dmaRequest->channelConfig.chWriteInc_LINE_INCREMENT = 0; dmaRequest->channelConfig.chWriteLoopAddr = 0; DEBUG_Printf( 7, ( "setting auto-restart flag\n" ) ); GD_DMA_RequestSetAutoRestart( dmaRequest, GTRUE ); DEBUG_Printf( 7, ( "sending DMA request block to DMA fifo\n" ) ); if( GD_DMA_SendRequest( dmaRequest ) != GD_OK ) { GD_DMA_RequestRelease( dmaRequest ); dmaRequest = 0; } } } else { DEBUG_Printf( 1, ( "ERROR: data buffer is too small, got %d but require %d bytes\n", bufferBytes, bytes ) ); } DEBUG_Printf( 9, ( "leave: %s, result=0x%08x\n", __FUNCTION__, dmaRequest ) ); return( dmaRequest ); }
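/*
*******************************************************************************
**
** \note Illustrative sketch only, not from the original sources: starting and
** stopping an auto-restarting AUDIO capture with GD_DMA_AUDIO_Capture() and
** GD_DMA_AUDIO_CaptureStop(). Buffer size and parameter values are
** assumptions made for the example; the finish callback follows the
** signature documented above.
**
** \code
** static U8 main_audio_buffer[4096];
**
** static void MAIN_AudioCaptureFinish( GERR status, U32 bytes )
** {
**     // one buffer of captured AUDIO data is available here
** }
**
** void MAIN_AudioCaptureExample( void )
** {
**     GD_DMA_AUDIO_PARAMS_S params = { 0 };     // assumed: all-zero defaults are acceptable
**     GD_DMA_REQUEST_S*     request;
**
**     params.enableLeftChannel  = GTRUE;
**     params.enableRightChannel = GTRUE;
**
**     request = GD_DMA_AUDIO_Capture( main_audio_buffer, sizeof(main_audio_buffer),
**                                     &params, MAIN_AudioCaptureFinish );
**     if( !request )
**         return;                               // no DMA request buffer available
**
**     // ... capture runs with auto-restart until it is stopped ...
**
**     GD_DMA_AUDIO_CaptureStop( request );      // stops at the next finish callback
** }
** \endcode
**
*******************************************************************************
*/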
/*! ******************************************************************************* ** ** \brief Start a DMA channel request ** ** This function starts a request which is already on the DMA channel list. ** It first looks up the DMA channel index assigned to the request; then this ** channel will be configured using the values from the requestP->channelConfig ** data block. ** Next, the callback function preEnableCallback() will be called if set. ** Now, after the channel preparation phase is finished the DMA interrupt ** enable register will be set to get all required interrupts for the current ** DMA request feature. After having set this, the channel status will be ** changed to GD_DMA_REQUEST_STATUS_ACTIVE and the channel enable bit will ** be set. At this point the DMA hardware block performs its job. ** Finally, the callback function postEnableCallback() will be called if set. ** ** \note This function will be called either from within the DMA interrupt ** service routine or from the DMA API function GD_DMA_SendRequest(). ** ** \param requestP The DMA request to be started ** ******************************************************************************* */ void GD_DMA_ChannelStartRequest( GD_DMA_REQUEST_S* requestP ) { #undef __FUNCTION__ #define __FUNCTION__ "GD_DMA_ChannelStartRequest" GD_DMA_CHANNEL_CONFIG_S* config; U8 channel; DEBUG_Printf( 9, ( "enter: %s\n", __FUNCTION__ ) ); // // search channel index of given request block // for( channel=0; channel < GD_DMA_CHANNEL_MAX; channel++ ) if( gd_dma_channel[channel] == requestP ) break; if( channel == GD_DMA_CHANNEL_MAX ) return; // // configure the DMA channel using the configuration // parameters passed by the request data block // config = &(requestP->channelConfig); GH_DMA_set_ChLength( channel, config->chLength ); GH_DMA_set_ChLLAddr( channel, config->chLLAddr ); GH_DMA_set_ChReadAddr_ENDIAN_SWAP( channel, config->chReadAddr_ENDIAN_SWAP ); GH_DMA_set_ChReadAddr_PERIPHERAL_ADDR( channel, config->chReadAddr_PERIPHERAL_ADDR ); GH_DMA_set_ChReadAddr_OFFSET_ADDR( channel, config->chReadAddr_OFFSET_ADDR ); GH_DMA_set_ChReadLine_LINES( channel, config->chReadLine_LINES ); GH_DMA_set_ChReadLine_LOOP_INCREMENT( channel, config->chReadLine_LOOP_INCREMENT ); GH_DMA_set_ChReadInc_LINE_LENGTH( channel, config->chReadInc_LINE_LENGTH ); GH_DMA_set_ChReadInc_LINE_INCREMENT( channel, config->chReadInc_LINE_INCREMENT ); GH_DMA_set_ChReadLoopAddr( channel, config->chReadLoopAddr ); GH_DMA_set_ChWriteAddr_ENDIAN_SWAP( channel, config->chWriteAddr_ENDIAN_SWAP ); GH_DMA_set_ChWriteAddr_PERIPHERAL_ADDR( channel, config->chWriteAddr_PERIPHERAL_ADDR); GH_DMA_set_ChWriteAddr_OFFSET_ADDR( channel, config->chWriteAddr_OFFSET_ADDR ); GH_DMA_set_ChWriteLine_LINES( channel, config->chWriteLine_LINES ); GH_DMA_set_ChWriteLine_LOOP_INCREMENT( channel, config->chWriteLine_LOOP_INCREMENT ); GH_DMA_set_ChWriteInc_LINE_LENGTH( channel, config->chWriteInc_LINE_LENGTH ); GH_DMA_set_ChWriteInc_LINE_INCREMENT( channel, config->chWriteInc_LINE_INCREMENT ); GH_DMA_set_ChWriteLoopAddr( channel, config->chWriteLoopAddr ); // // execute the pre-enable callback function to prepare // various other registers for DMA channel transfer // if( requestP->preEnableCallback ) { DEBUG_Printf( 7, ( "calling preEnableCallback\n" ) ); requestP->preEnableCallback( requestP ); } if( requestP->autoRestart == GTRUE ) { DEBUG_Printf( 7, ( "enable the auto-restart feature\n" ) ); GH_DMA_set_ChConfig_AUTO_RESTART_ENABLE( channel, 1 ); } // // enable the DMA channel configuration // requestP->status = GD_DMA_REQUEST_STATUS_ACTIVE; GH_DMA_set_ChConfig_ENABLE( channel, 1 ); // // execute the post-enable callback function to prepare // various other registers for DMA channel transfer // if( requestP->postEnableCallback ) { DEBUG_Printf( 7, ( "calling postEnableCallback\n" ) ); requestP->postEnableCallback( requestP ); } DEBUG_Printf( 9, ( "leave: %s\n", __FUNCTION__ ) ); }
void *thread_sw_sync(void* temp) { void cvc_vdma_sw_sync_init(void); void setCVC_TPGBuffer(int cvc_id, int tpg_id); void sw_sobel_processing(unsigned long in_buffer, unsigned long out_buffer); int current_state = gActiveState; unsigned long int vmem[MAX_BUFFER]; unsigned long int sob_buff[MAX_BUFFER]; int i = 0; int offset = 0; // starting indices int tpg_index = 2 ,cvc_index = 0, sobel_in_index = 1, sobel_out_index = 1; for (i = 0; i<MAX_BUFFER; i++) { vmem[i] = gLayerBase[DISPLAY_LAYER] + offset; sob_buff[i] = gLayerBase[SOBEL_LAYER] + offset; offset += BUFFER_OFFSETS; } while(1) { int new_state = gActiveState; if(new_state == STATE_EXIT) { disable_cvc_layer(DISPLAY_LAYER); // do clean up if needed. resetStop_VDMA_ALL(); gSwSobelState = SW_SOBEL_STATE_OFF; break; } if (new_state != SOBEL_SW) { if(current_state == SOBEL_SW) { set_cvc_circular_mode(DISPLAY_LAYER); resetStop_VDMA_ALL(); gSwSobelState = SW_SOBEL_STATE_OFF; } current_state = new_state; sleep(1); continue; } // s/w sobel has to be on... Either turn it on or continue doing s/w synchronisation.... if (current_state != SOBEL_SW) // new_state == SOBEL_SW { resetStop_VDMA_ALL(); DEBUG_Text ("turning on sobel: resetStop\n"); //(1) set cvc to sw sync mode (not automatic switch) cvc_vdma_sw_sync_init(); //(2) set cvc to read buffer 0 // cvc_current = 0 //(3) Set tpg vdma to park mode //(4) tpg write in intermediate buffer 2 // tpg_current = 2 setCVC_TPGBuffer(cvc_index,tpg_index); //(5) start sw sobel // sobel_IN = 1; //i[1] // sobel_out = 1; //vmem[1] sw_sobel_processing(sob_buff[sobel_in_index], vmem[sobel_out_index]); // this is a synchronous call and will wait for completion of the sobel filter. } else { //Continue the s/w sync for s/w sobel filter //(1) cvc_current = (cvc_current + 1) % 3 & activate //todo: need to figure out; do we need to check if tpg is done writing previous. //(2) tpg_current = (tpg_current + 1) % 3 & set the vsize again //(3) increment sobel_in & sobel_out and start sw sobel // wait for its completion. #ifndef CVC_FREE_RUNNING cvc_index++; cvc_index %= MAX_BUFFER; #endif tpg_index++; tpg_index %= MAX_BUFFER; //todo: check if previous tpg was done. setCVC_TPGBuffer(cvc_index,tpg_index); sobel_in_index++; sobel_in_index %= MAX_BUFFER; #ifndef CVC_FREE_RUNNING sobel_out_index++; sobel_out_index %= MAX_BUFFER; #else sobel_out_index = cvc_index; #endif sw_sobel_processing(sob_buff[sobel_in_index], vmem[sobel_out_index]); } gSwSobelState = SW_SOBEL_STATE_ON; current_state = new_state; } // end of the while loop. DEBUG_Printf ("Exiting from %s\n",__func__); return NULL; }
/* AddEventToStreamBuffer * * Put the given event into the given stream buffer at the given location * te must point to an event filled out in accordance with the description * given in GetTrackEvent() * * Returns zero on success, non-zero on error. */ static int AddEventToStreamBuffer (temp_event_t *te, convert_buf_t *buf) { DWORD delta_time; MIDIEVENT *me; me = (MIDIEVENT *)(buf->mh.lpData + buf->start_ofs + buf->bytes_in); /* When we see a new, empty buffer, set the start time on it */ if (!buf->bytes_in) buf->starttime = currenttime; /* Use the above set start time to figure out how much longer * we should fill this buffer before declaring it as "full" */ if (currenttime - buf->starttime > buffer_tick_len) { if (buf->times_up) { buf->times_up = FALSE; return CONVERTERR_BUFFERFULL; } buf->times_up = TRUE; } /* Delta time is absolute event time minus absolute time * already gone by on this track */ delta_time = te->event_time - currenttime; /* Event time is now current time on this track */ currenttime = te->event_time; if (te->shortdata[0] < MIDICMD_SYSEX) { /* Channel message. Need 3 DWORD's: delta-t, stream-ID, event */ if (buf->maxlen - buf->bytes_in < 3*sizeof(DWORD)) return CONVERTERR_BUFFERFULL; me->dwDeltaTime = delta_time; me->dwStreamID = 0; me->dwEvent = (te->shortdata[0]) | (((DWORD)te->shortdata[1]) << 8) | (((DWORD)te->shortdata[2]) << 16) | MEVT_F_SHORT; if ((te->shortdata[0] & 0xF0) == MIDICMD_CONTROL && te->shortdata[1] == MIDICTL_MSB_MAIN_VOLUME) { /* If this is a volume change, generate a callback * so we can grab the new volume for our cache. */ me->dwEvent |= MEVT_F_CALLBACK; } buf->bytes_in += 3 *sizeof(DWORD); } else if (te->shortdata[0] == MIDICMD_SYSEX || te->shortdata[0] == MIDICMD_SYSEX_END) { DEBUG_Printf("%s: Ignoring SysEx event.\n", __thisfunc__); if (te->longdata) { Z_Free(te->longdata); te->longdata = NULL; } } else { /* Better be a meta event. * BYTE event * BYTE type * VDWORD len * BYTE longdata[len] */ assert(te->shortdata[0] == MIDI_META_EVENT); /* The only meta-event we care about is change tempo */ if (te->shortdata[1] != MIDI_META_TEMPO) { if (te->longdata) { Z_Free(te->longdata); te->longdata = NULL; } return CONVERTERR_METASKIP; } /* We should have three bytes of parameter data */ assert(te->event_len == 3); /* Need 3 DWORD's: delta-t, stream-ID, event data */ if (buf->maxlen - buf->bytes_in < 3 *sizeof(DWORD)) { if (te->longdata) { Z_Free(te->longdata); te->longdata = NULL; } return CONVERTERR_BUFFERFULL; } me->dwDeltaTime = delta_time; me->dwStreamID = 0; /* Note: this is backwards from above because we're converting * a single data value from hi-lo to lo-hi format... */ me->dwEvent = (te->longdata[2]) | (((DWORD)te->longdata[1]) << 8 ) | (((DWORD)te->longdata[0]) << 16); buffer_tick_len = (mfs.timediv * 1000 * BUFFER_TIME_LENGTH) / me->dwEvent /* == current tempo */; DEBUG_Printf("%s: buffer tick length: %lu\n", __thisfunc__, buffer_tick_len); me->dwEvent |= (((DWORD)MEVT_TEMPO) << 24) | MEVT_F_SHORT; if (te->longdata) { Z_Free(te->longdata); te->longdata = NULL; } buf->bytes_in += 3 *sizeof(DWORD); } return 0; }
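/*
 * Worked example of the short-event packing above (illustrative only): a
 * Note On for channel 0, key 60, velocity 100 arrives as
 *     shortdata[0] = 0x90, shortdata[1] = 0x3C, shortdata[2] = 0x64
 * and is streamed as three DWORDs:
 *     dwDeltaTime = delta_time
 *     dwStreamID  = 0
 *     dwEvent     = 0x90 | (0x3C << 8) | (0x64 << 16) | MEVT_F_SHORT
 *                 = 0x00643C90            (MEVT_F_SHORT is zero)
 * so each channel message consumes 3 * sizeof(DWORD) bytes of the stream buffer.
 */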
/* ConvertToBuffer * * Converts MIDI data from the track buffers setup by a previous call * to ConverterInit(). Converts data until an error is encountered * or the output buffer has been filled with as much event data as possible, * not to exceed maxlen. * * Success/failure and the number of output bytes actually converted will * be returned in the convert_buf structure. */ int ConvertToBuffer (unsigned int flags, convert_buf_t *buf) { static track_state_t *ts, *found; static DWORD status; static DWORD next_time; static temp_event_t tevent; int err; DWORD idx; buf->bytes_in = 0; if (flags & CONVERTF_RESET) { status = 0; memset(&tevent, 0, sizeof(struct _temp_event_s)); ts = found = NULL; } /* If we were already done, then return with a warning */ if (status & CONVERTF_STATUS_DONE) { if (!bgmloop) return CONVERTERR_DONE; RewindConverter(); status = 0; } /* The caller is asking us to continue, but we're already hosed because * we previously identified something as corrupt, so complain louder this * time. */ else if (status & CONVERTF_STATUS_STUCK) { return CONVERTERR_STUCK; } else if (status & CONVERTF_STATUS_GOTEVENT) { /* Turn off this bit flag */ status ^= CONVERTF_STATUS_GOTEVENT; /* The following code for this case is duplicated from below, * and is designed to handle a "straggler" event, should we * have one left over from previous processing the last time * this function was called. */ /* Don't add end of track event until we are done */ if (tevent.shortdata[0] == MIDI_META_EVENT && tevent.shortdata[1] == MIDI_META_EOT) { if (tevent.longdata) { Z_Free(tevent.longdata); tevent.longdata = NULL; } } else if ((err = AddEventToStreamBuffer(&tevent, buf)) != CONVERTERR_NOERROR) { if (err == CONVERTERR_BUFFERFULL) { /* Do some processing and tell caller that this buffer is full */ status |= CONVERTF_STATUS_GOTEVENT; return CONVERTERR_NOERROR; } else if (err == CONVERTERR_METASKIP) { /* We skip by all meta events that aren't tempo changes */ } else { DEBUG_Printf("MIDI: %s\n", err_add_event); if (tevent.longdata) { Z_Free(tevent.longdata); tevent.longdata = NULL; } return err; } } } for ( ; ; ) { found = NULL; next_time = ~(DWORD)0; /* 0xFFFFFFFFL */ /* Find nearest event due */ for (idx = 0, ts = mfs.tracks; idx < mfs.numtracks; ++idx, ++ts) { if (!(ts->status & ITS_F_ENDOFTRK) && ts->next_event_time < next_time) { next_time = ts->next_event_time; found = ts; } } /* None found? We must be done, so return to the caller with a smile. */ if (!found) { status |= CONVERTF_STATUS_DONE; return CONVERTERR_NOERROR; } /* Ok, get the event header from that track */ if (GetTrackEvent(found, &tevent)) { /* Warn future calls that this converter is stuck * at a corrupt spot and can't continue */ status |= CONVERTF_STATUS_STUCK; return CONVERTERR_CORRUPT; } /* Don't add end of track event 'til we're done */ if (tevent.shortdata[0] == MIDI_META_EVENT && tevent.shortdata[1] == MIDI_META_EOT) { if (tevent.longdata) { Z_Free(tevent.longdata); tevent.longdata = NULL; } continue; } if ((err = AddEventToStreamBuffer(&tevent, buf)) != CONVERTERR_NOERROR) { if (err == CONVERTERR_BUFFERFULL) { /* Do some processing and tell caller that this buffer is full */ status |= CONVERTF_STATUS_GOTEVENT; return CONVERTERR_NOERROR; } else if (err == CONVERTERR_METASKIP) { /* We skip by all meta events that aren't tempo changes */ } else { DEBUG_Printf("MIDI: %s\n", err_add_event); if (tevent.longdata) { Z_Free(tevent.longdata); tevent.longdata = NULL; } return err; } } } return CONVERTERR_NOERROR; /* not reached. */ }
/* ******************************************************************************* ** ** \brief Sets the given DMA mode. ** ** This function sets the IDE controller into the given DMA transfer mode. ** ** \param ideHandle The IDE handle to access. ** \param dmamode The DMA mode to set. ** ** \return ** - GD_OK if successful ** - GD_ERR_IDE_TIMEOUT if timed out during READY wait ** - GD_ERR_IDE_STATUS if the device reports an ERROR state ** - GD_ERR_NOT_INITIALIZED if the driver is not yet initialized ** ** \note This function must be called within a section protected by ** RTOS_EnterCritical() and RTOS_LeaveCritical() as IDE register ** access is like accessing global variables. ** ******************************************************************************* */ GERR GD_IDE_DmaSetMode( GD_HANDLE ideHandle, U8 dmamode ) { #undef __FUNCTION__ #define __FUNCTION__ "GD_IDE_DmaSetMode" GD_IDE_HANDLE_T* ide = (GD_IDE_HANDLE_T*)ideHandle; U8 mode_mask = dmamode; U8 master_mask = 0; GERR result; DEBUG_Printf( 9, ( "enter: %s\n", __FUNCTION__ ) ); if( ide && ide->inuse && ide->id == GD_IDE_HANDLE_ID ) { DEBUG_Printf( 3, ( "setting DMA mode for %s to %d\n", ide->master?"master":"slave", dmamode ) ); if( !gd_ide_initialized ) { result = GD_ERR_NOT_INITIALIZED; DEBUG_Printf( 9, ( "leave: %s, result=0x%08x\n", __FUNCTION__, result ) ); return( result ); } if( GD_IDE_ReadyWait( ideHandle, GD_IDE_BUSY_TIMEOUT_MSECS_CMD ) != GD_OK ) { result = GD_ERR_IDE_TIMEOUT; DEBUG_Printf( 9, ( "leave: %s, result=0x%08x\n", __FUNCTION__, result ) ); return( result ); } GD_IDE_SelectDevice( ideHandle ); if( GD_IDE_ReadyWait( ideHandle, GD_IDE_BUSY_TIMEOUT_MSECS_CMD ) != GD_OK ) { result = GD_ERR_IDE_TIMEOUT; DEBUG_Printf( 9, ( "leave: %s, result=0x%08x\n", __FUNCTION__, result ) ); return( result ); } // the following table shows the IDE mode mask // for all supported PIO and DMA modes: // // DEFAULT WITH IORDY 0x00 == 00000 000 // DEFAULT NO IORDY 0x01 == 00000 001 // PIO-0 WITH IORDY 0x08 == 00001 000 // PIO-0 WITHOUT IORDY 0x00 == 00000 000 // PIO-1 WITH IORDY 0x09 == 00001 001 // PIO-1 WITHOUT IORDY 0x01 == 00000 001 // PIO-2 WITH IORDY 0x0A == 00001 010 // PIO-2 WITHOUT IORDY 0x02 == 00000 010 // PIO-3 WITH IORDY 0x0B == 00001 011 // PIO-4 WITH IORDY 0x0C == 00001 100 // PIO-5 WITH IORDY 0x0D == 00001 101 // multiword DMA-0 0x20 == 00100 000 // multiword DMA-1 0x21 == 00100 001 // multiword DMA-2 0x22 == 00100 010 mode_mask &= 0x03; // use the last 2 bits (DMA) only mode_mask += 0x20; // add the mask 001000xx (multiword DMA) // obs,useLBA,obs,DEV=0,0,0,0,0 = 1110 0000 // obs,useLBA,obs,DEV=1,0,0,0,0 = 1111 0000 if( ide->master ) master_mask = 0xE0; else master_mask = 0xF0; // fill LBA registers with dummy NULL values gd_ide_registers->device = master_mask; gd_ide_registers->lba_high = 0x00; gd_ide_registers->lba_mid = 0x00; gd_ide_registers->lba_low = 0x00; gd_ide_registers->sector_count = mode_mask; gd_ide_registers->feature_error = 0x03; // 0x03 = set transfer mode gd_ide_registers->command_state = GD_IDE_COMMAND_SET_FEATURE; if( GD_IDE_ReadyWait( ideHandle, GD_IDE_BUSY_TIMEOUT_MSECS_CMD ) != GD_OK ) { result = GD_ERR_IDE_TIMEOUT; DEBUG_Printf( 9, ( "leave: %s, result=0x%08x\n", __FUNCTION__, result ) ); return( result ); } if( gd_ide_registers->command_state & GD_IDE_STATE_ERROR ) { result = GD_ERR_IDE_STATUS; DEBUG_Printf( 9, ( "leave: %s, result=0x%08x\n", __FUNCTION__, result ) ); return( result ); } // the following table shows the ATA controller // settings for all supported DMA
modes: // // DMA# | Teoc | Td | Tm | UPI_ATAPCTR // ------+------+----+----+------------- // 0 | 26 | 28 | 6 | 0x1A001C06 // 1 | 4 | 10 | 3 | 0x04000A03 // 2 | 2 | 8 | 3 | 0x02000803 // switch( dmamode ) { default: case 0: if( gd_ide_init_params.useATA == GTRUE ) { if( ide->master ) { GH_ATA_set_DTR0_TEOC( 26 ); GH_ATA_set_DTR0_TD( 28 ); GH_ATA_set_DTR0_TM( 6 ); } else { GH_ATA_set_DTR1_TEOC( 26 ); GH_ATA_set_DTR1_TD( 28 ); GH_ATA_set_DTR1_TM( 6 ); } } else { result = GD_ERR_FEATURE_NOT_SUPPORTED; DEBUG_Printf( 9, ( "leave: %s, result=0x%08x\n", __FUNCTION__, result ) ); return( result ); } break; case 1: if( gd_ide_init_params.useATA == GTRUE ) { if( ide->master ) { GH_ATA_set_DTR0_TEOC( 4 ); GH_ATA_set_DTR0_TD( 10 ); GH_ATA_set_DTR0_TM( 3 ); } else { GH_ATA_set_DTR1_TEOC( 4 ); GH_ATA_set_DTR1_TD( 10 ); GH_ATA_set_DTR1_TM( 3 ); } } else { result = GD_ERR_FEATURE_NOT_SUPPORTED; DEBUG_Printf( 9, ( "leave: %s, result=0x%08x\n", __FUNCTION__, result ) ); return( result ); } break; case 2: if( gd_ide_init_params.useATA == GTRUE ) { if( ide->master ) { GH_ATA_set_DTR0_TEOC( 2 ); GH_ATA_set_DTR0_TD( 8 ); GH_ATA_set_DTR0_TM( 3 ); } else { GH_ATA_set_DTR1_TEOC( 2 ); GH_ATA_set_DTR1_TD( 8 ); GH_ATA_set_DTR1_TM( 3 ); } } else { result = GD_ERR_FEATURE_NOT_SUPPORTED; DEBUG_Printf( 9, ( "leave: %s, result=0x%08x\n", __FUNCTION__, result ) ); return( result ); } break; } // set command again to make sure the settings are // accepted by the IDE controller... // fill LBA registers with dummy NULL values gd_ide_registers->device = master_mask; gd_ide_registers->lba_high = 0x00; gd_ide_registers->lba_mid = 0x00; gd_ide_registers->lba_low = 0x00; gd_ide_registers->sector_count = mode_mask; gd_ide_registers->feature_error = 0x03; // 0x03 = set transfer mode gd_ide_registers->command_state = GD_IDE_COMMAND_SET_FEATURE; if( GD_IDE_ReadyWait( ideHandle, GD_IDE_BUSY_TIMEOUT_MSECS_CMD ) != GD_OK ) { result = GD_ERR_IDE_TIMEOUT; DEBUG_Printf( 9, ( "leave: %s, result=0x%08x\n", __FUNCTION__, result ) ); return( result ); } if( gd_ide_registers->command_state & GD_IDE_STATE_ERROR ) { result = GD_ERR_IDE_STATUS; DEBUG_Printf( 9, ( "leave: %s, result=0x%08x\n", __FUNCTION__, result ) ); return( result ); } result = GD_OK; } else result = GD_ERR_INVALID_HANDLE; DEBUG_Printf( 9, ( "leave: %s, result=0x%08x\n", __FUNCTION__, result ) ); return( result ); }
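/*
** Worked example of the mode mask computation above (illustrative only):
** for dmamode = 2 the SET FEATURE sector count becomes
**     mode_mask = (2 & 0x03) + 0x20 = 0x22   (multiword DMA-2)
** which matches the table, and the ATA controller timing is programmed with
** Teoc = 2, Td = 8, Tm = 3 (UPI_ATAPCTR 0x02000803).
*/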
void mep_debug_putc(unsigned char c) { DEBUG_Printf("%c", c); }
void init_cvc(int layer_id, int res_id) { unsigned long int PhysicalAddress = CVC_BASE ; int map_len = 0xF00; int fd = open( "/dev/mem", O_RDWR); unsigned char* cvc_base = (unsigned char*)mmap(NULL, map_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)PhysicalAddress); IN0(); //printf("%s\n",__func__); //check if it worked if(cvc_base == MAP_FAILED) { perror("Mapping memory for absolute memory access failed.\n"); close(fd); return; } DEBUG_Printf ("CVC Base mapping successful :\n0x%x to 0x%x, size = %d\n ", (int )PhysicalAddress, (int)cvc_base, map_len ); // disable all the layers REG_WRITE(cvc_base, CVC_L0_CTRL , 0x00000000); REG_WRITE(cvc_base, CVC_L1_CTRL , 0x00000000); REG_WRITE(cvc_base, CVC_L2_CTRL , 0x00000000); // Layer specific configuration. switch(layer_id) { case 0: REG_WRITE(cvc_base, CVC_L0_H_OFFSET ,0x00000000); REG_WRITE(cvc_base, CVC_L0_V_OFFSET ,0x00000000); REG_WRITE(cvc_base, CVC_L0_H_POSITION ,gVideoParam[res_id][E_HActive] - 1); REG_WRITE(cvc_base, CVC_L0_V_POSITION ,gVideoParam[res_id][E_VActive] - 1); REG_WRITE(cvc_base, CVC_L0_WIDTH ,gVideoParam[res_id][E_HActive] - 1); REG_WRITE(cvc_base, CVC_L0_HEIGHT ,gVideoParam[res_id][E_VActive] - 1); REG_WRITE(cvc_base, CVC_L0_CTRL ,0x00000007); break; case 1: REG_WRITE(cvc_base, CVC_L1_H_OFFSET ,0x00000000); REG_WRITE(cvc_base, CVC_L1_V_OFFSET ,0x00000000); REG_WRITE(cvc_base, CVC_L1_H_POSITION ,gVideoParam[res_id][E_HActive] - 1); REG_WRITE(cvc_base, CVC_L1_V_POSITION ,gVideoParam[res_id][E_VActive] - 1); REG_WRITE(cvc_base, CVC_L1_WIDTH ,gVideoParam[res_id][E_HActive] - 1); REG_WRITE(cvc_base, CVC_L1_HEIGHT ,gVideoParam[res_id][E_VActive] - 1); REG_WRITE(cvc_base, CVC_L1_CTRL ,0x00000007); break; case 2: REG_WRITE(cvc_base, CVC_L2_H_OFFSET ,0x00000000); REG_WRITE(cvc_base, CVC_L2_V_OFFSET ,0x00000000); REG_WRITE(cvc_base, CVC_L2_H_POSITION ,gVideoParam[res_id][E_HActive] - 1); REG_WRITE(cvc_base, CVC_L2_V_POSITION ,gVideoParam[res_id][E_VActive] - 1); REG_WRITE(cvc_base, CVC_L2_WIDTH ,gVideoParam[res_id][E_HActive] - 1); REG_WRITE(cvc_base, CVC_L2_HEIGHT ,gVideoParam[res_id][E_VActive] - 1); REG_WRITE(cvc_base, CVC_L2_CTRL ,0x00000007); break; default: perror("Not Supported"); munmap((void *)cvc_base, map_len); close(fd); return; } munmap((void *)cvc_base, map_len); close(fd); OUT0(); }
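/*
 * Illustrative sketch only (not part of the original sources): one possible
 * bring-up sequence for CVC layer 0 using the helpers in this file.
 * 'res_id' is assumed to be a valid index into gVideoParam[].
 *
 *   void example_cvc_bringup(int res_id)
 *   {
 *       init_timebase(res_id);      // program the video timing generator
 *       init_cvc(0, res_id);        // configure and enable CVC layer 0
 *       set_cvc_frm_id(0, 0);       // display frame buffer 0 explicitly, or ...
 *       set_cvc_circular_mode(0);   // ... let the layer cycle its buffers on its own
 *   }
 */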
/* ******************************************************************************* ** ** \brief Set the DMA read/write transfer command ** ** This function sets the required IDE transfer command for DMA data transfer, ** depending on the 'use48bit' element in the given IDE handle and depending ** on the passed 'readMode' flag. ** ** \param handle The IDE handle to access ** \param lba The logical block address to start reading at. ** \param lba_count The number of 512byte sectors to read. ** \param readMode A flag which decides whether to start a DMA read (GTRUE) ** or a DMA write transfer (GFALSE). ** ** \return ** - GD_OK if successful ** - other in case of error. ** ******************************************************************************* */ GERR GD_IDE_DmaSetCommand( GD_HANDLE ideHandle, U32 lba, U8 lba_count, GBOOL readMode ) { #undef __FUNCTION__ #define __FUNCTION__ "GD_IDE_DmaSetCommand" GD_IDE_HANDLE_T* ide = (GD_IDE_HANDLE_T*)ideHandle; U16 sectors = lba_count; GERR result = GD_ERR_INVALID_HANDLE; U8 command; DEBUG_Printf( 9, ( "enter: %s\n", __FUNCTION__ ) ); if( sectors == 0 ) sectors = 256; if( ( ide != 0 ) && ( ide->inuse == GTRUE ) && ( ide->id == GD_IDE_HANDLE_ID ) ) { // enable IDE mode in ATA controller #ifdef RE_INIT_ATA_CTRL DEBUG_Printf( 7, ( "enable ATA controller\n" ) ); GH_ATA_set_CTRL( 1 ); //GH_ATA_set_CTRL( 0 ); GH_ATA_set_CTRL_IDEEN( 1 ); DEBUG_Printf( 7, ( "ATA CTRL: 0x%08x\n", GH_ATA_get_CTRL() ) ); #endif if( readMode ) { if( ide->use48bit ) command = GD_IDE_COMMAND_READ_DMA_EXT; else command = GD_IDE_COMMAND_READ_DMA; // configure ATA controller for IDE disk read DEBUG_Printf( 8, ( "configure ATA controller for read\n" ) ); GH_ATA_set_CTRL_DMADIR( 0 ); // 1=write to HD, 0=read from HD DEBUG_Printf( 8, ( "ATA CTRL: 0x%08x\n", GH_ATA_get_CTRL() ) ); } else // readMode == GFALSE --> perform 'write' { if( ide->use48bit ) command = GD_IDE_COMMAND_WRITE_DMA_EXT; else command = GD_IDE_COMMAND_WRITE_DMA; // configure ATA controller for IDE disk write DEBUG_Printf( 7, ( "configure ATA controller for write\n" ) ); GH_ATA_set_CTRL_DMADIR( 1 ); // 1=write to HD, 0=read from HD DEBUG_Printf( 7, ( "ATA CTRL: 0x%08x\n", GH_ATA_get_CTRL() ) ); } // set the required DMA read/write command DEBUG_Printf( 7, ( "set ATA/IDE command=0x%02X, lba=%d, sectors=%d, master=%d\n", command, lba, sectors, ide->master ) ); result = GD_IDE_SetCommand( ideHandle, command, lba, sectors ); DEBUG_Printf( 7, ( "enable DMA controller\n" ) ); GH_ATA_set_CTRL_DMAEN( 1 ); DEBUG_Printf( 7, ( "ATA CTRL: 0x%08x\n", GH_ATA_get_CTRL() ) ); } DEBUG_Printf( 9, ( "leave: %s, result=%d\n", __FUNCTION__, result ) ); return( result ); }
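/*
*******************************************************************************
**
** \note Illustrative sketch only, not from the original sources: preparing a
** DMA sector read with GD_IDE_DmaSetCommand() while holding the device lock.
** The handle is assumed to have been obtained via GD_IDE_HandleAllocate();
** setting up and running the DMA data transfer itself is outside this sketch.
**
** \code
** GERR MAIN_IdeDmaReadExample( GD_HANDLE ideHandle, U32 lba, U8 sectors )
** {
**     GERR result;
**
**     GD_IDE_LockDevice();
**
**     // issue the READ DMA command for 'sectors' sectors starting at 'lba'
**     result = GD_IDE_DmaSetCommand( ideHandle, lba, sectors, GTRUE );
**
**     // ... configure and start the DMA transfer, wait for completion ...
**
**     GD_IDE_UnlockDevice();
**     return( result );
** }
** \endcode
**
*******************************************************************************
*/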
/*! ******************************************************************************* ** ** \brief Copy sdram memory using DMA ** ** This function implements the DMA based SDRAM memory copy feature. ** It copies the given number of bytes from the source address to the target ** address. Only long word aligned byte counts will be copied by the DMA ** controller, any leftover bytes (max. 3) will be copied within an internal ** finish function using the cpu to copy the data. ** ** \param target Target address where to copy data into ** \param source Source address where to copy data from ** \param bytes Number of bytes to copy ** \param finishCallback Function to be called when transfer has completed ** ** \note Both addresses, the source and the target address must be long word ** aligned as the DMA controller cannot handle byte addresses. ** ** The following example shows how to use the DMA memory copy engine ** within an application: ** ** \code ** #include <gtypes.h> ** #include <gd_lib/gd_dma.h> ** #include <rtos/rtos_lib.h> ** ** static RTOS_Semaphore main_memcpy_semphore = 0; ** ** void MAIN_MemcpyFinish( GERR status, U32 bytes ) ** { ** RTOS_SemaphoreRelease( main_memcpy_semphore ); ** } ** ** void MAIN_MemcpyFunction( U8* into_buffer, U8* from_buffer, U32 bytes ) ** { ** GD_DMA_REQUEST_S* request; ** ** main_memcpy_semphore = RTOS_SemaphoreCreate( 0 ); ** ** request = GD_DMA_SDRAM_Memcpy( into_buffer, from_buffer, bytes, ** MAIN_MemcpyFinish ); ** ** // thread will be suspended here until the sdram copy ** // engine calls the function MAIN_MemcpyFinish ** RTOS_SemaphoreWait( main_memcpy_semphore, GTRUE ); ** RTOS_SemaphoreDestroy( main_memcpy_semphore ); ** ** : ** : ** : ** } ** \endcode ******************************************************************************* */ GD_DMA_REQUEST_S* GD_DMA_SDRAM_Memcpy( U8* target, U8* source, U32 bytes, GD_DMA_OPT_CALLBACK_F finishCallback ) { #undef __FUNCTION__ #define __FUNCTION__ "GD_DMA_SDRAM_Memcpy" GD_DMA_REQUEST_S* dmaRequest; U16 dmaFeature = GD_DMA_FEATURE_SDRAM_COPY; DEBUG_Printf( 9, ( "enter: %s\n", __FUNCTION__ ) ); // check for long word alignment if( ( ( (U32)target & 0x3 ) != 0x0 ) || ( ( (U32)source & 0x3 ) != 0 ) ) { DEBUG_Printf( 1, ( "DMA memcpy, address is not long word aligned\n" ) ); return( 0 ); } dmaRequest = GD_DMA_RequestAllocate( dmaFeature, GTRUE ); if( dmaRequest ) { DEBUG_Printf( 7, ( "preparing DMA request block for SDRAM copy\n" ) ); dmaRequest->preEnableCallback = 0; dmaRequest->postEnableCallback = 0; dmaRequest->postFinishCallback = GD_DMA_SDRAM_Finalize; dmaRequest->optHandle = 0; dmaRequest->optData = (void*)bytes; dmaRequest->optFinishCallback = finishCallback; dmaRequest->optChangeCallback = 0; dmaRequest->channelConfig.chConfig = 0; dmaRequest->channelConfig.chLength = bytes & (~0x3); dmaRequest->channelConfig.chLLAddr = 0; if (((U32)source & CPU_VOID_MEM_SWAP) != 0) dmaRequest->channelConfig.chReadAddr_ENDIAN_SWAP = 1; dmaRequest->channelConfig.chReadAddr_PERIPHERAL_ADDR = 0x0; dmaRequest->channelConfig.chReadAddr_OFFSET_ADDR = (U32)source & 0x0FFFFFFF; dmaRequest->channelConfig.chReadLine_LINES = 0x3FF; dmaRequest->channelConfig.chReadInc_LINE_LENGTH = 0xFFF; dmaRequest->channelConfig.chReadInc_LINE_INCREMENT = 0; dmaRequest->channelConfig.chReadLoopAddr = 0; if (((U32)target & CPU_VOID_MEM_SWAP) != 0) dmaRequest->channelConfig.chWriteAddr_ENDIAN_SWAP = 1; dmaRequest->channelConfig.chWriteAddr_PERIPHERAL_ADDR = 0x0; dmaRequest->channelConfig.chWriteAddr_OFFSET_ADDR = (U32)target & 0x0FFFFFFF;
dmaRequest->channelConfig.chWriteLine_LINES = 0x3FF; dmaRequest->channelConfig.chWriteInc_LINE_LENGTH = 0xFFF; dmaRequest->channelConfig.chWriteInc_LINE_INCREMENT = 0; dmaRequest->channelConfig.chWriteLoopAddr = 0; DEBUG_Printf( 7, ( "sending DMA request block to DMA fifo\n" ) ); if( GD_DMA_SendRequest( dmaRequest ) != GD_OK ) { GD_DMA_RequestRelease( dmaRequest ); dmaRequest = 0; } } DEBUG_Printf( 9, ( "leave: %s, result=0x%08x\n", __FUNCTION__, dmaRequest ) ); return( dmaRequest ); }
void mep_debug_puts(unsigned char *msg) { DEBUG_Printf("%s", msg); }
void COMMONK_CommonCfgImportByHexData(UINT32 a_ulDataID, UINT32 a_ulDataLength, UINT8 * a_pucDataBuf, alCOMMONK_CommonCfg *a_ptCfg) { UINT32 ulDataIndex = 0; switch (a_ulDataID & 0xFFFF) { case 0x0000: // Car Model ID { a_ptCfg->ulCarModel = alHEXDATA_UnpackUINT32(a_ulDataLength, a_pucDataBuf); } break; case 0x0001: // Display Area Size X { a_ptCfg->tDisplayAreaSize.lX = alHEXDATA_UnpackINT32(a_ulDataLength, a_pucDataBuf); } break; case 0x0002: // Display Area Size Y { a_ptCfg->tDisplayAreaSize.lY = alHEXDATA_UnpackINT32(a_ulDataLength, a_pucDataBuf); } break; case 0x0003: // Display Start Point X { a_ptCfg->tDisplayStartPoint.lX = alHEXDATA_UnpackINT32(a_ulDataLength, a_pucDataBuf); } break; case 0x0004: // Display Start Point Y { a_ptCfg->tDisplayStartPoint.lY = alHEXDATA_UnpackINT32(a_ulDataLength, a_pucDataBuf); } break; case 0x0005: // fLCDRatio { a_ptCfg->fLCDRatio = alHEXDATA_UnpackFLOAT32(a_ulDataLength, a_pucDataBuf); } break; case 0x0006: // Version number of Fisheye Model. { a_ptCfg->ucFisheyeModelVersion = alHEXDATA_UnpackUINT8(a_ulDataLength, a_pucDataBuf); } break; case 0x0007: // Car Length { a_ptCfg->ulCarLength = alHEXDATA_UnpackUINT32(a_ulDataLength, a_pucDataBuf); } break; case 0x0008: // Car Width { a_ptCfg->ulCarWidth = alHEXDATA_UnpackUINT32(a_ulDataLength, a_pucDataBuf); } break; case 0x0009: // Maximum Steering Angle { a_ptCfg->ulMaxSteeringAngle = alHEXDATA_UnpackUINT32(a_ulDataLength, a_pucDataBuf); } break; case 0x000A: // Front wheel axle width (front track) { a_ptCfg->ulFrontWheelAxleWidth = alHEXDATA_UnpackUINT32(a_ulDataLength, a_pucDataBuf); } break; case 0x000B: // Rear wheel axle width (rear track) { a_ptCfg->ulRearWheelAxleWidth = alHEXDATA_UnpackUINT32(a_ulDataLength, a_pucDataBuf); } break; case 0x000C: // Distance between front and rear wheel axles (wheelbase) { a_ptCfg->ulWheelAxleDistance = alHEXDATA_UnpackUINT32(a_ulDataLength, a_pucDataBuf); } break; case 0x000D: // Distance from the front wheel axle center to the car head { a_ptCfg->ulFrontWheelAxle2Head = alHEXDATA_UnpackUINT32(a_ulDataLength, a_pucDataBuf); } break; case 0x000E: // Distance from the rear wheel axle center to the car tail { a_ptCfg->ulRearWheelAxle2Tail = alHEXDATA_UnpackUINT32(a_ulDataLength, a_pucDataBuf); } break; case 0x000F: // Distance from the front wheel fixed point to the car head { a_ptCfg->ulFrontFixedPoint2Head = alHEXDATA_UnpackUINT32(a_ulDataLength, a_pucDataBuf); } break; case 0x0010: // Distance from the front wheel leading edge to the car head { a_ptCfg->ulFrontWheelLE2Head = alHEXDATA_UnpackUINT32(a_ulDataLength, a_pucDataBuf); } break; case 0x0011: // Steering angle to wheel turn formula parameter - A { a_ptCfg->fSteerParamA = alHEXDATA_UnpackFLOAT32(a_ulDataLength, a_pucDataBuf); } break; case 0x0012: // Steering angle to wheel turn formula parameter - B { a_ptCfg->fSteerParamB = alHEXDATA_UnpackFLOAT32(a_ulDataLength, a_pucDataBuf); } break; default: // Do nothing DEBUG_Printf("Invalid Data ID: 0x%X - CommonkCfgImport\n", a_ulDataID); break; } }
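/*
 * Illustrative sketch only (not part of the original sources): feeding two
 * configuration records into the import function above. The value buffers
 * and their byte order are assumptions made for the example; the real data
 * comes from the hex configuration data stream and is decoded by the
 * alHEXDATA_Unpack* helpers.
 *
 *   void example_common_cfg_import(alCOMMONK_CommonCfg *cfg)
 *   {
 *       UINT8 car_model[4] = { 0x00, 0x00, 0x00, 0x07 };   // assumed encoding of UINT32 value 7
 *       UINT8 area_x[4]    = { 0x00, 0x00, 0x03, 0x20 };   // assumed encoding of INT32 value 800
 *
 *       COMMONK_CommonCfgImportByHexData(0x0000, 4, car_model, cfg);  // car model ID
 *       COMMONK_CommonCfgImportByHexData(0x0001, 4, area_x,    cfg);  // display area size X
 *   }
 */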