/**
 * Encoder constructor taking two digital-source references.
 *
 * The sources are borrowed, not owned: each shared_ptr member is created with
 * a NullDeleter so that destroying this Encoder never frees the caller's
 * DigitalSource objects.
 *
 * @param aSource          Source used for the a channel.
 * @param bSource          Source used for the b channel.
 * @param reverseDirection True to invert the output so forward is positive.
 * @param encodingType     k1X, k2X, or k4X decoding mode.
 */
Encoder::Encoder(DigitalSource &aSource, DigitalSource &bSource,
                 bool reverseDirection, EncodingType encodingType)
    : m_aSource(&aSource, NullDeleter<DigitalSource>()),
      m_bSource(&bSource, NullDeleter<DigitalSource>()) {
  // References can never be null, so no parameter validation is needed here.
  InitEncoder(reverseDirection, encodingType);
}
/**
 * Encoder constructor.
 *
 * Construct an Encoder given a and b channels assuming the default module.
 * Both DigitalInputs are allocated (and therefore owned) by this object.
 *
 * @param aChannel The a channel digital input channel.
 * @param bChannel The b channel digital input channel.
 * @param reverseDirection represents the orientation of the encoder and
 *        inverts the output values if necessary so forward represents
 *        positive values.
 * @param encodingType either k1X, k2X, or k4X to indicate 1X, 2X or 4X
 *        decoding. If 4X is selected, then an encoder FPGA object is used and
 *        the returned counts will be 4x the encoder spec'd value since all
 *        rising and falling edges are counted. If 1X or 2X are selected then
 *        a counter object will be used and the returned value will either
 *        exactly match the spec'd count or be double (2x) the spec'd count.
 */
Encoder::Encoder(UINT32 aChannel, UINT32 bChannel, bool reverseDirection,
                 EncodingType encodingType)
    : m_encoder(NULL),  // fix: initialize like the module-specified ctor does,
      m_counter(NULL)   // so these are never read uninitialized
{
  m_aSource = new DigitalInput(aChannel);
  m_bSource = new DigitalInput(bChannel);
  InitEncoder(reverseDirection, encodingType);
  // Record ownership so the destructor knows to delete the inputs.
  m_allocatedASource = true;
  m_allocatedBSource = true;
}
/**
 * Encoder constructor.
 *
 * Construct an Encoder from two existing digital sources. Used when the
 * digital inputs are shared with other objects: this class does not allocate
 * the inputs and assumes they are already accounted for, so it will not
 * delete them on destruction.
 *
 * @param aSource The source that should be used for the a channel.
 * @param bSource The source that should be used for the b channel.
 * @param reverseDirection represents the orientation of the encoder and
 *        inverts the output values if necessary so forward represents
 *        positive values.
 * @param encodingType either k1X, k2X, or k4X to indicate 1X, 2X or 4X
 *        decoding (see the channel-based constructor for details).
 */
Encoder::Encoder(DigitalSource &aSource, DigitalSource &bSource,
                 bool reverseDirection, EncodingType encodingType) {
  // Borrow the caller's sources; ownership flags stay false so the
  // destructor leaves them alone.
  m_allocatedASource = false;
  m_allocatedBSource = false;
  m_aSource = &aSource;
  m_bSource = &bSource;
  InitEncoder(reverseDirection, encodingType);
}
/**
 * Encoder constructor taking shared ownership of two digital sources.
 *
 * Reports a WPI NullParameter error (and skips initialization) if either
 * source pointer is null.
 *
 * @param aSource          Shared source used for the a channel.
 * @param bSource          Shared source used for the b channel.
 * @param reverseDirection True to invert the output so forward is positive.
 * @param encodingType     k1X, k2X, or k4X decoding mode.
 */
Encoder::Encoder(std::shared_ptr<DigitalSource> aSource,
                 std::shared_ptr<DigitalSource> bSource,
                 bool reverseDirection, EncodingType encodingType)
    : m_aSource(aSource), m_bSource(bSource) {
  // Guard clause: refuse to initialize with missing sources.
  if (m_aSource == nullptr || m_bSource == nullptr) {
    wpi_setWPIError(NullParameter);
    return;
  }
  InitEncoder(reverseDirection, encodingType);
}
/**
 * Encoder constructor.
 *
 * Construct an Encoder given a and b channels as digital input pointers.
 * Used when the digital inputs are shared: the Encoder does not take
 * ownership (a NullDeleter is installed on the shared_ptr members), and it
 * assumes the inputs are already accounted for.
 *
 * The counter starts counting immediately.
 *
 * @param aSource The source that should be used for the a channel.
 * @param bSource The source that should be used for the b channel.
 * @param reverseDirection represents the orientation of the encoder and
 *        inverts the output values if necessary so forward represents
 *        positive values.
 * @param encodingType either k1X, k2X, or k4X to indicate 1X, 2X or 4X
 *        decoding. If 4X is selected, then an encoder FPGA object is used
 *        and the returned counts will be 4x the encoder spec'd value since
 *        all rising and falling edges are counted. If 1X or 2X are selected
 *        then a counter object will be used and the returned value will
 *        either exactly match the spec'd count or be double (2x) the spec'd
 *        count.
 */
Encoder::Encoder(DigitalSource* aSource, DigitalSource* bSource,
                 bool reverseDirection, EncodingType encodingType)
    : m_aSource(aSource, NullDeleter<DigitalSource>()),
      m_bSource(bSource, NullDeleter<DigitalSource>()) {
  // Guard clause: a null source is a caller error; record it and bail out.
  if (m_aSource == nullptr || m_bSource == nullptr) {
    wpi_setWPIError(NullParameter);
    return;
  }
  InitEncoder(reverseDirection, encodingType);
}
/**
 * Encoder constructor.
 *
 * Construct an Encoder given a and b channels assuming the default module.
 * Both DigitalInputs are heap-allocated and owned by this Encoder.
 *
 * @param aChannel The a channel digital input channel.
 * @param bChannel The b channel digital input channel.
 * @param reverseDirection represents the orientation of the encoder and
 *        inverts the output values if necessary so forward represents
 *        positive values.
 * @param encodingType either k1X, k2X, or k4X to indicate 1X, 2X or 4X
 *        decoding. If 4X is selected, then an encoder FPGA object is used
 *        and the returned counts will be 4x the encoder spec'd value since
 *        all rising and falling edges are counted. If 1X or 2X are selected
 *        then a counter object will be used and the returned value will
 *        either exactly match the spec'd count or be double (2x) the spec'd
 *        count.
 */
Encoder::Encoder(uint32_t aChannel, uint32_t bChannel, bool reverseDirection,
                 EncodingType encodingType)
    : m_encoder(NULL),
      m_counter(NULL) {
  // Allocate both inputs; InitEncoder wires them into the FPGA/counter.
  m_aSource = new DigitalInput(aChannel);
  m_bSource = new DigitalInput(bChannel);
  InitEncoder(reverseDirection, encodingType);
  // Mark the inputs as owned so the destructor releases them.
  m_allocatedASource = true;
  m_allocatedBSource = true;
}
/**
 * Encoder constructor.
 *
 * Construct an Encoder given a and b channels as digital input pointers.
 * Used when the digital inputs are shared: the Encoder neither allocates
 * nor frees them. A null pointer for either source raises a fatal
 * NullParameter error and skips initialization.
 *
 * @param aSource The source that should be used for the a channel.
 * @param bSource The source that should be used for the b channel.
 * @param reverseDirection represents the orientation of the encoder and
 *        inverts the output values if necessary so forward represents
 *        positive values.
 * @param encodingType either k1X, k2X, or k4X to indicate 1X, 2X or 4X
 *        decoding (see the channel-based constructor for details).
 */
Encoder::Encoder(DigitalSource *aSource, DigitalSource *bSource,
                 bool reverseDirection, EncodingType encodingType) {
  m_aSource = aSource;
  m_bSource = bSource;
  // Borrowed sources: never freed by this object.
  m_allocatedASource = false;
  m_allocatedBSource = false;
  // Guard clause: fatal error on a missing source, otherwise initialize.
  if (m_aSource == NULL || m_bSource == NULL) {
    wpi_fatal(NullParameter);
    return;
  }
  InitEncoder(reverseDirection, encodingType);
}
/**
 * Encoder constructor.
 *
 * Construct an Encoder given a and b modules and channels, fully specified.
 * Both DigitalInputs are heap-allocated and owned by this Encoder.
 *
 * @param aModuleNumber The a channel digital input module.
 * @param aChannel      The a channel digital input channel.
 * @param bModuleNumber The b channel digital input module.
 * @param bChannel      The b channel digital input channel.
 * @param reverseDirection represents the orientation of the encoder and
 *        inverts the output values if necessary so forward represents
 *        positive values.
 * @param encodingType either k1X, k2X, or k4X to indicate 1X, 2X or 4X
 *        decoding. If 4X is selected, then an encoder FPGA object is used
 *        and the returned counts will be 4x the encoder spec'd value since
 *        all rising and falling edges are counted. If 1X or 2X are selected
 *        then a counter object will be used and the returned value will
 *        either exactly match the spec'd count or be double (2x) the spec'd
 *        count.
 */
Encoder::Encoder(UINT8 aModuleNumber, UINT32 aChannel,
                 UINT8 bModuleNumber, UINT32 bChannel,
                 bool reverseDirection, EncodingType encodingType)
    : m_encoder(NULL),
      m_counter(NULL) {
  // Allocate one input per (module, channel) pair.
  m_aSource = new DigitalInput(aModuleNumber, aChannel);
  m_bSource = new DigitalInput(bModuleNumber, bChannel);
  InitEncoder(reverseDirection, encodingType);
  // Owned inputs: the destructor is responsible for deleting them.
  m_allocatedASource = true;
  m_allocatedBSource = true;
}
/**
 * Top-level WebP encode entry point: validate `config` and `pic`, run the
 * VP8 analysis/stat/encode passes, and emit the bitstream.
 *
 * Returns 1 on success, 0 on failure; on failure the specific error code is
 * recorded on `pic` via WebPEncodingSetError (except for a NULL `pic`,
 * where there is nowhere to record it).
 */
int WebPEncode(const WebPConfig* const config, WebPPicture* const pic) {
  VP8Encoder* encoder;
  int success;

  if (pic == NULL) return 0;
  WebPEncodingSetError(pic, VP8_ENC_OK);  // all ok so far

  // Parameter validation; the order of these checks determines which error
  // code wins when several problems are present, so it is preserved exactly.
  if (config == NULL) {   // bad params
    return WebPEncodingSetError(pic, VP8_ENC_ERROR_NULL_PARAMETER);
  }
  if (!WebPValidateConfig(config)) {
    return WebPEncodingSetError(pic, VP8_ENC_ERROR_INVALID_CONFIGURATION);
  }
  if (pic->width <= 0 || pic->height <= 0) {
    return WebPEncodingSetError(pic, VP8_ENC_ERROR_BAD_DIMENSION);
  }
  if (pic->y == NULL || pic->u == NULL || pic->v == NULL) {
    return WebPEncodingSetError(pic, VP8_ENC_ERROR_NULL_PARAMETER);
  }
  if (pic->width > WEBP_MAX_DIMENSION || pic->height > WEBP_MAX_DIMENSION) {
    return WebPEncodingSetError(pic, VP8_ENC_ERROR_BAD_DIMENSION);
  }

  encoder = InitEncoder(config, pic);
  if (encoder == NULL) return 0;  // pic->error is already set.

  // Note: each of the tasks below account for 20% in the progress report.
  success = VP8EncAnalyze(encoder) &&
            VP8StatLoop(encoder) &&
            VP8EncLoop(encoder) &&
            VP8EncFinishAlpha(encoder)
#ifdef WEBP_EXPERIMENTAL_FEATURES
            && VP8EncFinishLayer(encoder)
#endif
            && VP8EncWrite(encoder);
  StoreStats(encoder);
  if (!success) {
    VP8EncFreeBitWriters(encoder);
  }
  DeleteEncoder(encoder);
  return success;
}
/**
 * Encoder constructor.
 *
 * Construct an Encoder given a and b DIO channels. Both DigitalInputs are
 * created here and owned via shared_ptr, so no manual cleanup is needed.
 *
 * The counter starts counting immediately.
 *
 * @param aChannel The a channel DIO channel. 0-9 are on-board, 10-25 are on
 *                 the MXP port.
 * @param bChannel The b channel DIO channel. 0-9 are on-board, 10-25 are on
 *                 the MXP port.
 * @param reverseDirection represents the orientation of the encoder and
 *        inverts the output values if necessary so forward represents
 *        positive values.
 * @param encodingType either k1X, k2X, or k4X to indicate 1X, 2X or 4X
 *        decoding. If 4X is selected, then an encoder FPGA object is used
 *        and the returned counts will be 4x the encoder spec'd value since
 *        all rising and falling edges are counted. If 1X or 2X are selected
 *        then a counter object will be used and the returned value will
 *        either exactly match the spec'd count or be double (2x) the spec'd
 *        count.
 */
Encoder::Encoder(uint32_t aChannel, uint32_t bChannel, bool reverseDirection,
                 EncodingType encodingType) {
  // Shared ownership: the inputs live exactly as long as someone holds them.
  m_aSource = std::make_shared<DigitalInput>(aChannel);
  m_bSource = std::make_shared<DigitalInput>(bChannel);
  InitEncoder(reverseDirection, encodingType);
}
/**
 * Encoder constructor.
 *
 * Construct an Encoder given a and b channels; all of the work is delegated
 * to the channel-based InitEncoder overload.
 *
 * The counter starts counting immediately.
 *
 * @param aChannel The a channel digital input channel.
 * @param bChannel The b channel digital input channel.
 * @param reverseDirection represents the orientation of the encoder and
 *        inverts the output values if necessary so forward represents
 *        positive values.
 * @param encodingType either k1X, k2X, or k4X to indicate 1X, 2X or 4X
 *        decoding. If 4X is selected, then an encoder FPGA object is used
 *        and the returned counts will be 4x the encoder spec'd value since
 *        all rising and falling edges are counted. If 1X or 2X are selected
 *        then a counter object will be used and the returned value will
 *        either exactly match the spec'd count or be double (2x) the spec'd
 *        count.
 */
Encoder::Encoder(uint32_t aChannel, uint32_t bChannel, bool reverseDirection,
                 EncodingType encodingType) {
  InitEncoder(aChannel, bChannel, reverseDirection, encodingType);
}
int main(int argc, char **argv) { int ret = -1; uint8_t *data = NULL; unsigned long size = 0; int times = 0; int width = 0; int height = 0; char *dest = NULL; unsigned long dest_size = 0; long long pts = 0; long long dts = 0; AUDIOPACKET ap[30] = {0}; int ap_len = 0; int i = 0; CAPTURECONFIG captureConfig; PCAPTURECONFIG pCaptureConfig = &captureConfig; ENCODECONFIG encodeConfig; PENCODECONFIG pEncodeConfig = &encodeConfig; PENCODER pEncoder; PCAPTURE pCapture; DWORD start_time, end_time; pCaptureConfig->fps = 5; pCaptureConfig->channels = 2; pCaptureConfig->bits_per_sample = 16; pCaptureConfig->samples_per_sec = 48000; pCaptureConfig->avg_bytes_per_sec = 48000; pEncodeConfig->fps = 5; pEncodeConfig->width = 1366; pEncodeConfig->height = 768; pEncodeConfig->bit_rate = 400000; pEncodeConfig->channels = 2; pEncodeConfig->bits_per_sample = 16; pEncodeConfig->sample_rate = 48000; pEncodeConfig->avg_bytes_per_sec = 48000; pEncodeConfig->record = 1; memcpy(pEncodeConfig->record_file, "D:\\desktop_live.mp4", 20); InitLog(LOG_DEBUG, OUT_FILE); pCapture = InitCapture(pCaptureConfig); if (NULL == pCapture) { printf("init capture failed\n"); return -1; } pEncoder = InitEncoder(pEncodeConfig); if (NULL == pEncoder) { printf("init encoder failed\n"); return -1; } ret = StartCapture(pCapture); if (SECCESS != ret) { printf("start capture failed\n"); return -1; } start_time = end_time = timeGetTime(); while(10*1000 > (end_time - start_time)) { if (SECCESS == GetVideoFrame(pCapture, &data, &size, &width, &height)) { ret = EncodeVideo(pEncoder, data, width, height, &dest, &dest_size, &pts, &dts); if (ret == SECCESS) { free(dest); } times++; printf("video data size = %d\n", size); free(data); } if (SECCESS == GetAudioFrame(pCapture, &data, &size)) { ap_len = 0; ret = EncodeAudio(pEncoder, data, size, ap, &ap_len); if (ret == SECCESS) { for (i=0; i<ap_len; i++) { free(ap[i].data); } } printf("audio data size = %d\n", size); free(data); } end_time = timeGetTime(); } 
StopCapture(pCapture); FreeCapture(pCapture); FflushEncoder(pEncoder); FreeEncoder(pEncoder); FreeLog(); _CrtDumpMemoryLeaks(); return 0; }
/*
 * CWelsH264SVCEncoder class implementation
 */

/**
 * Default constructor: zero the core state, optionally open the bitstream /
 * length dump files (OUTPUT_BIT_STREAM builds), and run InitEncoder().
 *
 * Fix: under REC_FRAME_COUNT the original declared a *local*
 * `int32_t m_uiCountFrameNum = 0;` that shadowed the member of the same
 * name, leaving the member uninitialized. It is now a plain assignment.
 */
CWelsH264SVCEncoder::CWelsH264SVCEncoder()
  : m_pEncContext (NULL),
    m_pWelsTrace (NULL),
    m_iMaxPicWidth (0),
    m_iMaxPicHeight (0),
    m_iCspInternal (0),
    m_bInitialFlag (false) {
#ifdef REC_FRAME_COUNT
  m_uiCountFrameNum = 0;  // fix: assign the member, do not shadow it
#endif//REC_FRAME_COUNT

#ifdef OUTPUT_BIT_STREAM
  // Build two dump-file names, each "enc_<prefix>_0x<this>_YYMMDDhhmmss.mmm.<ext>",
  // tracking remaining buffer space after every append.
  char strStreamFileName[1024] = { 0 }; //for .264
  int32_t iBufferUsed = 0;
  int32_t iBufferLeft = 1023;
  int32_t iCurUsed;

  char strLenFileName[1024] = { 0 }; //for .len
  int32_t iBufferUsedSize = 0;
  int32_t iBufferLeftSize = 1023;
  int32_t iCurUsedSize;

  SWelsTime tTime;
  WelsGetTimeOfDay (&tTime);

  // Prefix: encoder instance address makes concurrent dumps distinguishable.
  iCurUsed = WelsSnprintf (strStreamFileName, iBufferLeft, "enc_bs_0x%p_", (void*)this);
  iCurUsedSize = WelsSnprintf (strLenFileName, iBufferLeftSize, "enc_size_0x%p_", (void*)this);

  iBufferUsed += iCurUsed;
  iBufferLeft -= iCurUsed;
  if (iBufferLeft > 0) {
    iCurUsed = WelsStrftime (&strStreamFileName[iBufferUsed], iBufferLeft, "%y%m%d%H%M%S", &tTime);
    iBufferUsed += iCurUsed;
    iBufferLeft -= iCurUsed;
  }

  iBufferUsedSize += iCurUsedSize;
  iBufferLeftSize -= iCurUsedSize;
  if (iBufferLeftSize > 0) {
    iCurUsedSize = WelsStrftime (&strLenFileName[iBufferUsedSize], iBufferLeftSize, "%y%m%d%H%M%S", &tTime);
    iBufferUsedSize += iCurUsedSize;
    iBufferLeftSize -= iCurUsedSize;
  }

  // Millisecond suffix plus extension.
  if (iBufferLeft > 0) {
    iCurUsed = WelsSnprintf (&strStreamFileName[iBufferUsed], iBufferLeft, ".%03.3u.264",
                             WelsGetMillisecond (&tTime));
    iBufferUsed += iCurUsed;
    iBufferLeft -= iCurUsed;
  }
  if (iBufferLeftSize > 0) {
    iCurUsedSize = WelsSnprintf (&strLenFileName[iBufferUsedSize], iBufferLeftSize, ".%03.3u.len",
                                 WelsGetMillisecond (&tTime));
    iBufferUsedSize += iCurUsedSize;
    iBufferLeftSize -= iCurUsedSize;
  }

  m_pFileBs = WelsFopen (strStreamFileName, "wb");
  m_pFileBsSize = WelsFopen (strLenFileName, "wb");

  m_bSwitch = false;
  m_iSwitchTimes = 0;
#endif//OUTPUT_BIT_STREAM

  InitEncoder();
}
int MSDKEncode::HandleProcess() { mfxStatus sts = MFX_ERR_NONE; mfxFrameSurface1* pFrameSurface = NULL; mfxSyncPoint syncpE; mfxEncodeCtrl* pEncCtrl = NULL; int startP = -1; #ifndef CONFIG_READ_RAW_BUFFER std::map<MSDKVpp*, RING_BUFFER*>::iterator it = m_mapRingBuf.begin(); #endif while (!m_bWantToStop) { usleep(1000); //Check if need to generate key frame if (m_bForceKeyFrame) pEncCtrl = &m_encCtrl; else pEncCtrl = NULL; #ifndef CONFIG_READ_RAW_BUFFER if (it->second->IsEmpty()) //No data { //printf("[MSDKEncode]-----There's no more data, just continue or exit the main loop\n"); if (it->first->GetDataEos()) //No more data in the furture { pEncCtrl = NULL; pFrameSurface = NULL; } else continue; } else { if (MASTER == m_type) { if (!m_bAccessNextElem) continue; it->second->Get(pFrameSurface); } else it->second->Pop(pFrameSurface); } //printf("[MSDKEncode]-----Get next frame surface successfully\n"); #endif if (!m_bInit) { #ifdef CONFIG_READ_RAW_BUFFER sts = InitEncoder(NULL); #else sts = InitEncoder(pFrameSurface); #endif if (MFX_ERR_NONE == sts) { if (m_pMeasuremnt) { m_pMeasuremnt->GetLock(); pipelineinfo einfo; einfo.mElementType = m_type; einfo.mChannelNum = MSDKBase::nEncChannels; MSDKBase::nEncChannels++; m_pMeasuremnt->SetElementInfo(ENC_ENDURATION_TIME_STAMP, this, &einfo); m_pMeasuremnt->TimeStpStart(ENC_ENDURATION_TIME_STAMP, this); m_pMeasuremnt->RelLock(); } m_bInit = true; H264E_TRACE_INFO("[MSDKEncode]Encoder %p init successfully\n", this); } else { H264E_TRACE_ERROR("Encode init failed: %d\n", sts); return -1; } } #ifdef CONFIG_READ_RAW_BUFFER int nIndex = GetFreeSurfaceIndex(m_pSurfacePool, m_nSurfaces); //Find free frame surface slot if (MFX_ERR_NOT_FOUND == nIndex) continue; else pFrameSurface = m_pSurfacePool[nIndex]; sts = LoadRawFrame(m_pSurfacePool[nIndex]); if (MFX_ERR_MORE_DATA == sts) { if (m_pInputMem->GetDataEof()) { //No more data in the future pEncCtrl = NULL; pFrameSurface = NULL; } else { continue; } } //printf("[MSDKEncode]-----Get the 
free surface slot and complete load of the raw frame\n"); #endif if (m_pMeasuremnt) { m_pMeasuremnt->GetLock(); m_pMeasuremnt->TimeStpStart(ENC_FRAME_TIME_STAMP, this); m_pMeasuremnt->RelLock(); } for (;;) { //Encode a frame asychronously(returns immediately) sts = m_pEncode->EncodeFrameAsync(pEncCtrl, pFrameSurface, &m_outputBs, &syncpE); //printf("[MSDKEncode]-----EncodeFrameAsync ret code: %d\n", sts); if (sts > MFX_ERR_NONE && !syncpE) { //Repeat the call if warning and no output if (MFX_WRN_DEVICE_BUSY == sts) usleep(1000); //wait if device is busy, then repeat the same call } else { if (sts > MFX_ERR_NONE && syncpE) sts = MFX_ERR_NONE; //ignore warnings if output is available if (MFX_ERR_NOT_ENOUGH_BUFFER == sts) H264E_TRACE_WARNI("[MSDKEncode]-----The size of buffer allocated for encoder is too small\n"); break; } } if (MFX_ERR_NONE == sts) { sts = m_pSession->SyncOperation(syncpE, 60000); if (m_pMeasuremnt) { m_pMeasuremnt->GetLock(); m_pMeasuremnt->TimeStpFinish(ENC_FRAME_TIME_STAMP, this); m_pMeasuremnt->RelLock(); } //check if output is key frame: startP = 0-key frame otherwise not //A group of pictures sizeof(GROUP_OF_PICTURE*2) have a key frame startP = (++startP)%GROUP_OF_PICTURE; //release surface pool slot if (MASTER == m_type) { m_bAccessNextElem = false; it->first->ReleaseSurface(); } else { pFrameSurface->Data.Locked--; } m_pNotify->OnGetMSDKCodecData(m_outputBs.Data+m_outputBs.DataOffset, m_outputBs.DataLength, (!startP || m_bForceKeyFrame), m_nLogicIndex); m_outputBs.DataLength = 0; if (m_bForceKeyFrame) { m_bForceKeyFrame = false; //printf("[MSDKEncode]-----Force key frame successfully\n"); startP = 0; } } if (!pFrameSurface) { if (m_pMeasuremnt) { m_pMeasuremnt->GetLock(); m_pMeasuremnt->TimeStpFinish(ENC_ENDURATION_TIME_STAMP, this); m_pMeasuremnt->RelLock(); } break; } } if (!m_bWantToStop) { H264E_TRACE_INFO("[MSDKEncode]Got EOS in Encoder %p\n", this); m_bWantToStop = true; } return 0; }
void *threadProp(void *param){ int i=0; int cpt = 0; int dist_Sonar; static int u = 0; #ifdef THREAD_PROP eSta prevOrder; eTypeCmd prevTyp_Cmd = STATE; #endif InitMode(3); InitEncoder(); InitAcc(2); pthread_mutex_lock(&mtx_order); order = STP; prevOrder = order; pthread_mutex_unlock(&mtx_order); while(1){ // Update distSonar pthread_mutex_lock(&mtx_distSonar); dist_Sonar = distSonar; pthread_mutex_unlock(&mtx_distSonar); pthread_mutex_lock(&mtx_order); pthread_mutex_unlock(&mtx_typ_Cmd); //printf("order = %s\n" // "typ_Cmd = %s\n",dspl_eSta(order), dspl_eTypeCmd(typ_Cmd)); #ifdef THREAD_PROP if( (order != prevOrder) || (typ_Cmd != prevTyp_Cmd)){ printf("order = %s\n" "typ_Cmd = %s\n",dspl_eSta(order), dspl_eTypeCmd(typ_Cmd)); prevOrder = order; prevTyp_Cmd = typ_Cmd; } #endif //printf("distSonar = %d, \t DIST_MIN_SONAR = %d\n", distSonar, DIST_MIN_SONAR); // if there is an obstacl if(dist_Sonar < DIST_MIN_SONAR){ prevOrder = order; order = STP; cpt = 1; } else if(cpt == 1){ order = prevOrder; cpt = 0; } // Stop Rover if obstacl or press Stop button or command stop if(order == STP){ #ifdef THREAD_PROP if(u != 1){ printf("----Rover stop\n"); } u = 1; #endif stopRover(); } else if(order == MVT && typ_Cmd == TRAJ){ #ifdef THREAD_PROP if(u != 2){ printf("----Rover is moving to point\n"); } u = 2; #endif pthread_mutex_lock(&mtx_position); if(followTraj((sPt)position.pt) == 1) order = STP; pthread_mutex_unlock(&mtx_position); } else if(order == MVT && typ_Cmd == POS){ #ifdef THREAD_PROP if(u != 3){ printf("----Rover is moving to point and is turnning following angl\n"); } u = 3; #endif pthread_mutex_lock(&mtx_position); if(followTraj((sPt)position.pt) == 1) order = STP; rotOnPt(position.ang); pthread_mutex_unlock(&mtx_position); } else if(order == MVT && (typ_Cmd != TRAJ || typ_Cmd != POS)){ #ifdef THREAD_PROP if(u != 4){ printf("----Rover is following points\n"); } u = 4; #endif if(followTraj(tabTraj[i])==1){ if(i==(N-1))i=0; else i++; } } 
pthread_mutex_unlock(&mtx_order); pthread_mutex_unlock(&mtx_typ_Cmd); } }