/*!
  Track the line in the image I.

  Refines the moving-edge sites with the generic tracker, robustly
  re-estimates the line parameters by least squares, extends the line at
  its extremities and finally updates the (rho, theta) representation.

  \param I : Image in which the line appears.

  \throws Re-throws any exception raised by leastSquare() after tracing it.
*/
void vpMeLine::track(const vpImage<unsigned char> &I)
{
  vpCDEBUG(1) <<"begin vpMeLine::track()"<<std::endl ;

  // 1. Line-specific pre-processing (currently empty).
  {
  }

  // 2. Call the generic (non line-specific) moving-edge tracker.
  {
    vpMeTracker::track(I) ;
  }

  // 3. Back to the line-specific processing.
  {
    // Remove the points rejected by the moving-edge tracker.
    suppressPoints() ;
    setExtremities() ;

    // Least-squares estimation of the line parameters.
    try
    {
      leastSquare() ;
    }
    catch(...)
    {
      vpERROR_TRACE("Error caught") ;
      throw ;
    }

    // Look for new points at the extremities of the line,
    // to cope with sliding along the line.
    seekExtremities(I) ;
    setExtremities() ;

    // Re-fit now that extremity points may have been added.
    try
    {
      leastSquare() ;
    }
    catch(...)
    {
      vpERROR_TRACE("Error caught") ;
      throw ;
    }

    // Remove the points rejected by the robust regression.
    suppressPoints() ;
    setExtremities() ;

    // Resample the site list if necessary.
    reSample(I) ;

    // Update the delta angle for each point of the list.
    updateDelta() ;

    // Debug-only display of the updated me site list.
    if (vpDEBUG_ENABLE(2))
    {
      display(I,vpColor::red) ;
      vpMeTracker::display(I) ;
      vpDisplay::flush(I) ;
    }
  }

  computeRhoTheta(I) ;

  vpCDEBUG(1) <<"end vpMeLine::track()"<<std::endl ;
}
/*!
  Track the ellipse in the image I.

  Runs the generic moving-edge tracker, robustly re-estimates the ellipse
  by least squares, extends the site list at the extremities, resamples if
  needed and updates the theta angles and the moments.

  \param I : Image in which the ellipse appears.

  \throws Re-throws any exception raised by vpMeTracker::track() or
  leastSquare() after tracing it.
*/
void vpMeEllipse::track(const vpImage<unsigned char> &I)
{
  vpCDEBUG(1) <<"begin vpMeEllipse::track()"<<std::endl ;

  // Iteration counter kept across calls; only referenced by the
  // commented-out resampling code at the end of this function.
  static int iter =0 ;

  // 1. Ellipse-specific pre-processing (currently empty).
  {
  }

  //vpDisplay::display(I) ;

  // 2. Call the generic (non ellipse-specific) moving-edge tracker.
  {
    try{
      vpMeTracker::track(I) ;
    }
    catch(...)
    {
      vpERROR_TRACE("Error caught") ;
      throw ;
    }
    //  std::cout << "number of signals " << numberOfSignal() << std::endl ;
  }

  // 3. Back to the ellipse-specific processing.
  {
    // Least-squares estimation of the ellipse parameters.
    suppressPoints() ;
    setExtremities() ;

    try{
      leastSquare() ;  }
    catch(...)
    {
      vpERROR_TRACE("Error caught") ;
      throw ;
    }

    // Look for new points at the extremities, then re-fit.
    seekExtremities(I) ;

    setExtremities() ;

    try
    {
      leastSquare() ;
    }
    catch(...)
    {
      vpERROR_TRACE("Error caught") ;
      throw ;
    }

    // Remove the points rejected by the robust regression.
    suppressPoints() ;
    setExtremities() ;

    // Resample the site list if necessary.
    reSample(I) ;

    // Update the theta angle for each point of the list.
    updateTheta() ;

    computeMoments();

    // Debug-only display of the updated me site list.
    if (vpDEBUG_ENABLE(2))
    {
      display(I,vpColor::red) ;
      vpMeTracker::display(I) ;
      vpDisplay::flush(I) ;
    }

    // Historical code kept for reference (periodic full resampling).
    //    computeAngle(iP1, iP2) ;
    //
    //    if (iter%5==0)
    //    {
    //      sample(I) ;
    //      try{
    //	leastSquare() ;  }
    //      catch(...)
    //      {
    //	vpERROR_TRACE("Error caught") ;
    //	throw ;
    //      }
    //      computeAngle(iP1, iP2) ;
    //    }
    //    seekExtremities(I) ;
    //
    //    vpMeTracker::display(I) ;
    //    // vpDisplay::flush(I) ;
    //
    //    // remet a jour l'angle theta pour chaque point de la liste
    //    updateTheta() ;
  }

  iter++ ;

  vpCDEBUG(1) << "end vpMeEllipse::track()"<<std::endl ;
}
// Write one imager line document to every configured output sector/file.
//
// For a valid line the visible-line number is mapped to output-file
// coordinates per sector (applying per-sector x/y scaling), the raw data is
// resampled/grid-burned/unit-converted once per sector, and written to each
// clean output row it covers.  Invalid lines are discarded to gvar.trash().
//
// NOTE(review): relies on file-scope state not visible in this chunk
// (imagerDoc, imager, Opt, OutFile, Runits, DebugFile, gvar, BufSpace,
// BufSpace2, VIS_LINES_PER_LINE, VIS_PIX_PER_PIX).
void writeImagerLine(LineDoc * lineDoc){
  int c; // channel
  int d; // logical detector
  c = lineDoc->channel();
  GetChanDet(lineDoc, d);

  if(lineDoc->valid() ){
    // Absolute scan count reconstructed from the relative count plus the
    // absolute/relative offset recorded in the imager documentation.
    int abs_scan_count = lineDoc->rel_scan_count() + (imagerDoc.abs_scan_count-imagerDoc.rel_scan_count);
    int i = imager;

    /*double line= (8.0/VIS_LINES_PER_LINE[i][c])*abs_scan_count+d; */
    /* 8 is number of vis lines per scan */
    // NOTE(review): the value computed on the next statement is immediately
    // overwritten by the vis_line()-based formula below; apparently kept for
    // history -- confirm before removing.
    double line= (8.0/VIS_LINES_PER_LINE[i][c])*(abs_scan_count-2)+d+1;
    line = lineDoc->vis_line(imagerDoc) / VIS_LINES_PER_LINE[i][c] ;

    // Line number relative to the start of the current frame.
    int line_in_frame = (lineDoc->vis_line(imagerDoc) - imagerDoc.N_line_of_frame) / VIS_LINES_PER_LINE[i][c] ;
    if (Opt.debug()==Dframe) {
      DebugFile <<"\t line,line_in_frame\t"<<line <<"\t"<<line_in_frame ;
    }

    int side = lineDoc->side();
    int pixels = lineDoc->n_pixels();
    int VisFirstPixel = imagerDoc.W_pix_of_frame;
    int xoffset;
    void * converted = NULL;

    // One pass per output sector configured for this imager.
    for(int f=0; f<Opt.sectors(i); f++){
      converted = NULL ;
      // Skip sectors with no open file, no unit converter for this
      // channel/side/detector, an undefined word type, or an explicit skip.
      if( (OutFile[f][i] )
          && Runits[f][i][c][side][d]
          && Opt.wordType(f,i,c) != Undef
          && skipIt(f,i,c) == FALSE ){

        // Horizontal placement and width in output pixels after x scaling.
        xoffset= (int)(Opt.xscale(f,i,c) * VisFirstPixel / VIS_PIX_PER_PIX[i][c]);
        pixels = (int)(Opt.xscale(f,i,c) * lineDoc->n_pixels() );

        // Range of output rows this input line covers after y scaling.
        double yscale = Opt.yscale(f,i,c);
        double wstart, wstop;
        wstart = line*yscale;
        wstop = (line+1.0)*yscale - 1;

        // NOTE(review): unlike the trace above, this one is NOT guarded by
        // Opt.debug() -- confirm this is intentional.
        DebugFile <<"\tws\t"<<wstart<<"\txoff\t"<<xoffset<<"\tpix\t"<<pixels<<"\txst\t"<<OutFile[f][i]->xStart(c)<<"\tyst\t"<<OutFile[f][i]->yStart(c)<<"\txsz\t"<<OutFile[f][i]->xSize(c)<<"\tysz\t"<<OutFile[f][i]->ySize(c) ;

        for(double wline=wstart; wline<=wstop; wline+=1.0){
          // Only write rows not already written (clean).
          if(OutFile[f][i]->lineClean(c, (int)wline, xoffset, pixels) ){
            DebugFile <<"\t"<<"will write";
            // Convert the raw data lazily, once per sector: resample,
            // optionally burn the map grid, then apply radiometric units.
            if(!converted){
              converted = reSample(lineDoc->data(), pixels, Opt.xscale(f,i,c), BufSpace);
              if(Opt.map(f,i,c) ){
                // Detector range [d0, d1) covered by one output row;
                // presumably widened by 0.5/yscale to span merged rows --
                // TODO confirm against Gridburn().
                int d0 = (unsigned)(d-(int)(0.5/yscale)*VIS_LINES_PER_LINE[i][c]) +1;
                int d1 = MIN(IDETECTORS, (d + 1 + (int)(0.5/yscale)) * VIS_LINES_PER_LINE[i][c] ) ;
                converted = Gridburn(&imagerDoc, (unsigned short*)converted, d0, d1, VIS_PIX_PER_PIX[i][c]/Opt.xscale(f,i,c), Opt.map(f,i,c));
              }
              converted = Runits[f][i][c][side][d]->convert((unsigned short*) converted ,pixels ,BufSpace2);
            }
            OutFile[f][i]->writeData(c, (int)(wline), xoffset, converted, pixels);
          }
        }
      }
    }
  }
  else
    // Invalid line: discard its bytes from the input stream.
    gvar.trash(lineDoc->bytes());
}
// Thread entry point.
//
// Decodes the WAV data source chunk by chunk: reads PCM (8/16 bit) or
// G.711 a-law/u-law data, converts it to 16-bit mono samples at
// DESIRED_SAMPLE_RATE, and queues it to the renderer in 80-sample frames,
// buffering partial frames across reads.  Fires DecodingErrorEvent on a
// read failure and DecodingCompletedEvent when done; always queues
// end-of-frames and releases mSemExited before returning.
//
// NOTE(review): pArgs is unused, and iSamplesInOutBuffer is assigned but
// never read in this function.
int StreamWAVFormatDecoder::run(void* pArgs)
{
    int iSamplesInOutBuffer = 0;
    Sample partialFrame[80] ;            // carry-over samples between reads
    int nSamplesPartialFrame = 0;        // how many samples partialFrame holds
    int numOutSamples = 0;
    ssize_t iDataLength ;
    int nQueuedFrames = 0;

    //used if the files are aLaw or uLaw encodec
    InitG711Tables();

    StreamDataSource* pSrc = getDataSource() ;
    if (pSrc != NULL)
    {
        ssize_t iRead = 0;
        char buf[16];

        // "pre-read" 4 bytes, to see if this is a 0 length file and should
        // be skipped.  Alas, apparently one cannot just call getLength()
        // as an http fetch might not have returned any info yet.
        if (pSrc->peek(buf, 4, iRead) != OS_SUCCESS)    // empty file
        {
            // If one doesn't queue at least one frame, then it seems things stall
            // Queue one frame of a "click" to give some audible indication
            // a file was played (even if it was empty)
            Sample Click[80] = {0} ;  // one frame of "click" to give audible
                                      // indication of some problem.
            Click[39] = -200 ;
            Click[40] = 20000 ;   // An impulse should do nicely
            Click[41] = -200 ;

            queueFrame((const uint16_t*)Click);
            mbEnd = TRUE ;
        }

        while (!mbEnd && nextDataChunk(iDataLength))
        {
            //we really want 80 SAMPLES not 80 bytes
            unsigned char InBuffer[NUM_SAMPLES*2] ;
            Sample OutBuffer[4000] = {0} ;   //make room for lots of decompression

            iSamplesInOutBuffer = 0;

            while ((iDataLength > 0) && !mbEnd)
            {
                ssize_t iRead = 0;
                UtlBoolean retval = OS_INVALID;

                if (mFormatChunk.formatTag == 1 && mFormatChunk.nBitsPerSample == 8)  //8bit unsigned
                {
                    //we need to read 80 samples
                    iRead = __min(iDataLength, NUM_SAMPLES);
                    retval = pSrc->read((char *)InBuffer, iRead, iRead);
                    //now convert to 16bit unsigned, which is what we use internally
                    ConvertUnsigned8ToSigned16(InBuffer,OutBuffer,iRead);
                    numOutSamples = iRead;
                }
                else if (mFormatChunk.formatTag == 1 && mFormatChunk.nBitsPerSample == 16) //16 bit signed
                {
                    iRead = __min(iDataLength, NUM_SAMPLES*2);
                    //just read in the data, because it's the format we need
                    retval = pSrc->read((char *)OutBuffer, iRead, iRead);
                    numOutSamples = iRead/2;
#ifdef __BIG_ENDIAN__
                    if (retval == OS_SUCCESS)
                    {
                        //We are running on a big endian processor - 16-bit samples are
                        //in the little endian byte order - convert them to big endian.
                        unsigned short *pData = (unsigned short *)OutBuffer;
                        for ( int index = 0; index < numOutSamples; index++, pData++ )
                            *pData = letohs(*pData);
                    }
#endif
                }
                else if (mFormatChunk.formatTag == 6 || mFormatChunk.formatTag == 7)  //16 bit signed
                {
                    //we need to read 80 samples
                    iRead = __min(iDataLength, NUM_SAMPLES);
                    retval = pSrc->read((char *)OutBuffer, iRead, iRead);
                    //no conversion to 16bit will take place because we need to decompress this
                }
                else
                {
                    // Unsupported format: retval stays OS_INVALID, so the
                    // error branch below fires and the chunk loop breaks.
                    syslog(FAC_STREAMING, PRI_ERR, "StreamWAVFormatDecoder::run Unsupport bit per sample rate!");
                }
                iDataLength -= iRead;

                if (retval == OS_SUCCESS)
                {
                    // Decompress G.711 in place; each input byte yields one
                    // 16-bit sample.  NOTE(review): the 'bytes' return value
                    // is never used.
                    ssize_t bytes;
                    switch (mFormatChunk.formatTag)
                    {
                        case 1:     // PCM
                            //NO CONVERSION NEEDED
                            break ;
                        case 6:     // G711 alaw
                            bytes = DecompressG711ALaw(OutBuffer, iRead);
                            numOutSamples = iRead;
                            break ;
                        case 7:     // G711 ulaw
                            bytes = DecompressG711MuLaw(OutBuffer,iRead);
                            numOutSamples = iRead;
                            break ;
                    }

                    //we now should have a buffer filled with Samples, not bytes
                    int numBytes = numOutSamples * sizeof(Sample);

                    //next we check if the sound file is stereo...at this point in our lives
                    //we only want to support mono
                    //takes bytes in and gets bytes out. NOT samples
                    if (mFormatChunk.nChannels > 1)
                    {
                        numBytes = mergeChannels((char *)OutBuffer, numBytes, mFormatChunk.nChannels);
                        //now calculate how many sample we have
                        numOutSamples = numBytes/sizeof(Sample);
                    }

                    //in the next fucntion we must pass bytes, NOT samples as second param
                    numBytes = reSample((char *)OutBuffer, numBytes, mFormatChunk.nSamplesPerSec, DESIRED_SAMPLE_RATE);
                    //now calculate how many sample we have
                    numOutSamples = numBytes/sizeof(Sample);

                    //this next part will buffer the samples if under 80 samples
                    if (numOutSamples > 0)
                    {
                        // Emit full 80-sample frames; stash any remainder in
                        // partialFrame until enough samples arrive.
                        int iCount = 0 ;
                        while ((iCount < numOutSamples) && !mbEnd)
                        {
                            int iToCopy = numOutSamples - iCount ;
                            if (iToCopy > 80)
                                iToCopy = 80 ;

                            if (nSamplesPartialFrame == 0)
                            {
                                if (iToCopy >= 80)
                                {
                                    // Whole frame available: queue directly.
                                    queueFrame((const uint16_t*)OutBuffer+iCount);
                                    nQueuedFrames++ ;
                                }
                                else
                                {
                                    // Start a new partial frame.
                                    nSamplesPartialFrame = iToCopy ;
                                    memcpy(partialFrame, (const unsigned short *)OutBuffer+iCount,iToCopy*sizeof(Sample)) ;
                                }
                            }
                            else
                            {
                                // Top up the pending partial frame.
                                if (iToCopy > (80-nSamplesPartialFrame))
                                    iToCopy = 80-nSamplesPartialFrame ;
                                memcpy(&partialFrame[nSamplesPartialFrame],(const unsigned short *)OutBuffer+iCount, iToCopy*sizeof(Sample)) ;
                                nSamplesPartialFrame += iToCopy ;
                                if (nSamplesPartialFrame == 80)
                                {
                                    queueFrame((const uint16_t*) partialFrame);
                                    nSamplesPartialFrame = 0 ;
                                    nQueuedFrames++ ;
                                }
                            }
                            iCount += iToCopy ;
                        }
                    }
                }
                else
                {
                    // Truncated data source?
                    syslog(FAC_STREAMING, PRI_ERR, "StreamWAVFormatDecoder::run (FireEvent DecodingErrorEvent)");
                    fireEvent(DecodingErrorEvent) ;
                    break ;
                }
            }
        }
        pSrc->close() ;
    }

    queueEndOfFrames() ;
    syslog(FAC_STREAMING, PRI_DEBUG, "StreamWAVFormatDecoder::run queued %d frames", nQueuedFrames);
    fireEvent(DecodingCompletedEvent) ;
    mSemExited.release() ;

    return 0 ;
}
// Initialize the FFmpeg encoder context for the configured codec
// (MP3, AAC/M4A or OGG/Vorbis fallback), create the audio stream and open
// the codec.
//
// The #ifdef/#ifndef avformat_alloc_output_context2 branches select between
// the new-style context allocation and the legacy
// avformat_alloc_context()/av_guess_format() path; the
// LIBAVCODEC_VERSION_INT > 3544932 checks switch between the AV_CODEC_ID_*
// and legacy CODEC_ID_* enum names.
//
// \param bitrate    target bitrate in kbit/s (stored internally as bit/s)
// \param samplerate target sample rate in Hz
// \return 0 on success.  NOTE(review): failures of addStream()/openAudio()
// are not reported through the return value.
int EncoderFfmpegCore::initEncoder(int bitrate, int samplerate) {
#ifndef avformat_alloc_output_context2
    qDebug() << "EncoderFfmpegCore::initEncoder: Old Style initialization";
    m_pEncodeFormatCtx = avformat_alloc_context();
#endif

    m_lBitrate = bitrate * 1000;
    m_lSampleRate = samplerate;

#if LIBAVCODEC_VERSION_INT > 3544932
    if (m_SCcodecId == AV_CODEC_ID_MP3) {
#else
    if (m_SCcodecId == CODEC_ID_MP3) {
#endif // LIBAVCODEC_VERSION_INT > 3544932
        qDebug() << "EncoderFfmpegCore::initEncoder: Codec MP3";
#ifdef avformat_alloc_output_context2
        avformat_alloc_output_context2(&m_pEncodeFormatCtx, NULL, NULL, "output.mp3");
#else
        m_pEncoderFormat = av_guess_format(NULL, "output.mp3", NULL);
#endif // avformat_alloc_output_context2
#if LIBAVCODEC_VERSION_INT > 3544932
    } else if (m_SCcodecId == AV_CODEC_ID_AAC) {
#else
    } else if (m_SCcodecId == CODEC_ID_AAC) {
#endif // LIBAVCODEC_VERSION_INT > 3544932
        qDebug() << "EncoderFfmpegCore::initEncoder: Codec M4A";
#ifdef avformat_alloc_output_context2
        avformat_alloc_output_context2(&m_pEncodeFormatCtx, NULL, NULL, "output.m4a");
#else
        m_pEncoderFormat = av_guess_format(NULL, "output.m4a", NULL);
#endif // avformat_alloc_output_context2
    } else {
        // Default/fallback: OGG/Vorbis.
        qDebug() << "EncoderFfmpegCore::initEncoder: Codec OGG/Vorbis";
#ifdef avformat_alloc_output_context2
        avformat_alloc_output_context2(&m_pEncodeFormatCtx, NULL, NULL, "output.ogg");
        m_pEncodeFormatCtx->oformat->audio_codec=AV_CODEC_ID_VORBIS;
#else
        m_pEncoderFormat = av_guess_format(NULL, "output.ogg", NULL);
#if LIBAVCODEC_VERSION_INT > 3544932
        m_pEncoderFormat->audio_codec=AV_CODEC_ID_VORBIS;
#else
        m_pEncoderFormat->audio_codec=CODEC_ID_VORBIS;
#endif // LIBAVCODEC_VERSION_INT > 3544932
#endif // avformat_alloc_output_context2
    }

    // Keep m_pEncoderFormat and m_pEncodeFormatCtx->oformat in sync,
    // whichever one was filled in above.
#ifdef avformat_alloc_output_context2
    m_pEncoderFormat = m_pEncodeFormatCtx->oformat;
#else
    m_pEncodeFormatCtx->oformat = m_pEncoderFormat;
#endif // avformat_alloc_output_context2

    m_pEncoderAudioStream = addStream(m_pEncodeFormatCtx, &m_pEncoderAudioCodec, m_pEncoderFormat->audio_codec);

    openAudio(m_pEncoderAudioCodec, m_pEncoderAudioStream);

    // qDebug() << "jepusti";

    return 0;
}

// Private methods

// Encode one audio frame from m_pFltSamples and write it (interleaved) to
// the media file.
//
// Fills an AVFrame with the float (AV_SAMPLE_FMT_FLT) input; if the codec
// wants a different sample format the data is converted via reSample() and
// the frame is re-allocated and refilled from the resampler's buffer.
// DTS/PTS are derived from the number of recorded bytes.
//
// \param formatctx output format context to write into
// \param stream    audio stream whose codec context is used for encoding
// \return 0 on success, -1 on any failure (fill, encode, no packet, write).
int EncoderFfmpegCore::writeAudioFrame(AVFormatContext *formatctx, AVStream *stream) {
    AVCodecContext *l_SCodecCtx = NULL;;  // NOTE(review): stray second ';'
    AVPacket l_SPacket;
    AVFrame *l_SFrame = avcodec_alloc_frame();
    int l_iGotPacket;
    int l_iRet;
#ifdef av_make_error_string
    char l_strErrorBuff[256];
#endif // av_make_error_string

    av_init_packet(&l_SPacket);
    l_SPacket.size = 0;
    l_SPacket.data = NULL;

    // Calculate correct DTS for FFMPEG
    // NOTE(review): assumes 44100 Hz stereo 16-bit input -- TODO confirm
    // against the recording path.
    m_lDts = round(((double)m_lRecordedBytes / (double)44100 / (double)2. *
                    (double)m_pEncoderAudioStream->time_base.den));
    m_lPts = m_lDts;

    l_SCodecCtx = stream->codec;

#ifdef av_make_error_string
    memset(l_strErrorBuff, 0x00, 256);
#endif // av_make_error_string

    l_SFrame->nb_samples = m_iAudioInputFrameSize;
    // Mixxx uses float (32 bit) samples..
    l_SFrame->format = AV_SAMPLE_FMT_FLT;
#ifndef __FFMPEGOLDAPI__
    l_SFrame->channel_layout = l_SCodecCtx->channel_layout;
#endif // __FFMPEGOLDAPI__

    // Point the frame at the float input samples (no copy).
    l_iRet = avcodec_fill_audio_frame(l_SFrame,
                                      l_SCodecCtx->channels,
                                      AV_SAMPLE_FMT_FLT,
                                      (const uint8_t *)m_pFltSamples,
                                      m_iFltAudioCpyLen,
                                      1);

    if (l_iRet != 0) {
#ifdef av_make_error_string
        qDebug() << "Can't fill FFMPEG frame: error " << l_iRet << "String '" <<
                 av_make_error_string(l_strErrorBuff, 256, l_iRet) << "'" <<
                 m_iFltAudioCpyLen;
#endif // av_make_error_string
        qDebug() << "Can't refill 1st FFMPEG frame!";
        return -1;
    }

    // If we have something else than AV_SAMPLE_FMT_FLT we have to convert it
    // to something that fits..
    if (l_SCodecCtx->sample_fmt != AV_SAMPLE_FMT_FLT) {
        reSample(l_SFrame);
        // After we have turned our samples to destination
        // Format we must re-alloc l_SFrame.. it easier like this..
#if LIBAVCODEC_VERSION_INT > 3544932
        avcodec_free_frame(&l_SFrame);
#else
        av_free(l_SFrame);
#endif // LIBAVCODEC_VERSION_INT > 3544932
        l_SFrame = NULL;
        l_SFrame = avcodec_alloc_frame();

        l_SFrame->nb_samples = m_iAudioInputFrameSize;
        l_SFrame->format = l_SCodecCtx->sample_fmt;
#ifndef __FFMPEGOLDAPI__
        l_SFrame->channel_layout = m_pEncoderAudioStream->codec->channel_layout;
#endif // __FFMPEGOLDAPI__

        // Refill the frame from the resampler's converted buffer.
        l_iRet = avcodec_fill_audio_frame(l_SFrame,
                                          l_SCodecCtx->channels,
                                          l_SCodecCtx->sample_fmt,
                                          (const uint8_t *)m_pResample->getBuffer(),
                                          m_iAudioCpyLen,
                                          1);

        if (l_iRet != 0) {
#ifdef av_make_error_string
            qDebug() << "Can't refill FFMPEG frame: error " << l_iRet << "String '" <<
                     av_make_error_string(l_strErrorBuff, 256, l_iRet) << "'" <<
                     m_iAudioCpyLen << " " <<
                     av_samples_get_buffer_size(
                         NULL, 2,
                         m_iAudioInputFrameSize,
                         m_pEncoderAudioStream->codec->sample_fmt,
                         1) << " " << m_pOutSize;
#endif // av_make_error_string
            qDebug() << "Can't refill 2nd FFMPEG frame!";
            return -1;
        }
    }

    //qDebug() << "!!" << l_iRet;

    l_iRet = avcodec_encode_audio2(l_SCodecCtx, &l_SPacket, l_SFrame, &l_iGotPacket);

    if (l_iRet < 0) {
        qDebug() << "Error encoding audio frame";
        return -1;
    }

    if (!l_iGotPacket) {
        // qDebug() << "No packet! Can't encode audio!!";
        return -1;
    }

    l_SPacket.stream_index = stream->index;

    // Let's calculate DTS/PTS and give it to FFMPEG..
    // THEN codecs like OGG/Voris works ok!!
    l_SPacket.dts = m_lDts;
    l_SPacket.pts = m_lDts;

    // Some times den is zero.. so 0 dived by 0 is
    // Something?
    if (m_pEncoderAudioStream->pts.den == 0) {
        qDebug() << "Time hack!";
        m_pEncoderAudioStream->pts.den = 1;
    }

    // Write the compressed frame to the media file. */
    l_iRet = av_interleaved_write_frame(formatctx, &l_SPacket);

    if (l_iRet != 0) {
        qDebug() << "Error while writing audio frame";
        return -1;
    }

    av_free_packet(&l_SPacket);
    av_destruct_packet(&l_SPacket);
    av_free(l_SFrame);

    return 0;
}