Example #1
/*
 * Class:     com_example_testffmpeg_CFFmpegJni
 * Method:    IPlay
 * Signature: ()I
 */
jint Java_com_example_testffmpeg_CFFmpegJni_IPlay(JNIEnv *env, jobject thiz)
{
	/// Return value
	int nRet = -1;
	/// Close any input that is already open
	if(NULL != m_pFormatCtx)
	{
		avformat_close_input(&m_pFormatCtx);
		/// avformat_close_input() has already freed the context and NULLed
		/// the pointer, so this av_free(NULL) is a harmless no-op
		av_free(m_pFormatCtx);
		m_pFormatCtx = NULL;
	}

	if(NULL == m_pFormatCtx)
	{
		/// Open the input
		if(0 != (nRet = avformat_open_input(&m_pFormatCtx, m_szURLPath, 0, NULL/*&m_pDictOptions*/)))
		{
			char szTemp[256];
			memset(szTemp, 0x00, sizeof(szTemp));
			av_strerror(nRet, szTemp, 255);
			/// Log the bad URL/path and the error string
			LOGD("Failed to open %s, error code = %d (%s)", m_szURLPath, nRet, szTemp);
			return nRet;
		}
	}
	}

	// m_pFormatCtx->max_analyze_duration = 1000;
	// m_pFormatCtx->probesize = 2048;
	if(0 > avformat_find_stream_info(m_pFormatCtx, NULL))
	{
		LOGD("Couldn't find stream information.");
		return -1;
	}

	int nVideoIndex = -1;
	for(int i = 0; i < m_pFormatCtx->nb_streams; i++)
	{
		if(AVMEDIA_TYPE_VIDEO == m_pFormatCtx->streams[i]->codec->codec_type)
		{
			nVideoIndex = i;
			break;
		}
	}
	if(-1 == nVideoIndex)
	{
		LOGD("Didn't find a video stream.");
		return -1;
	}

	AVCodecContext* pCodecCtx = m_pFormatCtx->streams[nVideoIndex]->codec;
	AVCodec* pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if(NULL == pCodec)
	{
		LOGD("Codec not found.");
		return -1;
	}

	if(pCodec->capabilities & CODEC_CAP_TRUNCATED)
	{
		pCodecCtx->flags |= CODEC_FLAG_TRUNCATED;
	}

	if(0 > avcodec_open2(pCodecCtx, pCodec, NULL))
	{
		LOGD("Could not open codec.");
		return -1;
	}

	/// Allocate the frame objects
	AVFrame	*pFrame = NULL, *pFrameYUV = NULL;
	pFrame = avcodec_alloc_frame();
	pFrameYUV = avcodec_alloc_frame();
	/// Allocate the conversion buffer (RGB565 output, despite the pFrameYUV name)
	int nConvertSize = avpicture_get_size(PIX_FMT_RGB565, iWidth, iHeight);
	uint8_t* pConvertbuffer = new uint8_t[nConvertSize];
	avpicture_fill((AVPicture *)pFrameYUV, pConvertbuffer, PIX_FMT_RGB565, iWidth, iHeight);

	/// Decoding state
	int nCodecRet, nHasGetPicture;
	/// Allocate the packet used while reading frames
	int nPacketSize = pCodecCtx->width * pCodecCtx->height;
	AVPacket* pAVPacket = (AVPacket *)malloc(sizeof(AVPacket));
	av_new_packet(pAVPacket, nPacketSize);
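	/// Note: av_read_frame() supplies its own buffer for each packet,
	/// so this pre-sized allocation is not strictly required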

	/// Dump the input's stream information
	av_dump_format(m_pFormatCtx, 0, m_szURLPath, 0);
	/// Set the playing flag
	m_bIsPlaying = true;

	/// Pixel-format conversion context
	struct SwsContext* img_convert_ctx = NULL;
	/// Convert from the decoder's pixel format to RGB565
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
		iWidth, iHeight, PIX_FMT_RGB565, SWS_BICUBIC, NULL, NULL, NULL);
	/// Read frames
	while(0 <= av_read_frame(m_pFormatCtx, pAVPacket) && true == m_bIsPlaying)
	{
		/// Is this packet from the video stream?
		if(nVideoIndex == pAVPacket->stream_index)
		{
			/// Decode the packet
			nCodecRet = avcodec_decode_video2(pCodecCtx, pFrame, &nHasGetPicture, pAVPacket);
			if(0 < nHasGetPicture)
			{
				/// Convert to RGB565
				sws_scale(img_convert_ctx, (const uint8_t* const* )pFrame->data, pFrame->linesize,
						0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
				/// Hand the converted buffer to the display callback
				e_DisplayCallBack(env, pConvertbuffer, nConvertSize);
			}
		}
		/// Release the packet data allocated by av_read_frame
		av_free_packet(pAVPacket);
	}
	/// Free the conversion context
	sws_freeContext(img_convert_ctx);

	/// Free the conversion buffer
	delete[] pConvertbuffer;
	pConvertbuffer = NULL;
	/// Free the frame objects
	av_free(pFrame); pFrame = NULL;
	av_free(pFrameYUV); pFrameYUV = NULL;
	/// Free the read packet
	free(pAVPacket); pAVPacket = NULL;

	/// Close the codec and the input
	avcodec_close(pCodecCtx); pCodecCtx = NULL;
	avformat_close_input(&m_pFormatCtx);
	/// avformat_close_input() already frees the context; av_free(NULL) is a no-op
	av_free(m_pFormatCtx);
	m_pFormatCtx = NULL;
	return nRet;
}
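Note: avcodec_decode_video2() and the avcodec_alloc_frame()/avpicture_*() helpers used above are deprecated. A minimal sketch of the same decode step with the send/receive API (FFmpeg 3.1+); the surrounding variable names are assumed from the example:

/* Sketch only: replaces the avcodec_decode_video2() call in the read loop above. */
int nRet2 = avcodec_send_packet(pCodecCtx, pAVPacket);
while (nRet2 >= 0)
{
	nRet2 = avcodec_receive_frame(pCodecCtx, pFrame);
	if (nRet2 == AVERROR(EAGAIN) || nRet2 == AVERROR_EOF)
		break;          /* need more input, or the stream is finished */
	if (nRet2 < 0)
		return nRet2;   /* a real decoding error */
	/* pFrame now holds a decoded picture; convert and display as before */
	sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize,
			0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
	e_DisplayCallBack(env, pConvertbuffer, nConvertSize);
}
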
Example #2
int main(int argc, char* argv[])
{
	AVFormatContext	*pFormatCtx;
	int				i, videoindex;
	AVCodecContext	*pCodecCtx;
	AVCodec			*pCodec;
	AVFrame	*pFrame,*pFrameYUV;
	uint8_t *out_buffer;
	AVPacket *packet;
	int y_size;
	int ret, got_picture;
	struct SwsContext *img_convert_ctx;

	char filepath[]="bigbuckbunny_480x272.h265";
	//SDL---------------------------
	int screen_w=0,screen_h=0;
	SDL_Window *screen; 
	SDL_Renderer* sdlRenderer;
	SDL_Texture* sdlTexture;
	SDL_Rect sdlRect;

	FILE *fp_yuv;

	av_register_all();
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();

	if(avformat_open_input(&pFormatCtx,filepath,NULL,NULL)!=0){
		printf("Couldn't open input stream.\n");
		return -1;
	}
	if(avformat_find_stream_info(pFormatCtx,NULL)<0){
		printf("Couldn't find stream information.\n");
		return -1;
	}
	videoindex=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++) 
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			videoindex=i;
			break;
		}
	if(videoindex==-1){
		printf("Didn't find a video stream.\n");
		return -1;
	}

	pCodecCtx=pFormatCtx->streams[videoindex]->codec;
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL){
		printf("Codec not found.\n");
		return -1;
	}
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0){
		printf("Could not open codec.\n");
		return -1;
	}
	
	pFrame=av_frame_alloc();
	pFrameYUV=av_frame_alloc();
	out_buffer=(uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
	avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
	packet=(AVPacket *)av_malloc(sizeof(AVPacket));
	//Output Info-----------------------------
	printf("--------------- File Information ----------------\n");
	av_dump_format(pFormatCtx,0,filepath,0);
	printf("-------------------------------------------------\n");
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 
		pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); 

#if OUTPUT_YUV420P 
    fp_yuv=fopen("output.yuv","wb+");  
#endif  
	
	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {  
		printf( "Could not initialize SDL - %s\n", SDL_GetError()); 
		return -1;
	} 

	screen_w = pCodecCtx->width;
	screen_h = pCodecCtx->height;
	//SDL 2.0 Support for multiple windows
	screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
		screen_w, screen_h,
		SDL_WINDOW_OPENGL);

	if(!screen) {  
		printf("SDL: could not create window - exiting:%s\n",SDL_GetError());  
		return -1;
	}

	sdlRenderer = SDL_CreateRenderer(screen, -1, 0);  
	//IYUV: Y + U + V  (3 planes)
	//YV12: Y + V + U  (3 planes)
	sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,pCodecCtx->width,pCodecCtx->height);  

	sdlRect.x=0;
	sdlRect.y=0;
	sdlRect.w=screen_w;
	sdlRect.h=screen_h;

	//SDL End----------------------
	while(av_read_frame(pFormatCtx, packet)>=0){
		if(packet->stream_index==videoindex){
			ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
			if(ret < 0){
				printf("Decode Error.\n");
				return -1;
			}
			if(got_picture){
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, 
					pFrameYUV->data, pFrameYUV->linesize);
				
#if OUTPUT_YUV420P
				y_size=pCodecCtx->width*pCodecCtx->height;  
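				//YUV420P subsamples chroma 2x2, so the U and V planes are y_size/4 bytes each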
				fwrite(pFrameYUV->data[0],1,y_size,fp_yuv);    //Y 
				fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv);  //U
				fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv);  //V
#endif
				//SDL---------------------------
#if 0
				SDL_UpdateTexture( sdlTexture, NULL, pFrameYUV->data[0], pFrameYUV->linesize[0] );  
#else
				SDL_UpdateYUVTexture(sdlTexture, &sdlRect,
				pFrameYUV->data[0], pFrameYUV->linesize[0],
				pFrameYUV->data[1], pFrameYUV->linesize[1],
				pFrameYUV->data[2], pFrameYUV->linesize[2]);
#endif	
				
				SDL_RenderClear( sdlRenderer );  
				SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect);  
				SDL_RenderPresent( sdlRenderer );  
				//SDL End-----------------------
				//Delay 40ms
				SDL_Delay(40);
			}
		}
		av_free_packet(packet);
	}
	//flush decoder
	//FIX: Flush Frames remained in Codec
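	//after av_free_packet() the packet is empty (data=NULL, size=0), so passing
	//it again drains the frames still buffered inside the decoder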
	while (1) {
		ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
		if (ret < 0)
			break;
		if (!got_picture)
			break;
		sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, 
			pFrameYUV->data, pFrameYUV->linesize);
#if OUTPUT_YUV420P
		int y_size=pCodecCtx->width*pCodecCtx->height;  
		fwrite(pFrameYUV->data[0],1,y_size,fp_yuv);    //Y 
		fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv);  //U
		fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv);  //V
#endif
		//SDL---------------------------
		SDL_UpdateTexture( sdlTexture, &sdlRect, pFrameYUV->data[0], pFrameYUV->linesize[0] );  
		SDL_RenderClear( sdlRenderer );  
		SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect);  
		SDL_RenderPresent( sdlRenderer );  
		//SDL End-----------------------
		//Delay 40ms
		SDL_Delay(40);
	}

	sws_freeContext(img_convert_ctx);

#if OUTPUT_YUV420P 
    fclose(fp_yuv);
#endif 

	SDL_Quit();

	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);

	return 0;
}
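Note: avpicture_get_size()/avpicture_fill() used above are deprecated. A sketch of the same buffer setup with the libavutil/imgutils.h helpers (FFmpeg 3.x+; variable names taken from the example):

#include <libavutil/imgutils.h>

int size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);
out_buffer = (uint8_t *)av_malloc(size);
/* Point pFrameYUV->data/linesize into out_buffer */
av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer,
	AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);
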
Example #3
/**
* @brief Capture loop for a local (video4linux2) input source.
*
* @return non-zero if the input had not ended when the loop stopped
*/
int LocalVideoInput::run()
{
    AVInputFormat *inputFormat = av_find_input_format( "video4linux2" );
    if ( inputFormat == NULL)
        Fatal( "Can't load input format" );

#if 0
    AVProbeData probeData;
    probeData.filename = mSource.c_str();
    probeData.buf = new unsigned char[1024];
    probeData.buf_size = 1024;
    inputFormat = av_probe_input_format( &probeData, 0 );
    if ( inputFormat == NULL)
        Fatal( "Can't probe input format" );

    AVFormatParameters formatParameters ;
    memset( &formatParameters, 0, sizeof(formatParameters) );
    formatParameters.channels = 1;
    formatParameters.channel = 0;
    formatParameters.standard = "PAL";
    formatParameters.pix_fmt = PIX_FMT_RGB24;
    //formatParameters.time_base.num = 1;
    //formatParameters.time_base.den = 10;
    formatParameters.width = 352;
    formatParameters.height = 288;
    //formatParameters.prealloced_context = 1;
#endif

    /* New API */
    AVDictionary *opts = NULL;
    av_dict_set( &opts, "standard", "PAL", 0 );
    av_dict_set( &opts, "video_size", "320x240", 0 );
    av_dict_set( &opts, "channel", "0", 0 );
    av_dict_set( &opts, "pixel_format", "rgb24", 0 );
    //av_dict_set( &opts, "framerate", "10", 0 );
    avDumpDict( opts );

    int avError = 0;
    AVFormatContext *formatContext = NULL;
    //if ( av_open_input_file( &formatContext, mSource.c_str(), inputFormat, 0, &formatParameters ) !=0 )
    if ( (avError = avformat_open_input( &formatContext, mSource.c_str(), inputFormat, &opts )) < 0 )
        Fatal( "Unable to open input %s due to: %s", mSource.c_str(), avStrError(avError) );

    avDumpDict( opts );
#if 0
    if ( av_open_input_stream( &formatContext, 0, mSource.c_str(), inputFormat, &formatParameters ) !=0 )
        Fatal( "Unable to open input %s due to: %s", mSource.c_str(), strerror(errno) );
#endif

    // Locate stream info from input
    if ( (avError = avformat_find_stream_info( formatContext, &opts )) < 0 )
        Fatal( "Unable to find stream info from %s due to: %s", mSource.c_str(), avStrError(avError) );
    
    if ( dbgLevel > DBG_INF )
        av_dump_format( formatContext, 0, mSource.c_str(), 0 );

    // Find first video stream present
    int videoStreamId = -1;
    for ( int i=0; i < formatContext->nb_streams; i++ )
    {
        if ( formatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
        {
            videoStreamId = i;
            //set_context_opts( formatContext->streams[i]->codec, avcodec_opts[CODEC_TYPE_VIDEO], AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
            break;
        }
    }
    if ( videoStreamId == -1 )
        Fatal( "Unable to locate video stream in %s", mSource.c_str() );
    mStream = formatContext->streams[videoStreamId];
    mCodecContext = mStream->codec;

    // Try and get the codec from the codec context
    AVCodec *codec = NULL;
    if ( (codec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
        Fatal( "Can't find codec for video stream from %s", mSource.c_str() );

    // Open the codec
    if ( avcodec_open2( mCodecContext, codec, &opts ) < 0 )
        Fatal( "Unable to open codec for video stream from %s", mSource.c_str() );

    //AVFrame *savedFrame = avcodec_alloc_frame();

    // Allocate space for the native video frame
    AVFrame *frame = avcodec_alloc_frame();

    // Determine required buffer size and allocate buffer
    int pictureSize = avpicture_get_size( mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height );
    
    ByteBuffer frameBuffer( pictureSize );
    
    //avpicture_fill( (AVPicture *)savedFrame, mLastFrame.mBuffer.data(), mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height);

    AVPacket packet;
    while( !mStop )
    {
        int frameComplete = false;
        while ( !frameComplete && (av_read_frame( formatContext, &packet ) >= 0) )
        {
            Debug( 5, "Got packet from stream %d", packet.stream_index );
            if ( packet.stream_index == videoStreamId )
            {
                frameComplete = false;
                if ( avcodec_decode_video2( mCodecContext, frame, &frameComplete, &packet ) < 0 )
                    Fatal( "Unable to decode frame at frame %ju", mFrameCount );

                Debug( 3, "Decoded video packet at frame %ju, pts %jd", mFrameCount, packet.pts );

                if ( frameComplete )
                {
                    Debug( 3, "Got frame %d, pts %jd (%.3f)", mCodecContext->frame_number, frame->pkt_pts, (((double)(packet.pts-mStream->start_time)*mStream->time_base.num)/mStream->time_base.den) );

                    avpicture_layout( (AVPicture *)frame, mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height, frameBuffer.data(), frameBuffer.capacity() );

                    uint64_t timestamp = packet.pts;
                    //Debug( 3, "%d: TS: %lld, TS1: %lld, TS2: %lld, TS3: %.3f", time( 0 ), timestamp, packet.pts, ((1000000LL*packet.pts*mStream->time_base.num)/mStream->time_base.den), (((double)packet.pts*mStream->time_base.num)/mStream->time_base.den) );
                    //Info( "%ld:TS: %lld, TS1: %lld, TS2: %lld, TS3: %.3f", time( 0 ), timestamp, packet.pts, ((1000000LL*packet.pts*mStream->time_base.num)/mStream->time_base.den), (((double)packet.pts*mStream->time_base.num)/mStream->time_base.den) );

                    VideoFrame *videoFrame = new VideoFrame( this, mCodecContext->frame_number, timestamp, frameBuffer );
                    distributeFrame( FramePtr( videoFrame ) );
                }
            }
            av_free_packet( &packet );
        }
        usleep( INTERFRAME_TIMEOUT );
    }
    cleanup();

    av_freep( &frame );
    if ( mCodecContext )
    {
       avcodec_close( mCodecContext );
       mCodecContext = NULL; // Freed by avformat_close_input
    }
    if ( formatContext )
    {
        avformat_close_input( &formatContext );
        formatContext = NULL;
        //av_free( formatContext );
    }
    return( !ended() );
}
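avDumpDict() above is a project-local helper. A generic sketch for listing options the demuxer did not consume (any entries left in the dictionary after avformat_open_input()), using the av_dict_get() iteration idiom and the example's own Debug() logger:

AVDictionaryEntry *entry = NULL;
while ( (entry = av_dict_get( opts, "", entry, AV_DICT_IGNORE_SUFFIX )) )
    Debug( 1, "Option '%s' (= '%s') was not consumed by the demuxer", entry->key, entry->value );
av_dict_free( &opts );
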
Example #4
static int applehttp_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    AppleHTTPContext *c = s->priv_data;
    int ret = 0, i, j, stream_offset = 0;

    if ((ret = parse_playlist(c, s->filename, NULL, s->pb)) < 0)
        goto fail;

    if (c->n_variants == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }
    /* If the playlist only contained variants, parse each individual
     * variant playlist. */
    if (c->n_variants > 1 || c->variants[0]->n_segments == 0) {
        for (i = 0; i < c->n_variants; i++) {
            struct variant *v = c->variants[i];
            if ((ret = parse_playlist(c, v->url, v, NULL)) < 0)
                goto fail;
        }
    }

    if (c->variants[0]->n_segments == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }

    /* If this isn't a live stream, calculate the total duration of the
     * stream. */
    if (c->variants[0]->finished) {
        int64_t duration = 0;
        for (i = 0; i < c->variants[0]->n_segments; i++)
            duration += c->variants[0]->segments[i]->duration;
        s->duration = duration * AV_TIME_BASE;
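        /* segment durations are whole seconds here, hence the scaling to AV_TIME_BASE units */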
    }

    /* Open the demuxer for each variant */
    for (i = 0; i < c->n_variants; i++) {
        struct variant *v = c->variants[i];
        AVInputFormat *in_fmt = NULL;
        char bitrate_str[20];
        if (v->n_segments == 0)
            continue;

        if (!(v->ctx = avformat_alloc_context())) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        v->index  = i;
        v->needed = 1;
        v->parent = s;

        /* If this is a live stream with more than 3 segments, start at the
         * third last segment. */
        v->cur_seq_no = v->start_seq_no;
        if (!v->finished && v->n_segments > 3)
            v->cur_seq_no = v->start_seq_no + v->n_segments - 3;

        v->read_buffer = av_malloc(INITIAL_BUFFER_SIZE);
        ffio_init_context(&v->pb, v->read_buffer, INITIAL_BUFFER_SIZE, 0, v,
                          read_data, NULL, NULL);
        v->pb.seekable = 0;
        ret = av_probe_input_buffer(&v->pb, &in_fmt, v->segments[0]->url,
                                    NULL, 0, 0);
        if (ret < 0)
            goto fail;
        v->ctx->pb       = &v->pb;
        ret = avformat_open_input(&v->ctx, v->segments[0]->url, in_fmt, NULL);
        if (ret < 0)
            goto fail;
        v->stream_offset = stream_offset;
        snprintf(bitrate_str, sizeof(bitrate_str), "%d", v->bandwidth);
        /* Create new AVStreams for each stream in this variant */
        for (j = 0; j < v->ctx->nb_streams; j++) {
            AVStream *st = av_new_stream(s, i);
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            avcodec_copy_context(st->codec, v->ctx->streams[j]->codec);
            if (v->bandwidth)
                av_dict_set(&st->metadata, "variant_bitrate", bitrate_str,
                                 0);
        }
        stream_offset += v->ctx->nb_streams;
    }

    c->first_packet = 1;

    return 0;
fail:
    free_variant_list(c);
    return ret;
}
Example #5
static GF_Err FFD_ConnectService(GF_InputService *plug, GF_ClientService *serv, const char *url)
{
	GF_Err e;
	s64 last_aud_pts;
	u32 i;
	s32 res;
	Bool is_local;
	const char *sOpt;
	char *ext, szName[1024];
	FFDemux *ffd = plug->priv;
	AVInputFormat *av_in = NULL;
	char szExt[20];

	if (ffd->ctx) return GF_SERVICE_ERROR;

	assert( url && strlen(url) < 1024);
	strcpy(szName, url);
	ext = strrchr(szName, '#');
	ffd->service_type = 0;
	e = GF_NOT_SUPPORTED;
	ffd->service = serv;

	if (ext) {
		if (!stricmp(&ext[1], "video")) ffd->service_type = 1;
		else if (!stricmp(&ext[1], "audio")) ffd->service_type = 2;
		ext[0] = 0;
	}

	/*some extensions not supported by ffmpeg, overload input format*/
	ext = strrchr(szName, '.');
	strcpy(szExt, ext ? ext+1 : "");
	strlwr(szExt);
	if (!strcmp(szExt, "cmp")) av_in = av_find_input_format("m4v");

	is_local = (strnicmp(url, "file://", 7) && strstr(url, "://")) ? 0 : 1;

	GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[FFMPEG] opening file %s - local %d - av_in %08x\n", url, is_local, av_in));

	if (!is_local) {
		AVProbeData   pd;

		/*setup wrapper for FFMPEG I/O*/
		ffd->buffer_size = 8192;
		sOpt = gf_modules_get_option((GF_BaseInterface *)plug, "FFMPEG", "IOBufferSize");
		if (sOpt) ffd->buffer_size = atoi(sOpt);
		ffd->buffer = gf_malloc(sizeof(char)*ffd->buffer_size);
#ifdef FFMPEG_DUMP_REMOTE
		ffd->outdbg = gf_f64_open("ffdeb.raw", "wb");
#endif
#ifdef USE_PRE_0_7
		init_put_byte(&ffd->io, ffd->buffer, ffd->buffer_size, 0, ffd, ff_url_read, NULL, NULL);
		ffd->io.is_streamed = 1;
#else
		ffd->io.seekable = 1;
#endif

		ffd->dnload = gf_term_download_new(ffd->service, url, GF_NETIO_SESSION_NOT_THREADED  | GF_NETIO_SESSION_NOT_CACHED, NULL, ffd);
		if (!ffd->dnload) return GF_URL_ERROR;
		while (1) {
			u32 read;
			e = gf_dm_sess_fetch_data(ffd->dnload, ffd->buffer + ffd->buffer_used, ffd->buffer_size - ffd->buffer_used, &read);
			if (e==GF_EOS) break;
			/*synchronous session: no data available yet, keep polling*/
			if (e==GF_IP_NETWORK_EMPTY) continue;
			if (e) goto err_exit;
			ffd->buffer_used += read;
			if (ffd->buffer_used == ffd->buffer_size) break;
		}
		if (e==GF_EOS) {
			const char *cache_file = gf_dm_sess_get_cache_name(ffd->dnload);
			res = open_file(&ffd->ctx, cache_file, av_in);
		} else {
			pd.filename = szName;
			pd.buf_size = ffd->buffer_used;
			pd.buf = ffd->buffer;
			av_in = av_probe_input_format(&pd, 1);
			if (!av_in) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] error probing file %s - probe start with %c %c %c %c\n", url, ffd->buffer[0], ffd->buffer[1], ffd->buffer[2], ffd->buffer[3]));
				return GF_NOT_SUPPORTED;
			}
			/*setup downloader*/
			av_in->flags |= AVFMT_NOFILE;
#if FF_API_FORMAT_PARAMETERS /*commit ffmpeg 603b8bc2a109978c8499b06d2556f1433306eca7*/
			res = avformat_open_input(&ffd->ctx, szName, av_in, NULL);
#else
			res = av_open_input_stream(&ffd->ctx, &ffd->io, szName, av_in, NULL);
#endif
		}
	} else {
		res = open_file(&ffd->ctx, szName, av_in);
	}

	switch (res) {
#ifndef _WIN32_WCE
	case 0: e = GF_OK; break;
	case AVERROR_IO: e = GF_URL_ERROR; goto err_exit;
	case AVERROR_INVALIDDATA: e = GF_NON_COMPLIANT_BITSTREAM; goto err_exit;
	case AVERROR_NOMEM: e = GF_OUT_OF_MEM; goto err_exit;
	case AVERROR_NOFMT: e = GF_NOT_SUPPORTED; goto err_exit;
#endif
	default: e = GF_SERVICE_ERROR; goto err_exit;
	}

	GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[FFMPEG] looking for streams in %s - %d streams - type %s\n", ffd->ctx->filename, ffd->ctx->nb_streams, ffd->ctx->iformat->name));

	res = av_find_stream_info(ffd->ctx);
	if (res <0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] cannot locate streams - error %d\n", res));
		e = GF_NOT_SUPPORTED;
		goto err_exit;
	}
	GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[FFMPEG] file %s opened - %d streams\n", url, ffd->ctx->nb_streams));

	/*figure out if we can use codecs or not*/
	ffd->audio_st = ffd->video_st = -1;
	for (i = 0; i < ffd->ctx->nb_streams; i++) {
		AVCodecContext *enc = ffd->ctx->streams[i]->codec;
		switch(enc->codec_type) {
		case AVMEDIA_TYPE_AUDIO:
			if ((ffd->audio_st<0) && (ffd->service_type!=1)) {
				ffd->audio_st = i;
				ffd->audio_tscale = ffd->ctx->streams[i]->time_base;
			}
			break;
		case AVMEDIA_TYPE_VIDEO:
			if ((ffd->video_st<0) && (ffd->service_type!=2)) {
				ffd->video_st = i;
				ffd->video_tscale = ffd->ctx->streams[i]->time_base;
			}
			break;
		default:
			break;
		}
	}
	if ((ffd->service_type==1) && (ffd->video_st<0)) goto err_exit;
	if ((ffd->service_type==2) && (ffd->audio_st<0)) goto err_exit;
	if ((ffd->video_st<0) && (ffd->audio_st<0)) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] No supported streams in file\n"));
		goto err_exit;
	}


	sOpt = gf_modules_get_option((GF_BaseInterface *)plug, "FFMPEG", "DataBufferMS");
	ffd->data_buffer_ms = 0;
	if (sOpt) ffd->data_buffer_ms = atoi(sOpt);
	if (!ffd->data_buffer_ms) ffd->data_buffer_ms = FFD_DATA_BUFFER;

	/*build seek*/
	if (is_local) {
		/*check we do have increasing pts. If not we can't rely on pts, we must skip SL
		we assume video pts is always present*/
		if (ffd->audio_st>=0) {
			last_aud_pts = 0;
			for (i=0; i<20; i++) {
				AVPacket pkt;
				pkt.stream_index = -1;
				if (av_read_frame(ffd->ctx, &pkt) <0) break;
				if (pkt.pts == AV_NOPTS_VALUE) pkt.pts = pkt.dts;
				if (pkt.stream_index==ffd->audio_st) last_aud_pts = pkt.pts;
			}
			if (last_aud_pts*ffd->audio_tscale.den<10*ffd->audio_tscale.num) ffd->unreliable_audio_timing = 1;
		}

		ffd->seekable = (av_seek_frame(ffd->ctx, -1, 0, AVSEEK_FLAG_BACKWARD)<0) ? 0 : 1;
		if (!ffd->seekable) {
			av_close_input_file(ffd->ctx);
			open_file(&ffd->ctx, szName, av_in);
			av_find_stream_info(ffd->ctx);
		}
	}

	/*let's go*/
	gf_term_on_connect(serv, NULL, GF_OK);
	/*if (!ffd->service_type)*/ FFD_SetupObjects(ffd);
	ffd->service_type = 0;
	return GF_OK;

err_exit:
	GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] Error opening file %s: %s\n", url, gf_error_to_string(e)));
    if (ffd->ctx) av_close_input_file(ffd->ctx);
	ffd->ctx = NULL;
	gf_term_on_connect(serv, NULL, e);
	return GF_OK;
}
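The error switch above maps pre-0.8 AVERROR_* constants to GPAC errors. On newer FFmpeg, a sketch using av_strerror() (same res and e variables assumed) is simpler:

if (res < 0) {
	char szErr[AV_ERROR_MAX_STRING_SIZE];
	av_strerror(res, szErr, sizeof(szErr));
	GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] open failed: %s\n", szErr));
	e = GF_SERVICE_ERROR;
	goto err_exit;
}
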
Example #6
int main(int argc, char *argv[]) {
  AVFormatContext *pFormatCtx = NULL;
  int             i, videoStream, audioStream;
  AVCodecContext  *pCodecCtx = NULL;
  AVCodec         *pCodec = NULL;
  AVFrame         *pFrame = NULL; 
  AVPacket        packet;
  int             frameFinished;
  //float           aspect_ratio;
  
  AVCodecContext  *aCodecCtx = NULL;
  AVCodec         *aCodec = NULL;

  SDL_Overlay     *bmp = NULL;
  SDL_Surface     *screen = NULL;
  SDL_Rect        rect;
  SDL_Event       event;
  SDL_AudioSpec   wanted_spec, spec;

  struct SwsContext   *sws_ctx            = NULL;
  AVDictionary        *videoOptionsDict   = NULL;
  AVDictionary        *audioOptionsDict   = NULL;

  if(argc < 2) {
    fprintf(stderr, "Usage: test <file>\n");
    exit(1);
  }
  // Register all formats and codecs
  av_register_all();
  
  if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
    fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
    exit(1);
  }

  // Open video file
  if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
    return -1; // Couldn't open file
  
  // Retrieve stream information
  if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information
  
  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, argv[1], 0);
  
  // Find the first video stream
  videoStream=-1;
  audioStream=-1;
  for(i=0; i<pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
       videoStream < 0) {
      videoStream=i;
    }
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
       audioStream < 0) {
      audioStream=i;
    }
  }
  if(videoStream==-1)
    return -1; // Didn't find a video stream
  if(audioStream==-1)
    return -1;
   
  aCodecCtx=pFormatCtx->streams[audioStream]->codec;
  // Set audio settings from codec info
  wanted_spec.freq = aCodecCtx->sample_rate;
  wanted_spec.format = AUDIO_S16SYS;
  wanted_spec.channels = aCodecCtx->channels;
  wanted_spec.silence = 0;
  wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
  wanted_spec.callback = audio_callback;
  wanted_spec.userdata = aCodecCtx;
  
  if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
    fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
    return -1;
  }
  aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
  if(!aCodec) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1;
  }
  avcodec_open2(aCodecCtx, aCodec, &audioOptionsDict);

  // audio_st = pFormatCtx->streams[index]
  packet_queue_init(&audioq);
  SDL_PauseAudio(0);

  // Get a pointer to the codec context for the video stream
  pCodecCtx=pFormatCtx->streams[videoStream]->codec;
  
  // Find the decoder for the video stream
  pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
  if(pCodec==NULL) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1; // Codec not found
  }
  // Open codec
  if(avcodec_open2(pCodecCtx, pCodec, &videoOptionsDict)<0)
    return -1; // Could not open codec
  
  // Allocate video frame
  pFrame=av_frame_alloc();

  // Make a screen to put our video

#ifndef __DARWIN__
        screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
        screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif
  if(!screen) {
    fprintf(stderr, "SDL: could not set video mode - exiting\n");
    exit(1);
  }
  
  // Allocate a place to put our YUV image on that screen
  bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
				 pCodecCtx->height,
				 SDL_YV12_OVERLAY,
				 screen);
  sws_ctx =
    sws_getContext
    (
        pCodecCtx->width,
        pCodecCtx->height,
        pCodecCtx->pix_fmt,
        pCodecCtx->width,
        pCodecCtx->height,
        AV_PIX_FMT_YUV420P,
        SWS_BILINEAR,
        NULL,
        NULL,
        NULL
    );


  // Read frames and save first five frames to disk
  i=0;
  while(av_read_frame(pFormatCtx, &packet)>=0) {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {
      // Decode video frame
      avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, 
			   &packet);
      
      // Did we get a video frame?
      if(frameFinished) {
	SDL_LockYUVOverlay(bmp);

	AVFrame pict;
	pict.data[0] = bmp->pixels[0];
	pict.data[1] = bmp->pixels[2];
	pict.data[2] = bmp->pixels[1];

	pict.linesize[0] = bmp->pitches[0];
	pict.linesize[1] = bmp->pitches[2];
	pict.linesize[2] = bmp->pitches[1];
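	// Note the swapped plane indices: the SDL overlay is YV12 (Y,V,U)
	// while the decoded AVFrame is YUV420P (Y,U,V)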

	// Convert the image into YUV format that SDL uses
    sws_scale
    (
        sws_ctx, 
        (uint8_t const * const *)pFrame->data, 
        pFrame->linesize, 
        0,
        pCodecCtx->height,
        pict.data,
        pict.linesize
    );
	
	SDL_UnlockYUVOverlay(bmp);
	
	rect.x = 0;
	rect.y = 0;
	rect.w = pCodecCtx->width;
	rect.h = pCodecCtx->height;
	SDL_DisplayYUVOverlay(bmp, &rect);
      }
      // Free the packet that was allocated by av_read_frame
      av_packet_unref(&packet);
    } else if(packet.stream_index==audioStream) {
      packet_queue_put(&audioq, &packet);
    } else {
      av_packet_unref(&packet);
    }
    SDL_PollEvent(&event);
    switch(event.type) {
    case SDL_QUIT:
      quit = 1;
      SDL_Quit();
      exit(0);
      break;
    default:
      break;
    }

  }

  // Free the YUV frame
  av_free(pFrame);
  
  // Close the codec
  avcodec_close(pCodecCtx);
  
  // Close the video file
  avformat_close_input(&pFormatCtx);
  
  return 0;
}
Example #7
u32 adecOpen(AudioDecoder* data)
{
	AudioDecoder& adec = *data;

	adec.adecCb = &Emu.GetCPU().AddThread(CPU_THREAD_PPU);

	u32 adec_id = cellAdec->GetNewId(data);

	adec.id = adec_id;

	adec.adecCb->SetName("Audio Decoder[" + std::to_string(adec_id) + "] Callback");

	thread t("Audio Decoder[" + std::to_string(adec_id) + "] Thread", [&]()
	{
		cellAdec->Notice("Audio Decoder thread started");

		AdecTask& task = adec.task;

		while (true)
		{
			if (Emu.IsStopped())
			{
				break;
			}

			if (!adec.job.GetCountUnsafe() && adec.is_running)
			{
				std::this_thread::sleep_for(std::chrono::milliseconds(1));
				continue;
			}

			/*if (adec.frames.GetCount() >= 50)
			{
				std::this_thread::sleep_for(std::chrono::milliseconds(1));
				continue;
			}*/

			if (!adec.job.Pop(task))
			{
				break;
			}

			switch (task.type)
			{
			case adecStartSeq:
			{
				// TODO: reset data
				cellAdec->Warning("adecStartSeq:");

				adec.reader.addr = 0;
				adec.reader.size = 0;
				adec.reader.init = false;
				if (adec.reader.rem) free(adec.reader.rem);
				adec.reader.rem = nullptr;
				adec.reader.rem_size = 0;
				adec.is_running = true;
				adec.just_started = true;
			}
			break;

			case adecEndSeq:
			{
				// TODO: finalize
				cellAdec->Warning("adecEndSeq:");

				/*Callback cb;
				cb.SetAddr(adec.cbFunc);
				cb.Handle(adec.id, CELL_ADEC_MSG_TYPE_SEQDONE, CELL_OK, adec.cbArg);
				cb.Branch(true); // ???*/
				adec.adecCb->ExecAsCallback(adec.cbFunc, true, adec.id, CELL_ADEC_MSG_TYPE_SEQDONE, CELL_OK, adec.cbArg);

				adec.is_running = false;
				adec.just_finished = true;
			}
			break;

			case adecDecodeAu:
			{
				int err = 0;

				adec.reader.addr = task.au.addr;
				adec.reader.size = task.au.size;
				//LOG_NOTICE(HLE, "Audio AU: size = 0x%x, pts = 0x%llx", task.au.size, task.au.pts);

				if (adec.just_started)
				{
					adec.first_pts = task.au.pts;
					adec.last_pts = task.au.pts - 0x10000; // hack
				}

				struct AVPacketHolder : AVPacket
				{
					AVPacketHolder(u32 size)
					{
						av_init_packet(this);

						if (size)
						{
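							// decoders may read past the end of the payload while
							// parsing, so FFmpeg requires FF_INPUT_BUFFER_PADDING_SIZE
							// extra zeroed bytes (av_calloc zero-fills them)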
							data = (u8*)av_calloc(1, size + FF_INPUT_BUFFER_PADDING_SIZE);
							this->size = size + FF_INPUT_BUFFER_PADDING_SIZE;
						}
						else
						{
							data = NULL;
							size = 0;
						}
					}

					~AVPacketHolder()
					{
						av_free(data);
						//av_free_packet(this);
					}

				} au(0);

				/*{
					wxFile dump;
					dump.Open(wxString::Format("audio pts-0x%llx.dump", task.au.pts), wxFile::write);
					u8* buf = (u8*)malloc(task.au.size);
					if (Memory.CopyToReal(buf, task.au.addr, task.au.size)) dump.Write(buf, task.au.size);
					free(buf);
					dump.Close();
				}*/

				if (adec.just_started && adec.just_finished)
				{
					avcodec_flush_buffers(adec.ctx);
					adec.reader.init = true;
					adec.just_finished = false;
					adec.just_started = false;
				}
				else if (adec.just_started) // deferred initialization
				{
					err = avformat_open_input(&adec.fmt, NULL, av_find_input_format("oma"), NULL);
					if (err)
					{
						cellAdec->Error("adecDecodeAu: avformat_open_input() failed");
						Emu.Pause();
						break;
					}
					AVCodec* codec = avcodec_find_decoder(AV_CODEC_ID_ATRAC3P); // ???
					if (!codec)
					{
						cellAdec->Error("adecDecodeAu: avcodec_find_decoder() failed");
						Emu.Pause();
						break;
					}
					//err = avformat_find_stream_info(adec.fmt, NULL);
					//if (err)
					//{
					//	cellAdec->Error("adecDecodeAu: avformat_find_stream_info() failed");
					//	Emu.Pause();
					//	break;
					//}
					//if (!adec.fmt->nb_streams)
					//{
					//	cellAdec->Error("adecDecodeAu: no stream found");
					//	Emu.Pause();
					//	break;
					//}
					if (!avformat_new_stream(adec.fmt, codec))
					{
						cellAdec->Error("adecDecodeAu: avformat_new_stream() failed");
						Emu.Pause();
						break;
					}
					adec.ctx = adec.fmt->streams[0]->codec; // TODO: check data

					AVDictionary* opts = nullptr;
					av_dict_set(&opts, "refcounted_frames", "1", 0);
					{
						std::lock_guard<std::mutex> lock(g_mutex_avcodec_open2);
						// not multithread-safe (???)
						err = avcodec_open2(adec.ctx, codec, &opts);
					}
					if (err)
					{
						cellAdec->Error("adecDecodeAu: avcodec_open2() failed");
						Emu.Pause();
						break;
					}
					adec.just_started = false;
				}

				bool last_frame = false;

				while (true)
				{
					if (Emu.IsStopped())
					{
						cellAdec->Warning("adecDecodeAu: aborted");
						return;
					}

					/*if (!adec.ctx) // fake
					{
						AdecFrame frame;
						frame.pts = task.au.pts;
						frame.auAddr = task.au.addr;
						frame.auSize = task.au.size;
						frame.userdata = task.au.userdata;
						frame.size = 4096;
						frame.data = nullptr;
						adec.frames.Push(frame);

						adec.adecCb->ExecAsCallback(adec.cbFunc, false, adec.id, CELL_ADEC_MSG_TYPE_PCMOUT, CELL_OK, adec.cbArg);

						break;
					}*/

					last_frame = av_read_frame(adec.fmt, &au) < 0;
					if (last_frame)
					{
						//break;
						av_free(au.data);
						au.data = NULL;
						au.size = 0;
					}

					struct AdecFrameHolder : AdecFrame
					{
						AdecFrameHolder()
						{
							data = av_frame_alloc();
						}

						~AdecFrameHolder()
						{
							if (data)
							{
								av_frame_unref(data);
								av_frame_free(&data);
							}
						}

					} frame;

					if (!frame.data)
					{
						cellAdec->Error("adecDecodeAu: av_frame_alloc() failed");
						Emu.Pause();
						break;
					}

					int got_frame = 0;

					int decode = avcodec_decode_audio4(adec.ctx, frame.data, &got_frame, &au);

					if (decode <= 0)
					{
						if (!last_frame && decode < 0)
						{
							cellAdec->Error("adecDecodeAu: AU decoding error(0x%x)", decode);
						}
						if (!got_frame && adec.reader.size == 0) break;
					}

					if (got_frame)
					{
						u64 ts = av_frame_get_best_effort_timestamp(frame.data);
						if (ts != AV_NOPTS_VALUE)
						{
							frame.pts = ts/* - adec.first_pts*/;
							adec.last_pts = frame.pts;
						}
						else
						{
							adec.last_pts += ((u64)frame.data->nb_samples) * 90000 / 48000;
							frame.pts = adec.last_pts;
						}
						//frame.pts = adec.last_pts;
						//adec.last_pts += ((u64)frame.data->nb_samples) * 90000 / 48000; // ???
						frame.auAddr = task.au.addr;
						frame.auSize = task.au.size;
						frame.userdata = task.au.userdata;
						frame.size = frame.data->nb_samples * frame.data->channels * sizeof(float);

						if (frame.data->format != AV_SAMPLE_FMT_FLTP)
						{
							cellAdec->Error("adecDecodeaAu: unsupported frame format(%d)", frame.data->format);
							Emu.Pause();
							break;
						}
						if (frame.data->channels != 2)
						{
							cellAdec->Error("adecDecodeAu: unsupported channel count (%d)", frame.data->channels);
							Emu.Pause();
							break;
						}

						//LOG_NOTICE(HLE, "got audio frame (pts=0x%llx, nb_samples=%d, ch=%d, sample_rate=%d, nbps=%d)",
							//frame.pts, frame.data->nb_samples, frame.data->channels, frame.data->sample_rate,
							//av_get_bytes_per_sample((AVSampleFormat)frame.data->format));

						adec.frames.Push(frame);
						frame.data = nullptr; // to prevent destruction

						/*Callback cb;
						cb.SetAddr(adec.cbFunc);
						cb.Handle(adec.id, CELL_ADEC_MSG_TYPE_PCMOUT, CELL_OK, adec.cbArg);
						cb.Branch(false);*/
						adec.adecCb->ExecAsCallback(adec.cbFunc, false, adec.id, CELL_ADEC_MSG_TYPE_PCMOUT, CELL_OK, adec.cbArg);
					}
				}
						
				/*Callback cb;
				cb.SetAddr(adec.cbFunc);
				cb.Handle(adec.id, CELL_ADEC_MSG_TYPE_AUDONE, task.au.auInfo_addr, adec.cbArg);
				cb.Branch(false);*/
				adec.adecCb->ExecAsCallback(adec.cbFunc, false, adec.id, CELL_ADEC_MSG_TYPE_AUDONE, task.au.auInfo_addr, adec.cbArg);
			}
			break;

			case adecClose:
			{
				adec.is_finished = true;
				cellAdec->Notice("Audio Decoder thread ended");
				return;
			}

			default:
				cellAdec->Error("Audio Decoder thread error: unknown task(%d)", task.type);
			}
		}
		adec.is_finished = true;
		cellAdec->Warning("Audio Decoder thread aborted");
	});

	t.detach();

	return adec_id;
}
Example #8
void open_context(AVFormatContext **ic, const char *input_file, const char *key, AVInputFormat *ifmt, AVOutputFormat **ofmt, AVFormatContext **oc, AVStream **video_st, AVStream **audio_st, int *video_index, int *audio_index){
    int ret;
    int i;
    AVCodec *codec;

    ret = avformat_open_input(ic, input_file, ifmt, NULL);
    if (ret != 0) {
        fprintf(stderr, "Could not open input file, make sure it is an mpegts file: %d\n", ret);
        exit(1);
    }

    if (avformat_find_stream_info(*ic, NULL) < 0) {
        fprintf(stderr, "Could not read stream information\n");
        exit(1);
    }

    *ofmt = av_guess_format("mpegts", NULL, NULL);
    if (!*ofmt) {
        fprintf(stderr, "Could not find MPEG-TS muxer\n");
        exit(1);
    }

    *oc = avformat_alloc_context();

    if (!*oc) {
        fprintf(stderr, "Could not allocated output context");
        exit(1);
    }
    (*oc)->oformat = *ofmt;

    for (i = 0; i < (*ic)->nb_streams && (*video_index < 0 || *audio_index < 0); i++) {
        switch ((*ic)->streams[i]->codec->codec_type) {
            case AVMEDIA_TYPE_VIDEO:
                *video_index = i;
                (*ic)->streams[i]->discard = AVDISCARD_NONE;
                *video_st = add_output_stream(*oc, (*ic)->streams[i]);
                break;
            case AVMEDIA_TYPE_AUDIO:
                *audio_index = i;
                (*ic)->streams[i]->discard = AVDISCARD_NONE;
                *audio_st = add_output_stream(*oc, (*ic)->streams[i]);
                break;
            default:
                (*ic)->streams[i]->discard = AVDISCARD_ALL;
                break;
        }
    }

    // Don't print warnings when PTS and DTS are identical.
    (*ic)->flags |= AVFMT_FLAG_IGNDTS;

    av_dump_format(*oc, 0, key, 1);

    if (*video_st) {
      codec = avcodec_find_decoder((*video_st)->codec->codec_id);
      if (!codec) {
          fprintf(stderr, "Could not find video decoder %x, key frames will not be honored\n", (*video_st)->codec->codec_id);
      }

      if (avcodec_open2((*video_st)->codec, codec, NULL) < 0) {
          fprintf(stderr, "Could not open video decoder, key frames will not be honored\n");
      }
    }

}
Example #9
static int hls_read_header(AVFormatContext *s)
{
    URLContext *u = (s->flags & AVFMT_FLAG_CUSTOM_IO) ? NULL : s->pb->opaque;
    HLSContext *c = s->priv_data;
    int ret = 0, i, j, stream_offset = 0;

    c->interrupt_callback = &s->interrupt_callback;

    // if the URL context is good, read important options we must broker later
    if (u && u->prot->priv_data_class) {
        // get the previous user agent & set back to null if string size is zero
        av_freep(&c->user_agent);
        av_opt_get(u->priv_data, "user-agent", 0, (uint8_t**)&(c->user_agent));
        if (c->user_agent && !strlen(c->user_agent))
            av_freep(&c->user_agent);

        // get the previous cookies & set back to null if string size is zero
        av_freep(&c->cookies);
        av_opt_get(u->priv_data, "cookies", 0, (uint8_t**)&(c->cookies));
        if (c->cookies && !strlen(c->cookies))
            av_freep(&c->cookies);

        // get the previous headers & set back to null if string size is zero
        av_freep(&c->headers);
        av_opt_get(u->priv_data, "headers", 0, (uint8_t**)&(c->headers));
        if (c->headers && !strlen(c->headers))
            av_freep(&c->headers);
    }

    if ((ret = parse_playlist(c, s->filename, NULL, s->pb)) < 0)
        goto fail;

    if (c->n_variants == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }
    /* If the playlist only contained playlists (Master Playlist),
     * parse each individual playlist. */
    if (c->n_playlists > 1 || c->playlists[0]->n_segments == 0) {
        for (i = 0; i < c->n_playlists; i++) {
            struct playlist *pls = c->playlists[i];
            if ((ret = parse_playlist(c, pls->url, pls, NULL)) < 0)
                goto fail;
        }
    }

    if (c->variants[0]->playlists[0]->n_segments == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }

    /* If this isn't a live stream, calculate the total duration of the
     * stream. */
    if (c->variants[0]->playlists[0]->finished) {
        int64_t duration = 0;
        for (i = 0; i < c->variants[0]->playlists[0]->n_segments; i++)
            duration += c->variants[0]->playlists[0]->segments[i]->duration;
        s->duration = duration;
    }

    /* Open the demuxer for each playlist */
    for (i = 0; i < c->n_playlists; i++) {
        struct playlist *pls = c->playlists[i];
        AVInputFormat *in_fmt = NULL;

        if (pls->n_segments == 0)
            continue;

        if (!(pls->ctx = avformat_alloc_context())) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        pls->index  = i;
        pls->needed = 1;
        pls->parent = s;

        /* If this is a live stream with more than 3 segments, start at the
         * third last segment. */
        pls->cur_seq_no = pls->start_seq_no;
        if (!pls->finished && pls->n_segments > 3)
            pls->cur_seq_no = pls->start_seq_no + pls->n_segments - 3;

        pls->read_buffer = av_malloc(INITIAL_BUFFER_SIZE);
        ffio_init_context(&pls->pb, pls->read_buffer, INITIAL_BUFFER_SIZE, 0, pls,
                          read_data, NULL, NULL);
        pls->pb.seekable = 0;
        ret = av_probe_input_buffer(&pls->pb, &in_fmt, pls->segments[0]->url,
                                    NULL, 0, 0);
        if (ret < 0) {
            /* Free the ctx - it isn't initialized properly at this point,
             * so avformat_close_input shouldn't be called. If
             * avformat_open_input fails below, it frees and zeros the
             * context, so it doesn't need any special treatment like this. */
            av_log(s, AV_LOG_ERROR, "Error when loading first segment '%s'\n", pls->segments[0]->url);
            avformat_free_context(pls->ctx);
            pls->ctx = NULL;
            goto fail;
        }
        pls->ctx->pb       = &pls->pb;
        pls->stream_offset = stream_offset;
        ret = avformat_open_input(&pls->ctx, pls->segments[0]->url, in_fmt, NULL);
        if (ret < 0)
            goto fail;

        pls->ctx->ctx_flags &= ~AVFMTCTX_NOHEADER;
        ret = avformat_find_stream_info(pls->ctx, NULL);
        if (ret < 0)
            goto fail;

        /* Create new AVStreams for each stream in this playlist */
        for (j = 0; j < pls->ctx->nb_streams; j++) {
            AVStream *st = avformat_new_stream(s, NULL);
            AVStream *ist = pls->ctx->streams[j];
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            st->id = i;
            avpriv_set_pts_info(st, ist->pts_wrap_bits, ist->time_base.num, ist->time_base.den);
            avcodec_copy_context(st->codec, pls->ctx->streams[j]->codec);
        }

        stream_offset += pls->ctx->nb_streams;
    }

    /* Create a program for each variant */
    for (i = 0; i < c->n_variants; i++) {
        struct variant *v = c->variants[i];
        char bitrate_str[20];
        AVProgram *program;

        snprintf(bitrate_str, sizeof(bitrate_str), "%d", v->bandwidth);

        program = av_new_program(s, i);
        if (!program)
            goto fail;
        av_dict_set(&program->metadata, "variant_bitrate", bitrate_str, 0);

        for (j = 0; j < v->n_playlists; j++) {
            struct playlist *pls = v->playlists[j];
            int is_shared = playlist_in_multiple_variants(c, pls);
            int k;

            for (k = 0; k < pls->ctx->nb_streams; k++) {
                struct AVStream *st = s->streams[pls->stream_offset + k];

                ff_program_add_stream_index(s, i, pls->stream_offset + k);

                /* Set variant_bitrate for streams unique to this variant */
                if (!is_shared && v->bandwidth)
                    av_dict_set(&st->metadata, "variant_bitrate", bitrate_str, 0);
            }
        }
    }

    c->first_packet = 1;
    c->first_timestamp = AV_NOPTS_VALUE;
    c->seek_timestamp  = AV_NOPTS_VALUE;

    return 0;
fail:
    free_playlist_list(c);
    free_variant_list(c);
    return ret;
}
Example #10
bool visualiserWin::play(std::string &file)
{
	if(MPDMode)
	{
		mpdargst *args = new mpdargst;
		args->dspman = dspman;
		args->file = MPDFile;
		args->win = this;
		ffmpegworkerthread = new pthread_t;
		pthread_create(ffmpegworkerthread, NULL, MPDWorkerEntry, args);
		return true;
	}
	//Initialise ffmpeg.
	av_register_all();

	//Attempt to open the file.
	AVFormatContext* fmtCtx = NULL;

	if(avformat_open_input(&fmtCtx, file.c_str(), NULL, NULL) != 0)
	{
		std::cerr << "Could not open file." << std::endl;
		return false;
	}

	if(avformat_find_stream_info(fmtCtx, NULL) < 0)
	{
		std::cerr << "Could not find stream information." << std::cerr;
		return false;
	}

	AVCodecContext* codecCtx;
	int audioStream = -1;
	for(int i = 0; i < fmtCtx->nb_streams; i++)
	{
		if(fmtCtx->streams[i]->codec->codec_type ==
		   AVMEDIA_TYPE_AUDIO)
		{
			audioStream = i;
			break;
		}
	}

	if(audioStream == -1)
	{
		std::cerr << "Couldn't find audio stream." << std::endl;
		return false;
	}

	codecCtx = fmtCtx->streams[audioStream]->codec;

	AVCodec *codec;
	codec = avcodec_find_decoder(codecCtx->codec_id);
	if(!codec)
	{
		std::cerr << "Could not find codec!" << std::endl;
		return false;
	}
	avcodec_open2(codecCtx, codec, NULL);

	SDL_AudioSpec wantedSpec;
	SDL_AudioSpec gotSpec;

	packetQueue* queue = new packetQueue;

	sdlargst* SDLArgs = new sdlargst;

	SDLArgs->avcodeccontext = codecCtx;
	SDLArgs->queue = queue;
	SDLArgs->dspman = dspman;

	wantedSpec.freq = codecCtx->sample_rate;
	wantedSpec.format = AUDIO_S16SYS;
	wantedSpec.channels = codecCtx->channels;
	wantedSpec.silence = 0;
	wantedSpec.samples = 1024;
	wantedSpec.callback = audioThreadEntryPoint;
	wantedSpec.userdata = (void*)SDLArgs;

	if(SDL_OpenAudio(&wantedSpec, &gotSpec) < 0)
	{
		throw(SDLException());
		return false;
	}

	SDL_PauseAudio(0);

	//Construct worker thread arguments.
	ffmpegargst* args = new ffmpegargst;
	args->audiostream = audioStream;
	args->avformatcontext = fmtCtx;
	args->queue = queue;

	//Begin ffmpeg worker thread.
	ffmpegworkerthread = new pthread_t;

	//Run the thread.
	pthread_create(ffmpegworkerthread, NULL, ffmpegWorkerEntry, args);

	// Also run the sound.
	return false;
}
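For reference, SDL invokes the callback registered above with the standard SDL_AudioSpec callback signature. A stub sketch (the real audioThreadEntryPoint would pop packets from the queue and decode them):

void audioThreadEntryPoint(void* userdata, Uint8* stream, int len)
{
	sdlargst* args = (sdlargst*)userdata;
	// Real implementation: pop packets from args->queue, decode them with
	// args->avcodeccontext and copy up to len bytes of PCM into stream.
	memset(stream, 0, len); // this stub just outputs silence
}
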
Example #11
int dc_video_decoder_open(VideoInputFile *video_input_file, VideoDataConf *video_data_conf, int mode, int no_loop, int nb_consumers)
{
	s32 ret;
	u32 i;
	s32 open_res;
	AVInputFormat *in_fmt = NULL;
	AVDictionary *options = NULL;
	AVCodecContext *codec_ctx;
	AVCodec *codec;

	memset(video_input_file, 0, sizeof(VideoInputFile));

	if (video_data_conf->width > 0 && video_data_conf->height > 0) {
		char vres[16];
		snprintf(vres, sizeof(vres), "%dx%d", video_data_conf->width, video_data_conf->height);
		ret = av_dict_set(&options, "video_size", vres, 0);
		if (ret < 0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Could not set video size %s.\n", vres));
			return -1;
		}
	}

	if (video_data_conf->framerate > 0) {
		char vfr[16];
		snprintf(vfr, sizeof(vfr), "%d", video_data_conf->framerate);
		ret = av_dict_set(&options, "framerate", vfr, 0);
		if (ret < 0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Could not set video framerate %s.\n", vfr));
			return -1;
		}
	}

	if (strlen(video_data_conf->pixel_format)) {
		ret = av_dict_set(&options, "pixel_format", video_data_conf->pixel_format, 0);
		if (ret < 0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Could not set pixel format %s.\n", video_data_conf->pixel_format));
			return -1;
		}
	}

#ifndef WIN32
	if (strcmp(video_data_conf->v4l2f, "") != 0) {
		ret = av_dict_set(&options, "input_format", video_data_conf->v4l2f, 0);
		if (ret < 0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Could not set input format %s.\n", video_data_conf->v4l2f));
			return -1;
		}
	}
#endif

	if (video_data_conf->format && strcmp(video_data_conf->format, "") != 0) {
		in_fmt = av_find_input_format(video_data_conf->format);
		if (in_fmt == NULL) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot find the format %s.\n", video_data_conf->format));
			return -1;
		}
	}

	video_input_file->av_fmt_ctx = NULL;

	/* Open video */
	open_res = avformat_open_input(&video_input_file->av_fmt_ctx, video_data_conf->filename, in_fmt, options ? &options : NULL);
	if ( (open_res < 0) && !stricmp(video_data_conf->filename, "screen-capture-recorder") ) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Buggy screen capture input (open failed with code %d), retrying without specifying resolution\n", open_res));
		av_dict_set(&options, "video_size", NULL, 0);
		open_res = avformat_open_input(&video_input_file->av_fmt_ctx, video_data_conf->filename, in_fmt, options ? &options : NULL);
	}

	if ( (open_res < 0) && options) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Error %d opening input - retrying without options\n", open_res));
		av_dict_free(&options);
		open_res = avformat_open_input(&video_input_file->av_fmt_ctx, video_data_conf->filename, in_fmt, NULL);
	}

	if (open_res < 0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot open file %s\n", video_data_conf->filename));
		return -1;
	}

	/* Retrieve stream information */
	if (avformat_find_stream_info(video_input_file->av_fmt_ctx, NULL) < 0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot find stream information\n"));
		return -1;
	}

	av_dump_format(video_input_file->av_fmt_ctx, 0, video_data_conf->filename, 0);

	/* Find the first video stream */
	video_input_file->vstream_idx = -1;
	for (i = 0; i < video_input_file->av_fmt_ctx->nb_streams; i++) {
		if (video_input_file->av_fmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			video_input_file->vstream_idx = i;
			break;
		}
	}
	if (video_input_file->vstream_idx == -1) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot find a video stream\n"));
		return -1;
	}

	/* Get a pointer to the codec context for the video stream */
	codec_ctx = video_input_file->av_fmt_ctx->streams[video_input_file->vstream_idx]->codec;

	/* Find the decoder for the video stream */
	codec = avcodec_find_decoder(codec_ctx->codec_id);
	if (codec == NULL) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Codec is not supported.\n"));
		if (!video_input_file->av_fmt_ctx_ref_cnt)
			avformat_close_input(&video_input_file->av_fmt_ctx);
		return -1;
	}

	/* Open codec */
	if (avcodec_open2(codec_ctx, codec, NULL) < 0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot open codec.\n"));
		if (!video_input_file->av_fmt_ctx_ref_cnt)
			avformat_close_input(&video_input_file->av_fmt_ctx);
		return -1;
	}

	video_input_file->width = codec_ctx->width;
	video_input_file->height = codec_ctx->height;
	video_input_file->sar = codec_ctx->sample_aspect_ratio;

	video_input_file->pix_fmt = codec_ctx->pix_fmt;
	if (video_data_conf->framerate >= 0 && codec_ctx->time_base.num) {
		video_data_conf->framerate = codec_ctx->time_base.den / codec_ctx->time_base.num;
	}
	if (video_data_conf->framerate <= 1 || video_data_conf->framerate > 1000) {
		const int num = video_input_file->av_fmt_ctx->streams[video_input_file->vstream_idx]->avg_frame_rate.num;
		const int den = video_input_file->av_fmt_ctx->streams[video_input_file->vstream_idx]->avg_frame_rate.den == 0 ? 1 : video_input_file->av_fmt_ctx->streams[video_input_file->vstream_idx]->avg_frame_rate.den;
		video_data_conf->framerate = num / den;
		if (video_data_conf->framerate / 1000 != 0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Framerate %d was divided by 1000: %d\n", video_data_conf->framerate, video_data_conf->framerate/1000));
			video_data_conf->framerate = video_data_conf->framerate / 1000;
		}

		if (video_data_conf->framerate <= 1 || video_data_conf->framerate > 1000) {
			video_data_conf->framerate = num / den;
			if (video_data_conf->framerate / 1000 != 0) {
				video_data_conf->framerate = video_data_conf->framerate / 1000;
			}
		}
	}

	if (video_data_conf->framerate <= 1 || video_data_conf->framerate > 1000) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Invalid input framerate.\n"));
		return -1;
	}

	video_data_conf->time_base = video_input_file->av_fmt_ctx->streams[video_input_file->vstream_idx]->time_base;
	video_input_file->mode = mode;
	video_input_file->no_loop = no_loop;
	video_input_file->nb_consumers = nb_consumers;
	return 0;
}
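The framerate fallback logic above can usually be replaced by FFmpeg's own helper. A sketch with av_guess_frame_rate() from libavformat (same variables assumed):

AVRational fr = av_guess_frame_rate(video_input_file->av_fmt_ctx,
	video_input_file->av_fmt_ctx->streams[video_input_file->vstream_idx], NULL);
if (fr.num && fr.den)
	video_data_conf->framerate = fr.num / fr.den;
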
Example #12
int demux(const char *in_filename, const char *out_filename_v,
		const char *out_filename_a) {
	AVOutputFormat *ofmt_a = NULL, *ofmt_v = NULL;
	// Input AVFormatContext and Output AVFormatContext
	AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx_a = NULL, *ofmt_ctx_v = NULL;
	AVPacket pkt, enc_pkt;
	int ret, i;
	int video_index = -1, audio_index = -1;
	int frame_index = 0;

	av_register_all();
	// Input
	if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
		printf("Could not open input file.");
		goto end;
	}
	if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
		printf("Failed to retrieve input stream information");
		goto end;
	}

	// Output
	avformat_alloc_output_context2(&ofmt_ctx_v, NULL, NULL, out_filename_v);
	if (!ofmt_ctx_v) {
		printf("Could not create output context.\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt_v = ofmt_ctx_v->oformat;

	avformat_alloc_output_context2(&ofmt_ctx_a, NULL, NULL, out_filename_a);
	if (!ofmt_ctx_a) {
		printf("Could not create output context\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt_a = ofmt_ctx_a->oformat;

	for (i = 0; i < ifmt_ctx->nb_streams; i++) {
		// Create output AVStream according to input AVStream
		AVFormatContext *ofmt_ctx;
		AVStream *in_stream = ifmt_ctx->streams[i];
		AVStream *out_stream = NULL;

		if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			video_index = i;
			out_stream = avformat_new_stream(ofmt_ctx_v,
					in_stream->codec->codec);
			ofmt_ctx = ofmt_ctx_v;
		} else if (ifmt_ctx->streams[i]->codec->codec_type
				== AVMEDIA_TYPE_AUDIO) {
			audio_index = i;
			out_stream = avformat_new_stream(ofmt_ctx_a,
					in_stream->codec->codec);
			ofmt_ctx = ofmt_ctx_a;
		} else {
			break;
		}

		if (!out_stream) {
			printf("Failed allocating output stream\n");
			ret = AVERROR_UNKNOWN;
			goto end;
		}
		// Copy the settings of AVCodecContext
		if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
			printf(
					"Failed to copy context from input to output stream codec context\n");
			goto end;
		}
		out_stream->codec->codec_tag = 0;

		if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}

	// Open output file
	if (!(ofmt_v->flags & AVFMT_NOFILE)) {
		if (avio_open(&ofmt_ctx_v->pb, out_filename_v, AVIO_FLAG_WRITE) < 0) {
			printf("Could not open output file '%s'", out_filename_v);
			goto end;
		}
	}

	if (!(ofmt_a->flags & AVFMT_NOFILE)) {
		if (avio_open(&ofmt_ctx_a->pb, out_filename_a, AVIO_FLAG_WRITE) < 0) {
			printf("Could not open output file '%s'", out_filename_a);
			goto end;
		}
	}

	// Write file header
	if (avformat_write_header(ofmt_ctx_v, NULL) < 0) {
		printf("Error occurred when opening video output file\n");
		goto end;
	}
//	if (avformat_write_header(ofmt_ctx_a, NULL) < 0) {
//		printf("Error occurred when opening audio output file\n");
//		goto end;
//	}

#if USE_H264BSF
	AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif

	while (1) {
		AVFormatContext *ofmt_ctx;
		AVStream *in_stream, *out_stream;

		AVCodecContext *dec_ctx = NULL, *enc_ctx = NULL;
		AVCodec *dec = NULL, *encoder = NULL;

		AVFrame *frame = NULL;

		int got_frame;

		// Get an AVPacket
		if (av_read_frame(ifmt_ctx, &pkt) < 0)
			break;
		in_stream = ifmt_ctx->streams[pkt.stream_index];

		if (pkt.stream_index == video_index) {
			ofmt_ctx = ofmt_ctx_v;
			out_stream = ofmt_ctx->streams[0]; /* reuse the video stream created above instead of adding one per packet */

			/* find decoder for the stream */
			dec_ctx = in_stream->codec;
			dec = avcodec_find_decoder(dec_ctx->codec_id);
			if (!dec) {
				fprintf(stderr, "Failed to find %s codec\n",
						av_get_media_type_string(AVMEDIA_TYPE_VIDEO));
				return AVERROR(EINVAL);
			}

			/* Open decoder */
			int ret = avcodec_open2(dec_ctx, dec, NULL);
			if (ret < 0) {
				av_log(NULL, AV_LOG_ERROR,
						"Failed to open decoder for stream #%u\n", i);
				return ret;
			}

			// decoder is MPEG-4 part 2
			printf("decoder is %s\n", dec->long_name);

			// NOTE
			frame = av_frame_alloc();

			ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &pkt);
			if (ret < 0) {
				av_frame_free(&frame);
				av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
				break;
			}

			// printf("frame duration is %d\n", frame->pkt_duration);

			// encode
			encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
			if (!encoder) {
				av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
				return AVERROR_INVALIDDATA;
			}

			// avcodec_copy_context(enc_ctx, dec_ctx);
			enc_ctx = avcodec_alloc_context3(encoder);
			if (!enc_ctx) {
				av_log(NULL, AV_LOG_FATAL, "Failed to allocate encoder context\n");
				return AVERROR(ENOMEM);
			}

			enc_ctx->height = dec_ctx->height;
			enc_ctx->width = dec_ctx->width;
			enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
			enc_ctx->pix_fmt = encoder->pix_fmts[0];
			enc_ctx->time_base = dec_ctx->time_base;
			//enc_ctx->time_base.num = 1;
			//enc_ctx->time_base.den = 25;
			// rate-control options commonly required for H.264; encoding can fail without them
			enc_ctx->me_range = 16;
			enc_ctx->max_qdiff = 4;
			enc_ctx->qmin = 10;
			enc_ctx->qmax = 51;
			enc_ctx->qcompress = 0.6;
			enc_ctx->refs = 3;
			enc_ctx->bit_rate = 1500; // note: bit_rate is in bits per second

			/* the x264 preset must be set before the encoder is opened */
			av_opt_set(enc_ctx->priv_data, "preset", "slow", 0);

			ret = avcodec_open2(enc_ctx, encoder, NULL);
			if (ret < 0) {
				av_log(NULL, AV_LOG_ERROR,
						"Cannot open video encoder for stream #%u\n", i);
				return ret;
			}

			// AVOutputFormat *formatOut = av_guess_format(NULL, out_filename_v, NULL);

			enc_pkt.data = NULL;
			enc_pkt.size = 0;
			av_init_packet(&enc_pkt);
			ret = avcodec_encode_video2(enc_ctx, &enc_pkt, frame, &got_frame);

			printf("demo is %s\n", "hello");

			av_frame_free(&frame);
			avcodec_close(enc_ctx);
			avcodec_close(dec_ctx);

			// printf("Write Video Packet. size:%d\tpts:%lld\n", pkt.size, pkt.pts);
#if USE_H264BSF
			av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
		} else {
			av_free_packet(&pkt); /* non-video packet: release it before skipping */
			continue;
		}

		// Convert PTS/DTS
		enc_pkt.pts = av_rescale_q_rnd(enc_pkt.pts, in_stream->time_base,
				out_stream->time_base,
				(AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		enc_pkt.dts = av_rescale_q_rnd(enc_pkt.dts, in_stream->time_base,
				out_stream->time_base,
				(AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		enc_pkt.duration = av_rescale_q(enc_pkt.duration, in_stream->time_base,
				out_stream->time_base);
		// enc_pkt.pos = -1;
		enc_pkt.stream_index = video_index;

		if (av_interleaved_write_frame(ofmt_ctx, &enc_pkt) < 0) {
			printf("Error muxing packet\n");
			break;
		}
		av_free_packet(&enc_pkt);
		av_free_packet(&pkt);
		frame_index++;
	}

#if USE_H264BSF
	av_bitstream_filter_close(h264bsfc);
#endif

	// Write file trailer (audio trailer skipped because its header write is commented out above)
	av_write_trailer(ofmt_ctx_v);

end:
	avformat_close_input(&ifmt_ctx);
	/* close output */
	if (ofmt_ctx_a && !(ofmt_a->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx_a->pb);

	if (ofmt_ctx_v && !(ofmt_v->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx_v->pb);

	avformat_free_context(ofmt_ctx_a);
	avformat_free_context(ofmt_ctx_v);

	if (ret < 0 && ret != AVERROR_EOF) {
		printf("Error occurred.\n");
		return -1;
	}
	return 0;
}
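demux() above is written against APIs that were deprecated around FFmpeg 3.x: AVStream.codec, avcodec_copy_context() and av_free_packet(). A minimal sketch of the stream-copy setup using AVCodecParameters instead; copy_stream is a hypothetical helper name, not part of the original example:

#include <libavformat/avformat.h>

static int copy_stream(AVFormatContext *ofmt_ctx, AVStream *in_stream)
{
	AVStream *out_stream = avformat_new_stream(ofmt_ctx, NULL);
	if (!out_stream)
		return AVERROR(ENOMEM);
	/* copies codec parameters instead of the deprecated avcodec_copy_context() */
	int ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
	if (ret < 0)
		return ret;
	out_stream->codecpar->codec_tag = 0; /* let the muxer pick a suitable tag */
	return 0;
}

The rest of the loop would then use av_packet_unref() wherever the example calls av_free_packet().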
Beispiel #13
int JPG_to_Pixel(const unsigned char *jpgBuff, int jpgSize, int pixelFmt, int pixelWidth, int pixelHeight, unsigned char *pixelBuff, int *pixelSize) {
	AVFormatContext *formatContext;
	AVInputFormat *inputFormat;
	AVIOContext *ioContext;
	AVStream *stream;
	AVCodecContext *codecContext;
	AVCodec *codec;
	AVFrame *frame, *frame2;
	AVPacket packet;
	struct SwsContext *swsContext;
	int streamIndex;
	int gotFrame;
	int codecRet;
	int result = -1;

	av_register_all();
	formatContext = avformat_alloc_context();
	ioContext = avio_alloc_context((unsigned char *)jpgBuff, jpgSize, 0, NULL, NULL, NULL, NULL);
	inputFormat = av_find_input_format("mjpeg");
	av_probe_input_buffer2(ioContext, &inputFormat, NULL, NULL, 0, 0);
	formatContext->pb = ioContext;
	formatContext->iformat = inputFormat;
	avformat_open_input(&formatContext, NULL, NULL, NULL);
	avformat_find_stream_info(formatContext, NULL);

	av_init_packet(&packet);
	for (streamIndex = 0; streamIndex < formatContext->nb_streams; streamIndex++) {
		av_read_frame(formatContext, &packet);
		if (formatContext->streams[streamIndex]->codec->codec_type == AVMEDIA_TYPE_VIDEO && 0 < packet.size) {
			stream = formatContext->streams[streamIndex];
			codecContext = stream->codec;
			codec = avcodec_find_decoder(codecContext->codec_id);
			avcodec_open2(codecContext, codec, NULL);
			frame = avcodec_alloc_frame();
			codecRet = avcodec_decode_video2(codecContext, frame, &gotFrame, &packet);
			if (0 <= codecRet && 1 == gotFrame) {
				frame2 = av_frame_clone(frame);
				frame2->format = PF(pixelFmt);
				swsContext = sws_getContext(codecContext->width, codecContext->height, codecContext->pix_fmt, pixelWidth, pixelHeight, (AVPixelFormat)frame2->format, SWS_BICUBIC, NULL, NULL, NULL);   
				sws_scale(swsContext, (const uint8_t *const *)frame->data, frame->linesize, 0, codecContext->height, frame2->data, frame2->linesize);  
				sws_freeContext(swsContext);

				*pixelSize = avpicture_layout((const AVPicture *)frame2, (enum AVPixelFormat)frame2->format, pixelWidth, pixelHeight, pixelBuff, *pixelSize);
				result = *pixelSize;

				av_frame_free(&frame2);
			}	
			if (1 == codecContext->refcounted_frames) av_frame_unref(frame); 
			avcodec_free_frame(&frame);
			avcodec_close(codecContext);
		}
		av_free_packet(&packet);
		if (-1 != result)
			break;
	}

	avformat_close_input(&formatContext);
	av_free(ioContext->buffer);
	av_free(ioContext);
	avformat_free_context(formatContext);
	return result;
}
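JPG_to_Pixel() hands the caller's whole buffer to avio_alloc_context() without a read callback, which only works because the complete JPEG is already in memory. A sketch with an explicit read callback, which also supports incremental reads, is shown below; struct mem_reader and mem_read are hypothetical names:

#include <string.h>
#include <libavformat/avformat.h>

struct mem_reader { const uint8_t *data; int size, pos; };

static int mem_read(void *opaque, uint8_t *buf, int buf_size)
{
	struct mem_reader *r = opaque;
	int n = r->size - r->pos;
	if (n <= 0)
		return AVERROR_EOF;          /* signal end of the in-memory stream */
	if (n > buf_size)
		n = buf_size;
	memcpy(buf, r->data + r->pos, n);
	r->pos += n;
	return n;
}

/* usage: io = avio_alloc_context(av_malloc(4096), 4096, 0, &reader, mem_read, NULL, NULL); */

On teardown, ioContext->buffer must still be freed separately (as the example already does), because libavformat may replace the initial buffer internally.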
Beispiel #14
int VideoFFmpeg::openStream(const char *filename, AVInputFormat *inputFormat, AVDictionary **formatParams)
{
	AVFormatContext *formatCtx = NULL;
	int				i, videoStream;
	AVCodec			*codec;
	AVCodecContext	*codecCtx;

	if (avformat_open_input(&formatCtx, filename, inputFormat, formatParams)!=0)
		return -1;

	if (avformat_find_stream_info(formatCtx, NULL) < 0)
	{
		avformat_close_input(&formatCtx);
		return -1;
	}

	/* Find the first video stream */
	videoStream=-1;
	for (i=0; i<formatCtx->nb_streams; i++)
	{
		if (formatCtx->streams[i] &&
			get_codec_from_stream(formatCtx->streams[i]) && 
			(get_codec_from_stream(formatCtx->streams[i])->codec_type==AVMEDIA_TYPE_VIDEO))
		{
			videoStream=i;
			break;
		}
	}

	if (videoStream==-1) 
	{
		avformat_close_input(&formatCtx);
		return -1;
	}

	codecCtx = get_codec_from_stream(formatCtx->streams[videoStream]);

	/* Find the decoder for the video stream */
	codec=avcodec_find_decoder(codecCtx->codec_id);
	if (codec==NULL) 
	{
		avformat_close_input(&formatCtx);
		return -1;
	}
	codecCtx->workaround_bugs = 1;
	if (avcodec_open2(codecCtx, codec, NULL) < 0)
	{
		avformat_close_input(&formatCtx);
		return -1;
	}

#ifdef FFMPEG_OLD_FRAME_RATE
	if (codecCtx->frame_rate>1000 && codecCtx->frame_rate_base==1)
		codecCtx->frame_rate_base=1000;
	m_baseFrameRate = (double)codecCtx->frame_rate / (double)codecCtx->frame_rate_base;
#else
	m_baseFrameRate = av_q2d(av_get_r_frame_rate_compat(formatCtx->streams[videoStream]));
#endif
	if (m_baseFrameRate <= 0.0) 
		m_baseFrameRate = defFrameRate;

	m_codec = codec;
	m_codecCtx = codecCtx;
	m_formatCtx = formatCtx;
	m_videoStream = videoStream;
	m_frame = avcodec_alloc_frame();
	m_frameDeinterlaced = avcodec_alloc_frame();

	// allocate buffer if deinterlacing is required
	avpicture_fill((AVPicture*)m_frameDeinterlaced, 
		(uint8_t*)MEM_callocN(avpicture_get_size(
		m_codecCtx->pix_fmt,
		m_codecCtx->width, m_codecCtx->height), 
		"ffmpeg deinterlace"), 
		m_codecCtx->pix_fmt, m_codecCtx->width, m_codecCtx->height);

	// check if the pixel format supports Alpha
	if (m_codecCtx->pix_fmt == PIX_FMT_RGB32 ||
		m_codecCtx->pix_fmt == PIX_FMT_BGR32 ||
		m_codecCtx->pix_fmt == PIX_FMT_RGB32_1 ||
		m_codecCtx->pix_fmt == PIX_FMT_BGR32_1) 
	{
		// allocate buffer to store final decoded frame
		m_format = RGBA32;
		// allocate sws context
		m_imgConvertCtx = sws_getContext(
			m_codecCtx->width,
			m_codecCtx->height,
			m_codecCtx->pix_fmt,
			m_codecCtx->width,
			m_codecCtx->height,
			PIX_FMT_RGBA,
			SWS_FAST_BILINEAR,
			NULL, NULL, NULL);
	} else
	{
		// allocate buffer to store final decoded frame
		m_format = RGB24;
		// allocate sws context
		m_imgConvertCtx = sws_getContext(
			m_codecCtx->width,
			m_codecCtx->height,
			m_codecCtx->pix_fmt,
			m_codecCtx->width,
			m_codecCtx->height,
			PIX_FMT_RGB24,
			SWS_FAST_BILINEAR,
			NULL, NULL, NULL);
	}
	m_frameRGB = allocFrameRGB();

	if (!m_imgConvertCtx) {
		avcodec_close(m_codecCtx);
		m_codecCtx = NULL;
		avformat_close_input(&m_formatCtx);
		m_formatCtx = NULL;
		av_free(m_frame);
		m_frame = NULL;
		MEM_freeN(m_frameDeinterlaced->data[0]);
		av_free(m_frameDeinterlaced);
		m_frameDeinterlaced = NULL;
		MEM_freeN(m_frameRGB->data[0]);
		av_free(m_frameRGB);
		m_frameRGB = NULL;
		return -1;
	}
	return 0;
}
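openStream() fills m_frameDeinterlaced through avpicture_get_size()/avpicture_fill(), both deprecated in favor of the libavutil image API. A sketch of the equivalent allocation, assuming only stock libavutil; alloc_frame_buffer is a hypothetical helper (MEM_callocN is Blender-specific and not used here):

#include <libavutil/imgutils.h>

static int alloc_frame_buffer(AVFrame *frame, enum AVPixelFormat pix_fmt, int w, int h)
{
	int ret = av_image_alloc(frame->data, frame->linesize, w, h, pix_fmt, 32);
	if (ret < 0)
		return ret;        /* on success, ret is the allocated buffer size */
	frame->format = pix_fmt;
	frame->width  = w;
	frame->height = h;
	return 0;
}

A buffer allocated this way is released with av_freep(&frame->data[0]).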
Beispiel #15
static int open_input_file(InputFile *ifile, const char *filename)
{
    int err, i;
    AVFormatContext *fmt_ctx = NULL;
    AVDictionaryEntry *t;

    if ((err = avformat_open_input(&fmt_ctx, filename,
                                   iformat, &format_opts)) < 0) {
        print_error(filename, err);
        return err;
    }
    if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
        return AVERROR_OPTION_NOT_FOUND;
    }


    /* fill the streams in the format context */
    if ((err = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        print_error(filename, err);
        return err;
    }

    av_dump_format(fmt_ctx, 0, filename, 0);

    ifile->streams = av_mallocz_array(fmt_ctx->nb_streams,
                                      sizeof(*ifile->streams));
    if (!ifile->streams)
        exit(1);
    ifile->nb_streams = fmt_ctx->nb_streams;

    /* bind a decoder to each input stream */
    for (i = 0; i < fmt_ctx->nb_streams; i++) {
        InputStream *ist = &ifile->streams[i];
        AVStream *stream = fmt_ctx->streams[i];
        AVCodec *codec;

        ist->st = stream;

        if (stream->codecpar->codec_id == AV_CODEC_ID_PROBE) {
            fprintf(stderr, "Failed to probe codec for input stream %d\n",
                    stream->index);
            continue;
        }

        codec = avcodec_find_decoder(stream->codecpar->codec_id);
        if (!codec) {
            fprintf(stderr,
                    "Unsupported codec with id %d for input stream %d\n",
                    stream->codecpar->codec_id, stream->index);
            continue;
        }

        ist->dec_ctx = avcodec_alloc_context3(codec);
        if (!ist->dec_ctx)
            exit(1);

        err = avcodec_parameters_to_context(ist->dec_ctx, stream->codecpar);
        if (err < 0)
            exit(1);

        err = avcodec_open2(ist->dec_ctx, NULL, NULL);
        if (err < 0) {
            fprintf(stderr, "Error while opening codec for input stream %d\n",
                    stream->index);
            exit(1);

        }
    }

    ifile->fmt_ctx = fmt_ctx;
    return 0;
}
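This example already uses AVCodecParameters, so it pairs naturally with the send/receive decode API that replaced avcodec_decode_video2() in FFmpeg 3.1. A sketch of a drain loop over the contexts opened above; decode_all is a hypothetical helper and the frame consumer is left as a comment:

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

static int decode_all(AVFormatContext *fmt_ctx, AVCodecContext *dec_ctx, int stream_idx)
{
    AVPacket pkt;
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return AVERROR(ENOMEM);

    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        if (pkt.stream_index == stream_idx &&
            avcodec_send_packet(dec_ctx, &pkt) >= 0) {
            /* one packet may yield zero or more frames */
            while (avcodec_receive_frame(dec_ctx, frame) >= 0)
                ; /* consume frame here */
        }
        av_packet_unref(&pkt);
    }
    av_frame_free(&frame);
    return 0;
}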
Beispiel #16
int main(int argc, char *argv[])
{
	AVFormatContext *pFormatCtx = NULL;
	int i, videoStream;
	AVCodecContext *pCodecCtx;
	AVCodec *pCodec;
	AVFrame *pFrame;
	AVPacket packet;
	int frameFinished;

	int *stream_size=NULL;
	int *stream_max=NULL;
	int *stream_min=NULL;
	double *stream_ave=NULL;
	int total_max=0, total_min=INT32_MAX;
	double total_ave=0;
	int numberStreams;
	int frame_counter=0;
	int total_size=0;
	int tave=0;

	struct settings programSettings;
	struct stat fileStat;
	off_t total_file_size;
	double framerate;

	// default settings
	programSettings.ave_len=1;
	programSettings.output_stderr=0;
	programSettings.output_interval=1;
	programSettings.output_interval_seconds=0;
	programSettings.output_progress=0;


	// parse commandline options
	const static char *legal_flags = "s:i:I:ePh";

	int c;
	char *error=NULL;
	while ((c = getopt (argc, argv, legal_flags)) != -1) {
		switch (c) {
			case 's':
				// been programming in java too much recently
				// I want to catch a number format exception here
				// And tell the user of their error.
				programSettings.ave_len=(int)strtol(optarg, &error, 10);
				if (*error || programSettings.ave_len < 1) {
					fprintf(stderr,"Smoothing value is invalid\n");
					print_usage();
					return -1;
				}

				break;
			case 'e':
				programSettings.output_stderr=1;
				break;
			case 'P':
				programSettings.output_progress=1;
				break;

			case 'i':
				programSettings.output_interval=(int)strtol(optarg, &error, 10);
				if (*error || programSettings.output_interval<1) {
					fprintf(stderr,"Interval is invalid\n");
					print_usage();

					return -1;
				}
				break;
			case 'I':
				programSettings.output_interval_seconds=strtod(optarg, &error);

				if (*error || programSettings.output_interval_seconds <= 0) {
					fprintf(stderr,"Interval Seconds is invalid\n");
					print_usage();

					return -1;
				}

				break;
			case 'h':
			case '?':	/* getopt returns '?' for unknown options */
				print_usage();
				return 0;
				break;
		}
	}


	optind--;
	// argc -= optind;
	argv += optind;

	//fprintf (stderr, "optind = %d. Trying file: %s\n",optind,argv[1]);

	// Register all formats and codecs
	av_register_all();

	if (argv[1] == NULL)
	{
		fprintf(stderr,"Error: No filename.\n");
		print_usage();
		return -1; // Couldn't open file
	}


	if (programSettings.output_progress) {
		stat(argv[1], &fileStat);
		// check for return error
		progress_init(0,fileStat.st_size);
	}


	// Open video file
#if LIBAVFORMAT_VERSION_MAJOR < 53
	if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0)
#else
		if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
#endif
		{
			fprintf(stderr,"Error: could not open file.\n");
			return -1; // Couldn't open file
		}

	// Retrieve stream information
#if LIBAVFORMAT_VERSION_MAJOR < 53
	if(av_find_stream_info(pFormatCtx)<0)
#else
		if(avformat_find_stream_info(pFormatCtx,NULL)<0)
#endif
		{
			fprintf(stderr,"Error: could not interpret file.\n");
			return -1; // Couldn't find stream information
		}

	// Dump information about file onto standard error
#if LIBAVFORMAT_VERSION_MAJOR < 53
	dump_format(pFormatCtx, 0, argv[1], 0);
#else
	av_dump_format(pFormatCtx, 0, argv[1], 0);
#endif

	// As this program outputs based on video frames.
	// Find the first video stream
	// To determine the bitrate.
	videoStream=-1;

	numberStreams = pFormatCtx->nb_streams;

	stream_size = (int *)malloc(numberStreams * sizeof(int));
	stream_min = (int *)malloc(numberStreams * sizeof(int));
	stream_max = (int *)malloc(numberStreams * sizeof(int));
	stream_ave = (double *)malloc(numberStreams * sizeof(double));


	for(i=0; i<numberStreams; i++) {
		//	fprintf (stderr,"stream: %d = %d (%s)\n",i,pFormatCtx->streams[i]->codec->codec_type ,pFormatCtx->streams[i]->codec->codec_name);
		// Initialise statistic counters
		stream_size[i] = 0;
		stream_min[i]=INT32_MAX;
		stream_max[i]=0;
		stream_ave[i] = 0;

#if LIBAVFORMAT_VERSION_MAJOR < 53
		if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO)
#else
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
#endif
		{
			videoStream=i;
#if LIBAVFORMAT_VERSION_MAJOR < 55
			framerate = pFormatCtx->streams[i]->r_frame_rate.num;
#else 
                        framerate = pFormatCtx->streams[i]->avg_frame_rate.num;
#endif

#if LIBAVFORMAT_VERSION_MAJOR < 55
			if (pFormatCtx->streams[i]->r_frame_rate.den != 0)
				framerate /= pFormatCtx->streams[i]->r_frame_rate.den;
			//	fprintf (stderr,"Video Stream: %d Frame Rate: %d:%d\n",videoStream,pFormatCtx->streams[i]->r_frame_rate.num,pFormatCtx->streams[i]->r_frame_rate.den);
#else 
			if (pFormatCtx->streams[i]->avg_frame_rate.den != 0)
				framerate /= pFormatCtx->streams[i]->avg_frame_rate.den;
			//	fprintf (stderr,"Video Stream: %d Frame Rate: %d:%d\n",videoStream,pFormatCtx->streams[i]->avg_frame_rate.num,pFormatCtx->streams[i]->avg_frame_rate.den);
#endif
		}
	}

	if(videoStream==-1) {
		free (stream_size); free (stream_min); free (stream_max); free (stream_ave);
		return -1; // Didn't find a video stream
	}

	// Get a pointer to the codec context for the video stream
	pCodecCtx=pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL) {
		free (stream_size); free (stream_min); free (stream_max); free (stream_ave);
		return -1; // Codec not found
	}

	if (framerate == 0)
	{
		//fprintf(stderr,"frame rate %d:%d\n",pCodecCtx->time_base.num,pCodecCtx->time_base.den);
		framerate = pCodecCtx->time_base.den;
		if (pCodecCtx->time_base.den != 0)
			framerate /= pCodecCtx->time_base.num;
	}



	if (programSettings.output_interval_seconds >0)
	{
		if (INT32_MAX / framerate > programSettings.output_interval_seconds)
		{
			programSettings.output_interval = programSettings.output_interval_seconds * framerate;
		} else {
			fprintf(stderr,"Interval seconds too large\n");
			free (stream_size); free (stream_min); free (stream_max); free (stream_ave);
			return -1;
		}
	}
	//	fprintf (stderr,"Video Stream: %d Frame Rate: %g\n",videoStream,framerate);


	// Open codec
#if LIBAVCODEC_VERSION_MAJOR < 52
	if(avcodec_open(pCodecCtx, *pCodec)<0)
#else
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
#endif
	{
		free (stream_size); free (stream_min); free (stream_max); free (stream_ave);
		return -1; // Could not open codec
	}

	// Allocate video frame
#if LIBAVCODEC_VERSION_MAJOR < 55
	pFrame=avcodec_alloc_frame();
#else
	pFrame=av_frame_alloc();
#endif

	int counter_interval=0;


	total_file_size=0;
	// Loop until nothing read
	while(av_read_frame(pFormatCtx, &packet)>=0)
	{
		stream_size[packet.stream_index] += packet.size;

		if (programSettings.output_progress) {
			total_file_size += packet.size;
			progress_loadBar(total_file_size);
		}
		// Is this a packet from the video stream?
		if(packet.stream_index==videoStream)
		{
			// Decode video frame
			// I'm not entirely sure when avcodec_decode_video was deprecated. most likely earlier than 53
	#if LIBAVCODEC_VERSION_MAJOR < 52
			avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, packet.data, packet.size);
	#else
			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
	#endif

			if (counter_interval++ >= programSettings.output_interval) {

				//if (!(frame_counter % ave_len)) {
				// print the statistics in gnuplot friendly format...
				total_size=0;
				for(i=0; i<numberStreams; i++) { total_size += stream_size[i]; }
				// if (tave == -1) { tave = total_size; }

				tave = ((tave * (programSettings.ave_len-1)) + total_size) / programSettings.ave_len / programSettings.output_interval;

				if(total_min > total_size)
					total_min = total_size;

				if(total_max < total_size)
					total_max = total_size;

				total_ave += total_size;


				printf ("%f ",frame_counter/framerate);
				printf ("%f ",tave*8*framerate);

				for(i=0; i<numberStreams; i++) {

					// double rate = stream_size[i]*8*framerate/ programSettings.output_interval;

					if(stream_min[i] > stream_size[i])
						stream_min[i] = stream_size[i];

					if(stream_max[i] < stream_size[i])
						stream_max[i] = stream_size[i];

					stream_ave[i] += stream_size[i];


					printf ("%f ",stream_size[i]*8*framerate/ programSettings.output_interval);

					stream_size[i]=0;
				}
				printf("\n");

				//}
				counter_interval = 1;
			}
			frame_counter++;
		}

		// Free the packet that was allocated by av_read_frame
#if LIBAVCODEC_VERSION_MAJOR < 52
		av_freep(&packet);
#else
		av_free_packet(&packet);
#endif
	}

	free(stream_size);


	// Free the YUV frame
	av_free(pFrame);

	// Close the codec
	avcodec_close(pCodecCtx);

	// Close the video file
#if LIBAVCODEC_VERSION_MAJOR < 53
	av_close_input_file(pFormatCtx);
#else
	avformat_close_input(&pFormatCtx);
#endif

	// Print statistics
	if (programSettings.output_stderr)
	{
		fprintf(stderr,"%20s %20s %20s %20s\n","Stream","Min Bitrate","Average bitrate","Max bitrate");

		fprintf(stderr,"%20s %20f %20f %20f\n","Total",total_min*8*framerate/ programSettings.output_interval,
				total_ave * 8*framerate/ programSettings.output_interval/(frame_counter/programSettings.output_interval),
				total_max*8*framerate/ programSettings.output_interval);
		for(i=0; i<numberStreams; i++) {
			fprintf(stderr,"%20d %20f %20f %20f\n",i,stream_min[i]*8*framerate/ programSettings.output_interval,
					stream_ave[i] *8*framerate/ programSettings.output_interval/(frame_counter/programSettings.output_interval),
					stream_max[i]*8*framerate/ programSettings.output_interval);
		}
	}
	free (stream_min); free (stream_max); free (stream_ave);

	return 0;
}
Beispiel #17
/*!
 * \copydoc MetaIO::read()
 */
MusicMetadata* MetaIOMP4::read(const QString &filename)
{
    QString title, artist, album, genre;
    int year = 0, tracknum = 0, length = 0;
    bool compilation = false;

    AVFormatContext* p_context = NULL;
    AVInputFormat* p_inputformat = NULL;

    QByteArray local8bit = filename.toLocal8Bit();
    if ((avformat_open_input(&p_context, local8bit.constData(),
                             p_inputformat, NULL) < 0))
    {
        return NULL;
    }

    if (avformat_find_stream_info(p_context, NULL) < 0)
    {
        avformat_close_input(&p_context);
        return NULL;
    }

#if 0
    //### Debugging, enable to dump a list of all field names/values found

    AVDictionaryEntry *tag = av_dict_get(p_context->metadata, "\0", NULL,
                                         AV_METADATA_IGNORE_SUFFIX);
    while (tag != NULL)
    {
        LOG(VB_GENERAL, LOG_DEBUG, QString("Tag: %1 Value: %2")
                .arg(tag->key) .arg(QString::fromUtf8(tag->value)));
        tag = av_dict_get(p_context->metadata, "\0", tag,
                          AV_METADATA_IGNORE_SUFFIX);
    }
    //####
#endif

    title = getFieldValue(p_context, "title");
    if (title.isEmpty())
    {
        readFromFilename(filename, artist, album, title, genre, tracknum);
    }
    else
    {
        // title was already fetched above; "author" is the correct field name,
        // but we've been saving to "artist" for years
        artist = getFieldValue(p_context, "author");
        if (artist.isEmpty())
            artist = getFieldValue(p_context, "artist");
        album = getFieldValue(p_context, "album");
        year = getFieldValue(p_context, "year").toInt();
        genre = getFieldValue(p_context, "genre");
        tracknum = getFieldValue(p_context, "track").toInt();
        compilation = getFieldValue(p_context, "").toInt();
        length = getTrackLength(p_context);
    }

    metadataSanityCheck(&artist, &album, &title, &genre);

    MusicMetadata *retdata = new MusicMetadata(filename,
                                     artist,
                                     compilation ? artist : "",
                                     album,
                                     title,
                                     genre,
                                     year,
                                     tracknum,
                                     length);

    retdata->setCompilation(compilation);

    avformat_close_input(&p_context);

    return retdata;
}
Beispiel #18
int main(int argc, char **argv){	
	struct 				tm start_time_tm;
	int outputPorts;

	pthread_t *audioThreads;
	pthread_attr_t custom_sched_attr;	
	int fifo_max_prio = 0;
	int fifo_min_prio = 0;
	int fifo_mid_prio = 0;	
	struct sched_param fifo_param;

	syncbuffer = 0;
	normalbuffer = 0;

	if(argc < 3){
		printf("./<audio_decoder> udp://[IP]:[PORT] [ptsDelay] [Amount of channel] [Channel 0] [Channel n]\n");
		return 0;
	}


	
	ff_ctx = malloc(sizeof(ff_ctx_t));

	av_register_all();
	avformat_network_init();

	InitFF(ff_ctx, argv[1], argv[2]);
	

	if (avformat_open_input (&ff_ctx->avInputCtx, ff_ctx->udp_address, NULL , &ff_ctx->avDic) != 0) {
		printf ("Cloud not open UDP input stream at %s\n", ff_ctx->udp_address);
		return -1;
	}

	if (avformat_find_stream_info(ff_ctx->avInputCtx, NULL) < 0) {
		printf ("Cloud not get stream info\n");
		return -1;
	}

	if ((ff_ctx->audioIndexStream = av_find_best_stream(ff_ctx->avInputCtx, AVMEDIA_TYPE_AUDIO, -1, -1, &ff_ctx->avCodec, 0)) < 0) {
		printf ("No audio streams found\n");
		return -1;
	}

	printf ("Audio stream found at %d\n", ff_ctx->audioIndexStream);

	ff_ctx->avDicentry = av_dict_get(ff_ctx->avInputCtx->metadata, "service_name", NULL, 0);

	if(ff_ctx->avDicentry != NULL){
		strptime( ff_ctx->avDicentry->value, "%Y-%m-%d %H:%M:%S", &start_time_tm);
		start_time = mktime(&start_time_tm);
	}
	else {
		start_time = getSystemTime(NULL);
	}
	
	ff_ctx->avCodecCtx = ff_ctx->avInputCtx->streams[ff_ctx->audioIndexStream]->codec;
	ff_ctx->avCodec = avcodec_find_decoder(ff_ctx->avCodecCtx->codec_id);

	av_dump_format(ff_ctx->avInputCtx, 0, ff_ctx->udp_address, 0);

	if (avcodec_open2 (ff_ctx->avCodecCtx, ff_ctx->avCodec, NULL) < 0) {
		return -1;
	}

	outputPorts = ff_ctx->avCodecCtx->channels;
	InitBF(ff_ctx->avCodecCtx->channels, &to_audio_buffer, TO_AUDIO_BUFFER_SIZE);
	InitBF(ff_ctx->avCodecCtx->channels, &to_jack_buffer, TO_JACK_BUFFER_SIZE);

	//One thread for each channel
	audioThreads = malloc (sizeof(pthread_t)*outputPorts);

	pthread_attr_init(&custom_sched_attr);	
 	pthread_attr_setinheritsched(&custom_sched_attr, PTHREAD_INHERIT_SCHED /* PTHREAD_EXPLICIT_SCHED */);

 	//The options below are applied only when PTHREAD_EXPLICIT_SCHED is used!
 	pthread_attr_setscope(&custom_sched_attr, PTHREAD_SCOPE_SYSTEM );	
 	pthread_attr_setschedpolicy(&custom_sched_attr, SCHED_FIFO);	

 	fifo_max_prio = sched_get_priority_max(SCHED_FIFO);	
 	fifo_min_prio = sched_get_priority_min(SCHED_FIFO);	
 	fifo_mid_prio = (fifo_min_prio + fifo_max_prio) / 2;	
 	fifo_param.sched_priority = fifo_mid_prio;	
 	pthread_attr_setschedparam(&custom_sched_attr, &fifo_param);

 	int i;
 	threadArgs_t args[outputPorts];
 	for (i = 0; i < outputPorts; i++) {
 		args[i].channel = i;
 		args[i].process_block_size = AUDIO_PROCESS_BLOCK_SIZE;
 		if (pthread_create(&audioThreads[i], &custom_sched_attr, audioThreadFunction, &args[i])) {
 			printf ("Unable to create audio_thread %d\n", i);
 			return 0;
 		}
 	}
    
    av_init_packet(&ff_ctx->avPacket);

	static AVFrame frame;
	int frameFinished;
	int nb, ch;

	char samplebuf[30];
	av_get_sample_fmt_string (samplebuf, 30, ff_ctx->avCodecCtx->sample_fmt);
	printf ("Audio sample format is %s\n", samplebuf);

	audio_sync_sample_t **sync_samples;
	sync_samples = malloc (outputPorts*sizeof(audio_sync_sample_t*));

	long double initPTS, PTS, frame_pts_offset;
	unsigned long int frame_count, framePTS, sample_count;

	int sample_rate = ff_ctx->avCodecCtx->sample_rate;

	if (init_jack(&jackCtx, outputPorts)) {
		return 1;
	}

	while(av_read_frame (ff_ctx->avInputCtx, &ff_ctx->avPacket)>=0) {

		if(ff_ctx->avPacket.stream_index == ff_ctx->audioIndexStream ) {
			int contador = 0;
			long double time_1 = getSystemTime(NULL);

			int len = avcodec_decode_audio4 (ff_ctx->avCodecCtx, &frame, &frameFinished, &ff_ctx->avPacket);

			if (frameFinished) {
				int data_size = frame.nb_samples * av_get_bytes_per_sample(frame.format);
				int sync_size = frame.nb_samples * sizeof (audio_sync_sample_t);

				framePTS = av_frame_get_best_effort_timestamp (&frame);

				frame_count = framePTS - ff_ctx->avInputCtx->streams[ff_ctx->audioIndexStream]->start_time;
				frame_pts_offset = frame_count * av_q2d(ff_ctx->avInputCtx->streams[ff_ctx->audioIndexStream]->time_base) ;

				initPTS = start_time + frame_pts_offset + ff_ctx->ptsDelay;

#ifdef _DBG_PTS
				printf ("frame decoded PTS %lu, frame count %lu, TB %d/%d, PTS %Lf\n", framePTS, frame_count, ff_ctx->avInputCtx->streams[ff_ctx->audioIndexStream]->time_base.num, ff_ctx->avInputCtx->streams[ff_ctx->audioIndexStream]->time_base.den, initPTS);
#endif

				//Build sync info data, sample timing
				for (ch = 0; ch < ff_ctx->avCodecCtx->channels; ch++) {
					sync_samples[ch] =  malloc(sync_size);

					PTS = initPTS;

					for (sample_count = 0; sample_count < frame.nb_samples; sample_count++) {
						PTS += (1/(float) sample_rate);
						sync_samples[ch][sample_count].samplePTS = PTS;
					}
				}

#ifdef _DBG_PTS
				printf ("ended samples PTS %Lf\n", PTS);
#endif
				for (ch = 0; ch < ff_ctx->avCodecCtx->channels; ch++) {
					ProduceSyncToBuffer (&to_audio_buffer, ch, (uint8_t*) sync_samples[ch], sync_size);
					ProduceAudioToBuffer(&to_audio_buffer, ch, (uint8_t*) frame.extended_data[ch], data_size);

					free(sync_samples[ch]);
				}
			}

	       	long double time_2 = getSystemTime(NULL);
	       	adaptativeSleep( (1/READ_INPUT_FRAME_RATE) - (time_2 - time_1));
		}
		/* release the packet allocated by av_read_frame to avoid leaking it each iteration */
		av_free_packet(&ff_ctx->avPacket);
	}

	return 0;
}
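The data_size computation above multiplies nb_samples by the sample size by hand, which is correct for one plane of a planar sample format (the per-channel extended_data loop assumes planar layout). As a cross-check, libavutil can derive the same per-plane size; plane_size below is a hypothetical helper around av_samples_get_buffer_size():

#include <libavutil/samplefmt.h>

static int plane_size(const AVFrame *frame, int channels)
{
	int linesize;
	/* for planar formats, linesize receives the size of one channel plane */
	int ret = av_samples_get_buffer_size(&linesize, channels, frame->nb_samples,
	                                     (enum AVSampleFormat)frame->format, 0);
	return ret < 0 ? ret : linesize;
}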
Beispiel #19
int decode_thread(void *arg) {

	VideoState *is = (VideoState *)arg;
	AVFormatContext *pFormatCtx=NULL;
	AVPacket pkt1, *packet = &pkt1;

	int video_index = -1;
	int audio_index = -1;
	int i;

	is->videoStream = -1;
	is->audioStream = -1;

	global_video_state = is;

	// Open video file
	if (avformat_open_input(&pFormatCtx, is->filename, NULL, NULL) != 0)
		return -1; // Couldn't open file

	is->pFormatCtx = pFormatCtx;

	// Retrieve stream information
	if (avformat_find_stream_info(pFormatCtx, NULL)<0)
		return -1; // Couldn't find stream information

	// Dump information about file onto standard error
	av_dump_format(pFormatCtx, 0, is->filename, 0);

	// Find the first video stream

	for (i = 0; i<pFormatCtx->nb_streams; i++) {
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
			video_index < 0) {
			video_index = i;
		}
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
			audio_index < 0) {
			audio_index = i;
		}
	}
	if (audio_index >= 0) {
		stream_component_open(is, audio_index);
	}
	if (video_index >= 0) {
		stream_component_open(is, video_index);
	}

	if (is->videoStream < 0 || is->audioStream < 0) {
		fprintf(stderr, "%s: could not open codecs\n", is->filename);
		goto fail;
	}

	// main decode loop

	for (;;) {
		if (is->quit) {
			break;
		}
		// seek stuff goes here
		if (is->audioq.size > MAX_AUDIOQ_SIZE ||
			is->videoq.size > MAX_VIDEOQ_SIZE) {
			SDL_Delay(10);
			continue;
		}
		if (av_read_frame(is->pFormatCtx, packet) < 0) {
			if (is->pFormatCtx->pb->error == 0) {
				SDL_Delay(100); /* no error; wait for user input */
				continue;
			}
			else {
				break;
			}
		}
		// Is this a packet from the video stream?
		if (packet->stream_index == is->videoStream) {
			packet_queue_put(&is->videoq, packet);
		}
		else if (packet->stream_index == is->audioStream) {
			packet_queue_put(&is->audioq, packet);
		}
		else {
			av_free_packet(packet);
		}
	}
	/* all done - wait for it */
	while (!is->quit) {
		SDL_Delay(100);
	}

fail:
	if (1) {
		SDL_Event event;
		event.type = FF_QUIT_EVENT;
		event.user.data1 = is;
		SDL_PushEvent(&event);
	}
	return 0;
}
Beispiel #20
mfxStatus FFmpeg_Reader_Init(const char *strFileName, mfxU32 videoType)
{
    MSDK_CHECK_POINTER(strFileName, MFX_ERR_NULL_PTR);

    int res;

    g_videoType = videoType;

    // Initialize libavcodec, and register all codecs and formats
    av_register_all();

    // Open input container
    res = avformat_open_input(&g_pFormatCtx, strFileName, NULL, NULL);
    if(res) {
        printf("FFMPEG: Could not open input container\n");
        return MFX_ERR_UNKNOWN;
    }

    // Retrieve stream information
    res = avformat_find_stream_info(g_pFormatCtx, NULL);
    if(res < 0) {
        printf("FFMPEG: Couldn't find stream information\n");
        return MFX_ERR_UNKNOWN;
    }
    

    // Dump container info to console
    av_dump_format(g_pFormatCtx, 0, strFileName, 0);

    // Find the streams in the container
    g_videoStreamIdx = -1;
    for(unsigned int i=0; i<g_pFormatCtx->nb_streams; i++)
    {
        if(g_pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && g_videoStreamIdx == -1)
        {
            g_videoStreamIdx = i;
 
            // save decoded stream timestamp time base
            g_dec_time_base = g_pFormatCtx->streams[i]->time_base;

            if(videoType == MFX_CODEC_AVC)
            {
                // Retrieve required h264_mp4toannexb filter
                g_pBsfc = av_bitstream_filter_init("h264_mp4toannexb");
                if (!g_pBsfc) {
                    printf("FFMPEG: Could not aquire h264_mp4toannexb filter\n");
                    return MFX_ERR_UNKNOWN;
                }
            }
        }
#ifdef PROCESS_AUDIO
        else if(g_pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            g_audioStreamIdx = i;
            g_pAudioStream = g_pFormatCtx->streams[i];
            g_audio_dec_time_base = g_pAudioStream->time_base;
        }
#endif
    }
    if(g_videoStreamIdx == -1)
        return MFX_ERR_UNKNOWN; // Didn't find any video streams in container

    return MFX_ERR_NONE;
}
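av_bitstream_filter_init() and friends were deprecated in FFmpeg 3.1 in favor of AVBSFContext. A sketch of acquiring the same h264_mp4toannexb filter with the newer API; init_annexb_filter is a hypothetical helper and assumes an AVStream carrying codecpar:

#include <libavcodec/avcodec.h>

static int init_annexb_filter(AVStream *stream, AVBSFContext **bsf)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name("h264_mp4toannexb");
    if (!f)
        return AVERROR_BSF_NOT_FOUND;
    int ret = av_bsf_alloc(f, bsf);
    if (ret < 0)
        return ret;
    ret = avcodec_parameters_copy((*bsf)->par_in, stream->codecpar);
    if (ret < 0)
        return ret;
    (*bsf)->time_base_in = stream->time_base;
    return av_bsf_init(*bsf);
    /* later: av_bsf_send_packet(*bsf, pkt); then av_bsf_receive_packet(*bsf, pkt); */
}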
Beispiel #21
static bool openStream (player_t * const player ) {
	assert (player != NULL);
	/* no leak? */
	assert (player->fctx == NULL);

	int ret;

	/* stream setup */
	AVDictionary *options = NULL;
#ifdef HAVE_AV_TIMEOUT
	/* 20 second timeout on TCP r/w (20000000 microseconds) */
	av_dict_set (&options, "timeout", "20000000", 0);
#else
	/* libav does not support the timeout option above. the workaround stores
	 * the current time with ping() now and then, registers an interrupt
	 * callback (below) and compares saved/current time in this callback. it’s
	 * not bullet-proof, but seems to work fine for av_read_frame. */
	player->fctx = avformat_alloc_context ();
	player->fctx->interrupt_callback.callback = intCb;
	player->fctx->interrupt_callback.opaque = player;
#endif


	assert (player->url != NULL);
	ping ();

	if ((ret = avformat_open_input (&player->fctx, player->url, NULL, &options)) < 0) {
		int tries = 0;
		printError (player->settings, "Unable to open audio file at avformat_open_input", ret);
		printError (player->settings, player->url, ret);
		while ((ret < 0) && (tries++ < 10)) {
			sleep (1);
			ret = avformat_open_input (&player->fctx, player->url, NULL, &options);
			fprintf (stderr, "Retry yielded status %d\n", ret);
		}
		if (ret < 0)
			softfail ("Unable to open audio file/URL");
	}

	ping ();
	if ((ret = avformat_find_stream_info (player->fctx, NULL)) < 0) {
		softfail ("find_stream_info");
	}

	/* ignore all streams, undone for audio stream below */
	for (size_t i = 0; i < player->fctx->nb_streams; i++) {
		player->fctx->streams[i]->discard = AVDISCARD_ALL;
	}

	ping ();
	player->streamIdx = av_find_best_stream (player->fctx, AVMEDIA_TYPE_AUDIO,
			-1, -1, NULL, 0);
	if (player->streamIdx < 0) {
		softfail ("find_best_stream");
	}

	player->st = player->fctx->streams[player->streamIdx];
	AVCodecContext * const cctx = player->st->codec;
	player->st->discard = AVDISCARD_DEFAULT;

	/* decoder setup */
	AVCodec * const decoder = avcodec_find_decoder (cctx->codec_id);
	if (decoder == NULL) {
		softfail ("find_decoder");
	}

	if ((ret = avcodec_open2 (cctx, decoder, NULL)) < 0) {
		softfail ("codec_open2");
	}

	if (player->lastTimestamp > 0) {
		ping ();
		av_seek_frame (player->fctx, player->streamIdx, player->lastTimestamp, 0);
	}

	player->songPlayed = 0;
	player->songDuration = av_q2d (player->st->time_base) *
			(double) player->st->duration;

	return true;
}
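The libav workaround above only names ping() and intCb(); their bodies are not part of the example. A plausible sketch is given below, assuming a lastPing timestamp field on player_t (an assumed name; the real field is not shown) and a ~30 second cutoff:

#include <time.h>

static int intCb (void * const opaque)
{
	player_t * const player = opaque;
	/* a nonzero return aborts the blocking libavformat call */
	return time (NULL) - player->lastPing > 30;   /* lastPing is an assumed field */
}

static void ping (player_t * const player)
{
	player->lastPing = time (NULL);   /* the example calls ping() without arguments; the player is passed explicitly here */
}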
Beispiel #22
	bool CGEVideoDecodeHandler::open(const char* filename)
	{		
		

		if(avformat_open_input(&m_context->pFormatCtx, filename, nullptr, nullptr)!=0 ||
			avformat_find_stream_info(m_context->pFormatCtx, nullptr)<0)
		{
			return false;  // failed to open input or read stream info
		}

		av_dump_format(m_context->pFormatCtx, 0, filename, 0);
		m_context->videoStreamIndex = -1;
		m_context->audioStreamIndex = -1;

		for(unsigned int i = 0; i < m_context->pFormatCtx->nb_streams; i++)  
		{
			if(m_context->pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
			{  
				m_context->videoStreamIndex = i;
				m_context->pVideoStream = m_context->pFormatCtx->streams[i];
				m_context->pVideoCodecCtx = m_context->pFormatCtx->streams[i]->codec;
			}
			else if(m_context->pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
			{
				m_context->audioStreamIndex = i;
				m_context->pAudioStream = m_context->pFormatCtx->streams[i];
				m_context->pAudioCodecCtx = m_context->pFormatCtx->streams[i]->codec;
			}
		}

		if(m_context->videoStreamIndex == -1)
		{
			return false; // no video stream found
		}

		if(m_context->audioStreamIndex == -1)
		{
			CGE_LOG_INFO("未找到音频流, 视频将处于静音状态...\n");
		}

		//////////////////////////////////////////////////////////////////////////

		m_context->pVideoCodec = avcodec_find_decoder(m_context->pVideoCodecCtx->codec_id);

		if(m_context->pVideoCodec == nullptr || avcodec_open2(m_context->pVideoCodecCtx, m_context->pVideoCodec, nullptr) < 0)
		{
			return false; // failed to open the video decoder
		}

		if(m_context->audioStreamIndex != -1)
		{
			m_context->pAudioCodec = avcodec_find_decoder(m_context->pAudioCodecCtx->codec_id);

			if(m_context->pAudioCodec == nullptr || avcodec_open2(m_context->pAudioCodecCtx, m_context->pAudioCodec, nullptr) < 0)
			{
				CGE_LOG_ERROR("音频解码失败! 静音处理...\n");
				m_context->audioStreamIndex = -1;
				m_context->pAudioCodec = nullptr;
				m_context->pAudioCodecCtx = nullptr;
			}
		}

		m_width = m_context->pVideoCodecCtx->width;
		m_height = m_context->pVideoCodecCtx->height;

		m_context->pVideoFrame = av_frame_alloc();
		m_context->pAudioFrame = av_frame_alloc();

		av_init_packet(&m_context->packet);
		m_context->packet.data = nullptr;
		m_context->packet.size = 0;

		return m_context->pVideoFrame != nullptr && m_context->pAudioFrame != nullptr;// && initFrameRGB();
	}
Beispiel #23
int init_input(INPUT_CONTEXT *ptr_input_ctx, char* input_file) {

	av_register_all();
	avformat_network_init();

	//open input file
	ptr_input_ctx->ptr_format_ctx = NULL;
	if( avformat_open_input(&ptr_input_ctx->ptr_format_ctx, input_file, NULL, NULL) != 0){
		printf("inputfile init ,avformat_open_input failed .\n");
		exit(AV_OPEN_INPUT_FAIL);
	}

	//find stream info
	if ( avformat_find_stream_info(ptr_input_ctx->ptr_format_ctx, NULL) < 0){
		printf("inputfile init ,avformat_find_stream_info failed .\n");
		exit(AV_FIND_STREAM_INFO_FAIL);
	}

	//find streams in the input file
	ptr_input_ctx->audio_index = -1;
	ptr_input_ctx->video_index = -1;
	int i;
	for (i = 0; i < ptr_input_ctx->ptr_format_ctx->nb_streams; i++) {

		//the first video stream
		if (ptr_input_ctx->video_index < 0
				&& ptr_input_ctx->ptr_format_ctx->streams[i]->codec->codec_type
						== AVMEDIA_TYPE_VIDEO) {
			ptr_input_ctx->video_index = i;

		}

		//the first audio stream
		if (ptr_input_ctx->audio_index < 0
				&& ptr_input_ctx->ptr_format_ctx->streams[i]->codec->codec_type
						== AVMEDIA_TYPE_AUDIO) {
			ptr_input_ctx->audio_index = i;

		}
	}

	printf("audio_index = %d ,video_index = %d \n" ,ptr_input_ctx->audio_index ,
			ptr_input_ctx->video_index);

	if(ptr_input_ctx->video_index < 0 ){

		printf("do not find video stream ..\n");
		exit(NO_VIDEO_STREAM);
	}

	if(ptr_input_ctx->audio_index < 0 ){

		printf("do not find audio stream ..\n");
		exit(NO_AUDIO_STREAM);
	}

	//open video codec
	ptr_input_ctx->video_codec_ctx = ptr_input_ctx->ptr_format_ctx->streams[ptr_input_ctx->video_index]->codec;
	ptr_input_ctx->video_codec = avcodec_find_decoder(ptr_input_ctx->video_codec_ctx->codec_id);
	if(ptr_input_ctx->video_codec == NULL ){

		printf("in inputfile init ,unsupported video codec ..\n");
		exit(UNSPPORT_VIDEO_CODEC);
	}

	if(avcodec_open2(ptr_input_ctx->video_codec_ctx ,ptr_input_ctx->video_codec ,NULL) < 0){

		printf("in inputfile init ,can not open video_codec_ctx ..\n");
		exit(OPEN_VIDEO_CODEC_FAIL);
	}

	//open audio codec
	ptr_input_ctx->audio_codec_ctx = ptr_input_ctx->ptr_format_ctx->streams[ptr_input_ctx->audio_index]->codec;
	ptr_input_ctx->audio_codec = avcodec_find_decoder(ptr_input_ctx->audio_codec_ctx->codec_id);
	if(ptr_input_ctx->audio_codec == NULL ){

		printf("in inputfile init ,unsupported audio codec ..\n");
		exit(UNSPPORT_AUDIO_CODEC);
	}

	if(avcodec_open2(ptr_input_ctx->audio_codec_ctx ,ptr_input_ctx->audio_codec ,NULL) < 0){

		printf("in inputfile init ,can not open audio_codec_ctx ..\n");
		exit(OPEN_AUDIO_CODEC_FAIL);
	}

	printf("in here ,have open video codec ,and audio codec .\n");

	/*	set some mark 	*/
	//ptr_input_ctx->mark_have_frame = 0;

	/*	malloc memory 	*/
	ptr_input_ctx->yuv_frame = avcodec_alloc_frame();
	if(ptr_input_ctx->yuv_frame == NULL){
		printf("yuv_frame allocate failed %s ,%d line\n" ,__FILE__ ,__LINE__);
		exit(MEMORY_MALLOC_FAIL);
	}
//    /*	malloc buffer	*/
//	ptr_input_ctx->encoded_pict = avcodec_alloc_frame();
//    if(ptr_input_ctx->encoded_pict == NULL){
//		printf("yuv_frame allocate failed %s ,%d line\n" ,__FILE__ ,__LINE__);
//		exit(MEMORY_MALLOC_FAIL);
//	}
//    int size = avpicture_get_size(ptr_input_ctx->video_codec_ctx->pix_fmt ,
//    		ptr_input_ctx->video_codec_ctx->width ,
//    		ptr_input_ctx->video_codec_ctx->height);
//
//    ptr_input_ctx->pict_buf = av_malloc(size);
//    if(ptr_input_ctx->pict_buf == NULL){
//    	printf("pict allocate failed ...\n");
//    	exit(MEMORY_MALLOC_FAIL);
//    }
//    //bind
//    avpicture_fill((AVPicture *)ptr_input_ctx->encoded_pict ,ptr_input_ctx->pict_buf ,
//    		ptr_input_ctx->video_codec_ctx->pix_fmt ,
//    		ptr_input_ctx->video_codec_ctx->width ,
//    		ptr_input_ctx->video_codec_ctx->height);

	//audio frame
	ptr_input_ctx->audio_decode_frame = avcodec_alloc_frame();
	if(ptr_input_ctx->audio_decode_frame == NULL){
		printf("audio_decode_frame allocate failed %s ,%d line\n" ,__FILE__ ,__LINE__);
		exit(MEMORY_MALLOC_FAIL);
	}

	return 0;

}
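The manual scan over nb_streams in init_input() can be collapsed with av_find_best_stream(), which also hands back the decoder. A sketch using the INPUT_CONTEXT fields from the example above; find_streams is a hypothetical helper:

static int find_streams(INPUT_CONTEXT *ptr_input_ctx)
{
	ptr_input_ctx->video_index = av_find_best_stream(ptr_input_ctx->ptr_format_ctx,
			AVMEDIA_TYPE_VIDEO, -1, -1, &ptr_input_ctx->video_codec, 0);
	ptr_input_ctx->audio_index = av_find_best_stream(ptr_input_ctx->ptr_format_ctx,
			AVMEDIA_TYPE_AUDIO, -1, -1, &ptr_input_ctx->audio_codec, 0);
	return (ptr_input_ctx->video_index >= 0 && ptr_input_ctx->audio_index >= 0) ? 0 : -1;
}

av_find_best_stream() returns a negative AVERROR when no stream of the requested type exists, so the two exit() paths above map onto the negative return here.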
Beispiel #24
u32 vdecOpen(VideoDecoder* data)
{
	VideoDecoder& vdec = *data;

	vdec.vdecCb = &Emu.GetCPU().AddThread(CPU_THREAD_PPU);

	u32 vdec_id = cellVdec->GetNewId(data);

	vdec.id = vdec_id;

	vdec.vdecCb->SetName("Video Decoder[" + std::to_string(vdec_id) + "] Callback");

	thread t("Video Decoder[" + std::to_string(vdec_id) + "] Thread", [&]()
	{
		cellVdec->Notice("Video Decoder thread started");

		VdecTask& task = vdec.task;

		while (true)
		{
			if (Emu.IsStopped())
			{
				break;
			}

			if (!vdec.job.GetCountUnsafe() && vdec.is_running)
			{
				std::this_thread::sleep_for(std::chrono::milliseconds(1));
				continue;
			}

			if (vdec.frames.GetCount() >= 50)
			{
				std::this_thread::sleep_for(std::chrono::milliseconds(1));
				continue;
			}

			if (!vdec.job.Pop(task))
			{
				break;
			}

			switch (task.type)
			{
			case vdecStartSeq:
			{
				// TODO: reset data
				cellVdec->Warning("vdecStartSeq:");

				vdec.reader.addr = 0;
				vdec.reader.size = 0;
				vdec.is_running = true;
				vdec.just_started = true;
			}
			break;

			case vdecEndSeq:
			{
				// TODO: finalize
				cellVdec->Warning("vdecEndSeq:");

				vdec.vdecCb->ExecAsCallback(vdec.cbFunc, false, vdec.id, CELL_VDEC_MSG_TYPE_SEQDONE, CELL_OK, vdec.cbArg);
				/*Callback cb;
				cb.SetAddr(vdec.cbFunc);
				cb.Handle(vdec.id, CELL_VDEC_MSG_TYPE_SEQDONE, CELL_OK, vdec.cbArg);
				cb.Branch(true); // ???*/

				vdec.is_running = false;
				vdec.just_finished = true;
			}
			break;

			case vdecDecodeAu:
			{
				int err;

				if (task.mode != CELL_VDEC_DEC_MODE_NORMAL)
				{
					cellVdec->Error("vdecDecodeAu: unsupported decoding mode(%d)", task.mode);
					break;
				}

				vdec.reader.addr = task.addr;
				vdec.reader.size = task.size;
				//LOG_NOTICE(HLE, "Video AU: size = 0x%x, pts = 0x%llx, dts = 0x%llx", task.size, task.pts, task.dts);

				if (vdec.just_started)
				{
					vdec.first_pts = task.pts;
					vdec.last_pts = task.pts;
					vdec.first_dts = task.dts;
				}

				struct AVPacketHolder : AVPacket
				{
					AVPacketHolder(u32 size)
					{
						av_init_packet(this);

						if (size)
						{
							data = (u8*)av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
							memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
							this->size = size + FF_INPUT_BUFFER_PADDING_SIZE;
						}
						else
						{
							data = NULL;
							size = 0;
						}
					}

					~AVPacketHolder()
					{
						av_free(data);
						//av_free_packet(this);
					}

				} au(0);

				if (vdec.just_started && vdec.just_finished)
				{
					avcodec_flush_buffers(vdec.ctx);
					vdec.just_started = false;
					vdec.just_finished = false;
				}
				else if (vdec.just_started) // deferred initialization
				{
					err = avformat_open_input(&vdec.fmt, NULL, av_find_input_format("mpeg"), NULL);
					if (err)
					{
						cellVdec->Error("vdecDecodeAu: avformat_open_input() failed");
						Emu.Pause();
						break;
					}
					AVCodec* codec = avcodec_find_decoder(AV_CODEC_ID_H264); // ???
					if (!codec)
					{
						cellVdec->Error("vdecDecodeAu: avcodec_find_decoder() failed");
						Emu.Pause();
						break;
					}
					/*err = avformat_find_stream_info(vdec.fmt, NULL);
					if (err)
					{
						LOG_ERROR(HLE, "vdecDecodeAu: avformat_find_stream_info() failed");
						Emu.Pause();
						break;
					}
					if (!vdec.fmt->nb_streams)
					{
						LOG_ERROR(HLE, "vdecDecodeAu: no stream found");
						Emu.Pause();
						break;
					}*/
					if (!avformat_new_stream(vdec.fmt, codec))
					{
						cellVdec->Error("vdecDecodeAu: avformat_new_stream() failed");
						Emu.Pause();
						break;
					}
					vdec.ctx = vdec.fmt->streams[0]->codec; // TODO: check data
						
					AVDictionary* opts = nullptr;
					av_dict_set(&opts, "refcounted_frames", "1", 0);
					{
						std::lock_guard<std::mutex> lock(g_mutex_avcodec_open2);
						// not multithread-safe (???)
						err = avcodec_open2(vdec.ctx, codec, &opts);
					}
					if (err)
					{
						cellVdec->Error("vdecDecodeAu: avcodec_open2() failed");
						Emu.Pause();
						break;
					}
					vdec.just_started = false;
				}

				bool last_frame = false;

				while (true)
				{
					if (Emu.IsStopped() || vdec.job.PeekIfExist().type == vdecClose)
					{
						vdec.is_finished = true;
						cellVdec->Warning("vdecDecodeAu: aborted");
						return;
					}

					last_frame = av_read_frame(vdec.fmt, &au) < 0;
					if (last_frame)
					{
						//break;
						av_free(au.data);
						au.data = NULL;
						au.size = 0;
					}

					struct VdecFrameHolder : VdecFrame
					{
						VdecFrameHolder()
						{
							data = av_frame_alloc();
						}

						~VdecFrameHolder()
						{
							if (data)
							{
								av_frame_unref(data);
								av_frame_free(&data);
							}
						}

					} frame;

					if (!frame.data)
					{
						cellVdec->Error("vdecDecodeAu: av_frame_alloc() failed");
						Emu.Pause();
						break;
					}

					int got_picture = 0;

					int decode = avcodec_decode_video2(vdec.ctx, frame.data, &got_picture, &au);

					if (decode <= 0)
					{
						if (!last_frame && decode < 0)
						{
							cellVdec->Error("vdecDecodeAu: AU decoding error(0x%x)", decode);
						}
						if (!got_picture && vdec.reader.size == 0) break; // video end?
					}

					if (got_picture)
					{
						u64 ts = av_frame_get_best_effort_timestamp(frame.data);
						if (ts != AV_NOPTS_VALUE)
						{
							frame.pts = ts/* - vdec.first_pts*/; // ???
							vdec.last_pts = frame.pts;
						}
						else
						{
							vdec.last_pts += vdec.ctx->time_base.num * 90000 / (vdec.ctx->time_base.den / vdec.ctx->ticks_per_frame);
							frame.pts = vdec.last_pts;
						}
						//frame.pts = vdec.last_pts;
						//vdec.last_pts += 3754;
						frame.dts = (frame.pts - vdec.first_pts) + vdec.first_dts;
						frame.userdata = task.userData;

						//LOG_NOTICE(HLE, "got picture (pts=0x%llx, dts=0x%llx)", frame.pts, frame.dts);

						vdec.frames.Push(frame); // !!!!!!!!
						frame.data = nullptr; // to prevent destruction

						vdec.vdecCb->ExecAsCallback(vdec.cbFunc, false, vdec.id, CELL_VDEC_MSG_TYPE_PICOUT, CELL_OK, vdec.cbArg);
						/*Callback cb;
						cb.SetAddr(vdec.cbFunc);
						cb.Handle(vdec.id, CELL_VDEC_MSG_TYPE_PICOUT, CELL_OK, vdec.cbArg);
						cb.Branch(false);*/
					}
				}

				vdec.vdecCb->ExecAsCallback(vdec.cbFunc, false, vdec.id, CELL_VDEC_MSG_TYPE_AUDONE, CELL_OK, vdec.cbArg);
				/*Callback cb;
				cb.SetAddr(vdec.cbFunc);
				cb.Handle(vdec.id, CELL_VDEC_MSG_TYPE_AUDONE, CELL_OK, vdec.cbArg);
				cb.Branch(false);*/
			}
			break;

			case vdecClose:
			{
				vdec.is_finished = true;
				cellVdec->Notice("Video Decoder thread ended");
				return;
			}

			case vdecSetFrameRate:
			{
				cellVdec->Error("TODO: vdecSetFrameRate(%d)", task.frc);
			}
			break;

			default:
				cellVdec->Error("Video Decoder thread error: unknown task(%d)", task.type);
			}
		}

		vdec.is_finished = true;
		cellVdec->Warning("Video Decoder thread aborted");
	});

	t.detach();

	return vdec_id;
}
Beispiel #25
bool FFmpegDecoder::open(const std::string & filename, FFmpegParameters* parameters)
{
    try
    {
        // Open video file
        AVFormatContext * p_format_context = 0;
        AVInputFormat *iformat = 0;

        if (filename.compare(0, 5, "/dev/")==0)
        {
#ifdef ANDROID
            throw std::runtime_error("Device not supported on Android");
#else
            avdevice_register_all();

            if (parameters)
            {
                av_dict_set(parameters->getOptions(), "video_size", "640x480", 0);
                av_dict_set(parameters->getOptions(), "framerate", "1:30", 0);
            }

            std::string format = "video4linux2";
            iformat = av_find_input_format(format.c_str());

            if (iformat)
            {
                OSG_INFO<<"Found input format: "<<format<<std::endl;
            }
            else
            {
                OSG_INFO<<"Failed to find input format: "<<format<<std::endl;
            }

#endif
        }
        else
        {
            iformat = parameters ? parameters->getFormat() : 0;
            AVIOContext* context = parameters ? parameters->getContext() : 0;
            if (context != NULL)
            {
                p_format_context = avformat_alloc_context();
                p_format_context->pb = context;
            }
        }

        int error = avformat_open_input(&p_format_context, filename.c_str(), iformat, parameters ? parameters->getOptions() : 0);
        if (error != 0)
        {
            std::string error_str;
            switch (error)
            {
                //case AVERROR_UNKNOWN: error_str = "AVERROR_UNKNOWN"; break;   // same value as AVERROR_INVALIDDATA
                case AVERROR_IO: error_str = "AVERROR_IO"; break;
                case AVERROR_NUMEXPECTED: error_str = "AVERROR_NUMEXPECTED"; break;
                case AVERROR_INVALIDDATA: error_str = "AVERROR_INVALIDDATA"; break;
                case AVERROR_NOMEM: error_str = "AVERROR_NOMEM"; break;
                case AVERROR_NOFMT: error_str = "AVERROR_NOFMT"; break;
                case AVERROR_NOTSUPP: error_str = "AVERROR_NOTSUPP"; break;
                case AVERROR_NOENT: error_str = "AVERROR_NOENT"; break;
                case AVERROR_PATCHWELCOME: error_str = "AVERROR_PATCHWELCOME"; break;
                default: error_str = "Unknown error"; break;
            }

            throw std::runtime_error("av_open_input_file() failed : " + error_str);
        }

        m_format_context.reset(p_format_context);

        // Retrieve stream info
        // Only buffer up to one and a half seconds
        p_format_context->max_analyze_duration = AV_TIME_BASE * 1.5f;
        if (avformat_find_stream_info(p_format_context, NULL) < 0)
            throw std::runtime_error("av_find_stream_info() failed");

        m_duration = double(m_format_context->duration) / AV_TIME_BASE;
        if (m_format_context->start_time != static_cast<int64_t>(AV_NOPTS_VALUE))
            m_start = double(m_format_context->start_time) / AV_TIME_BASE;
        else
            m_start = 0;

        // TODO move this elsewhere
        m_clocks.reset(m_start);

        // Dump info to stderr
        av_dump_format(p_format_context, 0, filename.c_str(), false);

        // Find and open the first video and audio streams (note that audio stream is optional and only opened if possible)
        if ((m_video_index = av_find_best_stream(m_format_context.get(), AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0)) < 0)
            throw std::runtime_error("Could not open video stream");
        m_video_stream = m_format_context->streams[m_video_index];

        if ((m_audio_index = av_find_best_stream(m_format_context.get(), AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0)) >= 0)
            m_audio_stream = m_format_context->streams[m_audio_index];
        else
        {
            m_audio_stream = 0;
            m_audio_index = std::numeric_limits<unsigned int>::max();
        }

        m_video_decoder.open(m_video_stream);

        try
        {
            m_audio_decoder.open(m_audio_stream);
        }

        catch (const std::runtime_error & error)
        {
            OSG_WARN << "FFmpegImageStream::open audio failed, audio stream will be disabled: " << error.what() << std::endl;
        }
    }

    catch (const std::runtime_error & error)
    {
        OSG_WARN << "FFmpegImageStream::open : " << error.what() << std::endl;
        return false;
    }

    return true;
}
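The switch over AVERROR_IO, AVERROR_NUMEXPECTED and the rest above maps error codes by hand using constants from old libav releases. av_strerror() covers every code; a small C sketch (av_error_string is a hypothetical helper):

#include <stdio.h>
#include <libavutil/error.h>

static const char *av_error_string(int errnum, char *buf, size_t buf_size)
{
	/* av_strerror() fills buf with the message for any AVERROR code */
	if (av_strerror(errnum, buf, buf_size) < 0)
		snprintf(buf, buf_size, "Unknown error %d", errnum);
	return buf;
}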
Beispiel #26
int open_input_file(int check_yuv)
{
	AVStream *stream = NULL;
	AVCodecContext *codecCtx = NULL;
	AVCodec *dec = NULL;
	int ret;
	int streamIdx = 0;
	unsigned int i;

	/*
	*	reads file header and stores information
	*	about the file format in the AVFormatContext structure
	*	@param fmt, options If NULL acuto-detect file format,
	*						buffer size, and format options
	*/
	

	ret = avformat_open_input(&inFmtCtx, file_input, NULL, NULL);
	if (ret < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot open input file [%s]\n", file_input);
		return ret;
	}

	if (check_yuv == 1){
		printf("Input is a raw YUV file, so the frame size must be supplied.\n");
		printf("width : ");
		scanf("%d", &(inFmtCtx)->streams[streamIdx]->codec->width);
		printf("height : ");
		scanf("%d", &(inFmtCtx)->streams[streamIdx]->codec->height);
	}


	av_log(NULL, AV_LOG_INFO, "File [%s] Open Success\n", file_input);
	av_log(NULL, AV_LOG_DEBUG, "Format: %s\n", inFmtCtx->iformat->name);


	//retrieve stream information
	ret = avformat_find_stream_info(inFmtCtx, NULL);
	if (ret < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot find stream information");
		return ret;
	}
	av_log(NULL, AV_LOG_INFO, "Get Stream Information Success\n");

	for (i = 0; i < inFmtCtx->nb_streams; i++)
	{
		stream = inFmtCtx->streams[i];
		codecCtx = stream->codec;

		if (codecCtx->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			streamIdx = i;
			//find decoder
			dec = avcodec_find_decoder(codecCtx->codec_id);
			if (dec == NULL)
			{
				/* dec must not be dereferenced here: it is NULL when the lookup fails */
				av_log(NULL, AV_LOG_ERROR, "Could not find a %s codec\n", av_get_media_type_string(codecCtx->codec_type));
				return AVERROR(EINVAL);
			}

			ret = avcodec_open2(codecCtx, dec, NULL);
			if (ret < 0)
			{
				av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
				return ret;
			}
		}
	}

	clip_width = inFmtCtx->streams[streamIdx]->codec->width;
	clip_height = inFmtCtx->streams[streamIdx]->codec->height;
	time_base_den = inFmtCtx->streams[streamIdx]->codec->time_base.den;
	time_base_num = inFmtCtx->streams[streamIdx]->codec->time_base.num;

	//debugging function
	av_dump_format(inFmtCtx, 0, file_input, 0);

	return 0;
}
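open_input_file() configures the decoder through stream->codec, which FFmpeg deprecated in 3.1 in favor of stream->codecpar plus a separately allocated codec context. A hedged sketch of the modern equivalent of the find-and-open step (helper name hypothetical):

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Hypothetical modern counterpart of the find-and-open step above. */
static int open_decoder_modern(AVFormatContext *fmt_ctx, int stream_index,
                               AVCodecContext **out_ctx)
{
	AVStream *stream = fmt_ctx->streams[stream_index];
	const AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
	int ret;

	if (!dec)
		return AVERROR_DECODER_NOT_FOUND;

	*out_ctx = avcodec_alloc_context3(dec);
	if (!*out_ctx)
		return AVERROR(ENOMEM);

	/* Copy the demuxer's stream parameters into the fresh codec context. */
	ret = avcodec_parameters_to_context(*out_ctx, stream->codecpar);
	if (ret < 0 || (ret = avcodec_open2(*out_ctx, dec, NULL)) < 0)
		avcodec_free_context(out_ctx);

	return ret;
}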
int _tmain(int argc, _TCHAR* argv[])
{
	AVFormatContext *pFormatCtx;
	char *filepath = "Titanic.ts";
	char *file_out = "Titanic.yuv";
	char *file_out1 = "Titanic.h264";
	FILE *fp_out;
	FILE *fp_out1;
	errno_t err,err1;

	err = fopen_s(&fp_out, file_out, "wb+");
	if (err != 0)
	{
		printf("Could not open output file [%s]\n", file_out);
		return -1;
	}

	err1 = fopen_s(&fp_out1, file_out1, "wb+");
	if (err1 != 0)
	{
		printf("Could not open output file [%s]\n", file_out1);
		return -1;
	}

	av_register_all();    // register all formats and codecs
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();   // allocate the format context
	if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) < 0) // open the input video file
	{
		printf("Can't open the input stream.\n");
		return -1;
	}
	if (avformat_find_stream_info(pFormatCtx,NULL)<0)     // read packets to gather stream information
	{
		printf("Can't find the stream information!\n");
		return -1;
	}

	int i, index_video = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
	{
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)      // remember the first video stream
		{
			index_video = i;
			break;
		}		
	}
	if (index_video == -1)
	{
		printf("Can't find a video stream;\n");
		return -1;
	}

	AVCodecContext *pCodecCtx = pFormatCtx->streams[index_video]->codec;

	AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);     // find the decoder
	if (pCodec == NULL)
	{
		printf("Can't find a decoder!\n");
		return -1;
	}

	//if (pCodecCtx->codec_id == AV_CODEC_ID_H264)
	//{
	//	av_opt_set(pCodecCtx->priv_data, "preset", "slow", 0);
	//	av_opt_set(pCodecCtx->priv_data, "tune", "zerolatency", 0);
	//}


	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)   // open the decoder
	{
		printf("Can't open the decoder!\n");
		return -1;
	}


	AVFrame *pFrame = av_frame_alloc();  //this only allocates the AVFrame itself, not the data buffers
	AVFrame *pFrameYUV = av_frame_alloc();

	uint8_t *out_buffer = (uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P,pCodecCtx->width,pCodecCtx->height));  // allocate the conversion buffer
	avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height); // attach the buffer to the frame

	AVPacket *pkt = (AVPacket *)av_malloc(sizeof(AVPacket));
	av_init_packet(pkt);
	
	SwsContext * img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
		pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
	
	int frame_cnt = 0;
	int ret;
	int get_frame;
	int y_size = pCodecCtx->width*pCodecCtx->height;
	while (av_read_frame(pFormatCtx,pkt) >=0  )
	{
		if (pkt->stream_index == index_video)
		{
			fwrite(pkt->data,1,pkt->size,fp_out1);
			if (avcodec_decode_video2(pCodecCtx, pFrame, &get_frame, pkt) < 0)
			{
				printf("Decode Error!\n");
				return -1;
			}
			if (get_frame)
			{
				printf("Decoded frame index: %d\n", frame_cnt);
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
					pFrameYUV->data, pFrameYUV->linesize);
				fwrite(pFrameYUV->data[0], 1, y_size, fp_out);
				fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_out);
				fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_out);
				frame_cnt++;
			}

		}
		av_free_packet(pkt);
	}
	// Flush the decoder: a NULL data pointer with size 0 drains buffered frames.
	pkt->data = NULL;
	pkt->size = 0;
	while (1)
	{
		if (avcodec_decode_video2(pCodecCtx, pFrame, &get_frame, pkt) < 0)
		{
			printf("Decode Error!\n");
			break;
		}
		if (get_frame)
		{
			printf("Flush Decoded frame index: %d\n", frame_cnt);
			sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
				pFrameYUV->data, pFrameYUV->linesize);
			fwrite(pFrameYUV->data[0], 1, y_size, fp_out);
			fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_out);
			fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_out);
			frame_cnt++;
		}
		else
			break;
	}

	//close
	fclose(fp_out);
	fclose(fp_out1);

	//free
	sws_freeContext(img_convert_ctx);
	av_free(out_buffer);
	av_free(pkt);
	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);  // also frees the context, so no avformat_free_context() is needed

	return 0;
}
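The avcodec_decode_video2() call and the separate end-of-stream flush loop above were deprecated in FFmpeg 3.1; the send/receive API folds both into one pattern, since passing a NULL packet drains the decoder. A minimal sketch (helper name hypothetical; assumes the decoder outputs yuv420p, and writes plane rows one by one so linesize padding is not copied into the file):

#include <stdio.h>
#include <libavcodec/avcodec.h>

/* Hypothetical replacement for the decode + flush loops above.
 * Feed one packet (or NULL to drain), write every resulting frame. */
static int decode_and_write(AVCodecContext *dec_ctx, const AVPacket *pkt,
                            AVFrame *frame, FILE *out)
{
	int ret = avcodec_send_packet(dec_ctx, pkt);
	if (ret < 0)
		return ret;

	for (;;)
	{
		int y;
		ret = avcodec_receive_frame(dec_ctx, frame);
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
			return 0;   /* needs more input, or fully drained */
		if (ret < 0)
			return ret;

		/* Write row by row to skip per-row linesize padding. */
		for (y = 0; y < dec_ctx->height; y++)
			fwrite(frame->data[0] + y * frame->linesize[0], 1, dec_ctx->width, out);
		for (y = 0; y < dec_ctx->height / 2; y++)
			fwrite(frame->data[1] + y * frame->linesize[1], 1, dec_ctx->width / 2, out);
		for (y = 0; y < dec_ctx->height / 2; y++)
			fwrite(frame->data[2] + y * frame->linesize[2], 1, dec_ctx->width / 2, out);
	}
}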
Example #28
0
static int
artwork_get(char *filename, int max_w, int max_h, int format, struct evbuffer *evbuf)
{
  AVFormatContext *src_ctx;
  AVCodecContext *src;
  int s;
  int target_w;
  int target_h;
  int need_rescale;
  int format_ok;
  int ret;

  DPRINTF(E_DBG, L_ART, "Artwork request parameters: max w = %d, max h = %d\n", max_w, max_h);

  src_ctx = NULL;

#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 3)
  ret = avformat_open_input(&src_ctx, filename, NULL, NULL);
#else
  ret = av_open_input_file(&src_ctx, filename, NULL, 0, NULL);
#endif
  if (ret < 0)
    {
      DPRINTF(E_WARN, L_ART, "Cannot open artwork file '%s': %s\n", filename, strerror(AVUNERROR(ret)));

      return -1;
    }

#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 3)
  ret = avformat_find_stream_info(src_ctx, NULL);
#else
  ret = av_find_stream_info(src_ctx);
#endif
  if (ret < 0)
    {
      DPRINTF(E_WARN, L_ART, "Cannot get stream info: %s\n", strerror(AVUNERROR(ret)));

#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 21)
      avformat_close_input(&src_ctx);
#else
      av_close_input_file(src_ctx);
#endif
      return -1;
    }

  format_ok = 0;
  for (s = 0; s < src_ctx->nb_streams; s++)
    {
      if (src_ctx->streams[s]->codec->codec_id == CODEC_ID_PNG)
	{
	  format_ok = (format & ART_CAN_PNG) ? ART_FMT_PNG : 0;
	  break;
	}
      else if (src_ctx->streams[s]->codec->codec_id == CODEC_ID_MJPEG)
	{
	  format_ok = (format & ART_CAN_JPEG) ? ART_FMT_JPEG : 0;
	  break;
	}
    }

  if (s == src_ctx->nb_streams)
    {
      DPRINTF(E_LOG, L_ART, "Artwork file '%s' not a PNG or JPEG file\n", filename);

#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 21)
      avformat_close_input(&src_ctx);
#else
      av_close_input_file(src_ctx);
#endif
      return -1;
    }

  src = src_ctx->streams[s]->codec;

  DPRINTF(E_DBG, L_ART, "Original image '%s': w %d h %d\n", filename, src->width, src->height);

  need_rescale = 1;

  if ((src->width <= max_w) && (src->height <= max_h)) /* Smaller than target */
    {
      need_rescale = 0;

      target_w = src->width;
      target_h = src->height;
    }
  else if (src->width * max_h > src->height * max_w)   /* Wider aspect ratio than target */
    {
      target_w = max_w;
      target_h = (double)max_w * ((double)src->height / (double)src->width);
    }
  else                                                 /* Taller or equal aspect ratio */
    {
      target_w = (double)max_h * ((double)src->width / (double)src->height);
      target_h = max_h;
    }

  DPRINTF(E_DBG, L_ART, "Raw destination width %d height %d\n", target_w, target_h);

  if (target_h > max_h)
    target_h = max_h;

  /* Round the width up to an even value (the PNG path prefers even dimensions) */
  target_w += target_w % 2;

  if (target_w > max_w)
    target_w = max_w - (max_w % 2);

  DPRINTF(E_DBG, L_ART, "Destination width %d height %d\n", target_w, target_h);

  /* Fastpath */
  if (!need_rescale && format_ok)
    {
      ret = artwork_read(filename, evbuf);
      if (ret == 0)
	ret = format_ok;
    }
  else
    ret = artwork_rescale(src_ctx, s, target_w, target_h, format, evbuf);

#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 21)
  avformat_close_input(&src_ctx);
#else
  av_close_input_file(src_ctx);
#endif

  if (ret < 0)
    {
      if (EVBUFFER_LENGTH(evbuf) > 0)
	evbuffer_drain(evbuf, EVBUFFER_LENGTH(evbuf));
    }

  return ret;
}
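The scaling branch in artwork_get() avoids a floating-point ratio comparison by cross-multiplying: src_w/src_h > max_w/max_h exactly when src_w*max_h > src_h*max_w. A self-contained sketch of the same fit-inside computation (helper name hypothetical):

#include <stdio.h>

/* Fit (src_w x src_h) inside (max_w x max_h), preserving aspect ratio.
 * Cross-multiplication avoids comparing floating-point ratios. */
static void fit_inside(int src_w, int src_h, int max_w, int max_h,
                       int *out_w, int *out_h)
{
  if (src_w <= max_w && src_h <= max_h)
    {
      *out_w = src_w;             /* already small enough */
      *out_h = src_h;
    }
  else if (src_w * max_h > src_h * max_w)
    {
      *out_w = max_w;             /* source is wider: clamp width */
      *out_h = (int)((double)max_w * src_h / src_w);
    }
  else
    {
      *out_h = max_h;             /* source is taller: clamp height */
      *out_w = (int)((double)max_h * src_w / src_h);
    }
}

int main(void)
{
  int w, h;
  fit_inside(1920, 1080, 600, 600, &w, &h);
  printf("%dx%d\n", w, h);        /* prints 600x337 */
  return 0;
}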
Example #29
0
int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx = NULL;
    int i, videoStream;
    AVCodecContext *pCodecCtx = NULL;
    AVCodec *pCodec = NULL;
    AVDictionary *optionsDict = NULL;
    AVFrame *pFrame = NULL;
    int frameFinished;
    AVPacket packet;
    struct SwsContext *sws_ctx = NULL;

    SDL_Surface *screen = NULL;
    SDL_Overlay *bmp = NULL;
    SDL_Rect rect;
    SDL_Event event;

    if (argc < 2) {
        printf("Please provide a movie file\n");
        exit(1);
    }

    // Register all file formats and codecs
    av_register_all();

    // Initialize SDL
    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Open video file
    if (avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0) {
        return -1; // Couldn't open file
    }

    // Retrieve stream information
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        return -1; // Couldn't find stream information
    }

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    }
    if (videoStream == -1) {
        return -1; // Didn't find a video stream
    }

    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }

    // Open codec
    if (avcodec_open2(pCodecCtx, pCodec, &optionsDict) < 0) {
        return -1; // Could not open codec
    }

    // Allocate video frame
    pFrame = avcodec_alloc_frame();

    // Make a screen to put our video
#ifndef __DARWIN__
    screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
    screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }

    // Allocate a place to put our YUV image on that screen
    bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height,
                               SDL_YV12_OVERLAY, screen);

    sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
                PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);

    // Read frames and display them
    i = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
                                 &packet);

            // Did we get a video frame?
            if (frameFinished) {
                SDL_LockYUVOverlay(bmp);

                AVPicture pict;
                pict.data[0] = bmp->pixels[0];
                pict.data[1] = bmp->pixels[2];
                pict.data[2] = bmp->pixels[1];

                pict.linesize[0] = bmp->pitches[0];
                pict.linesize[1] = bmp->pitches[2];
                pict.linesize[2] = bmp->pitches[1];

                // Convert the image into YUV format that SDL uses
                sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
                          pFrame->linesize, 0, pCodecCtx->height,
                          pict.data, pict.linesize);

                SDL_UnlockYUVOverlay(bmp);

                rect.x = 0;
                rect.y = 0;
                rect.w = pCodecCtx->width;
                rect.h = pCodecCtx->height;
                SDL_DisplayYUVOverlay(bmp, &rect);
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
        SDL_PollEvent(&event);
        switch (event.type) {
        case SDL_QUIT:
            SDL_Quit();
            exit(0);
            break;
        default:
            break;
        }
    }

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    return 0;
}
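SDL_Overlay and SDL_SetVideoMode() are SDL 1.2 APIs that were removed in SDL 2.0, which replaced overlays with streaming textures. A hedged sketch of the equivalent per-frame display step under SDL2 (assumes a window, renderer, and an SDL_PIXELFORMAT_IYUV texture of matching size were created elsewhere):

#include <SDL2/SDL.h>
#include <libavutil/frame.h>

/* Upload one decoded YUV420P frame into a streaming texture and present it. */
static void present_frame(SDL_Renderer *renderer, SDL_Texture *texture,
                          const AVFrame *frame)
{
    SDL_UpdateYUVTexture(texture, NULL,
                         frame->data[0], frame->linesize[0],
                         frame->data[1], frame->linesize[1],
                         frame->data[2], frame->linesize[2]);
    SDL_RenderClear(renderer);
    SDL_RenderCopy(renderer, texture, NULL, NULL);
    SDL_RenderPresent(renderer);
}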
Example #30
-2
//----------------------------------------------------------------
// main_utf8
// 
int main_utf8(int argc, char **argv)
{
	const char *input = NULL;
	const char *output_prefix = "";
	double target_segment_duration = 0.0;
	char *segment_duration_check = NULL;
	const char *playlist_filename = NULL;
	const char *http_prefix = "";
	long max_tsfiles = 0;
	char *max_tsfiles_check = NULL;
	double prev_segment_time = 0.0;
	double segment_duration = 0.0;
	unsigned int output_index = 1;
	const AVClass *fc = avformat_get_class();
	AVDictionary *format_opts = NULL;
	AVOutputFormat *ofmt = NULL;
	AVFormatContext *ic = NULL;
	AVFormatContext *oc = NULL;
	AVStream *video_st = NULL;
	AVStream *audio_st = NULL;
	AVCodec *codec = NULL;
	char *output_filename = NULL; 	
	int if_save_keyframe = 0;			//add by wanggm
	char *keyframeinfo_filename = NULL;	//add by wanggm
	json_object *obj = NULL;			//add by wanggm
	json_object *info_arr_obj = NULL;	//add by wanggm

	int if_monitor_related_process = 0;	//add by wanggm
	pid_t relatedProcessPid = 1; 		//add by wanggm
	char *pid_filename = NULL;
	int video_index = -1;
	int audio_index = -1;
	int kill_file = 0;
	int decode_done = 0;
	int ret = 0;
	int i = 0;
	TSMStreamLace * streamLace = NULL;
	TSMPlaylist * playlist = NULL;
	const double segment_duration_error_tolerance = 0.05;
	double extra_duration_needed = 0;
	int strict_segment_duration = 0;

	av_log_set_level(AV_LOG_INFO);


	for (i = 1; i < argc; i++)
	{
		if (strcmp(argv[i], "-i") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -i parameter");
			i++;
			input = argv[i];
		}
		else if (strcmp(argv[i], "-o") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -o parameter");
			i++;
			output_prefix = argv[i];
		}
		else if (strcmp(argv[i], "-d") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -d parameter");
			i++;

			target_segment_duration = strtod(argv[i], &segment_duration_check);
			if (segment_duration_check
					== argv[i] || target_segment_duration == HUGE_VAL
					|| target_segment_duration == -HUGE_VAL){
				usage3(argv, "invalid segment duration: ", argv[i]);
			}
		}
		else if (strcmp(argv[i], "-x") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -x parameter");
			i++;
			playlist_filename = argv[i];
		}
		else if (strcmp(argv[i], "-p") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -p parameter");
			i++;
			http_prefix = argv[i];
		}
		else if (strcmp(argv[i], "-w") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -w parameter");
			i++;

			max_tsfiles = strtol(argv[i], &max_tsfiles_check, 10);
			if (max_tsfiles_check == argv[i] || max_tsfiles < 0
					|| max_tsfiles >= INT_MAX)
			{
				usage3(argv, "invalid live stream max window size: ", argv[i]);
			}
		}
		else if (strcmp(argv[i], "-P") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -P parameter");
			i++;
			pid_filename = argv[i];
		}
		else if (strcmp(argv[i], "--watch-for-kill-file") == 0)
		{
			// end program when it finds a file with name 'kill':
			kill_file = 1;
		}
		else if (strcmp(argv[i], "--strict-segment-duration") == 0)
		{
			// force segment creation on non-keyframe boundaries:
			strict_segment_duration = 1;
		}
		else if (strcmp(argv[i], "--avformat-option") == 0)
		{
			const AVOption *of;
			const char *opt;
			const char *arg;
			if ((argc - i) <= 1)
				usage(argv, "could not parse --avformat-option parameter");
			i++;
			opt = argv[i];
			if ((argc - i) <= 1)
				usage(argv, "could not parse --avformat-option parameter");
			i++;
			arg = argv[i];

			if ((of = av_opt_find(&fc, opt, NULL, 0,
					AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)))
				av_dict_set(&format_opts, opt, arg,
						(of->type == AV_OPT_TYPE_FLAGS) ? AV_DICT_APPEND : 0);
			else
				usage3(argv, "unknown --avformat-option parameter: ", opt);
		}
		else if (strcmp(argv[i], "--loglevel") == 0)
		{
			const char *arg;
			if ((argc - i) <= 1)
				usage(argv, "could not parse --loglevel parameter");
			i++;
			arg = argv[i];

			if (loglevel(arg))
				usage3(argv, "unknown --loglevel parameter: ", arg);
		}
		else if (strcmp(argv[i], "-k") == 0)
		{ //add by wanggm for save key frame information into json file.
			if ((argc - i) <= 1)
				usage(argv, "could not parse -k parameter");
			i++;
			if_save_keyframe = atoi(argv[i]);
		}
		else if( strcmp(argv[i], "-s") == 0)
		{//add by wanggm for set the start index of ts file.
			if ( (argc -i ) <= 1)
				usage(argv, "could not parse -s parmeter");
			i++;
			
			char *output_index_check = NULL;
			output_index  = strtol(argv[i], &output_index_check, 10);
			if ( output_index_check== argv[i] || output_index < 0
					|| output_index >= INT_MAX)
			{
				usage3(argv, "invalid start index of ts file: ", argv[i]);
			}
			
		}
		else if( strcmp(argv[i], "-m") == 0)
		{ // add by wanggm for exit by monitor the process of which pid is given. 
			if ((argc - i) <= 1)
				usage(argv, "could not parse -m parmeter");
			i++;

			if_monitor_related_process = 1;
			unsigned int tmpPid= atoi(argv[i]);
			if( tmpPid > 0)
			{  
				relatedProcessPid = (pid_t) tmpPid;
				fprintf(stdout, "%s I will exit when the process PID= %d exit.\n", getSystemTime(timeChar), relatedProcessPid);
			}
		}
	}

	
	if (!input)
	{
		usage(argv, "-i input file parameter must be specified");
	}

	if (!playlist_filename)
	{
		usage(argv, "-x m3u8 playlist file parameter must be specified");
	}

	if (target_segment_duration == 0.0)
	{
		usage(argv, "-d segment duration parameter must be specified");
	}

	if( output_index <= 0 )
	{
		output_index = 1;	
	}

	if( 1 == if_monitor_related_process)
	{
		pthread_t id;
		pthread_create(&id, NULL, (void *(*)(void *)) monitor_process, (void *)(long) relatedProcessPid);
	}


	// Create PID file
	if (pid_filename)
	{
		FILE* pid_file = fopen_utf8(pid_filename, "wb");
		if (pid_file)
		{
			fprintf(pid_file, "%d", getpid());
			fclose(pid_file);
		}
	}


	av_register_all();
	avformat_network_init();

	if (!strcmp(input, "-"))
	{
		input = "pipe:";
	}

	output_filename = (char*) malloc(
			sizeof(char) * (strlen(output_prefix) + 15));
	//add by wanggm
	if(  if_save_keyframe == 1)
	{ 
		keyframeinfo_filename = (char*) malloc(
			sizeof(char)* (strlen(output_prefix) + 15));
	}
	if (!output_filename || (1 == if_save_keyframe && !keyframeinfo_filename))
	{
		fprintf(stderr, "%s Could not allocate space for output filenames\n", getSystemTime( timeChar));
		goto error;
	}

	playlist = createPlaylist(max_tsfiles, target_segment_duration,
			http_prefix);
	if (!playlist)
	{
		fprintf(stderr,
				"%s Could not allocate space for m3u8 playlist structure\n", getSystemTime( timeChar));
		goto error;
	}

	ret = avformat_open_input(&ic, input, NULL, (format_opts) ? &format_opts : NULL);

	if (ret != 0)
	{
		fprintf(stderr,
				"%s Could not open input file, make sure it is an mpegts or mp4 file: %d\n", getSystemTime(timeChar), ret);
		goto error;
	}
	av_dict_free(&format_opts);

	if (avformat_find_stream_info(ic, NULL) < 0)
	{
		fprintf(stderr, "%s Could not read stream information\n", getSystemTime( timeChar));
		goto error;
	}

#if LIBAVFORMAT_VERSION_MAJOR > 52 || (LIBAVFORMAT_VERSION_MAJOR == 52 && \
                                       LIBAVFORMAT_VERSION_MINOR >= 45)
	ofmt = av_guess_format("mpegts", NULL, NULL);
#else
	ofmt = guess_format("mpegts", NULL, NULL);
#endif

	if (!ofmt)
	{
		fprintf(stderr, "%s Could not find MPEG-TS muxer\n", getSystemTime( timeChar));
		goto error;
	}

	oc = avformat_alloc_context();
	if (!oc)
	{
		fprintf(stderr, "%s Could not allocated output context\n", getSystemTime( timeChar));
		goto error;
	}
	oc->oformat = ofmt;

	video_index = -1;
	audio_index = -1;

	for (i = 0; i < ic->nb_streams && (video_index < 0 || audio_index < 0); i++)
	{
		switch (ic->streams[i]->codec->codec_type)
		{
		case AVMEDIA_TYPE_VIDEO:
			video_index = i;
			ic->streams[i]->discard = AVDISCARD_NONE;
			video_st = add_output_stream(oc, ic->streams[i]);
			break;
		case AVMEDIA_TYPE_AUDIO:
			audio_index = i;
			ic->streams[i]->discard = AVDISCARD_NONE;
			audio_st = add_output_stream(oc, ic->streams[i]);
			break;
		default:
			ic->streams[i]->discard = AVDISCARD_ALL;
			break;
		}
	}

	av_dump_format(oc, 0, output_prefix, 1);

	if (video_index >= 0)
	{
		codec = avcodec_find_decoder(video_st->codec->codec_id);
		if (!codec)
		{
			fprintf(stderr,
					"%s Could not find video decoder, key frames will not be honored\n", getSystemTime( timeChar));
		}

		if (avcodec_open2(video_st->codec, codec, NULL) < 0)
		{
			fprintf(stderr,
					"%s Could not open video decoder, key frames will not be honored\n", getSystemTime( timeChar));
		}
	}

	snprintf(output_filename, strlen(output_prefix) + 15, "%s-%u.ts",
			output_prefix, output_index);

	if( 1 == if_save_keyframe)
	{ 
		snprintf(keyframeinfo_filename, strlen(output_prefix) + 15,
			"%s-%u.idx", output_prefix, output_index);
		obj = json_object_new_object();
		info_arr_obj = create_json_header(obj);
	}

	if (avio_open(&oc->pb, output_filename, AVIO_FLAG_WRITE) < 0)
	{
		fprintf(stderr, "%s Could not open '%s'\n", getSystemTime( timeChar),output_filename);
		goto error;
	}

	if (avformat_write_header(oc, NULL))
	{
		fprintf(stderr, "%s Could not write mpegts header to first output file\n", getSystemTime( timeChar));
		goto error;
	}

	prev_segment_time = (double) (ic->start_time) / (double) (AV_TIME_BASE);

	streamLace = createStreamLace(ic->nb_streams);

	// add by houmr
	int continue_error_cnt = 0;
	//int pushcnt = 0;
	//int popcnt = 0;
	int tscnt = 0;
	int audiopktcnt = 0;
	int videopktcnt = 0;
	int kfcnt = 0;
	int errpktcnt = 0;
	/////////////////////////
	do
	{
		double segment_time = 0.0;
		AVPacket packet;
		double packetStartTime = 0.0;
		double packetDuration = 0.0;

		if (!decode_done)
		{
			//fprintf(stdout, "%s av_read_frame() begin.\n", getSystemTime( timeChar));
			decode_done = av_read_frame(ic, &packet);
			//fprintf(stdout, "%s av_read_frame() end. packet.size=%d stream_index=%d duration=%d\n", getSystemTime( timeChar), packet.size, packet.stream_index, packet.duration);
			//fprintf(stdout, "%s decode_done=%d\n", getSystemTime( timeChar),decode_done);
			if (!decode_done)
			{
				if (packet.stream_index != video_index
						&& packet.stream_index != audio_index)
				{
					if( ++errpktcnt >= 10)
					{
						decode_done = 1;	
					}
					fprintf(stderr, "%s packet is not video or audio, packet.stream_index=%d\n", getSystemTime( timeChar), packet.stream_index);
					av_free_packet(&packet);
					continue;
				}
				errpktcnt = 0;

				/*printf("orgin : index - %d\t pts = %s\t duration=%d\n", packet.stream_index,
				 av_ts2str(packet.pts), packet.duration);*/
				// add by houmr
				/*if (adjust_pts(&packet, video_index, audio_index) < 0)
				{
					av_free_packet(&packet);
					continue;
				}
				*/
				/////////////////////////////////////
				double timeStamp =
						(double) (packet.pts)
								* (double) (ic->streams[packet.stream_index]->time_base.num)
								/ (double) (ic->streams[packet.stream_index]->time_base.den);

				if (av_dup_packet(&packet) < 0)
				{
					fprintf(stderr, "%s Could not duplicate packet\n" ,getSystemTime( timeChar));
					av_free_packet(&packet);
					break;
				}
				
				/*
				for(int i = 0; i < streamLace->numStreams; ++i)
				{ 
						fprintf(stdout, "streamLace[%d].size=%d\t", i, streamLace->streams[i]->size);
				}
				fprintf(stdout, "\n");
				*/
				insertPacket(streamLace, &packet, timeStamp);
			}
		}

		if (countPackets(streamLace) < 50 && !decode_done)
		{
			/* allow the queue to fill up so that the packets can be sorted properly */
			continue;
		}

		if (!removePacket(streamLace, &packet))
		{
			fprintf(stdout, "%s get packet failed!!\n", getSystemTime( timeChar));
			if (decode_done)
			{
				/* the queue is empty, we are done */
				break;
			}

			assert(decode_done); /* unreachable: the queue only runs dry once decoding is done */
			continue;
		}

		//fprintf(stdout, "%s get 1 packet success. packet info: pts=%ld, dts=%ld\n", getSystemTime( timeChar), packet.pts, packet.dts);
		packetStartTime = (double) (packet.pts)
				* (double) (ic->streams[packet.stream_index]->time_base.num)
				/ (double) (ic->streams[packet.stream_index]->time_base.den);

		packetDuration = (double) (packet.duration)
				* (double) (ic->streams[packet.stream_index]->time_base.num)
				/ (double) (ic->streams[packet.stream_index]->time_base.den);
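		/* Note: the conversions above are the expanded form of av_q2d();
		 * packet.pts * av_q2d(ic->streams[packet.stream_index]->time_base)
		 * yields the same start time in seconds. */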

#if !defined(NDEBUG) && (defined(DEBUG) || defined(_DEBUG))
		if (av_log_get_level() >= AV_LOG_VERBOSE)
		fprintf(stderr,
				"%s stream %i, packet [%f, %f)\n",
				getSystemTime( timeChar),
				packet.stream_index,
				packetStartTime,
				packetStartTime + packetDuration);
#endif

		segment_duration = packetStartTime + packetDuration - prev_segment_time;

		// NOTE: segments are supposed to start on a keyframe.
		// If the keyframe interval and segment duration do not match
		// forcing the segment creation for "better seeking behavior"
		// will result in decoding artifacts after seeking or stream switching.
		if (packet.stream_index == video_index
				&& (packet.flags & AV_PKT_FLAG_KEY || strict_segment_duration))
		{
			//This is a video packet and (the packet is a keyframe or strict segment duration is requested)
			segment_time = packetStartTime;
		}
		else if (video_index < 0)
		{
			//This input doesn't contain a video stream
			segment_time = packetStartTime;
		}
		else
		{
			//This is a video packet that is not a keyframe, or an audio packet
			segment_time = prev_segment_time;
		}

		//fprintf(stdout, "%s extra_duration_needed=%f\n", getSystemTime( timeChar), extra_duration_needed);
		if (segment_time - prev_segment_time + segment_duration_error_tolerance
				> target_segment_duration + extra_duration_needed)
		{
			fprintf(stdout, "%s segment_time=%lf prev_segment_time=%lf  > target_segment_duration=%lf  extra_duration_needed=%lf\n", getSystemTime( timeChar), segment_time, prev_segment_time,  target_segment_duration, extra_duration_needed);
			fprintf(stdout, "%s File %s contains %d PES packet, of which %d are audio packet, %d are video packet within %d key frame.\n", getSystemTime( timeChar), output_filename, tscnt, audiopktcnt, videopktcnt, kfcnt);
			fflush(stdout);
			/*
			for(int i = 0; i < streamLace->numStreams; ++i)
			{
					fprintf(stdout, "%s streamLace[%d].size=%d\t", getSystemTime( timeChar), i, streamLace->streams[i]->size);
			}
			*/
			tscnt = audiopktcnt = videopktcnt = kfcnt = 0;
			avio_flush(oc->pb);
			avio_close(oc->pb);

			// Keep track of accumulated rounding error to account for it in later chunks.
		/*
			double segment_duration = segment_time - prev_segment_time;
			int rounded_segment_duration = (int) (segment_duration + 0.5);
			extra_duration_needed += (double) rounded_segment_duration
					- segment_duration;
		*/
			double seg_dur = segment_time - prev_segment_time;
			int rounded_segment_duration = (int) (seg_dur + 0.5);
			extra_duration_needed += (target_segment_duration - seg_dur - segment_duration_error_tolerance);
			//fprintf(stdout, "%s ________extra_duration_needed = %lf\n", getSystemTime( timeChar), extra_duration_needed);
			

			updatePlaylist(playlist, playlist_filename, output_filename,
					output_index, rounded_segment_duration);

			
			snprintf(output_filename, strlen(output_prefix) + 15, "%s-%u.ts",
					output_prefix, ++output_index);
			//add by wanggm
			//Save the all the keyframe information into json file
			if( 1 == if_save_keyframe && NULL != obj)
			{ 
				save_json_to_file(keyframeinfo_filename, obj);
				obj = info_arr_obj = NULL;

				snprintf(keyframeinfo_filename, strlen(output_prefix) + 15,
					"%s-%u.idx", output_prefix, output_index);
			}


			if (avio_open(&oc->pb, output_filename, AVIO_FLAG_WRITE) < 0)
			{
				fprintf(stderr, "%s Could not open '%s'\n", getSystemTime( timeChar), output_filename);
				break;
			}

			// close when we find the 'kill' file
			if (kill_file)
			{
				FILE* fp = fopen("kill", "rb");
				if (fp)
				{
					fprintf(stderr, "%s user abort: found kill file\n", getSystemTime( timeChar));
					fclose(fp);
					remove("kill");
					decode_done = 1;
					removeAllPackets(streamLace);
				}
			}
			prev_segment_time = segment_time;
		}

		//add by wanggm.
		++tscnt;
		if( video_index == packet.stream_index)
		{
			++videopktcnt;	
			if(packet.flags & AV_PKT_FLAG_KEY)
			{ 
				++kfcnt;	
				if( 1 == if_save_keyframe)
				{
					//If it is a keyframe, its information should be saved.
					//fprintf(stdout, "%s packet is keyframe, packet.pts=%ld\n", getSystemTime( timeChar), packet.pts);
					snprintf(keyframeinfo_filename, strlen(output_prefix) + 15,
					"%s-%u.idx", output_prefix, output_index);
					if (NULL == obj && NULL == info_arr_obj)
					{ 
						obj = json_object_new_object();
						info_arr_obj = create_json_header(obj);
					}
					avio_flush(oc->pb);		//flush the previous data into ts file.
					int64_t offset = avio_tell(oc->pb);	//Get the offset of this key frame in the file.
					save_keyframe_info(info_arr_obj, offset, packet.pts);
					//fprintf(stdout, "%s Keyframe.pos=%ld \tkeyframe.pts=%ld\n", getSystemTime( timeChar), offset, (long)packet.pts);
				}
			}
		}else if( audio_index == packet.stream_index)
		{
			++audiopktcnt;	
		}
		//fprintf(stdout, "%s packet is not keyframe.\n", getSystemTime( timeChar));

		ret = av_interleaved_write_frame(oc, &packet);

		if (ret < 0)
		{
			fprintf(stderr, "%s Warning: Could not write frame of stream\n", getSystemTime( timeChar));
			// add by houmr
			continue_error_cnt++;
			if (continue_error_cnt > 10)
			{
				av_free_packet(&packet);
				break;
			}
		}
		else if (ret > 0)
		{
			fprintf(stderr, "%s End of stream requested\n", getSystemTime( timeChar));
			av_free_packet(&packet);
			break;
		}
		else
		{
			// add by houmr error
			continue_error_cnt = 0;
			////////////////////////
		}
		av_free_packet(&packet);
	} while (!decode_done || countPackets(streamLace) > 0);

	av_write_trailer(oc);

	if (video_index >= 0)
	{
		avcodec_close(video_st->codec);
	}

	for (i = 0; i < oc->nb_streams; i++)
	{
		av_freep(&oc->streams[i]->codec);
		av_freep(&oc->streams[i]);
	}

	avio_close(oc->pb);
	av_free(oc);

	updatePlaylist(playlist, playlist_filename, output_filename, output_index,
			segment_duration);
	closePlaylist(playlist);
	releasePlaylist(&playlist);

	//add by wanggm
	if( 1 == if_save_keyframe && obj != NULL)
	{ 
		save_json_to_file(keyframeinfo_filename, obj);
	}

	if (pid_filename)
	{
		remove(pid_filename);
	}
	
	fflush(stdout);
	fflush(stderr);

	return 0;

error:
	if (pid_filename)
	{
		remove(pid_filename);
	}

	return 1;

}