Example #1
int dc_video_scaler_scale(VideoInputData *video_input_data, VideoScaledData *video_scaled_data)
{
	int ret, index, src_height;
	VideoDataNode *video_data_node;
	VideoScaledDataNode *video_scaled_data_node;
	AVFrame *src_vframe;

	ret = dc_consumer_lock(&video_scaled_data->consumer, &video_input_data->circular_buf);
	if (ret < 0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Video scaler got an end of buffer!\n"));
		return -2;
	}

	if (video_input_data->circular_buf.size > 1) 
		dc_consumer_unlock_previous(&video_scaled_data->consumer, &video_input_data->circular_buf);

	dc_producer_lock(&video_scaled_data->producer, &video_scaled_data->circular_buf);
	dc_producer_unlock_previous(&video_scaled_data->producer, &video_scaled_data->circular_buf);

	video_data_node = (VideoDataNode*)dc_consumer_consume(&video_scaled_data->consumer, &video_input_data->circular_buf);
	video_scaled_data_node = (VideoScaledDataNode*)dc_producer_produce(&video_scaled_data->producer, &video_scaled_data->circular_buf);
	index = video_data_node->source_number;

	video_scaled_data->frame_duration = video_input_data->frame_duration;

	//crop if necessary
	if (video_input_data->vprop[index].crop_x || video_input_data->vprop[index].crop_y) {
#if 0
		av_frame_copy_props(video_scaled_data_node->cropped_frame, video_data_node->vframe);
		video_scaled_data_node->cropped_frame->width  = video_input_data->vprop[index].width  - video_input_data->vprop[index].crop_x;
		video_scaled_data_node->cropped_frame->height = video_input_data->vprop[index].height - video_input_data->vprop[index].crop_y;
#endif
		if (av_picture_crop((AVPicture*)video_scaled_data_node->cropped_frame, (AVPicture*)video_data_node->vframe, PIX_FMT_YUV420P, video_input_data->vprop[index].crop_y, video_input_data->vprop[index].crop_x) < 0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Video scaler: error while cropping picture.\n"));
			return -1;
		}
		src_vframe = video_scaled_data_node->cropped_frame;
		src_height = video_input_data->vprop[index].height - video_input_data->vprop[index].crop_y;
	} else {
		assert(!video_scaled_data_node->cropped_frame);
		src_vframe = video_data_node->vframe;
		src_height = video_input_data->vprop[index].height;
	}

	//rescale the cropped frame
	sws_scale(video_scaled_data->vsprop[index].sws_ctx,
			(const uint8_t * const *)src_vframe->data, src_vframe->linesize, 0, src_height,
			video_scaled_data_node->v_frame->data, video_scaled_data_node->v_frame->linesize);
	
	video_scaled_data_node->v_frame->pts = video_data_node->vframe->pts;

	if (video_data_node->nb_raw_frames_ref) {
		if (video_data_node->nb_raw_frames_ref==1) {
#ifndef GPAC_USE_LIBAV
			av_frame_unref(video_data_node->vframe);
#endif
			av_free_packet(&video_data_node->raw_packet);
		}
		video_data_node->nb_raw_frames_ref--;
	}

	dc_consumer_advance(&video_scaled_data->consumer);
	dc_producer_advance(&video_scaled_data->producer, &video_scaled_data->circular_buf);

	if (video_input_data->circular_buf.size == 1) 
		dc_consumer_unlock_previous(&video_scaled_data->consumer, &video_input_data->circular_buf);
	return 0;
}
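
A note that applies to every example on this page: av_free_packet() is the old packet-release call and was deprecated in favor of av_packet_unref() (FFmpeg 3.0+). Below is a minimal sketch of the same read/consume/release loop against the newer call; the function and parameter names are illustrative, not taken from the example above.

#include <libavformat/avformat.h>

/* Sketch: the canonical demux loop with the modern release call. */
static int consume_stream(AVFormatContext *fmt, int stream_index)
{
	AVPacket pkt;
	while (av_read_frame(fmt, &pkt) >= 0) {
		if (pkt.stream_index == stream_index) {
			/* ... hand pkt.data / pkt.size to a decoder ... */
		}
		av_packet_unref(&pkt); /* replaces av_free_packet(&pkt) */
	}
	return 0;
}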
Example #2
OSStatus AudioConverter::fillComplex(AudioConverterComplexInputDataProc dataProc, void* opaque, UInt32* ioOutputDataPacketSize, AudioBufferList *outOutputData, AudioStreamPacketDescription* outPacketDescription)
{
	AVFrame* srcaudio;

	srcaudio = av_frame_alloc();
	av_frame_unref(srcaudio);
	
	try
	{
		for (uint32_t i = 0; i < outOutputData->mNumberBuffers; i++)
		{
			UInt32 origSize = outOutputData->mBuffers[i].mDataByteSize;
			UInt32& newSize = outOutputData->mBuffers[i].mDataByteSize;
			
			newSize = 0;
			
			while (newSize < origSize)
			{
				if (m_avpktOutUsed < m_avpktOut.size)
				{
					LOG << "case 1 (used " << m_avpktOutUsed << " from " << m_avpktOut.size << ")\n";
					// Feed output from previous conversion
					while (m_avpktOutUsed < m_avpktOut.size && newSize < origSize)
					{
						// Output data
						int tocopy = std::min<int>(m_avpktOut.size - m_avpktOutUsed, origSize - newSize);
						memcpy(((char*) outOutputData->mBuffers[i].mData) + newSize, m_avpktOut.data + m_avpktOutUsed, tocopy);
						newSize += tocopy;
						m_avpktOutUsed += tocopy;
					}
					
					if (m_avpktOutUsed >= m_avpktOut.size)
					{
						m_avpktOutUsed = 0;
						av_free_packet(&m_avpktOut);
					}
				}
				else if (!m_resampler || avresample_available(m_resampler) == 0)
				{
					LOG << "case 2\n";
					feedDecoder(dataProc, opaque, srcaudio);
					if (avresample_available(m_resampler) == 0)
						goto end;
				}
				else
				{
					LOG << "case 3\n";
					feedEncoder();
				}
			}
		}
end:
		
		av_frame_free(&srcaudio);
	}
	catch (const std::exception& e)
	{
		ERROR() << "Exception: " << e.what();
		av_frame_free(&srcaudio);
	}
	catch (OSStatus err)
	{
		ERROR() << "OSStatus error: " << err;
		av_frame_free(&srcaudio);
		return err;
	}
	
	return noErr;
}
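
The conversion loop above interleaves three states: drain leftover encoded bytes, feed the decoder, feed the encoder. Here is a compact C sketch of just the drain step; all names are hypothetical, it only mirrors the memcpy bookkeeping of case 1.

#include <string.h>

/* Copy as much of a partially consumed buffer as fits in dst;
 * advances *used and returns the number of bytes copied. */
static size_t drain_leftover(unsigned char *dst, size_t dst_cap,
                             const unsigned char *src, size_t src_size,
                             size_t *used)
{
	size_t n = src_size - *used;
	if (n > dst_cap)
		n = dst_cap;
	memcpy(dst, src + *used, n);
	*used += n;
	return n;
}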
Example #3
// position the pointer in the file; position is given in frames at the base frame rate
AVFrame *VideoFFmpeg::grabFrame(long position)
{
	AVPacket packet;
	int frameFinished;
	int posFound = 1;
	bool frameLoaded = false;
	int64_t targetTs = 0;
	CacheFrame *frame;
	int64_t dts = 0;

	if (m_cacheStarted)
	{
		// when cache is active, we must not read the file directly
		do {
			pthread_mutex_lock(&m_cacheMutex);
			frame = (CacheFrame *)m_frameCacheBase.first;
			pthread_mutex_unlock(&m_cacheMutex);
			// no need to remove the frame from the queue: the cache thread does not touch the head, only the tail
			if (frame == NULL)
			{
				// no frame in cache; for a file source this is abnormal
				if (m_isFile)
				{
					// fall back to non-threaded reading
					stopCache();
					break;
				}
				return NULL;
			}
			if (frame->framePosition == -1) 
			{
				// this frame marks the end of the file (only used for files)
				// leave it in the cache to make sure we don't miss it
				m_eof = true;
				return NULL;
			}
			// for streaming, always return the next frame, 
			// that's what grabFrame does in non cache mode anyway.
			if (m_isStreaming || frame->framePosition == position)
			{
				return frame->frame;
			}
			// for cam, skip old frames to keep image realtime.
			// There should be no risk of clock drift since it all happens on the same CPU
			if (frame->framePosition > position) 
			{
				// this can happen after rewind if the seek didn't find the first frame
				// the frame in the buffer is ahead of time, just leave it there
				return NULL;
			}
			// this frame is not useful, release it
			pthread_mutex_lock(&m_cacheMutex);
			BLI_remlink(&m_frameCacheBase, frame);
			BLI_addtail(&m_frameCacheFree, frame);
			pthread_mutex_unlock(&m_cacheMutex);
		} while (true);
	}
	double timeBase = av_q2d(m_formatCtx->streams[m_videoStream]->time_base);
	int64_t startTs = m_formatCtx->streams[m_videoStream]->start_time;
	if (startTs == AV_NOPTS_VALUE)
		startTs = 0;

	// come here when there is no cache or cache has been stopped
	// locate the frame, by seeking if necessary (seeking is only possible for files)
	if (m_isFile)
	{
		// first check if the position that we are looking for is in the preseek range
		// if so, just read the frame until we get there
		if (position > m_curPosition + 1 
			&& m_preseek 
			&& position - (m_curPosition + 1) < m_preseek) 
		{
			while (av_read_frame(m_formatCtx, &packet)>=0)
			{
				if (packet.stream_index == m_videoStream) 
				{
					avcodec_decode_video2(
						m_codecCtx, 
						m_frame, &frameFinished, 
						&packet);
					if (frameFinished)
					{
						m_curPosition = (long)((packet.dts-startTs) * (m_baseFrameRate*timeBase) + 0.5);
					}
				}
				av_free_packet(&packet);
				if (position == m_curPosition+1)
					break;
			}
		}
		// if the position is not in preseek, do a direct jump
		if (position != m_curPosition + 1) 
		{ 
			int64_t pos = (int64_t)((position - m_preseek) / (m_baseFrameRate*timeBase));

			if (pos < 0)
				pos = 0;

			pos += startTs;

			if (position <= m_curPosition || !m_eof)
			{
#if 0
				// Tried to make this work but couldn't: seeking on byte is ignored by the
				// format plugin and it will generally continue to read from last timestamp.
				// Too bad because frame seek is not always able to get the first frame
				// of the file.
				if (position <= m_preseek)
				{
					// we can safely go to the beginning of the file
					if (av_seek_frame(m_formatCtx, m_videoStream, 0, AVSEEK_FLAG_BYTE) >= 0)
					{
						// binary seek does not reset the timestamp, must do it now
						av_update_cur_dts(m_formatCtx, m_formatCtx->streams[m_videoStream], startTs);
						m_curPosition = 0;
					}
				}
				else
#endif
				{
					if (av_seek_frame(m_formatCtx, m_videoStream, pos, AVSEEK_FLAG_BACKWARD) >= 0)
					{
						// current position is now lost, guess a value. 
						// It's not important because it will be set at the end of this function
						m_curPosition = position - m_preseek - 1;
					}
				}
			}
			// this is the timestamp of the frame we're looking for
			targetTs = (int64_t)(position / (m_baseFrameRate * timeBase)) + startTs;

			posFound = 0;
			avcodec_flush_buffers(m_codecCtx);
		}
	} else if (m_isThreaded)
	{
		// cache is not started but threading is possible
		// better not to read the stream directly (it may take some time), better to start caching
		if (startCache())
			return NULL;
		// Abnormal!!! could not start cache, fall back on direct read
		m_isThreaded = false;
	}

	// find the correct frame; in the case of streaming with no cache this just
	// returns the next frame. This is not quite correct and may need more work
	while (av_read_frame(m_formatCtx, &packet) >= 0)
	{
		if (packet.stream_index == m_videoStream) 
		{
			AVFrame *input = m_frame;
			short counter = 0;

			/* While the data is not read properly (png, tiff, etc. formats may need several passes) */
			while ((input->data[0] == 0 && input->data[1] == 0 && input->data[2] == 0 && input->data[3] == 0) && counter < 10) {
				avcodec_decode_video2(m_codecCtx, m_frame, &frameFinished, &packet);
				counter++;
			}

			// remember dts to compute exact frame number
			dts = packet.dts;
			if (frameFinished && !posFound) 
			{
				if (dts >= targetTs)
				{
					posFound = 1;
				}
			} 

			if (frameFinished && posFound == 1) 
			{
				AVFrame * input = m_frame;

				/* This means the data wasn't read properly;
				 * this check prevents a crash */
				if (   input->data[0]==0 && input->data[1]==0 
					&& input->data[2]==0 && input->data[3]==0)
				{
					av_free_packet(&packet);
					break;
				}

				if (m_deinterlace) 
				{
					if (avpicture_deinterlace(
						(AVPicture*) m_frameDeinterlaced,
						(const AVPicture*) m_frame,
						m_codecCtx->pix_fmt,
						m_codecCtx->width,
						m_codecCtx->height) >= 0)
					{
						input = m_frameDeinterlaced;
					}
				}
				// convert to RGB24
				sws_scale(m_imgConvertCtx,
					input->data,
					input->linesize,
					0,
					m_codecCtx->height,
					m_frameRGB->data,
					m_frameRGB->linesize);
				av_free_packet(&packet);
				frameLoaded = true;
				break;
			}
		}
		av_free_packet(&packet);
	}
	m_eof = m_isFile && !frameLoaded;
	if (frameLoaded)
	{
		m_curPosition = (long)((dts-startTs) * (m_baseFrameRate*timeBase) + 0.5);
		if (m_isThreaded)
		{
			// normal case for file: first locate, then start cache
			if (!startCache())
			{
				// Abnormal!! could not start cache, return to non-cache mode
				m_isThreaded = false;
			}
		}
		return m_frameRGB;
	}
	return NULL;
}
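
The frame-number arithmetic repeated throughout grabFrame() can be read as one pair of mappings. A sketch under the same assumptions the function makes (a constant base frame rate and the stream time base expressed in seconds):

#include <stdint.h>

/* frame = (ts - start_ts) * fps * time_base, rounded to nearest */
static long ts_to_frame(int64_t ts, int64_t start_ts, double fps, double time_base)
{
	return (long)((ts - start_ts) * (fps * time_base) + 0.5);
}

/* inverse mapping, as used above to compute targetTs */
static int64_t frame_to_ts(long frame, int64_t start_ts, double fps, double time_base)
{
	return (int64_t)(frame / (fps * time_base)) + start_ts;
}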
Example #4
/*
 * Class:     com_example_testffmpeg_CFFmpegJni
 * Method:    IPlay
 * Signature: ()I
 */
jint Java_com_example_testffmpeg_CFFmpegJni_IPlay(JNIEnv *env, jobject thiz)
{
	/// Return value
	int nRet = -1;
	/// Close any input that is already open
	if(NULL != m_pFormatCtx)
	{
		avformat_close_input(&m_pFormatCtx);
		/// Free the context
		av_free(m_pFormatCtx);
		m_pFormatCtx = NULL;
	}

	if(NULL == m_pFormatCtx)
	{
		/// Open the input file
		if(0 != (nRet = avformat_open_input(&m_pFormatCtx, m_szURLPath, 0, NULL/*&m_pDictOptions*/)))
		{
			char szTemp[256];
			memset(szTemp, 0x00, sizeof(szTemp));
			av_strerror(nRet, szTemp, 255);
			/// Log the error message
			LOGD("%s, Error Code = %d, %s, Error = %s", m_szURLPath, nRet,
					" The Error URL Or Path--------------->", szTemp);
			return nRet;
		}
	}

	// m_pFormatCtx->max_analyze_duration = 1000;
	// m_pFormatCtx->probesize = 2048;
	if(0 > avformat_find_stream_info(m_pFormatCtx, NULL))
	{
		LOGD("Couldn't find stream information.");
		return -1;
	}

	int nVideoIndex = -1;
	for(int i = 0; i < m_pFormatCtx->nb_streams; i++)
	{
		if(AVMEDIA_TYPE_VIDEO == m_pFormatCtx->streams[i]->codec->codec_type)
		{
			nVideoIndex = i;
			break;
		}
	}
	if(-1 == nVideoIndex)
	{
		LOGD("Didn't find a video stream.");
		return -1;
	}

	AVCodecContext* pCodecCtx = m_pFormatCtx->streams[nVideoIndex]->codec;
	AVCodec* pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if(NULL == pCodec)
	{
		LOGD("Codec not found.");
		return -1;
	}

	if(pCodec->capabilities & CODEC_CAP_TRUNCATED)
	{
		pCodecCtx->flags |= CODEC_FLAG_TRUNCATED;
	}

	if(0 > avcodec_open2(pCodecCtx, pCodec, NULL))
	{
		LOGD("Could not open codec.");
		return -1;
	}

	/// Frame objects
	AVFrame	*pFrame = NULL, *pFrameYUV = NULL;
	pFrame = avcodec_alloc_frame();
	pFrameYUV = avcodec_alloc_frame();
	/// Allocate the conversion buffer (RGB565 output)
	int nConvertSize = avpicture_get_size(PIX_FMT_RGB565, iWidth, iHeight);
	uint8_t* pConvertbuffer = new uint8_t[nConvertSize];
	avpicture_fill((AVPicture *)pFrameYUV, pConvertbuffer, PIX_FMT_RGB565, iWidth, iHeight);

	/// Decoding state
	int nCodecRet, nHasGetPicture;
	/// Packet used while decoding
	int nPackgeSize  = pCodecCtx->width * pCodecCtx->height;
	AVPacket* pAVPacket = (AVPacket *)malloc(sizeof(AVPacket));
	av_new_packet(pAVPacket, nPackgeSize);

	/// Dump the input's stream information
	av_dump_format(m_pFormatCtx, 0, m_szURLPath, 0);
	/// Set the playing flag
	m_bIsPlaying = true;

	/// Pixel-format conversion context
	struct SwsContext* img_convert_ctx = NULL;
	/// Convert from the decoder's pixel format to RGB565
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
		iWidth, iHeight, PIX_FMT_RGB565, SWS_BICUBIC, NULL, NULL, NULL);
	/// Read frames
	while(0 <= av_read_frame(m_pFormatCtx, pAVPacket) && true == m_bIsPlaying)
	{
		/// Only handle packets from the video stream
		if(nVideoIndex == pAVPacket->stream_index)
		{
			/// Decode the packet
			nCodecRet = avcodec_decode_video2(pCodecCtx, pFrame, &nHasGetPicture, pAVPacket);
			if(0 < nHasGetPicture)
			{
				/// Convert the decoded frame to RGB565
				sws_scale(img_convert_ctx, (const uint8_t* const* )pFrame->data, pFrame->linesize,
						0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
				/// Hand the converted pixels to the display callback
				e_DisplayCallBack(env, pConvertbuffer, nConvertSize);
			}
		}
		/// Release the packet data filled in by av_read_frame
		av_free_packet(pAVPacket);
	}
	/// Free the conversion context
	sws_freeContext(img_convert_ctx);
	/// Free the decode packet allocated above
	av_free_packet(pAVPacket);
	free(pAVPacket); pAVPacket = NULL;

	/// Free the conversion buffer
	delete[] pConvertbuffer;
	pConvertbuffer = NULL;
	/// Free the frame objects
	av_free(pFrame); pFrame = NULL;
	av_free(pFrameYUV); pFrameYUV = NULL;

	/// Close the codec
	avcodec_close(pCodecCtx); pCodecCtx = NULL;
	avformat_close_input(&m_pFormatCtx);
	/// Free the format context
	av_free(m_pFormatCtx);
	m_pFormatCtx = NULL;
	return nRet;
}
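
For reference, the nConvertSize computed above comes straight from avpicture_get_size(); RGB565 packs one pixel into two bytes, so the buffer is width * height * 2. A one-line sketch:

#include <libavcodec/avcodec.h>

/* e.g. rgb565_buffer_size(640, 480) == 614400 == 640 * 480 * 2 */
static int rgb565_buffer_size(int w, int h)
{
	return avpicture_get_size(PIX_FMT_RGB565, w, h);
}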
Example #5
int JPG_to_Pixel(const unsigned char *jpgBuff, int jpgSize, int pixelFmt, int pixelWidth, int pixelHeight, unsigned char *pixelBuff, int *pixelSize) {	
	AVFormatContext *formatContext;
	AVInputFormat *inputFormat;
	AVIOContext *ioContext;
	AVStream *stream;
	AVCodecContext *codecContext;
	AVCodec *codec;
	AVFrame *frame, *frame2;
	AVPacket packet;
	struct SwsContext *swsContext;
	int streamIndex;
	int gotFrame;
	int codecRet;
	int result = -1;

	av_register_all();
	formatContext = avformat_alloc_context();
	ioContext = avio_alloc_context((unsigned char *)jpgBuff, jpgSize, 0, NULL, NULL, NULL, NULL);
	inputFormat = av_find_input_format("mjpeg");
	av_probe_input_buffer2(ioContext, &inputFormat, NULL, NULL, 0, 0);
	formatContext->pb = ioContext;
	formatContext->iformat = inputFormat;
	avformat_open_input(&formatContext, NULL, NULL, NULL);
	av_find_stream_info(formatContext);

	av_init_packet(&packet);
	for (streamIndex = 0; streamIndex < formatContext->nb_streams; streamIndex++) {
		av_read_frame(formatContext, &packet);
		if (formatContext->streams[streamIndex]->codec->codec_type == AVMEDIA_TYPE_VIDEO && 0 < packet.size) {
			stream = formatContext->streams[streamIndex];
			codecContext = stream->codec;
			codec = avcodec_find_decoder(codecContext->codec_id);
			avcodec_open2(codecContext, codec, NULL);
			frame = avcodec_alloc_frame();
			codecRet = avcodec_decode_video2(codecContext, frame, &gotFrame, &packet);
			if (0 <= codecRet && 1 == gotFrame) {
				frame2 = av_frame_clone(frame);
				frame2->format = PF(pixelFmt);
				swsContext = sws_getContext(codecContext->width, codecContext->height, codecContext->pix_fmt, pixelWidth, pixelHeight, (AVPixelFormat)frame2->format, SWS_BICUBIC, NULL, NULL, NULL);   
				sws_scale(swsContext, (const uint8_t *const *)frame->data, frame->linesize, 0, codecContext->height, frame2->data, frame2->linesize);  
				sws_freeContext(swsContext);

				*pixelSize = avpicture_layout((const AVPicture *)frame2, (enum AVPixelFormat)frame2->format, pixelWidth, pixelHeight, pixelBuff, *pixelSize);
				result = *pixelSize;

				av_frame_free(&frame2);
			}	
			if (1 == codecContext->refcounted_frames) av_frame_unref(frame); 
			avcodec_free_frame(&frame);
			avcodec_close(codecContext);
		}
		av_free_packet(&packet);
		if (-1 != result)
			break;
	}

	avformat_close_input(&formatContext);
	av_free(ioContext->buffer);
	av_free(ioContext);
	avformat_free_context(formatContext);
	return result;
}
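
A hypothetical caller for JPG_to_Pixel() above, assuming the PF() macro maps the pixelFmt argument onto the matching AVPixelFormat; the buffer is sized with the same avpicture_get_size()/avpicture_layout() convention the function uses internally.

#include <libavcodec/avcodec.h>
#include <stdlib.h>

static unsigned char *decode_jpg(const unsigned char *jpg, int jpgSize,
                                 int fmt, int w, int h, int *outSize)
{
	unsigned char *buf;
	/* assumes fmt is directly usable as an AVPixelFormat (see PF() above) */
	*outSize = avpicture_get_size((enum AVPixelFormat)fmt, w, h);
	buf = (unsigned char *)malloc(*outSize);
	if (buf && JPG_to_Pixel(jpg, jpgSize, fmt, w, h, buf, outSize) < 0) {
		free(buf); /* decode failed; JPG_to_Pixel returned -1 */
		buf = NULL;
	}
	return buf;
}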
Example #6
int main(int argc, char* argv[])
{
	AVFormatContext	*pFormatCtx;
	int				i, videoindex;
	AVCodecContext	*pCodecCtx;
	AVCodec			*pCodec;
	AVFrame	*pFrame,*pFrameYUV;
	uint8_t *out_buffer;
	AVPacket *packet;
	int y_size;
	int ret, got_picture;
	struct SwsContext *img_convert_ctx;

	char filepath[]="bigbuckbunny_480x272.h265";
	//SDL---------------------------
	int screen_w=0,screen_h=0;
	SDL_Window *screen; 
	SDL_Renderer* sdlRenderer;
	SDL_Texture* sdlTexture;
	SDL_Rect sdlRect;

	FILE *fp_yuv;

	av_register_all();
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();

	if(avformat_open_input(&pFormatCtx,filepath,NULL,NULL)!=0){
		printf("Couldn't open input stream.\n");
		return -1;
	}
	if(avformat_find_stream_info(pFormatCtx,NULL)<0){
		printf("Couldn't find stream information.\n");
		return -1;
	}
	videoindex=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++) 
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			videoindex=i;
			break;
		}
	if(videoindex==-1){
		printf("Didn't find a video stream.\n");
		return -1;
	}

	pCodecCtx=pFormatCtx->streams[videoindex]->codec;
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL){
		printf("Codec not found.\n");
		return -1;
	}
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0){
		printf("Could not open codec.\n");
		return -1;
	}
	
	pFrame=av_frame_alloc();
	pFrameYUV=av_frame_alloc();
	out_buffer=(uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
	avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
	packet=(AVPacket *)av_malloc(sizeof(AVPacket));
	//Output Info-----------------------------
	printf("--------------- File Information ----------------\n");
	av_dump_format(pFormatCtx,0,filepath,0);
	printf("-------------------------------------------------\n");
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 
		pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); 

#if OUTPUT_YUV420P 
    fp_yuv=fopen("output.yuv","wb+");  
#endif  
	
	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {  
		printf( "Could not initialize SDL - %s\n", SDL_GetError()); 
		return -1;
	} 

	screen_w = pCodecCtx->width;
	screen_h = pCodecCtx->height;
	//SDL 2.0 Support for multiple windows
	screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
		screen_w, screen_h,
		SDL_WINDOW_OPENGL);

	if(!screen) {  
		printf("SDL: could not create window - exiting:%s\n",SDL_GetError());  
		return -1;
	}

	sdlRenderer = SDL_CreateRenderer(screen, -1, 0);  
	//IYUV: Y + U + V  (3 planes)
	//YV12: Y + V + U  (3 planes)
	sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,pCodecCtx->width,pCodecCtx->height);  

	sdlRect.x=0;
	sdlRect.y=0;
	sdlRect.w=screen_w;
	sdlRect.h=screen_h;

	//SDL End----------------------
	while(av_read_frame(pFormatCtx, packet)>=0){
		if(packet->stream_index==videoindex){
			ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
			if(ret < 0){
				printf("Decode Error.\n");
				return -1;
			}
			if(got_picture){
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, 
					pFrameYUV->data, pFrameYUV->linesize);
				
#if OUTPUT_YUV420P
				y_size=pCodecCtx->width*pCodecCtx->height;  
				fwrite(pFrameYUV->data[0],1,y_size,fp_yuv);    //Y 
				fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv);  //U
				fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv);  //V
#endif
				//SDL---------------------------
#if 0
				SDL_UpdateTexture( sdlTexture, NULL, pFrameYUV->data[0], pFrameYUV->linesize[0] );  
#else
				SDL_UpdateYUVTexture(sdlTexture, &sdlRect,
				pFrameYUV->data[0], pFrameYUV->linesize[0],
				pFrameYUV->data[1], pFrameYUV->linesize[1],
				pFrameYUV->data[2], pFrameYUV->linesize[2]);
#endif	
				
				SDL_RenderClear( sdlRenderer );  
				SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect);  
				SDL_RenderPresent( sdlRenderer );  
				//SDL End-----------------------
				//Delay 40ms
				SDL_Delay(40);
			}
		}
		av_free_packet(packet);
	}
	//flush decoder
	//FIX: flush frames remaining in the codec by sending an empty packet
	packet->data = NULL;
	packet->size = 0;
	while (1) {
		ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
		if (ret < 0)
			break;
		if (!got_picture)
			break;
		sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, 
			pFrameYUV->data, pFrameYUV->linesize);
#if OUTPUT_YUV420P
		int y_size=pCodecCtx->width*pCodecCtx->height;  
		fwrite(pFrameYUV->data[0],1,y_size,fp_yuv);    //Y 
		fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv);  //U
		fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv);  //V
#endif
		//SDL---------------------------
		SDL_UpdateTexture( sdlTexture, &sdlRect, pFrameYUV->data[0], pFrameYUV->linesize[0] );  
		SDL_RenderClear( sdlRenderer );  
		SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect);  
		SDL_RenderPresent( sdlRenderer );  
		//SDL End-----------------------
		//Delay 40ms
		SDL_Delay(40);
	}

	sws_freeContext(img_convert_ctx);

#if OUTPUT_YUV420P 
    fclose(fp_yuv);
#endif 

	SDL_Quit();

	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);

	return 0;
}
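
On the fwrite sizes in the YUV dump above: YUV420P subsamples chroma by two both horizontally and vertically, so U and V are each a quarter of the luma plane. A sketch of the per-plane byte counts:

/* Plane sizes for a w x h YUV420P frame, matching the fwrite calls above */
static void yuv420p_plane_sizes(int w, int h, int *y, int *u, int *v)
{
	*y = w * h;      /* full-resolution luma */
	*u = w * h / 4;  /* chroma subsampled 2x in both directions */
	*v = w * h / 4;
}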
Example #7
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVCodecContext *codec = s->streams[0]->codec;
    BRSTMDemuxContext *b = s->priv_data;
    uint32_t samples, size, skip = 0;
    int ret, i;

    if (avio_feof(s->pb))
        return AVERROR_EOF;
    b->current_block++;
    if (b->current_block == b->block_count) {
        size    = b->last_block_used_bytes;
        samples = b->last_block_samples;
        skip    = b->last_block_size - b->last_block_used_bytes;
    } else if (b->current_block < b->block_count) {
        size    = b->block_size;
        samples = b->samples_per_block;
    } else {
        return AVERROR_EOF;
    }

    if (codec->codec_id == AV_CODEC_ID_ADPCM_THP ||
        codec->codec_id == AV_CODEC_ID_ADPCM_THP_LE) {
        uint8_t *dst;

        if (av_new_packet(pkt, 8 + (32 + 4 + size) * codec->channels) < 0)
            return AVERROR(ENOMEM);
        dst = pkt->data;
        if (codec->codec_id == AV_CODEC_ID_ADPCM_THP_LE) {
            bytestream_put_le32(&dst, size * codec->channels);
            bytestream_put_le32(&dst, samples);
        } else {
            bytestream_put_be32(&dst, size * codec->channels);
            bytestream_put_be32(&dst, samples);
        }
        bytestream_put_buffer(&dst, b->table, 32 * codec->channels);
        bytestream_put_buffer(&dst, b->adpc + 4 * codec->channels *
                                    (b->current_block - 1), 4 * codec->channels);

        for (i = 0; i < codec->channels; i++) {
            ret = avio_read(s->pb, dst, size);
            dst += size;
            avio_skip(s->pb, skip);
            if (ret != size) {
                av_free_packet(pkt);
                break;
            }
        }
        pkt->duration = samples;
    } else {
        size *= codec->channels;
        ret = av_get_packet(s->pb, pkt, size);
    }

    pkt->stream_index = 0;

    if (ret != size)
        ret = AVERROR(EIO);

    return ret;
}
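
The ADPCM THP packet assembled above has a fixed layout: an 8-byte size/sample-count header, a 32-byte coefficient table per channel, 4 bytes of per-block history per channel, then the payload. A helper mirroring the av_new_packet() size computation:

#include <stdint.h>

/* matches av_new_packet(pkt, 8 + (32 + 4 + size) * channels) above */
static int thp_packet_size(uint32_t block_size, int channels)
{
    return 8 + (32 + 4 + block_size) * channels;
}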
Example #8
File: nuv.c Project: Tjoppen/FFmpeg
static int nuv_packet(AVFormatContext *s, AVPacket *pkt) {
    NUVContext *ctx = s->priv_data;
    AVIOContext *pb = s->pb;
    uint8_t hdr[HDRSIZE];
    nuv_frametype frametype;
    int ret, size;
    while (!url_feof(pb)) {
        int copyhdrsize = ctx->rtjpg_video ? HDRSIZE : 0;
        uint64_t pos = avio_tell(pb);
        ret = avio_read(pb, hdr, HDRSIZE);
        if (ret < HDRSIZE)
            return ret < 0 ? ret : AVERROR(EIO);
        frametype = hdr[0];
        size = PKTSIZE(AV_RL32(&hdr[8]));
        switch (frametype) {
            case NUV_EXTRADATA:
                if (!ctx->rtjpg_video) {
                    avio_skip(pb, size);
                    break;
                }
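            /* fall through: RTjpeg extradata is packetized like a video frame */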
            case NUV_VIDEO:
                if (ctx->v_id < 0) {
                    av_log(s, AV_LOG_ERROR, "Video packet in file without video stream!\n");
                    avio_skip(pb, size);
                    break;
                }
                ret = av_new_packet(pkt, copyhdrsize + size);
                if (ret < 0)
                    return ret;

                pkt->pos = pos;
                pkt->flags |= hdr[2] == 0 ? AV_PKT_FLAG_KEY : 0;
                pkt->pts = AV_RL32(&hdr[4]);
                pkt->stream_index = ctx->v_id;
                memcpy(pkt->data, hdr, copyhdrsize);
                ret = avio_read(pb, pkt->data + copyhdrsize, size);
                if (ret < 0) {
                    av_free_packet(pkt);
                    return ret;
                }
                if (ret < size)
                    av_shrink_packet(pkt, copyhdrsize + ret);
                return 0;
            case NUV_AUDIO:
                if (ctx->a_id < 0) {
                    av_log(s, AV_LOG_ERROR, "Audio packet in file without audio stream!\n");
                    avio_skip(pb, size);
                    break;
                }
                ret = av_get_packet(pb, pkt, size);
                pkt->flags |= AV_PKT_FLAG_KEY;
                pkt->pos = pos;
                pkt->pts = AV_RL32(&hdr[4]);
                pkt->stream_index = ctx->a_id;
                if (ret < 0) return ret;
                return 0;
            case NUV_SEEKP:
                // contains no data, size value is invalid
                break;
            default:
                avio_skip(pb, size);
                break;
        }
    }
    return AVERROR(EIO);
}
Example #9
static int rpl_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    RPLContext *rpl = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream* stream;
    AVIndexEntry* index_entry;
    int ret; /* av_get_packet() can return a negative error code */

    if (rpl->chunk_part == s->nb_streams) {
        rpl->chunk_number++;
        rpl->chunk_part = 0;
    }

    stream = s->streams[rpl->chunk_part];

    if (rpl->chunk_number >= stream->nb_index_entries)
        return -1;

    index_entry = &stream->index_entries[rpl->chunk_number];

    if (rpl->frame_in_part == 0)
        if (avio_seek(pb, index_entry->pos, SEEK_SET) < 0)
            return AVERROR(EIO);

    if (stream->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
        stream->codec->codec_tag == 124) {
        // We have to split Escape 124 frames because there are
        // multiple frames per chunk in Escape 124 samples.
        uint32_t frame_size;

        avio_skip(pb, 4); /* flags */
        frame_size = avio_rl32(pb);
        if (avio_seek(pb, -8, SEEK_CUR) < 0)
            return AVERROR(EIO);

        ret = av_get_packet(pb, pkt, frame_size);
        if (ret != frame_size) {
            av_free_packet(pkt);
            return AVERROR(EIO);
        }
        pkt->duration = 1;
        pkt->pts = index_entry->timestamp + rpl->frame_in_part;
        pkt->stream_index = rpl->chunk_part;

        rpl->frame_in_part++;
        if (rpl->frame_in_part == rpl->frames_per_chunk) {
            rpl->frame_in_part = 0;
            rpl->chunk_part++;
        }
    } else {
        ret = av_get_packet(pb, pkt, index_entry->size);
        if (ret != index_entry->size) {
            av_free_packet(pkt);
            return AVERROR(EIO);
        }

        if (stream->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            // frames_per_chunk should always be one here; the header
            // parsing will warn if it isn't.
            pkt->duration = rpl->frames_per_chunk;
        } else {
            // All the audio codecs supported in this container
            // (at least so far) are constant-bitrate.
            pkt->duration = ret * 8;
        }
        pkt->pts = index_entry->timestamp;
        pkt->stream_index = rpl->chunk_part;
        rpl->chunk_part++;
    }

    // None of the Escape formats have keyframes, and the ADPCM
    // format used doesn't have keyframes.
    if (rpl->chunk_number == 0 && rpl->frame_in_part == 0)
        pkt->flags |= AV_PKT_FLAG_KEY;

    return ret;
}
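
The Escape 124 branch above peeks at the per-frame size field and then rewinds so those bytes remain part of the packet that av_get_packet() reads. The peek-and-rewind idiom in isolation (sketch):

#include <libavformat/avio.h>

/* Read the little-endian size field behind a 4-byte flags word,
 * then restore the stream position. */
static int64_t peek_frame_size(AVIOContext *pb, uint32_t *frame_size)
{
    avio_skip(pb, 4);                   /* flags */
    *frame_size = avio_rl32(pb);
    return avio_seek(pb, -8, SEEK_CUR); /* rewind over both fields */
}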
Example #10
static int
artwork_rescale(AVFormatContext *src_ctx, int s, int out_w, int out_h, int format, struct evbuffer *evbuf)
{
  uint8_t *buf;
  uint8_t *outbuf;

  AVCodecContext *src;

  AVFormatContext *dst_ctx;
  AVCodecContext *dst;
  AVOutputFormat *dst_fmt;
  AVStream *dst_st;

  AVCodec *img_decoder;
  AVCodec *img_encoder;

  int64_t pix_fmt_mask;
  const enum PixelFormat *pix_fmts;

  AVFrame *i_frame;
  AVFrame *o_frame;

  struct SwsContext *swsctx;

  AVPacket pkt;
  int have_frame;

  int outbuf_len;

  int ret;

  src = src_ctx->streams[s]->codec;

  img_decoder = avcodec_find_decoder(src->codec_id);
  if (!img_decoder)
    {
      DPRINTF(E_LOG, L_ART, "No suitable decoder found for artwork %s\n", src_ctx->filename);

      return -1;
    }

  ret = avcodec_open(src, img_decoder);
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not open codec for decoding: %s\n", strerror(AVUNERROR(ret)));

      return -1;
    }

  /* Set up output */
#if LIBAVFORMAT_VERSION_MAJOR >= 53 || (LIBAVFORMAT_VERSION_MAJOR == 52 && LIBAVFORMAT_VERSION_MINOR >= 45)
  /* FFmpeg 0.6 */
  dst_fmt = av_guess_format("image2", NULL, NULL);
#else
  dst_fmt = guess_format("image2", NULL, NULL);
#endif
  if (!dst_fmt)
    {
      DPRINTF(E_LOG, L_ART, "ffmpeg image2 muxer not available\n");

      ret = -1;
      goto out_close_src;
    }

  dst_fmt->video_codec = CODEC_ID_NONE;

  /* Try to keep same codec if possible */
  if ((src->codec_id == CODEC_ID_PNG) && (format & ART_CAN_PNG))
    dst_fmt->video_codec = CODEC_ID_PNG;
  else if ((src->codec_id == CODEC_ID_MJPEG) && (format & ART_CAN_JPEG))
    dst_fmt->video_codec = CODEC_ID_MJPEG;

  /* If not possible, select new codec */
  if (dst_fmt->video_codec == CODEC_ID_NONE)
    {
      if (format & ART_CAN_PNG)
	dst_fmt->video_codec = CODEC_ID_PNG;
      else if (format & ART_CAN_JPEG)
	dst_fmt->video_codec = CODEC_ID_MJPEG;
    }

  img_encoder = avcodec_find_encoder(dst_fmt->video_codec);
  if (!img_encoder)
    {
      DPRINTF(E_LOG, L_ART, "No suitable encoder found for codec ID %d\n", dst_fmt->video_codec);

      ret = -1;
      goto out_close_src;
    }

  dst_ctx = avformat_alloc_context();
  if (!dst_ctx)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for format context\n");

      ret = -1;
      goto out_close_src;
    }

  dst_ctx->oformat = dst_fmt;

#if LIBAVFORMAT_VERSION_MAJOR >= 53
  dst_fmt->flags &= ~AVFMT_NOFILE;
#else
  ret = snprintf(dst_ctx->filename, sizeof(dst_ctx->filename), "evbuffer:%p", evbuf);
  if ((ret < 0) || (ret >= sizeof(dst_ctx->filename)))
    {
      DPRINTF(E_LOG, L_ART, "Output artwork URL too long\n");

      ret = -1;
      goto out_free_dst_ctx;
    }
#endif

  dst_st = av_new_stream(dst_ctx, 0);
  if (!dst_st)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for new output stream\n");

      ret = -1;
      goto out_free_dst_ctx;
    }

  dst = dst_st->codec;

#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR >= 64)
  avcodec_get_context_defaults2(dst, AVMEDIA_TYPE_VIDEO);
#else
  avcodec_get_context_defaults2(dst, CODEC_TYPE_VIDEO);
#endif

  if (dst_fmt->flags & AVFMT_GLOBALHEADER)
    dst->flags |= CODEC_FLAG_GLOBAL_HEADER;

  dst->codec_id = dst_fmt->video_codec;
#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR >= 64)
  dst->codec_type = AVMEDIA_TYPE_VIDEO;
#else
  dst->codec_type = CODEC_TYPE_VIDEO;
#endif

  pix_fmt_mask = 0;
  pix_fmts = img_encoder->pix_fmts;
  while (pix_fmts && (*pix_fmts != -1))
    {
      pix_fmt_mask |= ((int64_t)1 << *pix_fmts); /* 64-bit shift; plain 1 << x overflows for formats >= 31 */
      pix_fmts++;
    }

  dst->pix_fmt = avcodec_find_best_pix_fmt(pix_fmt_mask, src->pix_fmt, 1, NULL);

  if (dst->pix_fmt < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not determine best pixel format\n");

      ret = -1;
      goto out_free_dst;
    }

  DPRINTF(E_DBG, L_ART, "Selected pixel format: %d\n", dst->pix_fmt);

  dst->time_base.num = 1;
  dst->time_base.den = 25;

  dst->width = out_w;
  dst->height = out_h;

#if LIBAVFORMAT_VERSION_MAJOR <= 52 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR <= 1)
  ret = av_set_parameters(dst_ctx, NULL);
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Invalid parameters for artwork output: %s\n", strerror(AVUNERROR(ret)));

      ret = -1;
      goto out_free_dst;
    }
#endif

  /* Open encoder */
  ret = avcodec_open(dst, img_encoder);
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not open codec for encoding: %s\n", strerror(AVUNERROR(ret)));

      ret = -1;
      goto out_free_dst;
    }

  i_frame = avcodec_alloc_frame();
  o_frame = avcodec_alloc_frame();

  if (!i_frame || !o_frame)
    {
      DPRINTF(E_LOG, L_ART, "Could not allocate input/output frame\n");

      ret = -1;
      goto out_free_frames;
    }

  ret = avpicture_get_size(dst->pix_fmt, src->width, src->height);

  DPRINTF(E_DBG, L_ART, "Artwork buffer size: %d\n", ret);

  buf = (uint8_t *)av_malloc(ret);
  if (!buf)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for artwork buffer\n");

      ret = -1;
      goto out_free_frames;
    }

  avpicture_fill((AVPicture *)o_frame, buf, dst->pix_fmt, src->width, src->height);

  swsctx = sws_getContext(src->width, src->height, src->pix_fmt,
			  dst->width, dst->height, dst->pix_fmt,
			  SWS_BICUBIC, NULL, NULL, NULL);
  if (!swsctx)
    {
      DPRINTF(E_LOG, L_ART, "Could not get SWS context\n");

      ret = -1;
      goto out_free_buf;
    }

  /* Get frame */
  have_frame = 0;
  while (av_read_frame(src_ctx, &pkt) == 0)
    {
      if (pkt.stream_index != s)
	{
	  av_free_packet(&pkt);
	  continue;
	}

#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR >= 32)
      /* FFmpeg 0.6 */
      avcodec_decode_video2(src, i_frame, &have_frame, &pkt);
#else
      avcodec_decode_video(src, i_frame, &have_frame, pkt.data, pkt.size);
#endif

      break;
    }

  if (!have_frame)
    {
      DPRINTF(E_LOG, L_ART, "Could not decode artwork\n");

      av_free_packet(&pkt);
      sws_freeContext(swsctx);

      ret = -1;
      goto out_free_buf;
    }

  /* Scale */
#if LIBSWSCALE_VERSION_MAJOR >= 1 || (LIBSWSCALE_VERSION_MAJOR == 0 && LIBSWSCALE_VERSION_MINOR >= 9)
  /* FFmpeg 0.6, libav 0.6+ */
  sws_scale(swsctx, (const uint8_t * const *)i_frame->data, i_frame->linesize, 0, src->height, o_frame->data, o_frame->linesize);
#else
  sws_scale(swsctx, i_frame->data, i_frame->linesize, 0, src->height, o_frame->data, o_frame->linesize);
#endif

  sws_freeContext(swsctx);
  av_free_packet(&pkt);

  /* Open output file */
#if LIBAVFORMAT_VERSION_MAJOR >= 53
  dst_ctx->pb = avio_evbuffer_open(evbuf);
#else
  ret = url_fopen(&dst_ctx->pb, dst_ctx->filename, URL_WRONLY);
#endif
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not open artwork destination buffer\n");

      ret = -1;
      goto out_free_buf;
    }

  /* Encode frame */
  outbuf_len = dst->width * dst->height * 3;
  if (outbuf_len < FF_MIN_BUFFER_SIZE)
    outbuf_len = FF_MIN_BUFFER_SIZE;

  outbuf = (uint8_t *)av_malloc(outbuf_len);
  if (!outbuf)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for encoded artwork buffer\n");

#if LIBAVFORMAT_VERSION_MAJOR >= 53
      avio_evbuffer_close(dst_ctx->pb);
#else
      url_fclose(dst_ctx->pb);
#endif

      ret = -1;
      goto out_free_buf;
    }

  ret = avcodec_encode_video(dst, outbuf, outbuf_len, o_frame);
  if (ret <= 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not encode artwork\n");

      ret = -1;
      goto out_fclose_dst;
    }

  av_init_packet(&pkt);
  pkt.stream_index = 0;
  pkt.data = outbuf;
  pkt.size = ret;

#if LIBAVFORMAT_VERSION_MAJOR > 53 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 3)
  ret = avformat_write_header(dst_ctx, NULL);
#else
  ret = av_write_header(dst_ctx);
#endif
  if (ret != 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not write artwork header: %s\n", strerror(AVUNERROR(ret)));

      ret = -1;
      goto out_fclose_dst;
    }

  ret = av_interleaved_write_frame(dst_ctx, &pkt);

  if (ret != 0)
    {
      DPRINTF(E_LOG, L_ART, "Error writing artwork\n");

      ret = -1;
      goto out_fclose_dst;
    }

  ret = av_write_trailer(dst_ctx);
  if (ret != 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not write artwork trailer: %s\n", strerror(AVUNERROR(ret)));

      ret = -1;
      goto out_fclose_dst;
    }

  switch (dst_fmt->video_codec)
    {
      case CODEC_ID_PNG:
	ret = ART_FMT_PNG;
	break;

      case CODEC_ID_MJPEG:
	ret = ART_FMT_JPEG;
	break;

      default:
	DPRINTF(E_LOG, L_ART, "Unhandled rescale output format\n");
	ret = -1;
	break;
    }

 out_fclose_dst:
#if LIBAVFORMAT_VERSION_MAJOR >= 53
  avio_evbuffer_close(dst_ctx->pb);
#else
  url_fclose(dst_ctx->pb);
#endif
  av_free(outbuf);

 out_free_buf:
  av_free(buf);

 out_free_frames:
  if (i_frame)
    av_free(i_frame);
  if (o_frame)
    av_free(o_frame);
  avcodec_close(dst);

 out_free_dst:
  av_free(dst_st);
  av_free(dst);

 out_free_dst_ctx:
  av_free(dst_ctx);

 out_close_src:
  avcodec_close(src);

  return ret;
}
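
The hand-built pix_fmt_mask above feeds the long-gone avcodec_find_best_pix_fmt(); later FFmpeg releases accept the encoder's format list directly. A sketch of the modern spelling (assumes a libavcodec that provides avcodec_find_best_pix_fmt_of_list):

#include <libavcodec/avcodec.h>

static enum AVPixelFormat pick_encoder_pix_fmt(const AVCodec *enc,
                                               enum AVPixelFormat src)
{
  int loss = 0;
  /* enc->pix_fmts is a -1 terminated list of formats the encoder accepts */
  return avcodec_find_best_pix_fmt_of_list(enc->pix_fmts, src, 0, &loss);
}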
Example #11
static int hls_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    HLSContext *c = s->priv_data;
    int ret, i, minplaylist = -1;

    if (c->first_packet) {
        recheck_discard_flags(s, 1);
        c->first_packet = 0;
    }

start:
    c->end_of_segment = 0;
    for (i = 0; i < c->n_playlists; i++) {
        struct playlist *pls = c->playlists[i];
        /* Make sure we've got one buffered packet from each open playlist
         * stream */
        if (pls->needed && !pls->pkt.data) {
            while (1) {
                int64_t ts_diff;
                AVStream *st;
                ret = av_read_frame(pls->ctx, &pls->pkt);
                if (ret < 0) {
                    if (!url_feof(&pls->pb) && ret != AVERROR_EOF)
                        return ret;
                    reset_packet(&pls->pkt);
                    break;
                } else {
                    if (c->first_timestamp == AV_NOPTS_VALUE &&
                        pls->pkt.dts       != AV_NOPTS_VALUE)
                        c->first_timestamp = av_rescale_q(pls->pkt.dts,
                            pls->ctx->streams[pls->pkt.stream_index]->time_base,
                            AV_TIME_BASE_Q);
                }

                if (c->seek_timestamp == AV_NOPTS_VALUE)
                    break;

                if (pls->pkt.dts == AV_NOPTS_VALUE) {
                    c->seek_timestamp = AV_NOPTS_VALUE;
                    break;
                }

                st = pls->ctx->streams[pls->pkt.stream_index];
                ts_diff = av_rescale_rnd(pls->pkt.dts, AV_TIME_BASE,
                                         st->time_base.den, AV_ROUND_DOWN) -
                          c->seek_timestamp;
                if (ts_diff >= 0 && (c->seek_flags  & AVSEEK_FLAG_ANY ||
                                     pls->pkt.flags & AV_PKT_FLAG_KEY)) {
                    c->seek_timestamp = AV_NOPTS_VALUE;
                    break;
                }
                av_free_packet(&pls->pkt);
                reset_packet(&pls->pkt);
            }
        }
        /* Check if this stream still is on an earlier segment number, or
         * has the packet with the lowest dts */
        if (pls->pkt.data) {
            struct playlist *minpls = minplaylist < 0 ?
                                     NULL : c->playlists[minplaylist];
            if (minplaylist < 0 || pls->cur_seq_no < minpls->cur_seq_no) {
                minplaylist = i;
            } else if (pls->cur_seq_no == minpls->cur_seq_no) {
                int64_t dts     =    pls->pkt.dts;
                int64_t mindts  = minpls->pkt.dts;
                AVStream *st    =    pls->ctx->streams[pls->pkt.stream_index];
                AVStream *minst = minpls->ctx->streams[minpls->pkt.stream_index];

                if (dts == AV_NOPTS_VALUE) {
                    minplaylist = i;
                } else if (mindts != AV_NOPTS_VALUE) {
                    if (st->start_time    != AV_NOPTS_VALUE)
                        dts    -= st->start_time;
                    if (minst->start_time != AV_NOPTS_VALUE)
                        mindts -= minst->start_time;

                    if (av_compare_ts(dts, st->time_base,
                                      mindts, minst->time_base) < 0)
                        minplaylist = i;
                }
            }
        }
    }
    if (c->end_of_segment) {
        if (recheck_discard_flags(s, 0))
            goto start;
    }
    /* If we got a packet, return it */
    if (minplaylist >= 0) {
        *pkt = c->playlists[minplaylist]->pkt;
        pkt->stream_index += c->playlists[minplaylist]->stream_offset;
        reset_packet(&c->playlists[minplaylist]->pkt);
        return 0;
    }
    return AVERROR_EOF;
}
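
The lowest-dts selection above leans on av_compare_ts(), which compares two timestamps carried in different time bases without first rescaling either one. A tiny illustration with made-up values:

#include <libavutil/mathematics.h>

static int same_instant_demo(void)
{
    /* 9000/90000 s and 4800/48000 s are the same instant, so this returns 0 */
    return av_compare_ts(9000, (AVRational){1, 90000},
                         4800, (AVRational){1, 48000});
}

Example #12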
static void FFMpegPlayerAndroid_play(JNIEnv *env, jobject obj) {
	AVPacket				packet;
	int						result = -1;
	int 					samples_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
	int16_t*				samples = NULL;  // NULL so av_free() below is safe if audio is absent
	AVFrame*				pFrameRGB = NULL;
	
	// Allocate an AVFrame structure
	if(ffmpeg_video.initzialized) {
		pFrameRGB = FFMpegPlayerAndroid_createFrame(env);
		if (pFrameRGB == NULL) {
			jniThrowException(env,
							  "java/io/IOException",
							  "Couldn't crate frame buffer");
			return;
		}
	}
	
	if(ffmpeg_audio.initzialized) {
		samples = (int16_t *) av_malloc(samples_size);
		if(AudioDriver_register() != ANDROID_AUDIOTRACK_RESULT_SUCCESS) {
			jniThrowException(env,
							  "java/io/IOException",
							  "Couldn't register audio track");
			return;
		}
	}
	
	status = STATE_PLAYING;
	while (status != STATE_STOPING) {

		if(status == STATE_PAUSE) {
			usleep(50);
			continue;
		}

		if((result = av_read_frame(ffmpeg_fields.pFormatCtx, &packet)) < 0) {
			status = STATE_STOPING;
			continue;
		}

		// Is this a packet from the video stream?
		if (packet.stream_index == ffmpeg_video.stream &&
				ffmpeg_video.initzialized) {
			if(FFMpegPlayerAndroid_processVideo(env, obj, &packet, pFrameRGB) < 0) {
				__android_log_print(ANDROID_LOG_ERROR, TAG, "Frame wasn't finished by video decoder");
			}
		} else if (packet.stream_index == ffmpeg_audio.stream &&
				ffmpeg_audio.initzialized && ffmpeg_audio.decode) {
			if(FFMpegPlayerAndroid_processAudio(env, &packet, samples, samples_size) < 0 ) {
				return; // exception occurred, so return to Java
			}
		}

		// Free the packet that was allocated by av_read_frame
		av_free_packet(&packet);
	}
	
	if(ffmpeg_video.initzialized) {
		if(VideoDriver_unregister() != ANDROID_SURFACE_RESULT_SUCCESS) {
			jniThrowException(env,
							  "java/io/IOException",
							  "Couldn't unregister vide surface");
		}
	}
	if(ffmpeg_audio.initzialized) {
		if(AudioDriver_unregister() != ANDROID_AUDIOTRACK_RESULT_SUCCESS) {
			jniThrowException(env,
							  "java/io/IOException",
							  "Couldn't unregister audio track");
		}
	}
	
	av_free( samples );
	
	// Free the RGB image
	av_free(pFrameRGB);

	status = STATE_STOPED;
	__android_log_print(ANDROID_LOG_INFO, TAG, "end of playing");
}
Example #13
/* Called from the main */
int main(int argc, char **argv)
{
	AVFormatContext *pFormatCtx = NULL;
	int err;
	int i;
	int videoStream, audioStream;
	AVCodecContext *pCodecCtx;
	AVCodec         *pCodec;
	AVFrame         *pFrame; 

	AVPacket        packet;
	int             frameFinished;
	float           aspect_ratio;

	AVCodecContext  *aCodecCtx;
	AVCodec         *aCodec;

	SDL_Overlay     *bmp;
	SDL_Surface     *screen;
	SDL_Rect        rect;
	SDL_Event       event;
	SDL_AudioSpec   wanted_spec, spec;

	//ffplay_info("Start.\n");
	
	if(argc < 2) {
		printf("Please provide a movie file\n");
		return -1;
	}

	// Register all formats and codecs
	av_register_all();

	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
		fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
		exit(1);
	}

	pFormatCtx = avformat_alloc_context();
	
	// Open video file
	err = avformat_open_input(&pFormatCtx, argv[1],NULL,NULL);
	if(err<0)
	{
		printf("error ret =  %d\n",err);
	}

	// Retrieve stream information
	err = avformat_find_stream_info(pFormatCtx, NULL);
	if(err<0)
	{
		printf("error ret =  %d\n",err);
	}

	// Dump information about file onto standard error
	av_dump_format(pFormatCtx, 0, argv[1], 0);

	// Find the first video and audio streams
	videoStream = -1;
	audioStream = -1;
	for(i=0; i<pFormatCtx->nb_streams; i++) {
		if(AVMEDIA_TYPE_VIDEO==pFormatCtx->streams[i]->codec->codec_type &&
			videoStream < 0) {
			videoStream=i;
		}
		if(AVMEDIA_TYPE_AUDIO==pFormatCtx->streams[i]->codec->codec_type &&
			audioStream < 0) {
			audioStream=i;
		}
	}
	
	if(videoStream == -1)
	{
		return -1; // Didn't find a video stream
	}

	if(audioStream == -1)
	{
		return -1; // Didn't find an audio stream
	}


	aCodecCtx=pFormatCtx->streams[audioStream]->codec;

	// Set audio settings from codec info
	wanted_spec.freq = aCodecCtx->sample_rate;
	wanted_spec.format = AUDIO_S16SYS;
	wanted_spec.channels = aCodecCtx->channels;
	wanted_spec.silence = 0;
	wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
	wanted_spec.callback = audio_callback;
	wanted_spec.userdata = aCodecCtx;

	if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
		fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
		return -1;
	}

	aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
	if(!aCodec) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1;
	}

	avcodec_open2(aCodecCtx, aCodec,NULL);

	// audio_st = pFormatCtx->streams[index]
	packet_queue_init(&audioq);
	SDL_PauseAudio(0);


	// Get a pointer to the codec context for the video stream
	pCodecCtx=pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1; // Codec not found
	}
	// Open codec
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
		return -1; // Could not open codec

	// Allocate video frame
	pFrame=avcodec_alloc_frame();


	// Make a screen to put our video
#ifndef __DARWIN__
	screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
	screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif
	if(!screen) {
		fprintf(stderr, "SDL: could not set video mode - exiting\n");
		exit(1);
	}

	// Allocate a place to put our YUV image on that screen
	bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
			 pCodecCtx->height,
			 SDL_YV12_OVERLAY,
			 screen);

	// Read frames and save first five frames to disk
	i=0;
	while(av_read_frame(pFormatCtx, &packet)>=0)
	{
		// Is this a packet from the video stream?
		if(packet.stream_index==videoStream) 
		{
			// Decode video frame
			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, 
									&packet);

			// Did we get a video frame?
			if(frameFinished) 
			{
				SDL_LockYUVOverlay(bmp);

				AVPicture pict;
				pict.data[0] = bmp->pixels[0];
				pict.data[1] = bmp->pixels[2];
				pict.data[2] = bmp->pixels[1];

				pict.linesize[0] = bmp->pitches[0];
				pict.linesize[1] = bmp->pitches[2];
				pict.linesize[2] = bmp->pitches[1];

				// Convert the image into the YUV420P layout the SDL overlay
				// uses. libswscale/swscale.h must be included at the top of
				// the file; create the context once and reuse it.
				static struct SwsContext *img_convert_ctx;
				if (!img_convert_ctx)
					img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
													 pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
													 PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize,
				 0, pCodecCtx->height, pict.data, pict.linesize);

				SDL_UnlockYUVOverlay(bmp);

				rect.x = 0;
				rect.y = 0;
				rect.w = pCodecCtx->width;
				rect.h = pCodecCtx->height;
				SDL_DisplayYUVOverlay(bmp, &rect);
			}
			// free the packet whether or not a complete frame came out
			av_free_packet(&packet);
		} 
		else if(packet.stream_index==audioStream) 
		{
			packet_queue_put(&audioq, &packet);
		} 
		else 
		{
			av_free_packet(&packet);
		}

		SDL_PollEvent(&event);
		switch(event.type) 
		{
			case SDL_QUIT:
				quit = 1;
				SDL_Quit();
				exit(0);
				break;
			default:
				break;
		}		
	}

	// Free the YUV frame
	av_free(pFrame);

	// Close the codec
	avcodec_close(pCodecCtx);

	// Close the video file
	av_close_input_file(pFormatCtx);
	//ffplay_info("end.\n");

	return 0;
}
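
The plane swap above is deliberate: FFmpeg decodes planar YUV in Y,U,V order while SDL's YV12 overlay stores Y,V,U, so indices 1 and 2 trade places. The mapping, factored out as a sketch:

#include <SDL/SDL.h>
#include <libavcodec/avcodec.h>

/* Map FFmpeg's Y,U,V plane order onto SDL's YV12 (Y,V,U) overlay layout */
static void map_yv12_planes(AVPicture *pict, const SDL_Overlay *bmp)
{
	pict->data[0] = bmp->pixels[0];   /* Y */
	pict->data[1] = bmp->pixels[2];   /* U -> SDL's third plane */
	pict->data[2] = bmp->pixels[1];   /* V -> SDL's second plane */
	pict->linesize[0] = bmp->pitches[0];
	pict->linesize[1] = bmp->pitches[2];
	pict->linesize[2] = bmp->pitches[1];
}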
Example #14
int main(int argc, char **argv)
{
    double prev_segment_time = 0;
    unsigned int output_index = 1;
    AVInputFormat *ifmt;
    AVOutputFormat *ofmt;
    AVFormatContext *ic = NULL;
    AVFormatContext *oc;
    AVStream *video_st = NULL;
    AVStream *audio_st = NULL;
    char *output_filename;
    int video_index = -1;
    int audio_index = -1;
    unsigned int first_segment = 1;
    unsigned int last_segment = 0;
    int decode_done;
    char *dot;
    int ret;
    unsigned int i;
    int remove_file;
    struct sigaction act;
    int64_t timestamp;
    int opt;
    int longindex;
    char *endptr;
    struct options_t options;

/*
Usage: recorder [options]

Options:

  -T SOCKETTIMEOUT, --sockettimeout=SOCKETTIMEOUT
                        Socket timeout (default: 30)
  -B SOCKETBUFFER, --socketbuffer=SOCKETBUFFER
                        Socket buffer in bytes(default: 1500)
  -v VERBOSE, --verbose=VERBOSE
                        Verbosity level (default: info) (ops: ['emerg',
                        'alert', 'crit', 'err', 'warning', 'notice', 'info',
                        'debug'])
  -L LOGFILE, --logfile=LOGFILE
                        Log file (default: ./recorder.log)

/root/npvr/recorder -v info -L /var/log/npvr/recorder.udctvlive00202.log -w  /mfs/npvr/storage/stream/pvr/ -C 10 -K udctvlive00202 -P http -U http://10.14.10.102:8082/stream/udctvlive00202

/mnt/mfs/npvr/storage/stream/pvr/ts/rcclive001/1359968328_10_19.ts

*/
    static const char *optstring = "i:C:K:w:s:ovh?";

    static const struct option longopts[] = {
        { "input",         required_argument, NULL, 'i' },
        { "duration",      required_argument, NULL, 'C' },
        { "key",           required_argument, NULL, 'K' },
        { "workdir",       required_argument, NULL, 'w' },
        { "stop",          no_argument,       NULL, 's' },
        { "help",          no_argument,       NULL, 'h' },
        { 0, 0, 0, 0 }
    };
    

    memset(&options, 0 ,sizeof(options));

    /* Set some defaults */
    options.segment_duration = 10;
    options.stop = 0;
    do {
        opt = getopt_long(argc, argv, optstring, longopts, &longindex );
        switch (opt) {
            case 'i':
                options.input_file = optarg;
                if (!strcmp(options.input_file, "-")) {
                    options.input_file = "pipe:";
                }
                break;

            case 'C':
                options.segment_duration = strtol(optarg, &endptr, 10);
                if (optarg == endptr || options.segment_duration < 0 || options.segment_duration == -LONG_MAX) {
                    fprintf(stderr, "Segment duration time (%s) invalid\n", optarg);
                    exit(1);
                }
                break;

            case 'K':
                options.key = optarg;
                break;

            case 'w':
                options.workdir = optarg;
                break;

            case 's':
                options.stop = 1;
                break;

            case 'h':
                display_usage();
                break;
        }
    } while (opt != -1);


    /* Check required args where set*/
    if (options.input_file == NULL) {
        fprintf(stderr, "Please specify an input file.\n");
        exit(1);
    }

    if (options.key == NULL) {
        fprintf(stderr, "Please specify an output prefix.\n");
        exit(1);
    }

    if (options.workdir == NULL) {
        fprintf(stderr, "Please working directory.\n");
        exit(1);
    }

    avformat_network_init();
    av_register_all();


    /* +75 matches the snprintf() size limits used below */
    output_filename = malloc(sizeof(char) * (strlen(options.workdir) + strlen(options.key) + 75));
    if (!output_filename) {
        fprintf(stderr, "Could not allocate space for output filenames\n");
        exit(1);
    }

    ifmt = av_find_input_format("mpegts");
    if (!ifmt) {
        fprintf(stderr, "Could not find MPEG-TS demuxer\n");
        exit(1);
    }

    open_context(&ic, options.input_file, options.key, ifmt, &ofmt, &oc, &video_st, &audio_st, &video_index, &audio_index);

    timestamp = av_gettime() / 1000000;
    snprintf(output_filename, strlen(options.workdir) + strlen(options.key) + 75, "%s/ts/%s/%d_%d_%u.ts", options.workdir, options.key, (int)timestamp, (int)options.segment_duration, output_index++);
    if (avio_open(&oc->pb, output_filename, AVIO_FLAG_WRITE) < 0) {
        fprintf(stderr, "Could not open '%s'\n", output_filename);
        exit(1);
    }

    if (avformat_write_header(oc, NULL)) {
        fprintf(stderr, "Could not write mpegts header to first output file\n");
        exit(1);
    }

    /* Setup signals */
    memset(&act, 0, sizeof(act));
    act.sa_handler = &handler;

    sigaction(SIGINT, &act, NULL);
    sigaction(SIGTERM, &act, NULL);

    do {
        double segment_time = prev_segment_time;
        AVPacket packet;

        if (terminate) {
          break;
        }

        decode_done = av_read_frame(ic, &packet);
        if (decode_done < 0) {
            break;
        }

        if (av_dup_packet(&packet) < 0) {
            fprintf(stderr, "Could not duplicate packet");
            av_free_packet(&packet);
            break;
        }

        // Use video stream as time base and split at keyframes. Otherwise use audio stream
        if (packet.stream_index == video_index && (packet.flags & AV_PKT_FLAG_KEY)) {
            segment_time = packet.pts * av_q2d(video_st->time_base);
        }
        else if (video_index < 0) {
            segment_time = packet.pts * av_q2d(audio_st->time_base);
        }
        else {
          segment_time = prev_segment_time;
        }


        if (segment_time - prev_segment_time >= options.segment_duration) {
            av_write_trailer(oc);   // close ts file and free memory
            avio_flush(oc->pb);
            avio_close(oc->pb);

            timestamp = av_gettime() / 1000000;
            snprintf(output_filename, strlen(options.workdir) + strlen(options.key) + 75, "%s/ts/%s/%d_%d_%u.ts", options.workdir, options.key, (int)timestamp, (int)options.segment_duration, output_index++);
            if (avio_open(&oc->pb, output_filename, AVIO_FLAG_WRITE) < 0) {
                fprintf(stderr, "Could not open '%s'\n", output_filename);
                break;
            } 

            // Write a new header at the start of each file
            if (avformat_write_header(oc, NULL)) {
              fprintf(stderr, "Could not write mpegts header to output file\n");
              exit(1);
            }

            prev_segment_time = segment_time;
        }

        ret = av_interleaved_write_frame(oc, &packet);
        if (ret < 0) {
            fprintf(stderr, "Warning: Could not write frame of stream\n");
        }
        else if (ret > 0) {
            fprintf(stderr, "End of stream requested\n");
            av_free_packet(&packet);
            break;
        }

        av_free_packet(&packet);
    } while (1);


    close_context(&oc, &video_st);

    return 0;
}
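The `handler` and `terminate` referenced above are defined outside this excerpt. A minimal sketch of what they are assumed to look like: the handler only raises a flag, which is all that is async-signal-safe here, and the segment loop polls that flag at the top of each iteration.

#include <signal.h>

/* assumed globals: the segmenter loop above polls `terminate` each iteration */
static volatile sig_atomic_t terminate = 0;

static void handler(int signum)
{
    (void)signum;   /* SIGINT and SIGTERM both request the same clean shutdown */
    terminate = 1;  /* only set a flag; the main loop does the actual cleanup */
}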
Example #15
File: 4xm.c Project: 1c0n/xbmc
static int fourxm_read_packet(AVFormatContext *s,
                              AVPacket *pkt)
{
    FourxmDemuxContext *fourxm = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned int fourcc_tag;
    unsigned int size;
    int ret = 0;
    unsigned int track_number;
    int packet_read = 0;
    unsigned char header[8];
    int audio_frame_count;

    while (!packet_read) {

        if ((ret = avio_read(s->pb, header, 8)) < 0)
            return ret;
        fourcc_tag = AV_RL32(&header[0]);
        size = AV_RL32(&header[4]);
        if (url_feof(pb))
            return AVERROR(EIO);
        switch (fourcc_tag) {

        case LIST_TAG:
            /* this is a good time to bump the video pts */
            fourxm->video_pts++;

            /* skip the LIST-* tag and move on to the next fourcc */
            avio_rl32(pb);
            break;

        case ifrm_TAG:
        case pfrm_TAG:
        case cfrm_TAG:
        case ifr2_TAG:
        case pfr2_TAG:
        case cfr2_TAG:
            /* allocate 8 more bytes than 'size' to account for fourcc
             * and size */
            if (size + 8 < size || av_new_packet(pkt, size + 8))
                return AVERROR(EIO);
            pkt->stream_index = fourxm->video_stream_index;
            pkt->pts = fourxm->video_pts;
            pkt->pos = avio_tell(s->pb);
            memcpy(pkt->data, header, 8);
            ret = avio_read(s->pb, &pkt->data[8], size);

            if (ret < 0) {
                av_free_packet(pkt);
            } else {
                packet_read = 1;
            }
            break;

        case snd__TAG:
            track_number = avio_rl32(pb);
            avio_skip(pb, 4);
            size -= 8;

            if (track_number < fourxm->track_count && fourxm->tracks[track_number].channels > 0) {
                ret = av_get_packet(s->pb, pkt, size);
                if (ret < 0)
                    return AVERROR(EIO);
                pkt->stream_index =
                    fourxm->tracks[track_number].stream_index;
                pkt->pts = fourxm->tracks[track_number].audio_pts;
                packet_read = 1;

                /* pts accounting */
                audio_frame_count = size;
                if (fourxm->tracks[track_number].adpcm)
                    audio_frame_count -= 2 * fourxm->tracks[track_number].channels;
                audio_frame_count /= fourxm->tracks[track_number].channels;
                if (fourxm->tracks[track_number].adpcm) {
                    audio_frame_count *= 2;
                } else {
                    audio_frame_count /= fourxm->tracks[track_number].bits / 8;
                }
                fourxm->tracks[track_number].audio_pts += audio_frame_count;

            } else {
                avio_skip(pb, size);
            }
            break;

        default:
            avio_skip(pb, size);
            break;
        }
    }
    return ret;
}
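The `size + 8 < size` test above is the standard unsigned-overflow guard: if adding the 8 fourcc+size bytes wraps the 32-bit value around, the sum comes out smaller than the original operand. The same pattern as a standalone helper, for reference:

#include <limits.h>

/* returns 1 when a + b would wrap for unsigned int operands */
static int add_would_overflow(unsigned int a, unsigned int b)
{
    return a + b < a;   /* a wrapped sum is smaller than either operand */
    /* equivalent form: a > UINT_MAX - b */
}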
Example #16
static int index_rebuild_ffmpeg(struct anim * anim, 
				IMB_Timecode_Type tcs_in_use,
				IMB_Proxy_Size proxy_sizes_in_use,
				int quality,
				short *stop, short *do_update, 
				float *progress)
{
	int i, videoStream;
	unsigned long long seek_pos = 0;
	unsigned long long last_seek_pos = 0;
	unsigned long long seek_pos_dts = 0;
	unsigned long long seek_pos_pts = 0;
	unsigned long long last_seek_pos_dts = 0;
	unsigned long long start_pts = 0;
	double frame_rate;
	double pts_time_base;
	int frameno = 0;
	int start_pts_set = FALSE;

	AVFormatContext *iFormatCtx;
	AVCodecContext *iCodecCtx;
	AVCodec *iCodec;
	AVStream *iStream;
	AVFrame* in_frame = 0;
	AVPacket next_packet;
	int streamcount;

	struct proxy_output_ctx * proxy_ctx[IMB_PROXY_MAX_SLOT];
	anim_index_builder * indexer [IMB_TC_MAX_SLOT];

	int num_proxy_sizes = IMB_PROXY_MAX_SLOT;
	int num_indexers = IMB_TC_MAX_SLOT;
	uint64_t stream_size;

	memset(proxy_ctx, 0, sizeof(proxy_ctx));
	memset(indexer, 0, sizeof(indexer));

	if(av_open_input_file(&iFormatCtx, anim->name, NULL, 0, NULL) != 0) {
		return 0;
	}

	if (av_find_stream_info(iFormatCtx) < 0) {
		av_close_input_file(iFormatCtx);
		return 0;
	}

	streamcount = anim->streamindex;

	/* Find the video stream */
	videoStream = -1;
	for (i = 0; i < iFormatCtx->nb_streams; i++)
		if(iFormatCtx->streams[i]->codec->codec_type
		   == AVMEDIA_TYPE_VIDEO) {
			if (streamcount > 0) {
				streamcount--;
				continue;
			}
			videoStream = i;
			break;
		}

	if (videoStream == -1) {
		av_close_input_file(iFormatCtx);
		return 0;
	}

	iStream = iFormatCtx->streams[videoStream];
	iCodecCtx = iStream->codec;

	iCodec = avcodec_find_decoder(iCodecCtx->codec_id);
	
	if (iCodec == NULL) {
		av_close_input_file(iFormatCtx);
		return 0;
	}

	iCodecCtx->workaround_bugs = 1;

	if (avcodec_open(iCodecCtx, iCodec) < 0) {
		av_close_input_file(iFormatCtx);
		return 0;
	}

	in_frame = avcodec_alloc_frame();

	stream_size = avio_size(iFormatCtx->pb);

	for (i = 0; i < num_proxy_sizes; i++) {
		if (proxy_sizes_in_use & proxy_sizes[i]) {
			proxy_ctx[i] = alloc_proxy_output_ffmpeg(
				anim, iStream, proxy_sizes[i],
				iCodecCtx->width * proxy_fac[i],
				iCodecCtx->height * proxy_fac[i],
				quality);
			if (!proxy_ctx[i]) {
				proxy_sizes_in_use &= ~proxy_sizes[i];
			}
		}
	}

	for (i = 0; i < num_indexers; i++) {
		if (tcs_in_use & tc_types[i]) {
			char fname[FILE_MAXDIR+FILE_MAXFILE];

			get_tc_filename(anim, tc_types[i], fname);

			indexer[i] = IMB_index_builder_create(fname);
			if (!indexer[i]) {
				tcs_in_use &= ~tc_types[i];
			}
		}
	}

	frame_rate = av_q2d(iStream->r_frame_rate);
	pts_time_base = av_q2d(iStream->time_base);

	while(av_read_frame(iFormatCtx, &next_packet) >= 0) {
		int frame_finished = 0;
		float next_progress =  ((int)floor(((double) next_packet.pos) * 100 /
		                                   ((double) stream_size)+0.5)) / 100;

		if (*progress != next_progress) {
			*progress = next_progress;
			*do_update = 1;
		}

		if (*stop) {
			av_free_packet(&next_packet);
			break;
		}

		if (next_packet.stream_index == videoStream) {
			if (next_packet.flags & AV_PKT_FLAG_KEY) {
				last_seek_pos = seek_pos;
				last_seek_pos_dts = seek_pos_dts;
				seek_pos = next_packet.pos;
				seek_pos_dts = next_packet.dts;
				seek_pos_pts = next_packet.pts;
			}

			avcodec_decode_video2(
				iCodecCtx, in_frame, &frame_finished, 
				&next_packet);
		}

		if (frame_finished) {
			unsigned long long s_pos = seek_pos;
			unsigned long long s_dts = seek_pos_dts;
			unsigned long long pts 
				= av_get_pts_from_frame(iFormatCtx, in_frame);

			for (i = 0; i < num_proxy_sizes; i++) {
				add_to_proxy_output_ffmpeg(
					proxy_ctx[i], in_frame);
			}

			if (!start_pts_set) {
				start_pts = pts;
				start_pts_set = TRUE;
			}

			frameno = (pts - start_pts) 
				* pts_time_base * frame_rate; 

			/* decoding *always* starts on I-frames, so P-frames
			   won't work (even if all the information is in place)
			   when we seek to the I-frame presented *after* the
			   P-frame but located before the P-frame within the
			   stream */

			if (pts < seek_pos_pts) {
				s_pos = last_seek_pos;
				s_dts = last_seek_pos_dts;
			}

			for (i = 0; i < num_indexers; i++) {
				if (tcs_in_use & tc_types[i]) {
					IMB_index_builder_proc_frame(
						indexer[i], 
						next_packet.data, 
						next_packet.size,
						frameno, s_pos,	s_dts, pts);
				}
			}
		}
		av_free_packet(&next_packet);
	}

	for (i = 0; i < num_indexers; i++) {
		if (tcs_in_use & tc_types[i]) {
			IMB_index_builder_finish(indexer[i], *stop);
		}
	}

	for (i = 0; i < num_proxy_sizes; i++) {
		if (proxy_sizes_in_use & proxy_sizes[i]) {
			free_proxy_output_ffmpeg(proxy_ctx[i], *stop);
		}
	}

	av_free(in_frame);

	return 1;
}
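The frameno computation above converts a pts delta from stream time-base ticks to seconds (via av_q2d(time_base)) and then to frames (times the frame rate). The same math as a self-contained helper, for reference:

#include <stdint.h>
#include <libavutil/rational.h>

/* sketch: map a pts (in stream time_base ticks) to a 0-based frame index */
static int pts_to_frameno(int64_t pts, int64_t start_pts,
                          AVRational time_base, AVRational frame_rate)
{
    double seconds = (double)(pts - start_pts) * av_q2d(time_base);
    return (int)(seconds * av_q2d(frame_rate));
}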
Example #17
File: psxstr.c Project: AndyA/ffmbc
static int str_read_packet(AVFormatContext *s,
                           AVPacket *ret_pkt)
{
    ByteIOContext *pb = s->pb;
    StrDemuxContext *str = s->priv_data;
    unsigned char sector[RAW_CD_SECTOR_SIZE];
    int channel;
    AVPacket *pkt;
    AVStream *st;

    while (1) {

        if (get_buffer(pb, sector, RAW_CD_SECTOR_SIZE) != RAW_CD_SECTOR_SIZE)
            return AVERROR(EIO);

        channel = sector[0x11];
        if (channel >= 32)
            return AVERROR_INVALIDDATA;

        switch (sector[0x12] & CDXA_TYPE_MASK) {

        case CDXA_TYPE_DATA:
        case CDXA_TYPE_VIDEO:
            {

                int current_sector = AV_RL16(&sector[0x1C]);
                int sector_count   = AV_RL16(&sector[0x1E]);
                int frame_size = AV_RL32(&sector[0x24]);

                if (!(frame_size >= 0
                      && current_sector < sector_count
                      && sector_count*VIDEO_DATA_CHUNK_SIZE >= frame_size)) {
                    av_log(s, AV_LOG_ERROR, "Invalid parameters %d %d %d\n", current_sector, sector_count, frame_size);
                    break;
                }

                if(str->channels[channel].video_stream_index < 0){
                    /* allocate a new AVStream */
                    st = av_new_stream(s, 0);
                    if (!st)
                        return AVERROR(ENOMEM);
                    av_set_pts_info(st, 64, 1, 15);

                    str->channels[channel].video_stream_index = st->index;

                    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
                    st->codec->codec_id   = CODEC_ID_MDEC;
                    st->codec->codec_tag  = 0;  /* no fourcc */
                    st->codec->width      = AV_RL16(&sector[0x28]);
                    st->codec->height     = AV_RL16(&sector[0x2A]);
                }

                /* if this is the first sector of the frame, allocate a pkt */
                pkt = &str->channels[channel].tmp_pkt;

                if(pkt->size != sector_count*VIDEO_DATA_CHUNK_SIZE){
                    if(pkt->data)
                        av_log(s, AV_LOG_ERROR, "missmatching sector_count\n");
                    av_free_packet(pkt);
                    if (av_new_packet(pkt, sector_count*VIDEO_DATA_CHUNK_SIZE))
                        return AVERROR(EIO);

                    pkt->pos= url_ftell(pb) - RAW_CD_SECTOR_SIZE;
                    pkt->stream_index =
                        str->channels[channel].video_stream_index;
                }

                memcpy(pkt->data + current_sector*VIDEO_DATA_CHUNK_SIZE,
                       sector + VIDEO_DATA_HEADER_SIZE,
                       VIDEO_DATA_CHUNK_SIZE);

                if (current_sector == sector_count-1) {
                    pkt->size= frame_size;
                    *ret_pkt = *pkt;
                    pkt->data= NULL;
                    pkt->size= -1;
                    return 0;
                }

            }
            break;

        case CDXA_TYPE_AUDIO:
            if(str->channels[channel].audio_stream_index < 0){
                int fmt = sector[0x13];
                /* allocate a new AVStream */
                st = av_new_stream(s, 0);
                if (!st)
                    return AVERROR(ENOMEM);

                str->channels[channel].audio_stream_index = st->index;

                st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
                st->codec->codec_id    = CODEC_ID_ADPCM_XA;
                st->codec->codec_tag   = 0;  /* no fourcc */
                st->codec->channels    = (fmt&1)?2:1;
                st->codec->sample_rate = (fmt&4)?18900:37800;
            //    st->codec->bit_rate = 0; //FIXME;
                st->codec->block_align = 128;

                av_set_pts_info(st, 64, 128, st->codec->sample_rate);
            }
            pkt = ret_pkt;
            if (av_new_packet(pkt, 2304))
                return AVERROR(EIO);
            memcpy(pkt->data,sector+24,2304);

            pkt->stream_index =
                str->channels[channel].audio_stream_index;
            return 0;
        default:
            av_log(s, AV_LOG_WARNING, "Unknown sector type %02X\n", sector[0x12]);
            /* drop the sector and move on */
            break;
        }

        if (url_feof(pb))
            return AVERROR(EIO);
    }
}
Example #18
static int flic_read_packet(AVFormatContext *s,
                            AVPacket *pkt)
{
    FlicDemuxContext *flic = s->priv_data;
    AVIOContext *pb = s->pb;
    int packet_read = 0;
    unsigned int size;
    int magic;
    int ret = 0;
    unsigned char preamble[FLIC_PREAMBLE_SIZE];

    while (!packet_read) {

        if ((ret = avio_read(pb, preamble, FLIC_PREAMBLE_SIZE)) !=
            FLIC_PREAMBLE_SIZE) {
            ret = AVERROR(EIO);
            break;
        }

        size = AV_RL32(&preamble[0]);
        magic = AV_RL16(&preamble[4]);

        if (((magic == FLIC_CHUNK_MAGIC_1) || (magic == FLIC_CHUNK_MAGIC_2)) && size > FLIC_PREAMBLE_SIZE) {
            if (av_new_packet(pkt, size)) {
                ret = AVERROR(EIO);
                break;
            }
            pkt->stream_index = flic->video_stream_index;
            pkt->pts = flic->frame_number++;
            pkt->pos = avio_tell(pb);
            memcpy(pkt->data, preamble, FLIC_PREAMBLE_SIZE);
            ret = avio_read(pb, pkt->data + FLIC_PREAMBLE_SIZE,
                size - FLIC_PREAMBLE_SIZE);
            if (ret != size - FLIC_PREAMBLE_SIZE) {
                av_free_packet(pkt);
                ret = AVERROR(EIO);
            }
            packet_read = 1;
        } else if (magic == FLIC_TFTD_CHUNK_AUDIO) {
            if (av_new_packet(pkt, size)) {
                ret = AVERROR(EIO);
                break;
            }

            /* skip useless 10B sub-header (yes, it's not accounted for in the chunk header) */
            avio_skip(pb, 10);

            pkt->stream_index = flic->audio_stream_index;
            pkt->pos = avio_tell(pb);
            ret = avio_read(pb, pkt->data, size);

            if (ret != size) {
                av_free_packet(pkt);
                ret = AVERROR(EIO);
            }

            packet_read = 1;
        } else {
            /* not interested in this chunk */
            avio_skip(pb, size - 6);
        }
    }

    return ret;
}
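For reference, the preamble parsed above is 6 bytes: a little-endian 32-bit chunk size at offset 0 and a 16-bit chunk magic at offset 4 (the `avio_skip(pb, size - 6)` confirms FLIC_PREAMBLE_SIZE is 6). A sketch of the layout:

#include <stdint.h>

/* sketch of the on-disk FLIC chunk preamble (little-endian) */
struct flic_preamble {
    uint32_t size;   /* whole chunk size, preamble included */
    uint16_t magic;  /* chunk type tag */
};
/* note: the demuxer reads the fields with AV_RL32/AV_RL16 rather than
   casting to a struct, which keeps the parse endian- and padding-safe */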
Example #19
/**
* @brief Run the local video capture loop: open the v4l2 input, decode frames and distribute them
*
* @return non-zero if the input stopped before reaching its natural end
*/
int LocalVideoInput::run()
{
    AVInputFormat *inputFormat = av_find_input_format( "video4linux2" );
    if ( inputFormat == NULL)
        Fatal( "Can't load input format" );

#if 0
    AVProbeData probeData;
    probeData.filename = mSource.c_str();
    probeData.buf = new unsigned char[1024];
    probeData.buf_size = 1024;
    inputFormat = av_probe_input_format( &probeData, 0 );
    if ( inputFormat == NULL)
        Fatal( "Can't probe input format" );

    AVFormatParameters formatParameters ;
    memset( &formatParameters, 0, sizeof(formatParameters) );
    formatParameters.channels = 1;
    formatParameters.channel = 0;
    formatParameters.standard = "PAL";
    formatParameters.pix_fmt = PIX_FMT_RGB24;
    //formatParameters.time_base.num = 1;
    //formatParameters.time_base.den = 10;
    formatParameters.width = 352;
    formatParameters.height = 288;
    //formatParameters.prealloced_context = 1;
#endif

    /* New API */
    AVDictionary *opts = NULL;
    av_dict_set( &opts, "standard", "PAL", 0 );
    av_dict_set( &opts, "video_size", "320x240", 0 );
    av_dict_set( &opts, "channel", "0", 0 );
    av_dict_set( &opts, "pixel_format", "rgb24", 0 );
    //av_dict_set( &opts, "framerate", "10", 0 );
    avDumpDict( opts );

    int avError = 0;
    AVFormatContext *formatContext = NULL;
    //if ( av_open_input_file( &formatContext, mSource.c_str(), inputFormat, 0, &formatParameters ) !=0 )
    if ( (avError = avformat_open_input( &formatContext, mSource.c_str(), inputFormat, &opts )) < 0 )
        Fatal( "Unable to open input %s due to: %s", mSource.c_str(), avStrError(avError) );

    avDumpDict( opts );
#if 0
    if ( av_open_input_stream( &formatContext, 0, mSource.c_str(), inputFormat, &formatParameters ) !=0 )
        Fatal( "Unable to open input %s due to: %s", mSource.c_str(), strerror(errno) );
#endif

    // Locate stream info from input
    if ( (avError = avformat_find_stream_info( formatContext, &opts )) < 0 )
        Fatal( "Unable to find stream info from %s due to: %s", mSource.c_str(), avStrError(avError) );
    
    if ( dbgLevel > DBG_INF )
        av_dump_format( formatContext, 0, mSource.c_str(), 0 );

    // Find first video stream present
    int videoStreamId = -1;
    for ( int i=0; i < formatContext->nb_streams; i++ )
    {
        if ( formatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
        {
            videoStreamId = i;
            //set_context_opts( formatContext->streams[i]->codec, avcodec_opts[CODEC_TYPE_VIDEO], AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
            break;
        }
    }
    if ( videoStreamId == -1 )
        Fatal( "Unable to locate video stream in %s", mSource.c_str() );
    mStream = formatContext->streams[videoStreamId];
    mCodecContext = mStream->codec;

    // Try and get the codec from the codec context
    AVCodec *codec = NULL;
    if ( (codec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
        Fatal( "Can't find codec for video stream from %s", mSource.c_str() );

    // Open the codec
    if ( avcodec_open2( mCodecContext, codec, &opts ) < 0 )
        Fatal( "Unable to open codec for video stream from %s", mSource.c_str() );

    //AVFrame *savedFrame = avcodec_alloc_frame();

    // Allocate space for the native video frame
    AVFrame *frame = avcodec_alloc_frame();

    // Determine required buffer size and allocate buffer
    int pictureSize = avpicture_get_size( mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height );
    
    ByteBuffer frameBuffer( pictureSize );
    
    //avpicture_fill( (AVPicture *)savedFrame, mLastFrame.mBuffer.data(), mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height);

    AVPacket packet;
    while( !mStop )
    {
        int frameComplete = false;
        while ( !frameComplete && (av_read_frame( formatContext, &packet ) >= 0) )
        {
            Debug( 5, "Got packet from stream %d", packet.stream_index );
            if ( packet.stream_index == videoStreamId )
            {
                frameComplete = false;
                if ( avcodec_decode_video2( mCodecContext, frame, &frameComplete, &packet ) < 0 )
                    Fatal( "Unable to decode frame at frame %ju", mFrameCount );

                Debug( 3, "Decoded video packet at frame %ju, pts %jd", mFrameCount, packet.pts );

                if ( frameComplete )
                {
                    Debug( 3, "Got frame %d, pts %jd (%.3f)", mCodecContext->frame_number, frame->pkt_pts, (((double)(packet.pts-mStream->start_time)*mStream->time_base.num)/mStream->time_base.den) );

                    avpicture_layout( (AVPicture *)frame, mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height, frameBuffer.data(), frameBuffer.capacity() );

                    uint64_t timestamp = packet.pts;
                    //Debug( 3, "%d: TS: %lld, TS1: %lld, TS2: %lld, TS3: %.3f", time( 0 ), timestamp, packet.pts, ((1000000LL*packet.pts*mStream->time_base.num)/mStream->time_base.den), (((double)packet.pts*mStream->time_base.num)/mStream->time_base.den) );
                    //Info( "%ld:TS: %lld, TS1: %lld, TS2: %lld, TS3: %.3f", time( 0 ), timestamp, packet.pts, ((1000000LL*packet.pts*mStream->time_base.num)/mStream->time_base.den), (((double)packet.pts*mStream->time_base.num)/mStream->time_base.den) );

                    VideoFrame *videoFrame = new VideoFrame( this, mCodecContext->frame_number, timestamp, frameBuffer );
                    distributeFrame( FramePtr( videoFrame ) );
                }
            }
            av_free_packet( &packet );
        }
        usleep( INTERFRAME_TIMEOUT );
    }
    cleanup();

    av_freep( &frame );
    if ( mCodecContext )
    {
       avcodec_close( mCodecContext );
       mCodecContext = NULL; // Freed by avformat_close_input
    }
    if ( formatContext )
    {
        avformat_close_input( &formatContext );
        formatContext = NULL;
        //av_free( formatContext );
    }
    return( !ended() );
}
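`avStrError()` above is a project helper, not part of FFmpeg. A plausible minimal implementation, assuming it simply wraps av_strerror() (note the static buffer makes this version non-reentrant):

#include <stdio.h>
#include <libavutil/error.h>

/* hypothetical sketch of the avStrError() helper used above */
static const char *avStrError(int errnum)
{
    static char buf[AV_ERROR_MAX_STRING_SIZE];  /* not thread-safe */
    if (av_strerror(errnum, buf, sizeof(buf)) < 0)
        snprintf(buf, sizeof(buf), "unknown error %d", errnum);
    return buf;
}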
Example #20
int audio_decode_frame(VideoState *is, double *pts_ptr) {
  int len1, data_size = 0, n;
  AVPacket *pkt = &is->audio_pkt;
  double pts;

  for(;;) {
    while(is->audio_pkt_size > 0) {
      int got_frame;
      len1 = avcodec_decode_audio4(is->audio_st->codec, &is->audio_frame, &got_frame, pkt);
      if(len1 < 0) {
        /* if error, skip frame */
        is->audio_pkt_size = 0;
        break;
      }
      if (got_frame)
      {
          data_size = 
            av_samples_get_buffer_size
            (
                NULL, 
                is->audio_st->codec->channels,
                is->audio_frame.nb_samples,
                is->audio_st->codec->sample_fmt,
                1
            );
          memcpy(is->audio_buf, is->audio_frame.data[0], data_size);
      }
      is->audio_pkt_data += len1;
      is->audio_pkt_size -= len1;
      if(data_size <= 0) {
        /* No data yet, get more frames */
        continue;
      }
      pts = is->audio_clock;
      *pts_ptr = pts;
      n = 2 * is->audio_st->codec->channels;
      is->audio_clock += (double)data_size /
                         (double)(n * is->audio_st->codec->sample_rate);

      /* We have data, return it and come back for more later */
      return data_size;
    }
    if(pkt->data)
      av_free_packet(pkt);

    if(is->quit) {
      return -1;
    }
    /* next packet */
    if(packet_queue_get(&is->audioq, pkt, 1) < 0) {
      return -1;
    }
    is->audio_pkt_data = pkt->data;
    is->audio_pkt_size = pkt->size;
    /* if update, update the audio clock w/pts */
    if(pkt->pts != AV_NOPTS_VALUE) {
      is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
    }

  }
}
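One caveat in the clock arithmetic above: `n = 2 * channels` bakes in 2 bytes per sample, which is only correct for S16 audio. A hedged, format-agnostic variant of the same update, using the same variables as in the function:

#include <libavutil/samplefmt.h>

/* sketch: advance the audio clock without assuming 16-bit samples */
int bps = av_get_bytes_per_sample(is->audio_st->codec->sample_fmt);
int bytes_per_sec = is->audio_st->codec->channels *
                    is->audio_st->codec->sample_rate * bps;
if (bytes_per_sec > 0)
    is->audio_clock += (double)data_size / bytes_per_sec;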
Example #21
static int run_test(AVCodec *enc, AVCodec *dec, AVCodecContext *enc_ctx,
                    AVCodecContext *dec_ctx)
{
    AVPacket enc_pkt;
    AVFrame *in_frame, *out_frame;
    uint8_t *raw_in = NULL, *raw_out = NULL;
    int in_offset = 0, out_offset = 0;
    int frame_data_size = 0;
    int result = 0;
    int got_output = 0;
    int i = 0;

    in_frame = av_frame_alloc();
    if (!in_frame) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate input frame\n");
        return AVERROR(ENOMEM);
    }

    in_frame->nb_samples = enc_ctx->frame_size;
    in_frame->format = enc_ctx->sample_fmt;
    in_frame->channel_layout = enc_ctx->channel_layout;
    if (av_frame_get_buffer(in_frame, 32) != 0) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate a buffer for input frame\n");
        return AVERROR(ENOMEM);
    }
    /* one frame's worth of bytes, used by the final memcmp() check */
    frame_data_size = in_frame->linesize[0];

    out_frame = av_frame_alloc();
    if (!out_frame) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate output frame\n");
        return AVERROR(ENOMEM);
    }

    raw_in = av_malloc(in_frame->linesize[0] * NUMBER_OF_FRAMES);
    if (!raw_in) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate memory for raw_in\n");
        return AVERROR(ENOMEM);
    }

    raw_out = av_malloc(in_frame->linesize[0] * NUMBER_OF_FRAMES);
    if (!raw_out) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate memory for raw_out\n");
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < NUMBER_OF_FRAMES; i++) {
        av_init_packet(&enc_pkt);
        enc_pkt.data = NULL;
        enc_pkt.size = 0;

        generate_raw_frame((uint16_t*)(in_frame->data[0]), i, enc_ctx->sample_rate,
                           enc_ctx->channels, enc_ctx->frame_size);
        memcpy(raw_in + in_offset, in_frame->data[0], in_frame->linesize[0]);
        in_offset += in_frame->linesize[0];
        result = avcodec_encode_audio2(enc_ctx, &enc_pkt, in_frame, &got_output);
        if (result < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error encoding audio frame\n");
            return result;
        }

        /* if we get an encoded packet, feed it straight to the decoder */
        if (got_output) {
            result = avcodec_decode_audio4(dec_ctx, out_frame, &got_output, &enc_pkt);
            if (result < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding audio packet\n");
                return result;
            }

            if (got_output) {
                if (result != enc_pkt.size) {
                    av_log(NULL, AV_LOG_INFO, "Decoder consumed only part of a packet, it is allowed to do so -- need to update this test\n");
                    return AVERROR_UNKNOWN;
                }

                if (in_frame->nb_samples != out_frame->nb_samples) {
                    av_log(NULL, AV_LOG_ERROR, "Error frames before and after decoding has different number of samples\n");
                    return AVERROR_UNKNOWN;
                }

                if (in_frame->channel_layout != out_frame->channel_layout) {
                    av_log(NULL, AV_LOG_ERROR, "Error frames before and after decoding has different channel layout\n");
                    return AVERROR_UNKNOWN;
                }

                if (in_frame->format != out_frame->format) {
                    av_log(NULL, AV_LOG_ERROR, "Error frames before and after decoding has different sample format\n");
                    return AVERROR_UNKNOWN;
                }
                memcpy(raw_out + out_offset, out_frame->data[0], out_frame->linesize[0]);
                out_offset += out_frame->linesize[0];
            }
        }
        av_free_packet(&enc_pkt);
    }

    if (memcmp(raw_in, raw_out, frame_data_size * NUMBER_OF_FRAMES) != 0) {
        av_log(NULL, AV_LOG_ERROR, "Output differs\n");
        return 1;
    }

    av_log(NULL, AV_LOG_INFO, "OK\n");

    av_freep(&raw_in);
    av_freep(&raw_out);
    av_frame_free(&in_frame);
    av_frame_free(&out_frame);
    return 0;
}
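`generate_raw_frame()` is defined elsewhere in the test. A hypothetical stand-in that fills the interleaved buffer with a 440 Hz tone, assuming 16-bit samples as the uint16_t cast above implies:

#include <math.h>
#include <stdint.h>

/* hypothetical sketch: fill one frame with a 440 Hz sine, all channels identical */
static void generate_raw_frame(uint16_t *frame_data, int frame_index,
                               int sample_rate, int channels, int frame_size)
{
    int j, k;
    for (j = 0; j < frame_size; j++) {
        double t = (double)(frame_index * frame_size + j) / sample_rate;
        uint16_t sample = (uint16_t)(10000 * sin(2 * M_PI * 440.0 * t));
        for (k = 0; k < channels; k++)
            frame_data[j * channels + k] = sample;
    }
}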
Example #22
int decode_thread(void *arg) {

  VideoState *is = (VideoState *)arg;
  AVFormatContext *pFormatCtx = NULL;
  AVPacket pkt1, *packet = &pkt1;

  AVDictionary *io_dict = NULL;
  AVIOInterruptCB callback;

  int video_index = -1;
  int audio_index = -1;
  int i;

  is->videoStream=-1;
  is->audioStream=-1;

  global_video_state = is;
  // will interrupt blocking functions if we quit!
  callback.callback = decode_interrupt_cb;
  callback.opaque = is;
  if (avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict))
  {
    fprintf(stderr, "Unable to open I/O for %s\n", is->filename);
    return -1;
  }

  // Open video file
  if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL)!=0)
    return -1; // Couldn't open file

  is->pFormatCtx = pFormatCtx;
  
  // Retrieve stream information
  if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information
  
  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, is->filename, 0);
  
  // Find the first video stream

  for(i=0; i<pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
       video_index < 0) {
      video_index=i;
    }
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
       audio_index < 0) {
      audio_index=i;
    }
  }
  if(audio_index >= 0) {
    stream_component_open(is, audio_index);
  }
  if(video_index >= 0) {
    stream_component_open(is, video_index);
  }   

  if(is->videoStream < 0 || is->audioStream < 0) {
    fprintf(stderr, "%s: could not open codecs\n", is->filename);
    goto fail;
  }

  // main decode loop

  for(;;) {
    if(is->quit) {
      break;
    }
    // seek stuff goes here
    if(is->audioq.size > MAX_AUDIOQ_SIZE ||
       is->videoq.size > MAX_VIDEOQ_SIZE) {
      SDL_Delay(10);
      continue;
    }
    if(av_read_frame(is->pFormatCtx, packet) < 0) {
      if(is->pFormatCtx->pb->error == 0) {
        SDL_Delay(100); /* no error; wait for user input */
        continue;
      } else {
        break;
      }
    }
    // Is this a packet from the video stream?
    if(packet->stream_index == is->videoStream) {
      packet_queue_put(&is->videoq, packet);
    } else if(packet->stream_index == is->audioStream) {
      packet_queue_put(&is->audioq, packet);
    } else {
      av_free_packet(packet);
    }
  }
  /* all done - wait for it */
  while(!is->quit) {
    SDL_Delay(100);
  }

 fail:
  {
    SDL_Event event;
    event.type = FF_QUIT_EVENT;
    event.user.data1 = is;
    SDL_PushEvent(&event);
  }
  return 0;
}
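`decode_interrupt_cb` is registered via the AVIOInterruptCB above but not shown. A minimal sketch, assuming it only has to report the quit flag so blocking I/O unwinds (the excerpt sets global_video_state for exactly this kind of access):

/* sketch: return nonzero to make blocking avio operations abort early */
static int decode_interrupt_cb(void *opaque)
{
    VideoState *is = opaque ? (VideoState *)opaque : global_video_state;
    return is && is->quit;
}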
Example #23
static int write_audio_frame(void) 
{
	AVCodecContext *c = NULL;
	AVPacket pkt;
	AVFrame *frame = NULL;
	int got_output = 0;

	c = audio_stream->codec;

	av_init_packet(&pkt);
	pkt.size = 0;
	pkt.data = NULL;

	AUD_readDevice(audio_mixdown_device, audio_input_buffer, audio_input_samples);
	audio_time += (double) audio_input_samples / (double) c->sample_rate;

#ifdef FFMPEG_HAVE_ENCODE_AUDIO2
	frame = avcodec_alloc_frame();
	avcodec_get_frame_defaults(frame);
	frame->pts = audio_time / av_q2d(c->time_base);
	frame->nb_samples = audio_input_samples;
	frame->format = c->sample_fmt;
#ifdef FFMPEG_HAVE_FRAME_CHANNEL_LAYOUT
	frame->channel_layout = c->channel_layout;
#endif

	if (audio_deinterleave) {
		int channel, i;
		uint8_t *temp;

		for (channel = 0; channel < c->channels; channel++) {
			for (i = 0; i < frame->nb_samples; i++) {
				memcpy(audio_deinterleave_buffer + (i + channel * frame->nb_samples) * audio_sample_size,
					   audio_input_buffer + (c->channels * i + channel) * audio_sample_size, audio_sample_size);
			}
		}

		temp = audio_deinterleave_buffer;
		audio_deinterleave_buffer = audio_input_buffer;
		audio_input_buffer = temp;
	}

	avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt, audio_input_buffer,
	                         audio_input_samples * c->channels * audio_sample_size, 1);

	if (avcodec_encode_audio2(c, &pkt, frame, &got_output) < 0) {
		// XXX error("Error writing audio packet");
		return -1;
	}

	if (!got_output) {
		avcodec_free_frame(&frame);
		return 0;
	}
#else
	pkt.size = avcodec_encode_audio(c, audio_output_buffer, audio_outbuf_size, (short *) audio_input_buffer);

	if (pkt.size < 0) {
		// XXX error("Error writing audio packet");
		return -1;
	}

	pkt.data = audio_output_buffer;
	got_output = 1;
#endif

	if (got_output) {
		if (pkt.pts != AV_NOPTS_VALUE)
			pkt.pts = av_rescale_q(pkt.pts, c->time_base, audio_stream->time_base);
		if (pkt.dts != AV_NOPTS_VALUE)
			pkt.dts = av_rescale_q(pkt.dts, c->time_base, audio_stream->time_base);
		if (pkt.duration > 0)
			pkt.duration = av_rescale_q(pkt.duration, c->time_base, audio_stream->time_base);

		pkt.stream_index = audio_stream->index;

		pkt.flags |= AV_PKT_FLAG_KEY;

		if (av_interleaved_write_frame(outfile, &pkt) != 0) {
			fprintf(stderr, "Error writing audio packet!\n");
			if (frame)
				avcodec_free_frame(&frame);
			return -1;
		}

		av_free_packet(&pkt);
	}

	if (frame)
		avcodec_free_frame(&frame);

	return 0;
}
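Assuming a libavcodec recent enough to provide av_packet_rescale_ts(), the three av_rescale_q() calls above can be collapsed into one call; a sketch of the equivalent:

/* rescales pts, dts and duration from the codec time base to the stream
   time base in one step (sketch; requires av_packet_rescale_ts support) */
av_packet_rescale_ts(&pkt, c->time_base, audio_stream->time_base);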
Example #24
Error FFMpegVideoProvider::Impl::entry() {
	mState = STATE_CAPTURING;
	Error rc;

	AVFormatContext *fmtCtx = NULL;
	AVCodecContext  *decodeCtx = NULL, *encodeCtx = NULL;
	AVCodec         *decoder= NULL, *encoder = NULL;
	SwsContext      *swsCtx;
	AVFrame         *convertedFrame = avcodec_alloc_frame();
	AVPacket pkt;
	VBufferPtr buf;

	FFMpegCodecInfo encodeInfo = FFMpegInfo::findCodecInfo(mCurrentParam.currentCodec);

	// Open file
	{
		AVInputFormat *inputFmt = av_find_input_format(mCtx.inputFmt);
		AVFormatParameters ap;
		::memset(&ap,0,sizeof(ap));

		ap.width = 640;
		ap.height = 480;
		ap.time_base = (AVRational) {1,25};
		if (av_open_input_file(&fmtCtx,mFileName.c_str(),inputFmt,0,&ap) != 0
				|| fmtCtx == NULL) {
			rc.setErrorString("Open input error");
			goto open_input_file_failed;
		}
	}

	// Find video track
	{
		for (int i=0; i<fmtCtx->nb_streams; i++) {
			if (fmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
				decodeCtx = fmtCtx->streams[i]->codec;
				break;
			}
		}
		if (decodeCtx == NULL) {
			rc.setErrorString("No video track found");
			goto find_video_track_failed;
		}
	}

	// Find decoder
	{
		decoder = avcodec_find_decoder(decodeCtx->codec_id);
		if (decoder == NULL) {
			rc.setErrorString("Can't find decoder");
			goto find_decoder_failed;
		}

		if (decodeCtx->pix_fmt == PIX_FMT_NONE ) {
			decodeCtx->pix_fmt = PIX_FMT_YUVJ420P;
		}
	}

	// Open decoder
	{
		if (avcodec_open(decodeCtx,decoder) != 0) {
			rc.setErrorString("Can't open decoder");
			goto open_decoder_failed;
		}
	}

	// Find encoder
	{
		encoder = avcodec_find_encoder(encodeInfo.codecId);
		if (encoder == NULL) {
			rc.setErrorString("Can't find encoder");
			goto find_encoder_failed;
		}

		encodeCtx = avcodec_alloc_context();
		encodeCtx->width = mCurrentParam.currentGeometry.width;
		encodeCtx->height = mCurrentParam.currentGeometry.height;
		encodeCtx->time_base = (AVRational){mCurrentParam.currentFrameRate.num, mCurrentParam.currentFrameRate.den};
		encodeCtx->pix_fmt = encodeInfo.supportedPixelFormat.front();
		encodeCtx->bit_rate = 480000;
		encodeCtx->max_b_frames = 1;
	}

	// Open encoder
	{
		if (avcodec_open(encodeCtx, encoder) != 0) {
			rc.setErrorString("Open encoder failed");
			goto open_encoder_failed;
		}
	}

	// Init swscale
	{
		swsCtx = sws_getCachedContext(NULL, decodeCtx->width, decodeCtx->height,decodeCtx->pix_fmt,
				encodeCtx->width,encodeCtx->height,encodeCtx->pix_fmt, SWS_FAST_BILINEAR,
				NULL,NULL,NULL);
		if (swsCtx == NULL) {
			rc.setErrorString("Init sws context failed");
			goto init_sws_context_failed;
		}
	}

	// Allocate space for sws conversion
	{
		if (avpicture_alloc((AVPicture *)convertedFrame,encodeCtx->pix_fmt,encodeCtx->width,encodeCtx->height) != 0) {
			rc.setErrorString("Allocate space for conversion failed");
			goto allocate_space_failed;
		}
	}

	// All init finish successfully. Notify it
	{
		mThreadError = rc;
		mThreadMutex.lock();
		mThreadCond.signal();
		mThreadMutex.unlock();
	}

	// The mainloop
	while (!shouldStop()) {

		int got_frame = 0;
		AVFrame * decodedFrame = avcodec_alloc_frame();
		// Get a frame of decoded picture
		while (!got_frame && !shouldStop()){
			av_init_packet(&pkt);
			if (av_read_frame(fmtCtx, &pkt) != 0){
				rc.setErrorString("Read frame error");
				av_free_packet (&pkt);
				goto read_frame_error;
			}

			if ( fmtCtx->streams[pkt.stream_index]->codec != decodeCtx) {
				av_free_packet(&pkt);
				continue;
			}

			if (avcodec_decode_video2(decodeCtx,decodedFrame,&got_frame,&pkt) < 0){
				rc.setErrorString("Decode video frame error");
				av_free_packet (&pkt);
				goto decode_frame_error;
			}
			av_free_packet (&pkt);
		}

		if (!got_frame) {
			rc.setErrorString("Decode thread is requested to stop");
			goto decode_stop;
		}

		// Do sws scale
		{
			if (sws_scale(swsCtx,decodedFrame->data,decodedFrame->linesize,
					0,decodeCtx->height, convertedFrame->data, convertedFrame->linesize) < 1) {
				rc.setErrorString("sws scale failed");
				goto sws_scale_failed;
			}
		}

		// Wait a buffer to encode to
		{
			mBufferMutex.lock();
			while (!shouldStop() && (mBuffers.size() < 1)) {
				rc = mBufferCond.wait(mBufferMutex,500);
				if (rc.isError()) {
					if (rc.getErrorType() == Error::ERR_TIMEOUT) continue;
					else {
						mBufferMutex.unlock();
						goto wait_buffer_error;
					}
				}else{
					break;
				}
			}
			if (mBuffers.size() < 1) {
				mBufferMutex.unlock();
				goto wait_buffer_error;
			}
			buf = mBuffers.front();
			mBuffers.pop_front();
			mBufferMutex.unlock();
		}

		// Encode the video
		{
			int encoded = avcodec_encode_video(encodeCtx, buf->buf.getData(),buf->buf.getSize(),convertedFrame);
			if (encoded < 0) {
				rc.setErrorString("Encode video error");
				goto encode_video_error;
			}
			buf->size = encoded;
		}


		encode_video_error:
		buf->returned = rc;
		buf->cond.signal();
		wait_buffer_error:
		sws_scale_failed:
		decode_stop:
		decode_frame_error:
		read_frame_error:

		av_free(decodedFrame);
		if (rc.isError()) break;
	}


	// Notify all waiter(if exists)
	{
		mBufferMutex.lock();
		for (std::list<VBufferPtr>::const_iterator iter = mBuffers.begin();
				iter != mBuffers.end(); ++ iter) {
			(*iter)->mutex.lock();
			(*iter)->returned = rc;
			(*iter)->cond.signal();
			(*iter)->size = 0;
			(*iter)->mutex.unlock();
		}
		mBuffers.clear();
		mBufferMutex.unlock();
	}

	allocate_space_failed:
	sws_freeContext(swsCtx);
	init_sws_context_failed:
	avcodec_close(encodeCtx);
	open_encoder_failed:
	av_free(encodeCtx);
	find_encoder_failed:
	avcodec_close(decodeCtx);
	open_decoder_failed:
	find_decoder_failed:
	find_video_track_failed:
	av_close_input_file(fmtCtx);
	open_input_file_failed:
	av_free(convertedFrame);

	mState = STATE_READY;
	mThreadMutex.lock();
	mThreadError = rc;
	mThreadCond.signal();
	mThreadMutex.unlock();
	return rc;
}
Example #25
int my_spdif_read_packet(AVFormatContext *s, AVPacket *pkt,
		uint8_t * garbagebuffer, int garbagebuffersize, int * garbagebufferfilled)
{
    AVIOContext *pb = s->pb;
    enum IEC61937DataType data_type;
    enum AVCodecID codec_id;
    uint32_t state = 0;
    int pkt_size_bits, offset, ret;
    *garbagebufferfilled = 0;
    while (state != (AV_BSWAP16C(SYNCWORD1) << 16 | AV_BSWAP16C(SYNCWORD2))) {
        if (*garbagebufferfilled < garbagebuffersize) {
            *garbagebuffer = avio_r8(pb);
            (*garbagebufferfilled)++;

            state = (state << 8) | *garbagebuffer;
            garbagebuffer++;

            if (avio_feof(pb)) {
                return AVERROR_EOF;
            }
        } else {
            return AVERROR_STREAM_NOT_FOUND;
        }
    }
    *garbagebufferfilled -= 4;
    data_type = avio_rl16(pb);
    pkt_size_bits = avio_rl16(pb);

    if (pkt_size_bits % 16)
        avpriv_request_sample(s, "Packet not ending at a 16-bit boundary");

    ret = av_new_packet(pkt, FFALIGN(pkt_size_bits, 16) >> 3);
    if (ret)
        return ret;

    pkt->pos = avio_tell(pb) - BURST_HEADER_SIZE;

    if (avio_read(pb, pkt->data, pkt->size) < pkt->size) {
        av_free_packet(pkt);
        return AVERROR_EOF;
    }
    my_spdif_bswap_buf16((uint16_t *)pkt->data, (uint16_t *)pkt->data, pkt->size >> 1);

    ret = spdif_get_offset_and_codec(s, data_type, pkt->data,
                                     &offset, &codec_id);
    if (ret) {
        av_free_packet(pkt);
        return ret;
    }

    /* skip over the padding to the beginning of the next frame */
    avio_skip(pb, offset - pkt->size - BURST_HEADER_SIZE);

    if (!s->nb_streams) {
        /* first packet, create a stream */
        AVStream *st = avformat_new_stream(s, NULL);
        if (!st) {
            av_free_packet(pkt);
            return AVERROR(ENOMEM);
        }
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = codec_id;
    } else if (codec_id != s->streams[0]->codec->codec_id) {
        avpriv_report_missing_feature(s, "Codec change in IEC 61937");
        return AVERROR_PATCHWELCOME;
    }

    if (!s->bit_rate && s->streams[0]->codec->sample_rate)
        /* stream bitrate matches 16-bit stereo PCM bitrate for currently
           supported codecs */
        s->bit_rate = 2 * 16 * s->streams[0]->codec->sample_rate;

    return 0;
}
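`my_spdif_bswap_buf16()` mirrors libavcodec's internal spdif byte-swapper. A minimal sketch, assuming it swaps each 16-bit word from src into dst using libavutil's av_bswap16():

#include <stdint.h>
#include <libavutil/bswap.h>

/* sketch: byte-swap len 16-bit words from src into dst (dst may equal src) */
static void my_spdif_bswap_buf16(uint16_t *dst, const uint16_t *src, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = av_bswap16(src[i]);
}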
Example #26
MediaScanImage *video_create_image_from_frame(MediaScanVideo *v, MediaScanResult *r) {
  MediaScanImage *i = image_create();
  AVFormatContext *avf = (AVFormatContext *)r->_avf;
  av_codecs_t *codecs = (av_codecs_t *)v->_codecs;
  AVCodec *codec = (AVCodec *)v->_avc;
  AVFrame *frame = NULL;
  AVPacket packet;
  struct SwsContext *swsc = NULL;
  int got_picture;
  int64_t duration_tb = ((double)avf->duration / AV_TIME_BASE) / av_q2d(codecs->vs->time_base);
  uint8_t *src;
  int x, y;
  int ofs = 0;
  int no_keyframe_found = 0;
  int skipped_frames = 0;

  if ((avcodec_open(codecs->vc, codec)) < 0) {
    LOG_ERROR("Couldn't open video codec %s for thumbnail creation\n", codec->name);
    goto err;
  }

  frame = avcodec_alloc_frame();
  if (!frame) {
    LOG_ERROR("Couldn't allocate a video frame\n");
    goto err;
  }

  av_init_packet(&packet);

  i->path = v->path;
  i->width = v->width;
  i->height = v->height;

  // XXX select best video frame, for example:
  // * Skip frames of all the same color (e.g. blank intro frames
  // * Use edge detection to skip blurry frames
  //   * http://code.google.com/p/fast-edge/
  //   * http://en.wikipedia.org/wiki/Canny_edge_detector 
  // * Use a frame some percentage into the video, what percentage?
  // * If really ambitious, use OpenCV for finding a frame with a face?

  // XXX other ways to seek if this fails
  // XXX for now, seek 10% into the video
  av_seek_frame(avf, codecs->vsid, (int)((double)duration_tb * 0.1), 0);

  for (;;) {
    int ret;
    int rgb_bufsize;
    AVFrame *frame_rgb = NULL;
    uint8_t *rgb_buffer = NULL;

    // Give up if we already tried the first frame
    if (no_keyframe_found) {
      LOG_ERROR("Error decoding video frame for thumbnail: %s\n", v->path);
      goto err;
    }

    if ((ret = av_read_frame(avf, &packet)) < 0) {
      if (ret == AVERROR_EOF || skipped_frames > 200) {
        LOG_DEBUG("Couldn't find a keyframe, using first frame\n");
        no_keyframe_found = 1;
        av_seek_frame(avf, codecs->vsid, 0, 0);
        av_read_frame(avf, &packet);
      }
      else {
        LOG_ERROR("Couldn't read video frame (%s): ", v->path);
        print_averror(ret);
        goto err;
      }
    }

    // Skip frame if it's not from the video stream
    if (!no_keyframe_found && packet.stream_index != codecs->vsid) {
      av_free_packet(&packet);
      skipped_frames++;
      continue;
    }

    // Skip non-key-frames
    if (!no_keyframe_found && !(packet.flags & AV_PKT_FLAG_KEY)) {
      av_free_packet(&packet);
      skipped_frames++;
      continue;
    }

    // Skip invalid packets, not sure why this isn't an error from av_read_frame
    if (packet.pos < 0) {
      av_free_packet(&packet);
      skipped_frames++;
      continue;
    }

    LOG_DEBUG("Using video packet: pos %lld size %d, stream_index %d, duration %d\n",
              packet.pos, packet.size, packet.stream_index, packet.duration);

    if ((ret = avcodec_decode_video2(codecs->vc, frame, &got_picture, &packet)) < 0) {
      LOG_ERROR("Error decoding video frame for thumbnail: %s\n", v->path);
      print_averror(ret);
      goto err;
    }

    if (!got_picture) {
      if (skipped_frames > 200) {
        LOG_ERROR("Error decoding video frame for thumbnail: %s\n", v->path);
        goto err;
      }
      if (!no_keyframe_found) {
        // Try next frame
        av_free_packet(&packet);
        skipped_frames++;
        continue;
      }
    }

    // use swscale to convert from source format to RGBA in our buffer with no resizing
    // XXX what scaler is fastest here when not actually resizing?
    swsc = sws_getContext(i->width, i->height, codecs->vc->pix_fmt,
                          i->width, i->height, PIX_FMT_RGB24, SWS_FAST_BILINEAR, NULL, NULL, NULL);
    if (!swsc) {
      LOG_ERROR("Unable to get swscale context\n");
      goto err;
    }

    frame_rgb = avcodec_alloc_frame();
    if (!frame_rgb) {
      LOG_ERROR("Couldn't allocate a video frame\n");
      goto err;
    }

    // XXX There is probably a way to get sws_scale to write directly to i->_pixbuf in our RGBA format

    rgb_bufsize = avpicture_get_size(PIX_FMT_RGB24, i->width, i->height);
    rgb_buffer = av_malloc(rgb_bufsize);
    if (!rgb_buffer) {
      LOG_ERROR("Couldn't allocate an RGB video buffer\n");
      av_free(frame_rgb);
      goto err;
    }
    LOG_MEM("new rgb_buffer of size %d @ %p\n", rgb_bufsize, rgb_buffer);

    avpicture_fill((AVPicture *)frame_rgb, rgb_buffer, PIX_FMT_RGB24, i->width, i->height);

    // Convert image to RGB24
    sws_scale(swsc, frame->data, frame->linesize, 0, i->height, frame_rgb->data, frame_rgb->linesize);

    // Allocate space for our version of the image
    image_alloc_pixbuf(i, i->width, i->height);

    src = frame_rgb->data[0];
    ofs = 0;
    for (y = 0; y < i->height; y++) {
      for (x = 0; x < i->width * 3; x += 3) {
        i->_pixbuf[ofs++] = COL(src[x], src[x + 1], src[x + 2]);
      }
      src += i->width * 3;
    }

    // Free the frame
    LOG_MEM("destroy rgb_buffer @ %p\n", rgb_buffer);
    av_free(rgb_buffer);

    av_free(frame_rgb);

    // Done!
    goto out;
  }

err:
  image_destroy(i);
  i = NULL;

out:
  sws_freeContext(swsc);
  av_free_packet(&packet);
  if (frame)
    av_free(frame);

  avcodec_close(codecs->vc);

  return i;
}
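`COL()` is a libmediascan helper that is not shown here. A hypothetical definition consistent with its use above, packing 8-bit R, G, B into one 32-bit pixel of the _pixbuf format:

#include <stdint.h>

/* hypothetical: pack R,G,B into a 0x00RRGGBB 32-bit pixel */
#define COL(r, g, b) (((uint32_t)(r) << 16) | ((uint32_t)(g) << 8) | (uint32_t)(b))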
Example #27
/*
 * This thread is used to load video frame asynchronously.
 * It provides a frame caching service. 
 * The main thread is responsible for positioning the frame pointer in the
 * file correctly before calling startCache() which starts this thread.
 * The cache is organized in two layers: 1) a cache of 20-30 undecoded packets to keep
 * memory and CPU low 2) a cache of 5 decoded frames. 
 * If the main thread does not find the frame in the cache (because the video has restarted
 * or because the GE is lagging), it stops the cache with StopCache() (this is a synchronous
 * function: it sends a signal to stop the cache thread and wait for confirmation), then
 * change the position in the stream and restarts the cache thread.
 */
void *VideoFFmpeg::cacheThread(void *data)
{
	VideoFFmpeg* video = (VideoFFmpeg*)data;
	// holds the frame that is being decoded
	CacheFrame *currentFrame = NULL;
	CachePacket *cachePacket;
	bool endOfFile = false;
	int frameFinished = 0;
	double timeBase = av_q2d(video->m_formatCtx->streams[video->m_videoStream]->time_base);
	int64_t startTs = video->m_formatCtx->streams[video->m_videoStream]->start_time;

	if (startTs == AV_NOPTS_VALUE)
		startTs = 0;

	while (!video->m_stopThread)
	{
		// packet cache is used solely by this thread, no need to lock
		// In case the stream/file contains other stream than the one we are looking for,
		// allow a bit of cycling to get rid quickly of those frames
		frameFinished = 0;
		while (!endOfFile &&
		       (cachePacket = (CachePacket *)video->m_packetCacheFree.first) != NULL &&
		       frameFinished < 25)
		{
			// free packet => packet cache is not full yet, just read more
			if (av_read_frame(video->m_formatCtx, &cachePacket->packet)>=0) 
			{
				if (cachePacket->packet.stream_index == video->m_videoStream)
				{
					// make sure fresh memory is allocated for the packet and move it to queue
					av_dup_packet(&cachePacket->packet);
					BLI_remlink(&video->m_packetCacheFree, cachePacket);
					BLI_addtail(&video->m_packetCacheBase, cachePacket);
					break;
				} else {
					// this is not a good packet for us, just leave it on free queue
					// Note: here we could handle sound packet
					av_free_packet(&cachePacket->packet);
					frameFinished++;
				}
				
			} else {
				if (video->m_isFile)
					// this mark the end of the file
					endOfFile = true;
				// if we cannot read a packet, no need to continue
				break;
			}
		}
		// frame cache is also used by main thread, lock
		if (currentFrame == NULL) 
		{
			// no current frame being decoded, take free one
			pthread_mutex_lock(&video->m_cacheMutex);
			if ((currentFrame = (CacheFrame *)video->m_frameCacheFree.first) != NULL)
				BLI_remlink(&video->m_frameCacheFree, currentFrame);
			pthread_mutex_unlock(&video->m_cacheMutex);
		}
		if (currentFrame != NULL)
		{
			// this frame is out of free and busy queue, we can manipulate it without locking
			frameFinished = 0;
			while (!frameFinished && (cachePacket = (CachePacket *)video->m_packetCacheBase.first) != NULL)
			{
				BLI_remlink(&video->m_packetCacheBase, cachePacket);
				// use m_frame because when caching, it is not used in main thread
				// we can't use currentFrame directly because we need to convert to RGB first
				avcodec_decode_video2(video->m_codecCtx, 
					video->m_frame, &frameFinished, 
					&cachePacket->packet);
				if (frameFinished) 
				{
					AVFrame * input = video->m_frame;

					/* This means the data wasn't read properly; this check stops crashing */
					if (input->data[0] != 0 || input->data[1] != 0 ||
					    input->data[2] != 0 || input->data[3] != 0)
					{
						if (video->m_deinterlace) 
						{
							if (avpicture_deinterlace(
								(AVPicture*) video->m_frameDeinterlaced,
								(const AVPicture*) video->m_frame,
								video->m_codecCtx->pix_fmt,
								video->m_codecCtx->width,
								video->m_codecCtx->height) >= 0)
							{
								input = video->m_frameDeinterlaced;
							}
						}
						// convert to RGB24
						sws_scale(video->m_imgConvertCtx,
							input->data,
							input->linesize,
							0,
							video->m_codecCtx->height,
							currentFrame->frame->data,
							currentFrame->frame->linesize);
						// move frame to queue, this frame is necessarily the next one
						video->m_curPosition = (long)((cachePacket->packet.dts-startTs) * (video->m_baseFrameRate*timeBase) + 0.5);
						currentFrame->framePosition = video->m_curPosition;
						pthread_mutex_lock(&video->m_cacheMutex);
						BLI_addtail(&video->m_frameCacheBase, currentFrame);
						pthread_mutex_unlock(&video->m_cacheMutex);
						currentFrame = NULL;
					}
				}
				av_free_packet(&cachePacket->packet);
				BLI_addtail(&video->m_packetCacheFree, cachePacket);
			} 
			if (currentFrame && endOfFile) 
			{
				// no more packet and end of file => put a special frame that indicates that
				currentFrame->framePosition = -1;
				pthread_mutex_lock(&video->m_cacheMutex);
				BLI_addtail(&video->m_frameCacheBase, currentFrame);
				pthread_mutex_unlock(&video->m_cacheMutex);
				currentFrame = NULL;
				// no need to stay any longer in this thread
				break;
			}
		}
		// small sleep to avoid unnecessary looping
		PIL_sleep_ms(10);
	}
	// before quitting, put back the current frame to queue to allow freeing
	if (currentFrame)
	{
		pthread_mutex_lock(&video->m_cacheMutex);
		BLI_addtail(&video->m_frameCacheFree, currentFrame);
		pthread_mutex_unlock(&video->m_cacheMutex);
	}
	return 0;
}
Example #28
static u32 FFDemux_Run(void *par)
{
	AVPacket pkt;
	s64 seek_to;
	u64 seek_audio, seek_video;
	Bool video_init, do_seek, map_audio_time, map_video_time;
	GF_NetworkCommand com;
	GF_NetworkCommand map;
	GF_SLHeader slh;
	FFDemux *ffd = (FFDemux *) par;

	memset(&map, 0, sizeof(GF_NetworkCommand));
	map.command_type = GF_NET_CHAN_MAP_TIME;

	memset(&com, 0, sizeof(GF_NetworkCommand));
	com.command_type = GF_NET_CHAN_BUFFER_QUERY;

	memset(&slh, 0, sizeof(GF_SLHeader));

	slh.compositionTimeStampFlag = slh.decodingTimeStampFlag = 1;
	seek_to = (s64) (AV_TIME_BASE*ffd->seek_time);
	map_video_time = !ffd->seekable;

	video_init = (seek_to && ffd->video_ch) ? 0 : 1;
	seek_audio = seek_video = 0;
	if (ffd->seekable && (ffd->audio_st>=0)) seek_audio = (u64) (s64) (ffd->seek_time*ffd->audio_tscale.den);
	if (ffd->seekable && (ffd->video_st>=0)) seek_video = (u64) (s64) (ffd->seek_time*ffd->video_tscale.den);

	/*it appears that ffmpeg has trouble resyncing on some mpeg files - we trick it by restarting to 0 to get the
	first video frame, and only then seek*/
	if (ffd->seekable) av_seek_frame(ffd->ctx, -1, video_init ? seek_to : 0, AVSEEK_FLAG_BACKWARD);
	do_seek = !video_init;
	map_audio_time = video_init ? ffd->unreliable_audio_timing : 0;

	while (ffd->is_running) {

		pkt.stream_index = -1;
		/*EOF*/
		if (av_read_frame(ffd->ctx, &pkt) <0) break;
		if (pkt.pts == AV_NOPTS_VALUE) pkt.pts = pkt.dts;
		if (!pkt.dts) pkt.dts = pkt.pts;

		slh.compositionTimeStamp = pkt.pts;
		slh.decodingTimeStamp = pkt.dts;

		gf_mx_p(ffd->mx);
		/*blindly send audio as soon as video is init*/
		if (ffd->audio_ch && (pkt.stream_index == ffd->audio_st) && !do_seek) {
			slh.compositionTimeStamp *= ffd->audio_tscale.num;
			slh.decodingTimeStamp *= ffd->audio_tscale.num;

			if (map_audio_time) {
				map.base.on_channel = ffd->audio_ch;
				map.map_time.media_time = ffd->seek_time;
				/* map with TS=0 since we don't use SL */
				map.map_time.timestamp = 0;
				map.map_time.reset_buffers = 1;
				map_audio_time = 0;
				gf_term_on_command(ffd->service, &map, GF_OK);
			}
			else if (slh.compositionTimeStamp < seek_audio) {
				slh.decodingTimeStamp = slh.compositionTimeStamp = seek_audio;
			}
			gf_term_on_sl_packet(ffd->service, ffd->audio_ch, pkt.data, pkt.size, &slh, GF_OK);
		}
		else if (ffd->video_ch && (pkt.stream_index == ffd->video_st)) {
			slh.compositionTimeStamp *= ffd->video_tscale.num;
			slh.decodingTimeStamp *= ffd->video_tscale.num;

			/*if we get pts = 0 after a seek the demuxer is resetting PTSs, so force map time*/
			if ((!do_seek && seek_to && !slh.compositionTimeStamp) || (map_video_time) ) {
				seek_to = 0;
				map_video_time = 0;

				map.base.on_channel = ffd->video_ch;
				map.map_time.timestamp = (u64) pkt.pts;
//				map.map_time.media_time = ffd->seek_time;
				map.map_time.media_time = 0;
				map.map_time.reset_buffers = 0;
				gf_term_on_command(ffd->service, &map, GF_OK);
			}
			else if (slh.compositionTimeStamp < seek_video) {
				slh.decodingTimeStamp = slh.compositionTimeStamp = seek_video;
			}
			gf_term_on_sl_packet(ffd->service, ffd->video_ch, pkt.data, pkt.size, &slh, GF_OK);
			video_init = 1;
		}
		gf_mx_v(ffd->mx);
		av_free_packet(&pkt);

		/*here's the trick - only seek after sending the first packets of each stream - this allows ffmpeg video decoders
		to resync properly*/
		if (do_seek && video_init && ffd->seekable) {
			av_seek_frame(ffd->ctx, -1, seek_to, AVSEEK_FLAG_BACKWARD);
			do_seek = 0;
			map_audio_time = ffd->unreliable_audio_timing;
		}
		/*sleep until the buffer occupancy drops low enough - note that this works because all streams in this
		demuxer are synchronized*/
		while (1) {
			if (ffd->audio_ch) {
				com.base.on_channel = ffd->audio_ch;
				gf_term_on_command(ffd->service, &com, GF_OK);
				if (com.buffer.occupancy < ffd->data_buffer_ms) break;
			}
			if (ffd->video_ch) {
				com.base.on_channel = ffd->video_ch;
				gf_term_on_command(ffd->service, &com, GF_OK);
				if (com.buffer.occupancy < ffd->data_buffer_ms) break;
			}
			gf_sleep(10);

			/*escape if disconnect*/
			if (!ffd->audio_run && !ffd->video_run) break;
		}
		if (!ffd->audio_run && !ffd->video_run) break;
	}
	/*signal EOS*/
	if (ffd->audio_ch) gf_term_on_sl_packet(ffd->service, ffd->audio_ch, NULL, 0, NULL, GF_EOS);
	if (ffd->video_ch) gf_term_on_sl_packet(ffd->service, ffd->video_ch, NULL, 0, NULL, GF_EOS);
	ffd->is_running = 2;

	return 0;
}
Example #29
static int img_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    VideoData *s = s1->priv_data;
    char filename[1024];
    int i;
    int size[3]={0}, ret[3]={0};
    ByteIOContext *f[3];
    AVCodecContext *codec= s1->streams[0]->codec;

    if (!s->is_pipe) {
        /* loop over input */
        if (s1->loop_input && s->img_number > s->img_last) {
            s->img_number = s->img_first;
        }
        if (s->img_number > s->img_last)
            return AVERROR_EOF;
        if (av_get_frame_filename(filename, sizeof(filename),
                                  s->path, s->img_number)<0 && s->img_number > 1)
            return AVERROR(EIO);
        for(i=0; i<3; i++){
            if (url_fopen(&f[i], filename, URL_RDONLY) < 0) {
                if(i==1)
                    break;
                av_log(s1, AV_LOG_ERROR, "Could not open file : %s\n",filename);
                return AVERROR(EIO);
            }
            size[i]= url_fsize(f[i]);

            if(codec->codec_id != CODEC_ID_RAWVIDEO)
                break;
            filename[ strlen(filename) - 1 ]= 'U' + i;
        }

        if(codec->codec_id == CODEC_ID_RAWVIDEO && !codec->width)
            infer_size(&codec->width, &codec->height, size[0]);
    } else {
        f[0] = s1->pb;
        if (url_feof(f[0]))
            return AVERROR(EIO);
        size[0]= 4096;
    }

    av_new_packet(pkt, size[0] + size[1] + size[2]);
    pkt->stream_index = 0;
    pkt->flags |= AV_PKT_FLAG_KEY;

    pkt->size= 0;
    for(i=0; i<3; i++){
        if(size[i]){
            ret[i]= get_buffer(f[i], pkt->data + pkt->size, size[i]);
            if (!s->is_pipe)
                url_fclose(f[i]);
            if(ret[i]>0)
                pkt->size += ret[i];
        }
    }

    if (ret[0] <= 0 || ret[1]<0 || ret[2]<0) {
        av_free_packet(pkt);
        return AVERROR(EIO); /* signal EOF */
    } else {
        s->img_count++;
        s->img_number++;
        return 0;
    }
}
Example #30
int dc_video_decoder_read(VideoInputFile *video_input_file, VideoInputData *video_input_data, int source_number, int use_source_timing, int is_live_capture, const int *exit_signal_addr)
{
#ifdef DASHCAST_DEBUG_TIME_
	struct timeval start, end;
	long elapsed_time;
#endif
	AVPacket packet;
	int ret, got_frame, already_locked = 0;
	AVCodecContext *codec_ctx;
	VideoDataNode *video_data_node;

	/* Get a pointer to the codec context for the video stream */
	codec_ctx = video_input_file->av_fmt_ctx->streams[video_input_file->vstream_idx]->codec;

	/* Read frames */
	while (1) {
#ifdef DASHCAST_DEBUG_TIME_
		gf_gettimeofday(&start, NULL);
#endif
		memset(&packet, 0, sizeof(AVPacket));
		ret = av_read_frame(video_input_file->av_fmt_ctx, &packet);
#ifdef DASHCAST_DEBUG_TIME_
		gf_gettimeofday(&end, NULL);
		elapsed_time = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec);
		fprintf(stdout, "fps: %f\n", 1000000.0/elapsed_time);
#endif

		/* If we also demux for the audio thread, hand the packet over to it;
		   on EOF a NULL entry is queued so the audio side sees the end of stream */
		if (video_input_file->av_fmt_ctx_ref_cnt && ((packet.stream_index != video_input_file->vstream_idx) || (ret == AVERROR_EOF))) {
			AVPacket *packet_copy = NULL;
			if (ret != AVERROR_EOF) {
				GF_SAFEALLOC(packet_copy, AVPacket);
				memcpy(packet_copy, &packet, sizeof(AVPacket));
			}

			assert(video_input_file->av_pkt_list);
			gf_mx_p(video_input_file->av_pkt_list_mutex);
			gf_list_add(video_input_file->av_pkt_list, packet_copy);
			gf_mx_v(video_input_file->av_pkt_list_mutex);

			if (ret != AVERROR_EOF) {
				continue;
			}
		}

		if (ret == AVERROR_EOF) {
			if (video_input_file->mode == LIVE_MEDIA && video_input_file->no_loop == 0) {
				av_seek_frame(video_input_file->av_fmt_ctx, video_input_file->vstream_idx, 0, 0);
				av_free_packet(&packet);
				continue;
			}

			dc_producer_lock(&video_input_data->producer, &video_input_data->circular_buf);
			dc_producer_unlock_previous(&video_input_data->producer, &video_input_data->circular_buf);
			video_data_node = (VideoDataNode *) dc_producer_produce(&video_input_data->producer, &video_input_data->circular_buf);
			video_data_node->source_number = source_number;
			/* Flush decoder */
			memset(&packet, 0, sizeof(AVPacket));
#ifndef FF_API_AVFRAME_LAVC
			avcodec_get_frame_defaults(video_data_node->vframe);
#else
			av_frame_unref(video_data_node->vframe);
#endif

			/* an empty packet drains the frames the decoder still buffers */
			avcodec_decode_video2(codec_ctx, video_data_node->vframe, &got_frame, &packet);
			if (got_frame) {
				dc_producer_advance(&video_input_data->producer, &video_input_data->circular_buf);
				return 0;
			}

			dc_producer_end_signal(&video_input_data->producer, &video_input_data->circular_buf);
			dc_producer_unlock(&video_input_data->producer, &video_input_data->circular_buf);
			return -2;
		} else if (ret < 0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot read video frame.\n"));
			continue;
		}

		/* Is this a packet from the video stream? */
		if (packet.stream_index == video_input_file->vstream_idx) {
			if (!already_locked) {
				if (dc_producer_lock(&video_input_data->producer, &video_input_data->circular_buf) < 0) {
					GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[dashcast] Live system dropped a video frame\n"));
					continue;
				}

				dc_producer_unlock_previous(&video_input_data->producer, &video_input_data->circular_buf);

				already_locked = 1;
			}

			video_data_node = (VideoDataNode *) dc_producer_produce(&video_input_data->producer, &video_input_data->circular_buf);
			video_data_node->source_number = source_number;

			/* Set video frame to default */
#ifndef FF_API_AVFRAME_LAVC
			avcodec_get_frame_defaults(video_data_node->vframe);
#else
			av_frame_unref(video_data_node->vframe);
#endif

			/* Decode video frame */
			if (avcodec_decode_video2(codec_ctx, video_data_node->vframe, &got_frame, &packet) < 0) {
				av_free_packet(&packet);
				GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Error while decoding video.\n"));
				dc_producer_end_signal(&video_input_data->producer, &video_input_data->circular_buf);
				dc_producer_unlock(&video_input_data->producer, &video_input_data->circular_buf);
				return -1;
			}

			/* Did we get a video frame? */
			if (got_frame) {
				if (use_source_timing && is_live_capture) {
					u64 pts;
					if (video_input_file->pts_init == 0) {
						video_input_file->pts_init = 1;
						video_input_file->utc_at_init = gf_net_get_utc();
						video_input_file->first_pts = packet.pts;
						video_input_file->computed_pts = 0;
						video_input_data->frame_duration = codec_ctx->time_base.num;
						video_input_file->sync_tolerance = 9*video_input_data->frame_duration/5;
						//TODO - check with audio if sync is OK
					}
					//perform FPS re-linearisation
					pts = packet.pts - video_input_file->first_pts;
					if (pts - video_input_file->prev_pts > video_input_file->sync_tolerance) {
						u32 nb_lost=0;
						while (pts > video_input_file->computed_pts) {
							video_input_file->computed_pts += video_input_data->frame_duration;
							nb_lost++;
						}

						if (nb_lost) {
							GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[DashCast] Capture lost %d video frames \n", nb_lost));
						}
					}

//					fprintf(stdout, "Capture PTS %g - UTC diff %g - Computed PTS %g\n", (Double) pts / codec_ctx->time_base.den, (Double) (gf_net_get_utc() - video_input_file->utc_at_init) / 1000, (Double) video_input_file->computed_pts / codec_ctx->time_base.den);

					video_input_file->prev_pts = pts;
					video_data_node->vframe->pts = video_input_file->computed_pts;
					video_input_file->computed_pts += video_input_data->frame_duration;
				}

				if (video_data_node->vframe->pts==AV_NOPTS_VALUE) {
					if (!use_source_timing) {
						video_data_node->vframe->pts = video_input_file->frame_decoded;
					} else {
						video_data_node->vframe->pts = video_data_node->vframe->pkt_pts;
					}
				}
				video_input_file->frame_decoded++;

				GF_LOG(GF_LOG_INFO, GF_LOG_DASH, ("[DashCast] Video Frame TS "LLU" decoded at UTC "LLU" ms\n", video_data_node->vframe->pts, gf_net_get_utc() ));

				//For a decode/encode process we must free this memory.
				//But if the input is raw and there is no need to decode, the packet
				//is passed on directly as the decoded frame; we must wait until
				//rescaling is done before freeing it.

				if (codec_ctx->codec->id == CODEC_ID_RAWVIDEO) {
					video_data_node->nb_raw_frames_ref = video_input_file->nb_consumers;

					video_data_node->raw_packet = packet;

					dc_producer_advance(&video_input_data->producer, &video_input_data->circular_buf);
					while (video_data_node->nb_raw_frames_ref && ! *exit_signal_addr) {
						gf_sleep(0);
					}
				} else {
					dc_producer_advance(&video_input_data->producer, &video_input_data->circular_buf);
					av_free_packet(&packet);
				}
				return 0;

			}
		}

		/* Free the packet that was allocated by av_read_frame */
		av_free_packet(&packet);
	}

	GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Unknown error while reading video frame.\n"));
	return -1;
}
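
The FPS re-linearisation performed above for live captures can be reduced to a few lines. This is a simplified sketch with hypothetical names (relin_state, relinearize_pts), not the DashCast API: when the source PTS jumps by more than the tolerance, the linear output clock is advanced one frame duration at a time and the extra steps are counted as lost frames; every emitted frame is stamped on that linear grid.

#include <stdint.h>

typedef struct {
	uint64_t computed_pts;   /* ideal, linear output clock */
	uint64_t prev_pts;       /* previous source PTS (already relative to the first PTS) */
	uint64_t frame_duration; /* one frame, in stream time-base units */
	uint64_t sync_tolerance; /* e.g. 9*frame_duration/5, as in the example */
} relin_state;

/* returns the number of frames considered lost for this input PTS;
   pts must already be relative to the first captured PTS */
static uint32_t relinearize_pts(relin_state *st, uint64_t pts, uint64_t *out_pts)
{
	uint32_t nb_lost = 0;

	if (pts - st->prev_pts > st->sync_tolerance) {
		while (pts > st->computed_pts) {
			st->computed_pts += st->frame_duration;
			nb_lost++;
		}
	}
	st->prev_pts = pts;
	*out_pts = st->computed_pts;            /* stamp the frame on the linear grid */
	st->computed_pts += st->frame_duration;
	return nb_lost;
}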