Code Example #1
File: vsrc_buffer.c Project: Akuaksh/FFmpeg-alsenc
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *picref;

    if (!c->has_frame) {
        av_log(link->src, AV_LOG_ERROR,
               "request_frame() called with no available frame!\n");
        return -1;
    }

    /* This picture will be needed unmodified later for decoding the next
     * frame */
    picref = avfilter_get_video_buffer(link, AV_PERM_WRITE | AV_PERM_PRESERVE |
                                       AV_PERM_REUSE2,
                                       link->w, link->h);

    av_picture_copy((AVPicture *)&picref->data, (AVPicture *)&c->frame,
                    picref->format, link->w, link->h);

    picref->pts             = c->pts;
    picref->pixel_aspect    = c->pixel_aspect;
    picref->interlaced      = c->frame.interlaced_frame;
    picref->top_field_first = c->frame.top_field_first;
    avfilter_start_frame(link, avfilter_ref_buffer(picref, ~0));
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);
    avfilter_unref_buffer(picref);

    c->has_frame = 0;

    return 0;
}
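
Every example on this page follows the same contract: av_picture_copy() copies plane data between two AVPicture descriptors that share a pixel format and dimensions, honoring each side's linesize. A minimal sketch of the recurring avpicture_fill() + av_picture_copy() pairing, assuming the deprecated pre-4.0 AVPicture API these excerpts target (clone_picture is a hypothetical name, not code from any project listed here):

#include <libavcodec/avcodec.h>

/* Hypothetical helper: deep-copy 'src' into a freshly allocated,
 * tightly packed buffer; the caller releases it with av_free(). */
static uint8_t *clone_picture(const AVPicture *src,
                              enum AVPixelFormat pix_fmt, int w, int h)
{
    AVPicture dst;
    uint8_t *buf = av_malloc(avpicture_get_size(pix_fmt, w, h));
    if (!buf)
        return NULL;
    /* point dst's data[]/linesize[] into the new buffer ... */
    avpicture_fill(&dst, buf, pix_fmt, w, h);
    /* ... then copy plane by plane, respecting both strides */
    av_picture_copy(&dst, src, pix_fmt, w, h);
    return buf;
}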
Code Example #2
File: visualize.c Project: hannesweisbach/ATLAS
void hook_slice_end(const AVCodecContext *c)
{
	/* round width and height up to integer macroblock multiples, doubled
	 * because the visualization canvas holds a 2x2 grid of quadrants */
	const int width  = (2 * (c->width  + ((1 << mb_size_log) - 1)) >> mb_size_log) << mb_size_log;
	const int height = (2 * (c->height + ((1 << mb_size_log) - 1)) >> mb_size_log) << mb_size_log;
	AVPicture quad;
	
	if (c->slice.flag_last) {
		/* use per-frame storage to keep the replaced image */
		avpicture_fill(&quad, private_data(c->frame.current), c->pix_fmt, width, height);
		/* left upper quadrant: original */
		av_picture_copy(&quad, (const AVPicture *)c->frame.current, PIX_FMT_YUV420P, c->width, c->height);
		/* right upper quadrant: replacement */
		quad.data[0] += c->width;
		quad.data[1] += c->width / 2;
		quad.data[2] += c->width / 2;
		do_replacement(c, &quad, SLICE_MAX, NULL);
		/* left lower quadrant: slice error map */
		quad.data[0] += (c->height    ) * quad.linesize[0] - (c->width    );
		quad.data[1] += (c->height / 2) * quad.linesize[1] - (c->width / 2);
		quad.data[2] += (c->height / 2) * quad.linesize[2] - (c->width / 2);
		draw_slice_error(&quad);
		/* right lower quadrant: replacement with borders */
		quad.data[0] += c->width;
		quad.data[1] += c->width / 2;
		quad.data[2] += c->width / 2;
		do_replacement(c, &quad, SLICE_MAX, NULL);
		draw_border(proc.frame->replacement, &quad);
	}
}
Code Example #3
File: qtrleenc.c Project: Britz/FFmpeg
static int qtrle_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *pict, int *got_packet)
{
    QtrleEncContext * const s = avctx->priv_data;
    AVFrame * const p = &s->frame;
    int ret;

    *p = *pict;

    if ((ret = ff_alloc_packet2(avctx, pkt, s->max_buf_size)) < 0)
        return ret;

    if (avctx->gop_size == 0 || (s->avctx->frame_number % avctx->gop_size) == 0) {
        /* I-Frame */
        p->pict_type = AV_PICTURE_TYPE_I;
        p->key_frame = 1;
    } else {
        /* P-Frame */
        p->pict_type = AV_PICTURE_TYPE_P;
        p->key_frame = 0;
    }

    pkt->size = encode_frame(s, pict, pkt->data);

    /* save the current frame */
    av_picture_copy(&s->previous_frame, (AVPicture *)p, avctx->pix_fmt, avctx->width, avctx->height);

    if (p->key_frame)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
Code Example #4
File: grabber.cpp Project: GoUbiq/FFmpeg-Windows
void grabber::decloop()
{
	AVFrame* frame = av_frame_alloc();
	while(!b_stop)
	{
		AVPacket* packet = decFifo.pop();
		if (!packet)
		{
			break;
		}
		
		int got_picture = 0;
		int ret = avcodec_decode_video2(pCodecCtx, frame, &got_picture, packet);
		if(ret < 0)
		{
			printf("Decode Error.\n");
			break;
		}
		if (got_picture)
		{
			AVFrame* p = free_fifo.pop();
			if (!p)
			{
				break;
			}
			av_picture_copy((AVPicture*)p, (AVPicture*)frame, (AVPixelFormat)frame->format, frame->width, frame->height);
			valid_fifo.push(p);
		}
		//delete []packet;
		av_free_packet(packet);
		delete packet;
	}
	av_frame_free(&frame);
}
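
The two pools above imply a consumer on the other side: frames cycle free_fifo -> decoder -> valid_fifo and back. A hypothetical consumer counterpart, mirroring the example's naming (showloop and render are illustrative, not part of the project):

void grabber::showloop()
{
	while (!b_stop)
	{
		// block until decloop() publishes a decoded frame
		AVFrame* p = valid_fifo.pop();
		if (!p)
		{
			break;
		}
		render(p);           // hypothetical display hook
		free_fifo.push(p);   // recycle the buffer for the decoder
	}
}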
Code Example #5
File: tsmf_ffmpeg.c Project: DavBfr/FreeRDP
static BOOL tsmf_ffmpeg_decode_video(ITSMFDecoder* decoder, const BYTE *data, UINT32 data_size, UINT32 extensions)
{
	TSMFFFmpegDecoder* mdecoder = (TSMFFFmpegDecoder*) decoder;
	int decoded;
	int len;
	AVFrame *frame;
	BOOL ret = TRUE;
#if LIBAVCODEC_VERSION_MAJOR < 52 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR <= 20)
	len = avcodec_decode_video(mdecoder->codec_context, mdecoder->frame, &decoded, data, data_size);
#else
	{
		AVPacket pkt;
		av_init_packet(&pkt);
		pkt.data = (BYTE *) data;
		pkt.size = data_size;
		if (extensions & TSMM_SAMPLE_EXT_CLEANPOINT)
			pkt.flags |= AV_PKT_FLAG_KEY;
		len = avcodec_decode_video2(mdecoder->codec_context, mdecoder->frame, &decoded, &pkt);
	}
#endif
	if (len < 0)
	{
		WLog_ERR(TAG, "data_size %d, avcodec_decode_video failed (%d)", data_size, len);
		ret = FALSE;
	}
	else if (!decoded)
	{
		WLog_ERR(TAG, "data_size %d, no frame is decoded.", data_size);
		ret = FALSE;
	}
	else
	{
		DEBUG_TSMF("linesize[0] %d linesize[1] %d linesize[2] %d linesize[3] %d "
				   "pix_fmt %d width %d height %d",
				   mdecoder->frame->linesize[0], mdecoder->frame->linesize[1],
				   mdecoder->frame->linesize[2], mdecoder->frame->linesize[3],
				   mdecoder->codec_context->pix_fmt,
				   mdecoder->codec_context->width, mdecoder->codec_context->height);
		mdecoder->decoded_size = avpicture_get_size(mdecoder->codec_context->pix_fmt,
								 mdecoder->codec_context->width, mdecoder->codec_context->height);
		mdecoder->decoded_data = calloc(1, mdecoder->decoded_size);
		if (!mdecoder->decoded_data)
			return FALSE;

#if LIBAVCODEC_VERSION_MAJOR < 55
		frame = avcodec_alloc_frame();
#else
		frame = av_frame_alloc();
#endif
		avpicture_fill((AVPicture*) frame, mdecoder->decoded_data,
					   mdecoder->codec_context->pix_fmt,
					   mdecoder->codec_context->width, mdecoder->codec_context->height);
		av_picture_copy((AVPicture*) frame, (AVPicture*) mdecoder->frame,
						mdecoder->codec_context->pix_fmt,
						mdecoder->codec_context->width, mdecoder->codec_context->height);
		av_free(frame);
	}

	return ret;
}
Code Example #6
File: moviedecoder.cpp Project: KDE/ffmpegthumbs
bool MovieDecoder::processFilterGraph(AVPicture *dst, const AVPicture *src,
                                enum AVPixelFormat pixfmt, int width, int height)
{
    if (!m_filterGraph || width != m_lastWidth ||
        height != m_lastHeight || pixfmt != m_lastPixfmt) {

        if (!initFilterGraph(pixfmt, width, height)) {
            return false;
        }
    }

    memcpy(m_filterFrame->data, src->data, sizeof(src->data));
    memcpy(m_filterFrame->linesize, src->linesize, sizeof(src->linesize));
    m_filterFrame->width = width;
    m_filterFrame->height = height;
    m_filterFrame->format = pixfmt;

    int ret = av_buffersrc_add_frame(m_bufferSourceContext, m_filterFrame);
    if (ret < 0) {
        return false;
    }

    ret = av_buffersink_get_frame(m_bufferSinkContext, m_filterFrame);
    if (ret < 0) {
        return false;
    }

    av_picture_copy(dst, (const AVPicture *) m_filterFrame, pixfmt, width, height);
    av_frame_unref(m_filterFrame);

    return true;
}
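
processFilterGraph() assumes initFilterGraph() has already wired up a source and sink it can push to and pull from. A sketch of what such a buffer -> yadif -> buffersink graph could look like (assumed shape only, not ffmpegthumbs' actual initFilterGraph; the 1/25 time base is a placeholder):

extern "C" {
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
}
#include <cstdio>

static bool buildDeinterlaceGraph(AVFilterGraph*& graph,
                                  AVFilterContext*& src,
                                  AVFilterContext*& sink,
                                  AVPixelFormat pixfmt, int width, int height)
{
    graph = avfilter_graph_alloc();
    if (!graph)
        return false;

    // describe the frames the "buffer" source will be fed
    char args[128];
    std::snprintf(args, sizeof(args),
                  "video_size=%dx%d:pix_fmt=%d:time_base=1/25:pixel_aspect=1/1",
                  width, height, static_cast<int>(pixfmt));

    AVFilterContext* yadif = nullptr;
    if (avfilter_graph_create_filter(&src, avfilter_get_by_name("buffer"),
                                     "in", args, nullptr, graph) < 0 ||
        avfilter_graph_create_filter(&yadif, avfilter_get_by_name("yadif"),
                                     "deint", nullptr, nullptr, graph) < 0 ||
        avfilter_graph_create_filter(&sink, avfilter_get_by_name("buffersink"),
                                     "out", nullptr, nullptr, graph) < 0)
        return false;

    // buffer feeds yadif, yadif feeds the sink we later read from
    return avfilter_link(src, 0, yadif, 0) >= 0 &&
           avfilter_link(yadif, 0, sink, 0) >= 0 &&
           avfilter_graph_config(graph, nullptr) >= 0;
}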
Code Example #7
File: qtrleenc.c Project: WangCrystal/FFplayer
static int qtrle_encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void *data)
{
    QtrleEncContext * const s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p = &s->frame;
    int chunksize;

    *p = *pict;

    if (buf_size < s->max_buf_size) {
        /* Upper bound check for compressed data */
        av_log(avctx, AV_LOG_ERROR, "buf_size %d < %d\n", buf_size, s->max_buf_size);
        return -1;
    }

    if (avctx->gop_size == 0 || (s->avctx->frame_number % avctx->gop_size) == 0) {
        /* I-Frame */
        p->pict_type = FF_I_TYPE;
        p->key_frame = 1;
    } else {
        /* P-Frame */
        p->pict_type = FF_P_TYPE;
        p->key_frame = 0;
    }

    chunksize = encode_frame(s, pict, buf);

    /* save the current frame */
    av_picture_copy(&s->previous_frame, (AVPicture *)p, avctx->pix_fmt, avctx->width, avctx->height);
    return chunksize;
}
Code Example #8
File: pgm.cpp Project: Saner2oo2/mythtv
int pgm_overlay(AVPicture *dst, const AVPicture *s1, int s1height,
                int s1row, int s1col, const AVPicture *s2, int s2height)
{
    const int   dstwidth = dst->linesize[0];
    const int   s1width = s1->linesize[0];
    const int   s2width = s2->linesize[0];
    int         rr;

    if (dstwidth != s1width)
    {
        LOG(VB_COMMFLAG, LOG_ERR, QString("pgm_overlay want width %1, have %2")
                .arg(s1width).arg(dst->linesize[0]));
        return -1;
    }

    av_picture_copy(dst, s1, PIX_FMT_GRAY8, s1width, s1height);

    /* Overwrite overlay area of "dst" with "s2". */
    for (rr = 0; rr < s2height; rr++)
        memcpy(dst->data[0] + (s1row + rr) * s1width + s1col,
                s2->data[0] + rr * s2width,
                s2width);

    return 0;
}
Code Example #9
File: tsmf_ffmpeg.c Project: FreeRDP/FreeRDP-old
static int
tsmf_ffmpeg_decode_video(ITSMFDecoder * decoder, const uint8 * data, uint32 data_size, uint32 extensions)
{
	TSMFFFmpegDecoder * mdecoder = (TSMFFFmpegDecoder *) decoder;
	int decoded;
	int len;
	int ret = 0;
	AVFrame * frame;

#if LIBAVCODEC_VERSION_MAJOR < 52 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR <= 20)
	len = avcodec_decode_video(mdecoder->codec_context, mdecoder->frame, &decoded, data, data_size);
#else
	{
		AVPacket pkt;
		av_init_packet(&pkt);
		pkt.data = (uint8 *) data;
		pkt.size = data_size;
		if (extensions & TSMM_SAMPLE_EXT_CLEANPOINT)
			pkt.flags |= AV_PKT_FLAG_KEY;
		len = avcodec_decode_video2(mdecoder->codec_context, mdecoder->frame, &decoded, &pkt);
	}
#endif

	if (len < 0)
	{
		LLOGLN(0, ("tsmf_ffmpeg_decode_video: data_size %d, avcodec_decode_video failed (%d)", data_size, len));
		ret = 1;
	}
	else if (!decoded)
	{
		LLOGLN(0, ("tsmf_ffmpeg_decode_video: data_size %d, no frame is decoded.", data_size));
		ret = 1;
	}
	else
	{
		LLOGLN(10, ("tsmf_ffmpeg_decode_video: linesize[0] %d linesize[1] %d linesize[2] %d linesize[3] %d "
			"pix_fmt %d width %d height %d",
			mdecoder->frame->linesize[0], mdecoder->frame->linesize[1],
			mdecoder->frame->linesize[2], mdecoder->frame->linesize[3],
			mdecoder->codec_context->pix_fmt,
			mdecoder->codec_context->width, mdecoder->codec_context->height));

		mdecoder->decoded_size = avpicture_get_size(mdecoder->codec_context->pix_fmt,
			mdecoder->codec_context->width, mdecoder->codec_context->height);
		mdecoder->decoded_data = malloc(mdecoder->decoded_size);
		frame = avcodec_alloc_frame();
		avpicture_fill((AVPicture *) frame, mdecoder->decoded_data,
			mdecoder->codec_context->pix_fmt,
			mdecoder->codec_context->width, mdecoder->codec_context->height);

		av_picture_copy((AVPicture *) frame, (AVPicture *) mdecoder->frame,
			mdecoder->codec_context->pix_fmt,
			mdecoder->codec_context->width, mdecoder->codec_context->height);

		av_free(frame);
	}

	return ret;
}
Code Example #10
File: nuv.c Project: AnthonyNystrom/MobiVU
/**
 * \brief copy frame data from buffer to AVFrame, handling stride.
 * \param f destination AVFrame
 * \param src source buffer, does not use any line-stride
 * \param width width of the video frame
 * \param height height of the video frame
 */
static void copy_frame(AVFrame *f, const uint8_t *src,
                       int width, int height) {
    AVPicture pic;
#ifdef __CW32__
    avpicture_fill(&pic, (unsigned char*)src, PIX_FMT_YUV420P, width, height);
#else
    avpicture_fill(&pic, src, PIX_FMT_YUV420P, width, height);
#endif
    av_picture_copy((AVPicture *)f, &pic, PIX_FMT_YUV420P, width, height);
}
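
As the doc comment says, src carries no line stride, so a caller must supply a tightly packed image of exactly avpicture_get_size() bytes. A hypothetical guarded call site (decode_packed_frame is illustrative, not from nuv.c):

#include <libavutil/error.h>

/* Hypothetical caller: reject truncated buffers before copy_frame()
 * reads past the end of 'buf'. */
static int decode_packed_frame(AVFrame *f, const uint8_t *buf,
                               int buf_size, int width, int height)
{
    if (buf_size < avpicture_get_size(PIX_FMT_YUV420P, width, height))
        return AVERROR_INVALIDDATA; /* not enough data for one frame */
    copy_frame(f, buf, width, height);
    return 0;
}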
Code Example #11
  void
  VideoPicture :: copyAVFrame(AVFrame* frame, IPixelFormat::Type pixel,
      int32_t width, int32_t height)
  {
    try
    {
      // Need to copy the contents of frame->data to our
      // internal buffer.
      VS_ASSERT(frame, "no frame?");
      VS_ASSERT(frame->data[0], "no data in frame");
      // resize the frame to the AVFrame
      mFrame->width = width;
      mFrame->height = height;
      mFrame->format = (int)pixel;

      int bufSize = getSize();
      if (bufSize <= 0)
        throw std::runtime_error("invalid size for frame");

      if (!mBuffer || mBuffer->getBufferSize() < bufSize)
        // reuse buffers if we can.
        allocInternalFrameBuffer();

      uint8_t* buffer = (uint8_t*)mBuffer->getBytes(0, bufSize);
      if (!buffer)
        throw std::runtime_error("really?  no buffer");

      if (frame->data[0])
      {
        // Make sure the frame isn't already using our buffer
        if(buffer != frame->data[0])
        {
          avpicture_fill((AVPicture*)mFrame, buffer,
              (enum PixelFormat) pixel, width, height);
          av_picture_copy((AVPicture*)mFrame, (AVPicture*)frame,
              (PixelFormat)frame->format, frame->width, frame->height);
        }
        mFrame->key_frame = frame->key_frame;
      }
      else
      {
        throw std::runtime_error("no data in frame to copy");
      }
    }
    catch (std::exception & e)
    {
      VS_LOG_DEBUG("error: %s", e.what());
    }
  }
Code Example #12
File: DecodeVideo.cpp Project: chubahowsmall/ead-cel
void DecodeVideo::_Deinterlace(AVFrame * inFrame, AVFrame * outFrame)
{
    bool deinterlaced = false;

    if (avpicture_deinterlace((AVPicture *)outFrame, (AVPicture *)inFrame,
                              _codecCtx->pix_fmt,
                              _codecCtx->width, _codecCtx->height) >= 0) {
        deinterlaced = true;
    }

    if (!deinterlaced) {
        av_picture_copy((AVPicture *)outFrame, (AVPicture *)inFrame,
                        _codecCtx->pix_fmt, _codecCtx->width, _codecCtx->height);
    }
}
Code Example #13
File: mediasink.cpp Project: zhenyouluo/rtsp_client
int StreamMediaSink::showFrame()
{
	int ret = SDL_LockYUVOverlay(m_bmp);

#if 1
	AVPicture pict; // = { { 0 } };

	pict.data[0] = m_bmp->pixels[0];
	pict.data[1] = m_bmp->pixels[2];
	pict.data[2] = m_bmp->pixels[1];

	pict.linesize[0] = m_bmp->pitches[0];
	pict.linesize[1] = m_bmp->pitches[2];
	pict.linesize[2] = m_bmp->pitches[1];

#if 1
	sws_scale(img_convert_ctx, m_avFrame->data, m_avFrame->linesize, 0, m_avCodecContext->height, pict.data, pict.linesize);
#else
	av_picture_copy(&pict, (AVPicture *) m_avFrame, AV_PIX_FMT_YUV420P, m_avCodecContext->width, m_avCodecContext->height);
#endif
#else
	//m_bmp->format = SDL_YV12_OVERLAY;
	//m_bmp->format = SDL_IYUV_OVERLAY;
	//m_bmp->h = m_avFrame->height;
	//m_bmp->w = m_avFrame->width;
	m_bmp->pixels[0] = m_avFrame->data[0];
	m_bmp->pixels[2] = m_avFrame->data[1];
	m_bmp->pixels[1] = m_avFrame->data[2];

	m_bmp->pitches[0] = m_avFrame->linesize[0];
	m_bmp->pitches[2] = m_avFrame->linesize[1];
	m_bmp->pitches[1] = m_avFrame->linesize[2];
#endif
	//AV_PIX_FMT_YUV420P
	// Convert the image into YUV format that SDL uses
	//img_convert(&pict, PIX_FMT_YUV420P, (AVPicture *) pFrame, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);

	SDL_UnlockYUVOverlay(m_bmp);

	m_rect.x = 0;
	m_rect.y = 0;
	m_rect.w = m_avFrame->width;
	m_rect.h = m_avFrame->height;
	ret = SDL_DisplayYUVOverlay(m_bmp, &m_rect);

	return ret;
}
Code Example #14
File: editor.cpp Project: kostyll/justcutit
void Editor::cut_cut(CutPoint::Direction dir)
{
	int w = m_videoCodecCtx->width;
	int h = m_videoCodecCtx->height;
	
	AVFrame* frame = avcodec_alloc_frame();
	
	avpicture_fill(
		(AVPicture*)frame,
		(uint8_t*)av_malloc(avpicture_get_size(
			PIX_FMT_YUV420P,
			w, h
		)),
		PIX_FMT_YUV420P,
		w, h
	);
	
	av_picture_copy(
		(AVPicture*)frame,
		(AVPicture*)m_frameBuffer[m_frameIdx],
		PIX_FMT_YUV420P,
		w, h
	);
	
	if(dir == CutPoint::CUT_OUT)
		seek_nextFrame();
	
	int64_t pts = av_rescale_q(
		pts_val(m_frameTimestamps[m_frameIdx] - m_timeStampStart),
		m_videoTimeBase_q,
		AV_TIME_BASE_Q
	);
	
	int num = m_cutPoints.addCutPoint(frameTime(), dir, frame, pts);
	QModelIndex idx = m_cutPointModel.idxForNum(num);
	m_ui->cutPointView->setCurrentIndex(idx);
	
	if(dir == CutPoint::CUT_OUT)
		seek_prevFrame();
	
	cut_pointActivated(idx);
}
Code Example #15
int main(int argc, char ** argv)
{
	if(argc < 4) {
		printf("\nScrub, you need to specify a bitrate, number of frames, and server."
				"\nLike this: pixieHD 350 1000 rtmp://domain.com/live/matt\n"
				"\nNOTE, it is: progname bitrate frames server\n\n"
				"The bitrate is understood to be kbits/sec.\n"
				"You should enter frames or else you the program will\n"
				"continue to stream until you forcefully close it.\n"
				"THANK YOU: while(1) { /* stream! */ }\n");
		return 0;
	}
	printf("\nYou have set the following options:\n\n%5cbitrate: %s,"
			"\n%5cframes: %s\n%5cserver: %s\n\n",
			' ',argv[1],' ',argv[2],' ',argv[3]);
	
	/*int p;
	printf("Initializing noob options");
	for(p=0; p<3; ++p) {
		printf("%5c",'.');
		Sleep(1500);
	}
	printf("\n\n");

	char *input;
	printf("You hating on my GFX or wat? Please Answer: ");
	input = getline();

	printf("\n\n");
	printf("Your answer: ");

	size_t input_len = strlen(input);
	for(p=0; p<input_len; ++p) {
		Sleep(300);
		printf("%c",input[p]);
	}
	printf("\nkk here we go...");
	Sleep(1000);*/

	printf("\n\nPress the CONTROL key to begin streaming or ESC key to QUIT.\n\n");
    while (1)
    {
	   if (ButtonPress(VK_ESCAPE)) {
          printf("Quit.\n\n");
          break;
       } else if (ButtonPress(VK_CONTROL)) {
    	   // Decoder local variable declaration
    	   	AVFormatContext *pFormatCtx = NULL;
    	   	int i, videoStream;
    	   	AVCodecContext *pCodecCtx = NULL;
    	   	AVCodec *pCodec;
    	   	AVFrame *pFrame;
    	   	AVPacket packet;
    	   	int frameFinished;

    	   	// Encoder local variable declaration
    	   	const char *filename;
    	   	AVOutputFormat *fmt;
    	   	AVFormatContext *oc;
    	   	AVStream *video_st;
    	   	AVCodec *video_codec;
    	   	int ret; unsigned int frame_count, frame_count2;
    	   	StreamInfo sInfo;

    	   	size_t max_frames = strtol(argv[2], NULL, 0);

    	   	// Register all formats, codecs and network
    	   	av_register_all();
    	   	avcodec_register_all();
    	   	avformat_network_init();

    	   	// Setup mux
    	   	//filename = "output_file.flv";
    	   	//filename = "rtmp://chineseforall.org/live/beta";
    	   	filename = argv[3];
    	   	fmt = av_guess_format("flv", filename, NULL);
    	   	if (fmt == NULL) {
    	   		printf("Could not guess format.\n");
    	   		return -1;
    	   	}
    	   	// allocate the output media context
    	   	oc = avformat_alloc_context();
    	   	if (oc == NULL) {
    	   		printf("could not allocate context.\n");
    	   		return -1;
    	   	}


    	   HDC hScreen = GetDC(GetDesktopWindow());
		   ScreenX = GetDeviceCaps(hScreen, HORZRES);
		   ScreenY = GetDeviceCaps(hScreen, VERTRES);

		   // Temp. hard-code the resolution
		   int new_width = 1024, new_height = 576;
		   double v_ratio = 1.7786458333333333333333333333333;

    	   	// Set output format context to the format ffmpeg guessed
    	   	oc->oformat = fmt;

    	   	// Add the video stream using the h.264
    	   	// codec and initialize the codec.
    	   	video_st = NULL;
    	   	sInfo.width = new_width;
    	   	sInfo.height = new_height;
    	   	sInfo.pix_fmt = AV_PIX_FMT_YUV420P;
    	   	sInfo.frame_rate = 10;
    	   	sInfo.bitrate = strtol(argv[1], NULL, 0)*1000;
    	   	video_st = add_stream(oc, &video_codec, AV_CODEC_ID_H264, &sInfo);

    	   	// Now that all the parameters are set, we can open the audio and
    	   	// video codecs and allocate the necessary encode buffers.
    	   	if (video_st)
    	   		open_video(oc, video_codec, video_st);

    	   	/* open the output file, if needed */
    	   	if (!(fmt->flags & AVFMT_NOFILE)) {
    	   		ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
    	   		if (ret < 0) {
    	   			fprintf(stderr, "Could not open '%s': %s\n", filename, av_err2str(ret));
    	   			return 1;
    	   		}
    	   	}

    	   	// dump output format
    	   	av_dump_format(oc, 0, filename, 1);

    	   	// Write the stream header, if any.
    	   	ret = avformat_write_header(oc, NULL);
    	   	if (ret < 0) {
    	   		fprintf(stderr, "Error occurred when opening output file: %s\n", av_err2str(ret));
    	   		return 1;
    	   	}

    	   	// Read frames, decode, and re-encode
    	   	frame_count = 1;
    	   	frame_count2 = 1;

		   HDC hdcMem = CreateCompatibleDC (hScreen);
		   HBITMAP hBitmap = CreateCompatibleBitmap(hScreen, ScreenX, ScreenY);
		   HGDIOBJ hOld;
		   BITMAPINFOHEADER bmi = {0};
		   bmi.biSize = sizeof(BITMAPINFOHEADER);
		   bmi.biPlanes = 1;
		   bmi.biBitCount = 32;
		   bmi.biWidth = ScreenX;
		   bmi.biHeight = -ScreenY;
		   bmi.biCompression = BI_RGB;
		   bmi.biSizeImage = 0;// 3 * ScreenX * ScreenY;


		   if(ScreenData)
			   free(ScreenData);
		   ScreenData = (BYTE*)malloc(4 * ScreenX * ScreenY);
		   AVPacket pkt;

		   clock_t start_t = GetTickCount();
		   long long wait_time = 0;

		   uint64_t total_size;

    	   while(1) {
			hOld = SelectObject(hdcMem, hBitmap);
			BitBlt(hdcMem, 0, 0, ScreenX, ScreenY, hScreen, 0, 0, SRCCOPY);
			SelectObject(hdcMem, hOld);

			GetDIBits(hdcMem, hBitmap, 0, ScreenY, ScreenData, (BITMAPINFO*)&bmi, DIB_RGB_COLORS);

			//calculate the bytes needed for the output image
			int nbytes = avpicture_get_size(AV_PIX_FMT_YUV420P, new_width, new_height);

			//create buffer for the output image
			uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes);

			//create ffmpeg frame structures.  These do not allocate space for image data,
			//just the pointers and other information about the image.
			AVFrame* inpic = avcodec_alloc_frame();
			AVFrame* outpic = avcodec_alloc_frame();

			//this will set the pointers in the frame structures to the right points in
			//the input and output buffers.
			avpicture_fill((AVPicture*)inpic, ScreenData, AV_PIX_FMT_RGB32, ScreenX, ScreenY);
			avpicture_fill((AVPicture*)outpic, outbuffer, AV_PIX_FMT_YUV420P, new_width, new_height);

			//create the conversion context
			struct SwsContext *fooContext = sws_getContext(ScreenX, ScreenY, AV_PIX_FMT_RGB32, new_width, new_height, AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);

			//perform the conversion
			sws_scale(fooContext, inpic->data, inpic->linesize, 0, ScreenY, outpic->data, outpic->linesize);
			
			// Initialize a new frame
			AVFrame* newFrame = avcodec_alloc_frame();

			int size = avpicture_get_size(video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);
			uint8_t* picture_buf = av_malloc(size);

			avpicture_fill((AVPicture *) newFrame, picture_buf, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);

			// Copy only the frame content without additional fields
			av_picture_copy((AVPicture*) newFrame, (AVPicture*) outpic, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);

			// encode the image
			int got_output;
			av_init_packet(&pkt);
			pkt.data = NULL; // packet data will be allocated by the encoder
			pkt.size = 0;

			// Set the frame's pts (this prevents the warning notice 'non-strictly-monotonic PTS')
			newFrame->pts = frame_count2;

			ret = avcodec_encode_video2(video_st->codec, &pkt, newFrame, &got_output);
			if (ret < 0) {
				fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
				exit(1);
			}

			if (got_output) {
				if (video_st->codec->coded_frame->key_frame)
					pkt.flags |= AV_PKT_FLAG_KEY;
				pkt.stream_index = video_st->index;

				if (pkt.pts != AV_NOPTS_VALUE)
					pkt.pts = av_rescale_q(pkt.pts, video_st->codec->time_base, video_st->time_base);
				if (pkt.dts != AV_NOPTS_VALUE)
					pkt.dts = av_rescale_q(pkt.dts, video_st->codec->time_base, video_st->time_base);

				// Write the compressed frame to the media file.
				ret = av_interleaved_write_frame(oc, &pkt);

				fprintf(stderr, "encoded frame #%d\n", frame_count);
				frame_count++;
			} else {
				ret = 0;
			}
			if (ret != 0) {
				fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
				exit(1);
			}

			++frame_count2;

			// Free the YUV picture frame we copied from the
			// decoder to eliminate the additional fields
			// and other packets/frames used
			av_free(picture_buf);
			av_free_packet(&pkt);
			av_free(newFrame);

			//free memory
			av_free(outbuffer);
			av_free(inpic);
			av_free(outpic);


          if(frame_count == max_frames) {
			/* Write the trailer, if any. The trailer must be written before you
			 * close the CodecContexts open when you wrote the header; otherwise
			 * av_write_trailer() may try to use memory that was freed on
			 * av_codec_close().
			 */
			av_write_trailer(oc);

			/* Close the video codec (encoder) */
			if (video_st) {
				close_video(oc, video_st);
			}
			// Free the output streams.
			for (i = 0; i < oc->nb_streams; i++) {
				av_freep(&oc->streams[i]->codec);
				av_freep(&oc->streams[i]);
			}
			if (!(fmt->flags & AVFMT_NOFILE)) {
				/* Close the output file. */
				avio_close(oc->pb);
			}
			/* free the output format context */
			av_free(oc);

			ReleaseDC(GetDesktopWindow(),hScreen);
			DeleteDC(hdcMem);

			printf("\n\nPress the CONTROL key to begin streaming or ESC key to QUIT.\n\n");
			break;
          }
       }
       }
    }
    return 0;
}
Code Example #16
File: pgm.cpp Project: Saner2oo2/mythtv
int pgm_convolve_radial(AVPicture *dst, AVPicture *s1, AVPicture *s2,
                        const AVPicture *src, int srcheight,
                        const double *mask, int mask_radius)
{
    /*
     * Pad and convolve an image.
     *
     * "s1" and "s2" are caller-pre-allocated "scratch space" (avoid repeated
     * per-frame allocation/deallocation).
     *
     * Remove noise from image; smooth by convolving with a Gaussian mask. See
     * http://www.cogs.susx.ac.uk/users/davidy/teachvision/vision0.html
     *
     * Optimization for radially-symmetric masks: implement a single
     * two-dimensional convolution with two commutative single-dimensional
     * convolutions.
     */
    const int       srcwidth = src->linesize[0];
    const int       newwidth = srcwidth + 2 * mask_radius;
    const int       newheight = srcheight + 2 * mask_radius;
    int             ii, rr, cc, rr2, cc2;
    double          sum;

    /* Get a padded copy of the src image for use by the convolutions. */
    if (pgm_expand_uniform(s1, src, srcheight, mask_radius))
        return -1;

    /* copy s1 to s2 and dst */
    av_picture_copy(s2, s1, PIX_FMT_GRAY8, newwidth, newheight);
    av_picture_copy(dst, s1, PIX_FMT_GRAY8, newwidth, newheight);

    /* "s1" convolve with column vector => "s2" */
    rr2 = mask_radius + srcheight;
    cc2 = mask_radius + srcwidth;
    for (rr = mask_radius; rr < rr2; rr++)
    {
        for (cc = mask_radius; cc < cc2; cc++)
        {
            sum = 0;
            for (ii = -mask_radius; ii <= mask_radius; ii++)
            {
                sum += mask[ii + mask_radius] *
                    s1->data[0][(rr + ii) * newwidth + cc];
            }
            s2->data[0][rr * newwidth + cc] = (unsigned char)(sum + 0.5);
        }
    }

    /* "s2" convolve with row vector => "dst" */
    for (rr = mask_radius; rr < rr2; rr++)
    {
        for (cc = mask_radius; cc < cc2; cc++)
        {
            sum = 0;
            for (ii = -mask_radius; ii <= mask_radius; ii++)
            {
                sum += mask[ii + mask_radius] *
                    s2->data[0][rr * newwidth + cc + ii];
            }
            dst->data[0][rr * newwidth + cc] = (unsigned char)(sum + 0.5);
        }
    }

    return 0;
}
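
The mask argument is the 1-D profile of the radially symmetric kernel, sampled at integer offsets and normalized so that the convolution preserves overall brightness. A hypothetical helper that builds such a profile (not part of pgm.cpp; sigma is the caller's smoothing strength):

#include <math.h>

/* Hypothetical helper: fill mask[0 .. 2 * mask_radius] with a
 * normalized Gaussian for pgm_convolve_radial(). */
static void gaussian_mask(double *mask, int mask_radius, double sigma)
{
    double sum = 0.0;
    for (int ii = -mask_radius; ii <= mask_radius; ii++)
    {
        mask[ii + mask_radius] = exp(-(ii * ii) / (2.0 * sigma * sigma));
        sum += mask[ii + mask_radius];
    }
    for (int ii = 0; ii <= 2 * mask_radius; ii++)
        mask[ii] /= sum;    /* weights sum to 1, so brightness is preserved */
}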
Code Example #17
File: editor.cpp Project: kostyll/justcutit
void Editor::cut_openList()
{
	QString filename = QFileDialog::getOpenFileName(
		this,
		tr("Open cutlist"),
		QString(),
		tr("Cutlists (*.cut)")
	);
	
	if(filename.isNull())
		return;
	
	QFile file(filename);
	if(!file.open(QIODevice::ReadOnly))
	{
		QMessageBox::critical(this, tr("Error"), tr("Could not open cutlist file"));
		return;
	}
	
	if(!m_cutPoints.readFrom(&file))
	{
		QMessageBox::critical(this, tr("Error"), tr("Cutlist file is damaged"));
		return;
	}
	
	file.close();
	
	// Generate images
	int w = m_videoCodecCtx->width;
	int h = m_videoCodecCtx->height;
	
	for(int i = 0; i < m_cutPoints.count(); ++i)
	{
		CutPoint& p = m_cutPoints.at(i);
		
		int64_t stream_pts = av_rescale_q(
			p.pts,
			AV_TIME_BASE_Q,
			m_videoTimeBase_q
		);
		p.time = m_videoTimeBase * stream_pts;
		
		log_debug("CutPoint %d has stream PTS %10lld", i, stream_pts);
		
		if(p.direction == CutPoint::CUT_OUT)
			seek_timeExactBefore(p.time, false);
		else
			seek_timeExact(p.time, false);
		
		p.img = avcodec_alloc_frame();
		avpicture_fill(
			(AVPicture*)p.img,
			(uint8_t*)av_malloc(avpicture_get_size(
				PIX_FMT_YUV420P,
				w, h
			)),
			PIX_FMT_YUV420P,
			w, h
		);
		
		av_picture_copy(
			(AVPicture*)p.img,
			(AVPicture*)m_frameBuffer[m_frameIdx],
			PIX_FMT_YUV420P,
			w, h
		);
	}
	
	if(m_cutPoints.count())
	{
		QModelIndex first = m_cutPointModel.idxForNum(0);
		m_ui->cutPointView->setCurrentIndex(first);
		cut_pointActivated(first);
	}
}
Code Example #18
File: editor.cpp Project: kostyll/justcutit
void Editor::readFrame(bool needKeyFrame)
{
	AVPacket packet;
	AVFrame frame;
	int frameFinished;
	bool gotKeyFramePacket = false;
	
	avcodec_get_frame_defaults(&frame);
	
	while(av_read_frame(m_stream, &packet) == 0)
	{
		if(packet.stream_index != m_videoID)
			continue;
		
#if PACKET_DEBUG
		if(needKeyFrame)
			printf("DTS = %'10lld\n", packet.dts);
#endif
		
		if(needKeyFrame && !gotKeyFramePacket)
		{
			if(packet.flags & AV_PKT_FLAG_KEY)
				gotKeyFramePacket = true;
			else
				continue;
		}
		
		if(avcodec_decode_video2(m_videoCodecCtx, &frame, &frameFinished, &packet) < 0)
		{
			error("Could not decode packet");
			return;
		}
		
		if(!frameFinished)
			continue;
		
		if(m_videoCodecCtx->pix_fmt != PIX_FMT_YUV420P)
		{
			error("Pixel format %d is unsupported.", m_videoCodecCtx->pix_fmt);
			return;
		}
		
		if(needKeyFrame && !frame.key_frame)
		{
			av_free_packet(&packet);
			continue;
		}
		
		m_frameTimestamps[m_headFrame] = packet.dts;
		av_picture_copy(
			(AVPicture*)m_frameBuffer[m_headFrame],
			(AVPicture*)&frame,
			PIX_FMT_YUV420P,
			m_videoCodecCtx->width,
			m_videoCodecCtx->height
		);
		
		m_frameBuffer[m_headFrame]->pict_type = frame.pict_type;
		
		av_free_packet(&packet);
		
		if(!needKeyFrame)
			return;
		
		if(frame.key_frame)
		{
			log_debug("key frame seek: got keyframe at %'10lld", pts_val(packet.dts - m_timeStampStart));
			return;
		}
	}
}
Code Example #19
File: Tool.cpp Project: corefan/rabbitim
int CTool::ConvertFormat(/*[in]*/ const AVPicture &inFrame,
                         /*[in]*/ int nInWidth,
                         /*[in]*/ int nInHeight,
                         /*[in]*/ AVPixelFormat inPixelFormat,
                         /*[out]*/AVPicture &outFrame,
                         /*[in]*/ int nOutWidth,
                         /*[in]*/ int nOutHeight,
                         /*[in]*/ AVPixelFormat outPixelFormat)
{
    int nRet = 0;
    struct SwsContext* pSwsCtx = NULL;
    
    // allocate the output picture buffer
    nRet = avpicture_alloc(&outFrame, outPixelFormat, nOutWidth, nOutHeight);
    if(nRet)
    {
        LOG_MODEL_ERROR("Tool", "avpicture_alloc fail:%x", nRet);
        return nRet;
    }
    
    if(inPixelFormat == outPixelFormat
            && nInWidth == nOutWidth
            && nInHeight == nOutHeight)
    {
        av_picture_copy(&outFrame, &inFrame, inPixelFormat,
                        nInWidth, nInHeight);
        return 0;
    }
    
    // set up the image conversion context
    pSwsCtx = sws_getCachedContext (NULL,
                                    nInWidth,                // source width
                                    nInHeight,               // source height
                                    inPixelFormat,           // source format
                                    nOutWidth,               // destination width
                                    nOutHeight,              // destination height
                                    outPixelFormat,          // destination format
                                    SWS_FAST_BILINEAR,       // scaling algorithm
                                    NULL, NULL, NULL);
    if(NULL == pSwsCtx)
    {
        LOG_MODEL_ERROR("Tool", "sws_getContext false");
        avpicture_free(&outFrame);
        return -3;
    }
    
    // perform the conversion
    nRet = sws_scale(pSwsCtx,
                     inFrame.data, inFrame.linesize,
                     0, nInHeight,
                     outFrame.data, outFrame.linesize);
    if(nRet < 0)
    {
        LOG_MODEL_ERROR("Tool", "sws_scale fail:%x", nRet);
        avpicture_free(&outFrame);
    }
    else
    {
        nRet = 0;
    }
    
    sws_freeContext(pSwsCtx);
    return nRet;
}
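
On success, ConvertFormat() leaves a freshly allocated picture in outFrame (via avpicture_alloc), so ownership passes to the caller. A hypothetical call site (scaleToQvga, pFrame, and pCodecCtx are illustrative names):

// Hypothetical caller: convert a decoded frame to QVGA YUV420P, then
// release the result with avpicture_free() once consumed.
static void scaleToQvga(const AVFrame *pFrame, const AVCodecContext *pCodecCtx)
{
    AVPicture outPic;
    if (0 == CTool::ConvertFormat(*(const AVPicture *)pFrame,
                                  pCodecCtx->width, pCodecCtx->height,
                                  pCodecCtx->pix_fmt,
                                  outPic, 320, 240, AV_PIX_FMT_YUV420P))
    {
        // ... consume outPic.data / outPic.linesize ...
        avpicture_free(&outPic);
    }
}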
Code Example #20
static GstFlowReturn
gst_ffmpegmux_collected (GstCollectPads * pads, gpointer user_data)
{
  GstFFMpegMux *ffmpegmux = (GstFFMpegMux *) user_data;
  GSList *collected;
  GstFFMpegMuxPad *best_pad;
  GstClockTime best_time;
  const GstTagList *tags;

  /* open "file" (gstreamer protocol to next element) */
  if (!ffmpegmux->opened) {
    int open_flags = URL_WRONLY;

    /* we do need all streams to have started capsnego,
     * or things will go horribly wrong */
    for (collected = ffmpegmux->collect->data; collected;
        collected = g_slist_next (collected)) {
      GstFFMpegMuxPad *collect_pad = (GstFFMpegMuxPad *) collected->data;
      AVStream *st = ffmpegmux->context->streams[collect_pad->padnum];

      /* check whether the pad has successfully completed capsnego */
      if (st->codec->codec_id == CODEC_ID_NONE) {
        GST_ELEMENT_ERROR (ffmpegmux, CORE, NEGOTIATION, (NULL),
            ("no caps set on stream %d (%s)", collect_pad->padnum,
                (st->codec->codec_type == CODEC_TYPE_VIDEO) ?
                "video" : "audio"));
        return GST_FLOW_ERROR;
      }
      /* set framerate for audio */
      if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
        switch (st->codec->codec_id) {
          case CODEC_ID_PCM_S16LE:
          case CODEC_ID_PCM_S16BE:
          case CODEC_ID_PCM_U16LE:
          case CODEC_ID_PCM_U16BE:
          case CODEC_ID_PCM_S8:
          case CODEC_ID_PCM_U8:
            st->codec->frame_size = 1;
            break;
          default:
          {
            GstBuffer *buffer;

            /* FIXME : This doesn't work for RAW AUDIO...
             * in fact I'm wondering if it even works for any kind of audio... */
            buffer = gst_collect_pads_peek (ffmpegmux->collect,
                (GstCollectData *) collect_pad);
            if (buffer) {
              st->codec->frame_size =
                  st->codec->sample_rate *
                  GST_BUFFER_DURATION (buffer) / GST_SECOND;
              gst_buffer_unref (buffer);
            }
          }
        }
      }
    }

    /* tags */
    tags = gst_tag_setter_get_tag_list (GST_TAG_SETTER (ffmpegmux));
    if (tags) {
      gint i;
      gchar *s;

      /* get the interesting ones */
      if (gst_tag_list_get_string (tags, GST_TAG_TITLE, &s)) {
        strncpy (ffmpegmux->context->title, s,
            sizeof (ffmpegmux->context->title));
      }
      if (gst_tag_list_get_string (tags, GST_TAG_ARTIST, &s)) {
        strncpy (ffmpegmux->context->author, s,
            sizeof (ffmpegmux->context->author));
      }
      if (gst_tag_list_get_string (tags, GST_TAG_COPYRIGHT, &s)) {
        strncpy (ffmpegmux->context->copyright, s,
            sizeof (ffmpegmux->context->copyright));
      }
      if (gst_tag_list_get_string (tags, GST_TAG_COMMENT, &s)) {
        strncpy (ffmpegmux->context->comment, s,
            sizeof (ffmpegmux->context->comment));
      }
      if (gst_tag_list_get_string (tags, GST_TAG_ALBUM, &s)) {
        strncpy (ffmpegmux->context->album, s,
            sizeof (ffmpegmux->context->album));
      }
      if (gst_tag_list_get_string (tags, GST_TAG_GENRE, &s)) {
        strncpy (ffmpegmux->context->genre, s,
            sizeof (ffmpegmux->context->genre));
      }
      if (gst_tag_list_get_int (tags, GST_TAG_TRACK_NUMBER, &i)) {
        ffmpegmux->context->track = i;
      }
    }

    /* set the streamheader flag for gstffmpegprotocol if codec supports it */
    if (!strcmp (ffmpegmux->context->oformat->name, "flv")) {
      open_flags |= GST_FFMPEG_URL_STREAMHEADER;
    }

    if (url_fopen (&ffmpegmux->context->pb,
            ffmpegmux->context->filename, open_flags) < 0) {
      GST_ELEMENT_ERROR (ffmpegmux, LIBRARY, TOO_LAZY, (NULL),
          ("Failed to open stream context in ffmux"));
      return GST_FLOW_ERROR;
    }

    if (av_set_parameters (ffmpegmux->context, NULL) < 0) {
      GST_ELEMENT_ERROR (ffmpegmux, LIBRARY, INIT, (NULL),
          ("Failed to initialize muxer"));
      return GST_FLOW_ERROR;
    }

    /* now open the mux format */
    if (av_write_header (ffmpegmux->context) < 0) {
      GST_ELEMENT_ERROR (ffmpegmux, LIBRARY, SETTINGS, (NULL),
          ("Failed to write file header - check codec settings"));
      return GST_FLOW_ERROR;
    }

    /* we're now opened */
    ffmpegmux->opened = TRUE;

    /* flush the header so it will be used as streamheader */
    put_flush_packet (ffmpegmux->context->pb);
  }

  /* take the one with earliest timestamp,
   * and push it forward */
  best_pad = NULL;
  best_time = GST_CLOCK_TIME_NONE;
  for (collected = ffmpegmux->collect->data; collected;
      collected = g_slist_next (collected)) {
    GstFFMpegMuxPad *collect_pad = (GstFFMpegMuxPad *) collected->data;
    GstBuffer *buffer = gst_collect_pads_peek (ffmpegmux->collect,
        (GstCollectData *) collect_pad);

    /* if there's no buffer, just continue */
    if (buffer == NULL) {
      continue;
    }

    /* if we have no buffer yet, just use the first one */
    if (best_pad == NULL) {
      best_pad = collect_pad;
      best_time = GST_BUFFER_TIMESTAMP (buffer);
      goto next_pad;
    }

    /* if we do have one, only use this one if it's older */
    if (GST_BUFFER_TIMESTAMP (buffer) < best_time) {
      best_time = GST_BUFFER_TIMESTAMP (buffer);
      best_pad = collect_pad;
    }

  next_pad:
    gst_buffer_unref (buffer);

    /* Mux buffers with invalid timestamp first */
    if (!GST_CLOCK_TIME_IS_VALID (best_time))
      break;
  }

  /* now handle the buffer, or signal EOS if we have
   * no buffers left */
  if (best_pad != NULL) {
    GstBuffer *buf;
    AVPacket pkt;
    gboolean need_free = FALSE;

    /* push out current buffer */
    buf = gst_collect_pads_pop (ffmpegmux->collect,
        (GstCollectData *) best_pad);

    ffmpegmux->context->streams[best_pad->padnum]->codec->frame_number++;

    /* set time */
    pkt.pts = gst_ffmpeg_time_gst_to_ff (GST_BUFFER_TIMESTAMP (buf),
        ffmpegmux->context->streams[best_pad->padnum]->time_base);
    pkt.dts = pkt.pts;

    if (strcmp (ffmpegmux->context->oformat->name, "gif") == 0) {
      AVStream *st = ffmpegmux->context->streams[best_pad->padnum];
      AVPicture src, dst;

      need_free = TRUE;
      pkt.size = st->codec->width * st->codec->height * 3;
      pkt.data = g_malloc (pkt.size);

      dst.data[0] = pkt.data;
      dst.data[1] = NULL;
      dst.data[2] = NULL;
      dst.linesize[0] = st->codec->width * 3;

      gst_ffmpeg_avpicture_fill (&src, GST_BUFFER_DATA (buf),
          PIX_FMT_RGB24, st->codec->width, st->codec->height);

      av_picture_copy (&dst, &src, PIX_FMT_RGB24,
          st->codec->width, st->codec->height);
    } else {
      pkt.data = GST_BUFFER_DATA (buf);
      pkt.size = GST_BUFFER_SIZE (buf);
    }

    pkt.stream_index = best_pad->padnum;
    pkt.flags = 0;

    if (!GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT))
      pkt.flags |= PKT_FLAG_KEY;

    if (GST_BUFFER_DURATION_IS_VALID (buf))
      pkt.duration =
          gst_ffmpeg_time_gst_to_ff (GST_BUFFER_DURATION (buf),
          ffmpegmux->context->streams[best_pad->padnum]->time_base);
    else
      pkt.duration = 0;
    av_write_frame (ffmpegmux->context, &pkt);
    gst_buffer_unref (buf);
    if (need_free)
      g_free (pkt.data);
  } else {
    /* close down */
    av_write_trailer (ffmpegmux->context);
    ffmpegmux->opened = FALSE;
    put_flush_packet (ffmpegmux->context->pb);
    url_fclose (ffmpegmux->context->pb);
    gst_pad_push_event (ffmpegmux->srcpad, gst_event_new_eos ());
    return GST_FLOW_UNEXPECTED;
  }

  return GST_FLOW_OK;
}
Code Example #21
// run the FFThread
void FFThread::run()
{
    int                 firstrun = 1;
    AVFormatContext     *pFormatCtx=NULL;
    int                 videoStream;
    AVCodecContext      *pCodecCtx;
    AVCodec             *pCodec;
    AVPacket            packet;
    int                 frameFinished, len;
    AVFrame             *tmpFrame = avcodec_alloc_frame();

    while (True) {
        if (firstrun) {
            firstrun = 0;
        } else{            
            // wait for 10s to avoid spinning
            sleep(10);
        }
        
        // Open video file
        printf("Open %s\n", this->url);
        if (avformat_open_input(&pFormatCtx, this->url, NULL, NULL)!=0) {
            printf("Opening input '%s' failed\n", this->url);
            continue;
        }

        // Find the first video stream
        videoStream=-1;
        for (unsigned int i=0; i<pFormatCtx->nb_streams; i++) {
            if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
                videoStream=i;
                break;
            }
        }
        if( videoStream==-1) {
            printf("Finding video stream in '%s' failed\n", this->url);
            continue;
        }

        // Get a pointer to the codec context for the video stream
        pCodecCtx=pFormatCtx->streams[videoStream]->codec;

        // Find the decoder for the video stream
        pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
        if(pCodec==NULL) {
            printf("Could not find decoder for '%s'\n", this->url);
            continue;
        }

        // Open codec
        ffmutex->lock();
        if(avcodec_open2(pCodecCtx, pCodec, NULL)<0) {
            printf("Could not open codec for '%s'\n", this->url);
            ffmutex->unlock();  // don't hold the lock across the retry sleep
            continue;
        }
        ffmutex->unlock();

        // read frames into the packets
        while (stopping !=1 && av_read_frame(pFormatCtx, &packet) >= 0) {

            // Is this a packet from the video stream?
            if (packet.stream_index!=videoStream) {
                // Free the packet if not
                printf("Non video packet. Shouldn't see this...\n");
                av_free_packet(&packet);
                continue;
            }

            // grab a buffer to decode into
            FFBuffer *raw = findFreeBuffer(rawbuffers);        
            if (raw == NULL) {
                printf("Couldn't get a free buffer, skipping packet\n");
                av_free_packet(&packet);
                continue;
            }

            // Decode video frame
            len = avcodec_decode_video2(pCodecCtx, tmpFrame, &frameFinished, &packet);
            if (!frameFinished) {
                printf("Frame not finished. Shouldn't see this...\n");
                av_free_packet(&packet);
                raw->release();
                continue;
            }
            
            // Copy it into the raw frame
            avpicture_fill((AVPicture *) raw->pFrame, raw->mem,
                pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
            av_picture_copy((AVPicture *) raw->pFrame, (const AVPicture *) tmpFrame,
                pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height); 
                        
            // Fill in the output buffer
            raw->pix_fmt = pCodecCtx->pix_fmt;         
            raw->height = pCodecCtx->height;
            raw->width = pCodecCtx->width;                

            // Emit and free
            emit updateSignal(raw);        
            av_free_packet(&packet);
        }
        // Emit blank frame
        emit updateSignal(NULL);
        
        // tidy up
        ffmutex->lock();
        avcodec_close(pCodecCtx);
        avformat_close_input(&pFormatCtx);
        pCodecCtx = NULL;
        ffmutex->unlock();        
    }
}
Code Example #22
File: avframe.cpp Project: eriban/dvbcut-qt4
#ifdef HAVE_LIB_SWSCALE
avframe::avframe() : tobefreed(0),w(0),h(0),dw(0),pix_fmt(),img_convert_ctx(0)
#else
avframe::avframe() : tobefreed(0),w(0),h(0),dw(0),pix_fmt()
#endif
  {
  f=avcodec_alloc_frame();
  }

avframe::avframe(AVFrame *src, AVCodecContext *ctx) : f(0),tobefreed(0)
  {
  f=avcodec_alloc_frame();
  tobefreed=malloc(avpicture_get_size(ctx->pix_fmt, ctx->width, ctx->height));

  avpicture_fill((AVPicture *)f,
                 (u_int8_t*)tobefreed,
                 ctx->pix_fmt,ctx->width,ctx->height);

#if LIBAVCODEC_VERSION_INT >= (51 << 16)
  av_picture_copy((AVPicture *)f, (const AVPicture *) src,
                  ctx->pix_fmt, ctx->width, ctx->height);
#else
  img_copy((AVPicture *)f, (const AVPicture *) src,
           ctx->pix_fmt, ctx->width, ctx->height);
#endif

  f->pict_type              = src->pict_type;
  f->quality                = src->quality;
  f->coded_picture_number   = src->coded_picture_number;
  f->display_picture_number = src->display_picture_number;
  f->pts                    = src->pts;
  f->interlaced_frame       = src->interlaced_frame;
  f->top_field_first        = src->top_field_first;
  f->repeat_pict            = src->repeat_pict;
  f->quality                = src->quality;

  w=ctx->width;
  h=ctx->height;
  pix_fmt=ctx->pix_fmt;
  dw=w*ctx->sample_aspect_ratio.num/ctx->sample_aspect_ratio.den;
#ifdef HAVE_LIB_SWSCALE
  img_convert_ctx=sws_getContext(w, h, pix_fmt, 
                                 w, h, PIX_FMT_BGR24, SWS_BICUBIC, 
                                 NULL, NULL, NULL);
#endif
  }

avframe::~avframe()
  {
  if (tobefreed)
    free(tobefreed);
  if (f)
    av_free(f);
#ifdef HAVE_LIB_SWSCALE
  if (img_convert_ctx)
    sws_freeContext(img_convert_ctx);
#endif
  }

QImage avframe::getqimage(bool scaled, double viewscalefactor)
  {
#ifdef HAVE_LIB_SWSCALE
  if (w<=0 || h<=0 || img_convert_ctx==NULL)
#else
  if (w<=0 || h<=0)
#endif
    return QImage();

  uint8_t *rgbbuffer=(uint8_t*)malloc(avpicture_get_size(PIX_FMT_RGB24, w, h)+64);
  int headerlen=sprintf((char *) rgbbuffer, "P6\n%d %d\n255\n", w, h);

  AVFrame *avframergb=avcodec_alloc_frame();

  avpicture_fill((AVPicture*)avframergb,
                 rgbbuffer+headerlen,
                 PIX_FMT_RGB24,w,h);

#ifdef HAVE_LIB_SWSCALE
  sws_scale(img_convert_ctx, f->data, f->linesize, 0, h, 
              avframergb->data, avframergb->linesize);
#else
  img_convert((AVPicture *)avframergb, PIX_FMT_RGB24, (AVPicture*)f, pix_fmt, w, h);
#endif

  QImage im;
  im.loadFromData(rgbbuffer, headerlen+w*h*3, "PPM");

#ifdef HAVE_LIB_SWSCALE
//  im = im.swapRGB();
  im = im.rgbSwapped();
#endif

  if ((scaled && w!=dw)||(viewscalefactor!=1.0)) {
#ifdef SMOOTHSCALE
    im = im.smoothScale(int((scaled?dw:w)/viewscalefactor+0.5), int(h/viewscalefactor+0.5));
#else
//    im = im.scale(int((scaled?dw:w)/viewscalefactor+0.5), int(h/viewscalefactor+0.5));
    im = im.scaled(int((scaled?dw:w)/viewscalefactor+0.5), int(h/viewscalefactor+0.5));
#endif
    }

  free(rgbbuffer);
  av_free(avframergb);
  return (im);
  }
Code Example #23
File: nuv.c Project: 119/dropcam_for_iphone
/**
 * \brief copy frame data from buffer to AVFrame, handling stride.
 * \param f destination AVFrame
 * \param src source buffer, does not use any line-stride
 * \param width width of the video frame
 * \param height height of the video frame
 */
static void copy_frame(AVFrame *f, const uint8_t *src,
                       int width, int height) {
    AVPicture pic;
    avpicture_fill(&pic, src, PIX_FMT_YUV420P, width, height);
    av_picture_copy((AVPicture *)f, &pic, PIX_FMT_YUV420P, width, height);
}
Code Example #24
int main(int argc, char *argv[]) {
	// Decoder local variable declaration
	AVFormatContext *pFormatCtx = NULL;
	int i, videoStream;
	AVCodecContext *pCodecCtx = NULL;
	AVCodec *pCodec;
	AVFrame *pFrame;
	AVPacket packet;
	int frameFinished;

	// Encoder local variable declaration
	const char *filename;
	AVOutputFormat *fmt;
	AVFormatContext *oc;
	AVStream *video_st;
	AVCodec *video_codec;
	int ret, frame_count;
	StreamInfo sInfo;

	// Register all formats, codecs and network
	av_register_all();
	avcodec_register_all();
	avformat_network_init();

	// Open video file
	if (avformat_open_input(&pFormatCtx, "input_file.wmv", NULL, NULL) != 0)
		return -1; // Couldn't open file

	// Retrieve stream information
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
		return -1; // Couldn't find stream information

	// Dump information about file onto standard error
	av_dump_format(pFormatCtx, 0, "input_file.wmv", 0);

	// Find the first video stream
	videoStream = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			videoStream = i;
			break;
		}
	if (videoStream == -1)
		return -1; // Didn't find a video stream

	// Get a pointer to the codec context for the video stream
	pCodecCtx = pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1; // Codec not found
	}
	// Open codec (decoder)
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
		return -1; // Could not open codec

	// Allocate video frame
	pFrame = avcodec_alloc_frame();

	// Setup mux
	filename = "output_file.flv";
	
	// To stream to a media server (e.g. FMS)
	// filename = "rtmp://chineseforall.org/live/beta";
	
	fmt = av_guess_format("flv", filename, NULL);
	if (fmt == NULL) {
		printf("Could not guess format.\n");
		return -1;
	}
	// allocate the output media context
	oc = avformat_alloc_context();
	if (oc == NULL) {
		printf("could not allocate context.\n");
		return -1;
	}

	// Set output format context to the format ffmpeg guessed
	oc->oformat = fmt;

	// Add the video stream using the h.264
	// codec and initialize the codec.
	video_st = NULL;
	sInfo.width = pFormatCtx->streams[videoStream]->codec->width;
	sInfo.height = pFormatCtx->streams[videoStream]->codec->height;
	sInfo.pix_fmt = AV_PIX_FMT_YUV420P;
	sInfo.frame_rate = 30;
	sInfo.bitrate = 450*1000;
	video_st = add_stream(oc, &video_codec, AV_CODEC_ID_H264, &sInfo);

	// Now that all the parameters are set, we can open the audio and
	// video codecs and allocate the necessary encode buffers.
	if (video_st)
		open_video(oc, video_codec, video_st);

	/* open the output file, if needed */
	if (!(fmt->flags & AVFMT_NOFILE)) {
		ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
		if (ret < 0) {
			fprintf(stderr, "Could not open '%s': %s\n", filename, av_err2str(ret));
			return 1;
		}
	}

	// dump output format
	av_dump_format(oc, 0, filename, 1);

	// Write the stream header, if any.
	ret = avformat_write_header(oc, NULL);
	if (ret < 0) {
		fprintf(stderr, "Error occurred when opening output file: %s\n", av_err2str(ret));
		return 1;
	}

	// Read frames, decode, and re-encode
	frame_count = 1;
	while (av_read_frame(pFormatCtx, &packet) >= 0) {
		// Is this a packet from the video stream?
		if (packet.stream_index == videoStream) {
			// Decode video frame
			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

			// Did we get a video frame?
			if (frameFinished) {

				// Initialize a new frame
				AVFrame* newFrame = avcodec_alloc_frame();

				int size = avpicture_get_size(video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);
				uint8_t* picture_buf = av_malloc(size);

				avpicture_fill((AVPicture *) newFrame, picture_buf, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);

				// Copy only the frame content without additional fields
				av_picture_copy((AVPicture*) newFrame, (AVPicture*) pFrame, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);

				// encode the image
				AVPacket pkt;
				int got_output;
				av_init_packet(&pkt);
				pkt.data = NULL; // packet data will be allocated by the encoder
				pkt.size = 0;

				// Set the frame's pts (this prevents the warning notice 'non-strictly-monotonic PTS')
				newFrame->pts = frame_count;

				ret = avcodec_encode_video2(video_st->codec, &pkt, newFrame, &got_output);
				if (ret < 0) {
					fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
					exit(1);
				}

				if (got_output) {
					if (video_st->codec->coded_frame->key_frame)
						pkt.flags |= AV_PKT_FLAG_KEY;
					pkt.stream_index = video_st->index;

					if (pkt.pts != AV_NOPTS_VALUE)
						pkt.pts = av_rescale_q(pkt.pts, video_st->codec->time_base, video_st->time_base);
					if (pkt.dts != AV_NOPTS_VALUE)
						pkt.dts = av_rescale_q(pkt.dts, video_st->codec->time_base, video_st->time_base);

					// Write the compressed frame to the media file.
					ret = av_interleaved_write_frame(oc, &pkt);
				} else {
					ret = 0;
				}
				if (ret != 0) {
					fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
					exit(1);
				}

				fprintf(stderr, "encoded frame #%d\n", frame_count);
				frame_count++;

				// Free the YUV picture frame we copied from the
				// decoder to eliminate the additional fields
				// and other packets/frames used
				av_free(picture_buf);
				av_free_packet(&pkt);
				av_free(newFrame);
			}
		}

		// Free the packet that was allocated by av_read_frame
		av_free_packet(&packet);
	}

	/* Write the trailer, if any. The trailer must be written before you
	 * close the CodecContexts open when you wrote the header; otherwise
	 * av_write_trailer() may try to use memory that was freed on
	 * av_codec_close(). */
	av_write_trailer(oc);

	/* Close the video codec (encoder) */
	if (video_st)
		close_video(oc, video_st);
	// Free the output streams.
	for (i = 0; i < oc->nb_streams; i++) {
		av_freep(&oc->streams[i]->codec);
		av_freep(&oc->streams[i]);
	}
	if (!(fmt->flags & AVFMT_NOFILE))
		/* Close the output file. */
		avio_close(oc->pb);
	/* free the output format context */
	av_free(oc);

	// Free the YUV frame populated by the decoder
	av_free(pFrame);

	// Close the video codec (decoder)
	avcodec_close(pCodecCtx);

	// Close the input video file
	avformat_close_input(&pFormatCtx);

	return 0;
}