Example #1
int avpicture_alloc(AVPicture *picture,
                    enum AVPixelFormat pix_fmt, int width, int height)
{
    int ret = av_image_alloc(picture->data, picture->linesize,
                             width, height, pix_fmt, 1);
    if (ret < 0) {
        memset(picture, 0, sizeof(AVPicture));
        return ret;
    }

    return 0;
}
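Example #1 shows the basic contract: av_image_alloc() fills data[] and linesize[], returns the total buffer size in bytes (or a negative AVERROR), and a single av_freep() on data[0] releases every plane, since one buffer backs them all. A minimal, self-contained sketch of that pairing (the function name and parameters are illustrative, not taken from the example):

#include <libavutil/imgutils.h>
#include <libavutil/mem.h>

static int alloc_use_free(int w, int h)
{
    uint8_t *data[4];
    int linesize[4];

    /* returns the buffer size in bytes, or a negative AVERROR */
    int ret = av_image_alloc(data, linesize, w, h, AV_PIX_FMT_YUV420P, 32);
    if (ret < 0)
        return ret;             /* nothing was allocated */

    /* ... fill or read the planes through data[]/linesize[] ... */

    av_freep(&data[0]);         /* one buffer backs all planes */
    return 0;
}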
Example #2
int CH264Encoder::Encode(char* data)
{
    AVFrame* frame = avcodec_alloc_frame();
    if (!frame) {
        exit(1);
    }
    frame->format = encContext->pix_fmt;
    frame->width  = encContext->width;
    frame->height = encContext->height;

    /* the image can be allocated by any means and av_image_alloc() is
     * just the most convenient way if av_malloc() is to be used */
    int ret = av_image_alloc(frame->data, frame->linesize, encContext->width, encContext->height,
                             encContext->pix_fmt, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate raw picture buffer\n");
        exit(1);
    }

    /* copy the packed YUV420P input planes; this assumes linesize[0] == width,
     * i.e. that the width is a multiple of the 32-byte alignment above */
    memcpy(frame->data[0], data, encContext->width * encContext->height);
    memcpy(frame->data[1], data + encContext->width * encContext->height, encContext->width * encContext->height / 4);
    memcpy(frame->data[2], data + encContext->width * encContext->height * 5 / 4, encContext->width * encContext->height / 4);

    //int out_size = avcodec_encode_video(encContext, cdata, 32*1024, picture);

    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;    // packet data will be allocated by the encoder
    pkt.size = 0;

    /* note: frame is freshly allocated on every call, so this does not
     * produce increasing timestamps across frames */
    frame->pts++;

    int got_output;
    ret = avcodec_encode_video2(encContext, &pkt, frame, &got_output);

    static int kind = 0;

    if (got_output > 0)
    {
        kind++;
        write(fd_write, pkt.data, pkt.size);
        av_free_packet(&pkt);
    }

    av_freep(&frame->data[0]);
    avcodec_free_frame(&frame);
    return ret;
}
Example #3
/**
 * @brief Makes a copy of the vpx_image_t and emits it as a new VideoFrame.
 * @param vpxframe Frame to copy.
 */
void CoreVideoSource::pushFrame(const vpx_image_t* vpxframe)
{
    if (stopped)
        return;

    QMutexLocker locker(&biglock);

    std::shared_ptr<VideoFrame> vframe;
    int width = vpxframe->d_w;
    int height = vpxframe->d_h;

    if (subscribers <= 0)
        return;

    AVFrame* avframe = av_frame_alloc();
    if (!avframe)
        return;

    avframe->width = width;
    avframe->height = height;
    avframe->format = AV_PIX_FMT_YUV420P;

    int bufSize = av_image_alloc(avframe->data, avframe->linesize,
                                 width, height,
                                 static_cast<AVPixelFormat>(AV_PIX_FMT_YUV420P), VideoFrame::dataAlignment);

    if(bufSize < 0){
        av_frame_free(&avframe);
        return;
    }

    for (int i = 0; i < 3; ++i)
    {
        int dstStride = avframe->linesize[i];
        int srcStride = vpxframe->stride[i];
        int minStride = std::min(dstStride, srcStride);
        int size = (i == 0) ? height : height / 2;

        for (int j = 0; j < size; ++j)
        {
            uint8_t* dst = avframe->data[i] + dstStride * j;
            uint8_t* src = vpxframe->planes[i] + srcStride * j;
            memcpy(dst, src, minStride);
        }
    }

    vframe = std::make_shared<VideoFrame>(id, avframe, true);
    emit frameAvailable(vframe);
}
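The per-plane loop in Example #3 can also be written with av_image_copy(), which walks each plane row by row and honours both stride arrays; a hedged equivalent, reusing the names from the example and assuming the same YUV420P layout (note that av_image_copy() copies the image's byte width per row rather than min(srcStride, dstStride)):

av_image_copy(avframe->data, avframe->linesize,
              (const uint8_t **)vpxframe->planes, vpxframe->stride,
              AV_PIX_FMT_YUV420P, width, height);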
Example #4
/* TODO: set the buffer's priv member to a context structure for the whole
 * filter chain.  This will allow for a buffer pool instead of the constant
 * alloc & free cycle currently implemented. */
AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
{
    int linesize[4];
    uint8_t *data[4];
    int i;
    AVFilterBufferRef *picref = NULL;
    AVFilterPool *pool = link->pool;

    if (pool) {
        for (i = 0; i < POOL_SIZE; i++) {
            picref = pool->pic[i];
            if (picref && picref->buf->format == link->format && picref->buf->w == w && picref->buf->h == h) {
                AVFilterBuffer *pic = picref->buf;
                pool->pic[i] = NULL;
                pool->count--;
                picref->video->w = w;
                picref->video->h = h;
                picref->perms = perms | AV_PERM_READ;
                picref->format = link->format;
                pic->refcount = 1;
                memcpy(picref->data,     pic->data,     sizeof(picref->data));
                memcpy(picref->linesize, pic->linesize, sizeof(picref->linesize));
                pool->refcount++;
                return picref;
            }
        }
    } else {
        pool = link->pool = av_mallocz(sizeof(AVFilterPool));
        pool->refcount = 1;
    }

    // align by 32 bytes, which satisfies both swscale and SIMD routines
    if ((i = av_image_alloc(data, linesize, w, h, link->format, 32)) < 0)
        return NULL;

    picref = avfilter_get_video_buffer_ref_from_arrays(data, linesize,
                                                       perms, w, h, link->format);
    if (!picref) {
        av_free(data[0]);
        return NULL;
    }
    memset(data[0], 128, i);

    picref->buf->priv = pool;
    picref->buf->free = NULL;
    pool->refcount++;

    return picref;
}
Example #5
void FFMS_VideoSource::ReAdjustOutputFormat() {
	if (SWS) {
		sws_freeContext(SWS);
		SWS = nullptr;
	}

	DetectInputFormat();

	OutputFormat = FindBestPixelFormat(TargetPixelFormats, InputFormat);
	if (OutputFormat == FFMS_PIX_FMT(NONE)) {
		ResetOutputFormat();
		throw FFMS_Exception(FFMS_ERROR_SCALING, FFMS_ERROR_INVALID_ARGUMENT,
			"No suitable output format found");
	}

	OutputColorRange = handle_jpeg(&OutputFormat);
	if (OutputColorRange == AVCOL_RANGE_UNSPECIFIED)
		OutputColorRange = CodecContext->color_range;
	if (OutputColorRange == AVCOL_RANGE_UNSPECIFIED)
		OutputColorRange = InputColorRange;

	OutputColorSpace = CodecContext->colorspace;
	if (OutputColorSpace == AVCOL_SPC_UNSPECIFIED)
		OutputColorSpace = InputColorSpace;

	if (InputFormat != OutputFormat ||
		TargetWidth != CodecContext->width ||
		TargetHeight != CodecContext->height ||
		InputColorSpace != OutputColorSpace ||
		InputColorRange != OutputColorRange)
	{
		SWS = GetSwsContext(
			CodecContext->width, CodecContext->height, InputFormat, InputColorSpace, InputColorRange,
			TargetWidth, TargetHeight, OutputFormat, OutputColorSpace, OutputColorRange,
			TargetResizer);

		if (!SWS) {
			ResetOutputFormat();
			throw FFMS_Exception(FFMS_ERROR_SCALING, FFMS_ERROR_INVALID_ARGUMENT,
				"Failed to allocate SWScale context");
		}
	}

	av_freep(&SWSFrameData[0]);
	if (av_image_alloc(SWSFrameData, SWSFrameLinesize, TargetWidth, TargetHeight, OutputFormat, 4) < 0)
		throw FFMS_Exception(FFMS_ERROR_SCALING, FFMS_ERROR_ALLOCATION_FAILED,
			"Could not allocate frame with new resolution.");
}
Example #6
/* Allocate resources and write header data to the output file. */
void ffmpeg_encoder_start(const char *filename, int codec_id, int fps, int width, int height) {
    AVCodec *codec;
    int ret;
    avcodec_register_all();
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }
    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }
    c->bit_rate = 400000;
    c->width = width;
    c->height = height;
    c->time_base.num = 1;
    c->time_base.den = fps;
    c->gop_size = 10;
    c->max_b_frames = 1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;
    if (codec_id == AV_CODEC_ID_H264)
        av_opt_set(c->priv_data, "preset", "slow", 0);
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }
    file = fopen(filename, "wb");
    if (!file) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width  = c->width;
    frame->height = c->height;
    ret = av_image_alloc(frame->data, frame->linesize, frame->width, frame->height, frame->format, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate raw picture buffer\n");
        exit(1);
    }
}
Example #7
int encoder::init()
{
	m_ffvcodec = avcodec_find_encoder(AV_CODEC_ID_H264);
	if (!m_ffvcodec)  {
		printf("Couldn't find encoder H264.\n");
		return -1;
	} 
	m_ffvpCodecCtx = avcodec_alloc_context3(m_ffvcodec);

	/* put sample parameters */
	/* resolution must be a multiple of two */
	m_ffvpCodecCtx->width = m_width;
	m_ffvpCodecCtx->height = m_height;
	/* frames per second */
	m_ffvpCodecCtx->time_base.num = 1;
	m_ffvpCodecCtx->time_base.den = 25;
	m_ffvpCodecCtx->gop_size = 10; /* emit one intra frame every ten frames */
	m_ffvpCodecCtx->max_b_frames=0;
	m_ffvpCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P; 

	av_opt_set(m_ffvpCodecCtx->priv_data,"preset","ultrafast",0); 
	av_opt_set(m_ffvpCodecCtx->priv_data, "tune", "zerolatency", 0);
	av_opt_set(m_ffvpCodecCtx->priv_data, "crf", "28" , 0);
	if (avcodec_open2(m_ffvpCodecCtx, m_ffvcodec, NULL) < 0) {
		printf("Couldn't open encoder H264.\n");
		return -1; 
	} 
	for (int i = 0; i < 10; i++)
	{
		AVFrame* frame = avcodec_alloc_frame();
		frame->format = AV_PIX_FMT_YUV420P;
		frame->width  = m_width;
		frame->height = m_height;
		int dst_bufsize = av_image_alloc(frame->data, frame->linesize, m_ffvpCodecCtx->width, m_ffvpCodecCtx->height,
			m_ffvpCodecCtx->pix_fmt, 32);
		if(dst_bufsize < 0)
		{
			printf("Couldn't alloc image\n");
			return -1; 
		}
		free_fifo.push(frame);
	}

	return 0;
}
Example #8
static AVFrame *get_av_frame(AVCodecContext *codec_context) {
  int res;
  AVFrame *frame;

  frame = avcodec_alloc_frame();
  check(frame != NULL, "unable to allocate frame");
  frame->height = codec_context->height;
  frame->width = codec_context->width;
  frame->format = codec_context->pix_fmt;
  frame->pts = 0;

  res = av_image_alloc(frame->data, frame->linesize, frame->width, frame->height, frame->format, 1);
  check(res >= 0, "failed to allocate memory for video frame");

  return frame;
error:
  return NULL;
}
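With alignment 1, as used in Example #8, the planes are packed back to back, so the value av_image_alloc() returns equals the contiguous buffer size for that format and geometry. A hedged sketch of that invariant, assuming a libavutil recent enough to have av_image_get_buffer_size():

#include <libavutil/avassert.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>

static int check_packed_alloc(enum AVPixelFormat fmt, int w, int h)
{
    uint8_t *data[4];
    int linesize[4];

    int res = av_image_alloc(data, linesize, w, h, fmt, 1);
    if (res < 0)
        return res;

    /* with align == 1 the reported size matches the packed buffer size */
    av_assert0(res == av_image_get_buffer_size(fmt, w, h, 1));

    av_freep(&data[0]);
    return 0;
}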
Example #9
static void ffmpeg_encoder_init_frame(AVFrame **framep, int width, int height) {
    int ret;
    AVFrame *frame;
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width  = width;
    frame->height = height;
    ret = av_image_alloc(frame->data, frame->linesize, frame->width, frame->height, frame->format, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate raw picture buffer\n");
        exit(1);
    }
    *framep = frame;
}
Example #10
int obe_get_buffer( AVCodecContext *codec, AVFrame *pic )
{
    int w = codec->width;
    int h = codec->height;
    int stride[4];

    avcodec_align_dimensions2( codec, &w, &h, stride );

    /* Only EDGE_EMU codecs are used
     * Allocate an extra line so that SIMD can modify the entire stride for every active line */
    if( av_image_alloc( pic->data, pic->linesize, w, h + 1, codec->pix_fmt, 32 ) < 0 )
        return -1;

    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    pic->pkt_pts = codec->pkt ? codec->pkt->pts : AV_NOPTS_VALUE;

    return 0;
}
Example #11
/* TODO: set the buffer's priv member to a context structure for the whole
 * filter chain.  This will allow for a buffer pool instead of the constant
 * alloc & free cycle currently implemented. */
AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
{
    int linesize[4];
    uint8_t *data[4];
    AVFilterBufferRef *picref = NULL;

    // +2 is needed for swscaler, +16 to be SIMD-friendly
    if (av_image_alloc(data, linesize, w, h, link->format, 16) < 0)
        return NULL;

    picref = avfilter_get_video_buffer_ref_from_arrays(data, linesize,
             perms, w, h, link->format);
    if (!picref) {
        av_free(data[0]);
        return NULL;
    }

    return picref;
}
Example #12
static int get_buffer(AVCodecContext *avctx, AVFrame *pic)
{
	GstBuffer *out_buf;
	GstFlowReturn ret;
	struct obj *self = avctx->opaque;
	int width = avctx->width;
	int height = avctx->height;

	avcodec_align_dimensions(avctx, &width, &height);

	pic->linesize[0] = width;
	pic->linesize[1] = width / 2;
	pic->linesize[2] = width / 2;

	if (avctx->width == width && avctx->height == height) {
		ret = gst_pad_alloc_buffer_and_set_caps(self->srcpad, 0,
				width * height * 3 / 2,
				self->srcpad->caps, &out_buf);
		if (ret != GST_FLOW_OK)
			return 1;
		gst_buffer_ref(out_buf);
		pic->opaque = out_buf;

		pic->data[0] = out_buf->data;
		pic->data[1] = pic->data[0] + pic->linesize[0] * height;
		pic->data[2] = pic->data[1] + pic->linesize[1] * height / 2;
	} else {
		ret = av_image_alloc(pic->base, pic->linesize, width, height, avctx->pix_fmt, 1);
		if (ret < 0)
			return ret;
		for (unsigned i = 0; i < 3; i++)
			pic->data[i] = pic->base[i];
	}

	pic->type = FF_BUFFER_TYPE_USER;

	if (avctx->pkt)
		pic->pkt_pts = avctx->pkt->pts;
	else
		pic->pkt_pts = AV_NOPTS_VALUE;

	return 0;
}
Example #13
static int config_props(AVFilterLink *inlink)
{
    KerndeintContext *kerndeint = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int ret;

    kerndeint->is_packed_rgb = av_pix_fmt_desc_get(inlink->format)->flags & AV_PIX_FMT_FLAG_RGB;
    kerndeint->vsub = desc->log2_chroma_h;

    ret = av_image_alloc(kerndeint->tmp_data, kerndeint->tmp_linesize,
                         inlink->w, inlink->h, inlink->format, 16);
    if (ret < 0)
        return ret;
    memset(kerndeint->tmp_data[0], 0, ret);

    if ((ret = av_image_fill_linesizes(kerndeint->tmp_bwidth, inlink->format, inlink->w)) < 0)
        return ret;

    return 0;
}
Example #14
	/*
	* Create raw image data
	*/
	AVRaw *make_image_raw(int format, int w, int h)
	{
		AVRaw * praw = (AVRaw*)malloc(sizeof(AVRaw));

		while (praw)
		{
			int size;
			memset(praw, 0, sizeof(AVRaw));
			praw->type = RAW_IMAGE;
			praw->format = format;
			praw->width = w;
			praw->height = h;

			int ret = av_image_alloc(praw->data, praw->linesize, w, h, (AVPixelFormat)format, 32);
			if (ret < 0)
			{
				char errmsg[ERROR_BUFFER_SIZE];
				av_strerror(ret, errmsg, ERROR_BUFFER_SIZE);
				av_log(NULL, AV_LOG_FATAL, "make_image_raw av_image_alloc : %s \n", errmsg);
				break;
			}
			praw->size = ret;
			praw->recount = 1;
			return praw;
		}

		/*
		* Error cleanup
		*/
		if (praw)
		{
			ffFreeRaw(praw);
		}
		else
		{
			av_log(NULL, AV_LOG_FATAL, "make_image_raw out of memory.\n");
		}
		praw = NULL;
		return praw;
	}
Example #15
static void vcodec_defaults(struct codec_ent* dst, unsigned width,
	unsigned height, float fps, unsigned vbr)
{
	AVCodecContext* ctx   = dst->storage.video.context;

	ctx->width     = width;
	ctx->height    = height;
	ctx->bit_rate  = vbr;
	ctx->pix_fmt   = PIX_FMT_YUV420P;
	ctx->gop_size  = 12;
	ctx->time_base.den = fps;
	ctx->time_base.num = 1;

	AVFrame* pframe = av_frame_alloc();
	pframe->width = width;
	pframe->height = height;
	pframe->format = ctx->pix_fmt;
	/* note: the return value is not checked here; on failure
	 * pframe->data[] stays unset */
	av_image_alloc(pframe->data, pframe->linesize,
		width, height, ctx->pix_fmt, 32);

	dst->storage.video.pframe = pframe;
}
Example #16
std::shared_ptr<PictureHolder> MImage::create(MSize dim, int avpic_fmt, Lock_t&)
{
    auto ph = std::make_shared<PictureHolder>();
    int res = av_image_fill_linesizes(ph->stridesArray.data(), (enum AVPixelFormat)avpic_fmt, dim.width());
    if (res < 0)
        return nullptr;

    res = av_image_alloc(ph->dataSlicesArray.data(), ph->stridesArray.data(),
                         dim.width(), dim.height(),(enum AVPixelFormat)avpic_fmt, (int)sizeof(void*));
    if (res < 0)
    {
        return nullptr;
    }
    ph->onDestructCB = [](PictureHolder* picture) {
        auto lk = picture->getLocker();
        av_freep(picture->dataSlicesArray.data());
    };
    ph->dimension = dim;
    ph->format = avpic_fmt;

    pv->picture = ph;
    return ph;
}
Example #17
bool Encoder::createFrame() {
    assert(codec_context != NULL);

    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        return false;
    }
    frame->format = codec_context->pix_fmt;
    frame->width  = codec_context->width;
    frame->height = codec_context->height;

    /* the image can be allocated by any means and av_image_alloc() is
     * just the most convenient way if av_malloc() is to be used */
    int ret = av_image_alloc(frame->data, frame->linesize, codec_context->width, codec_context->height,
        codec_context->pix_fmt, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate raw picture buffer\n");
        return false;
    }

    return true;
}
Example #18
// Return the raw data (as string)
//
// #data            - Get raw data with alignment 1
// #data(alignment) - Get raw data with given alignment
VALUE video_frame_data(int argc, VALUE * argv, VALUE self) {
	VideoFrameInternal * internal;
	Data_Get_Struct(self, VideoFrameInternal, internal);

	// Extract alignment
	if (argc > 1) rb_raise(rb_eArgError, "Too many arguments");
	int alignment = (argc == 1) ? NUM2INT(argv[0]) : 1;

	// Allocate buffer
	uint8_t * dst_data[4];
	int dst_linesizes[4];

	int size = av_image_alloc(dst_data,
							  dst_linesizes,
							  internal->width,
							  internal->height,
							  internal->format,
							  alignment);

	if (size < 0) rb_raise(rb_eNoMemError, "Failed to allocate image buffer");

	// Copy image data
	av_image_copy(dst_data,
				  dst_linesizes,
				  (uint8_t const * *)internal->picture->data,
				  (int const *)internal->picture->linesize,
				  internal->format,
				  internal->width,
				  internal->height);

	// Wrap in ruby
	VALUE data = rb_str_new((char const *)dst_data[0], size);
	av_free(dst_data[0]);

	return data;
}
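The alloc/copy/free sequence above exists only to hand Ruby one packed byte string. A newer libavutil can collapse it into a single call; a hedged alternative sketch, assuming av_image_copy_to_buffer() is available and that the caller sizes dst with av_image_get_buffer_size():

/* copy straight into a caller-provided packed buffer instead of
 * av_image_alloc() + av_image_copy() + av_free(); returns the number
 * of bytes written or a negative AVERROR */
static int export_packed(uint8_t *dst, int dst_size, const AVFrame *picture,
                         enum AVPixelFormat fmt, int w, int h, int alignment)
{
    return av_image_copy_to_buffer(dst, dst_size,
                                   (const uint8_t * const *)picture->data,
                                   picture->linesize, fmt, w, h, alignment);
}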
Example #19
bool FFmpegEncoderOpen(struct FFmpegEncoder* encoder, const char* outfile) {
	AVCodec* acodec = avcodec_find_encoder_by_name(encoder->audioCodec);
	AVCodec* vcodec = avcodec_find_encoder_by_name(encoder->videoCodec);
	if ((encoder->audioCodec && !acodec) || !vcodec || !FFmpegEncoderVerifyContainer(encoder)) {
		return false;
	}

	encoder->currentAudioSample = 0;
	encoder->currentAudioFrame = 0;
	encoder->currentVideoFrame = 0;
	encoder->nextAudioPts = 0;

	AVOutputFormat* oformat = av_guess_format(encoder->containerFormat, 0, 0);
#ifndef USE_LIBAV
	avformat_alloc_output_context2(&encoder->context, oformat, 0, outfile);
#else
	encoder->context = avformat_alloc_context();
	strncpy(encoder->context->filename, outfile, sizeof(encoder->context->filename));
	encoder->context->oformat = oformat;
#endif

	if (acodec) {
		encoder->audioStream = avformat_new_stream(encoder->context, acodec);
		encoder->audio = encoder->audioStream->codec;
		encoder->audio->bit_rate = encoder->audioBitrate;
		encoder->audio->channels = 2;
		encoder->audio->channel_layout = AV_CH_LAYOUT_STEREO;
		encoder->audio->sample_rate = encoder->sampleRate;
		encoder->audio->sample_fmt = encoder->sampleFormat;
		AVDictionary* opts = 0;
		av_dict_set(&opts, "strict", "-2", 0);
		if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) {
			encoder->audio->flags |= CODEC_FLAG_GLOBAL_HEADER;
		}
		avcodec_open2(encoder->audio, acodec, &opts);
		av_dict_free(&opts);
#if LIBAVCODEC_VERSION_MAJOR >= 55
		encoder->audioFrame = av_frame_alloc();
#else
		encoder->audioFrame = avcodec_alloc_frame();
#endif
		if (!encoder->audio->frame_size) {
			encoder->audio->frame_size = 1;
		}
		encoder->audioFrame->nb_samples = encoder->audio->frame_size;
		encoder->audioFrame->format = encoder->audio->sample_fmt;
		encoder->audioFrame->pts = 0;
		encoder->resampleContext = avresample_alloc_context();
		av_opt_set_int(encoder->resampleContext, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0);
		av_opt_set_int(encoder->resampleContext, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
		av_opt_set_int(encoder->resampleContext, "in_sample_rate", PREFERRED_SAMPLE_RATE, 0);
		av_opt_set_int(encoder->resampleContext, "out_sample_rate", encoder->sampleRate, 0);
		av_opt_set_int(encoder->resampleContext, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
		av_opt_set_int(encoder->resampleContext, "out_sample_fmt", encoder->sampleFormat, 0);
		avresample_open(encoder->resampleContext);
		encoder->audioBufferSize = (encoder->audioFrame->nb_samples * PREFERRED_SAMPLE_RATE / encoder->sampleRate) * 4;
		encoder->audioBuffer = av_malloc(encoder->audioBufferSize);
		encoder->postaudioBufferSize = av_samples_get_buffer_size(0, encoder->audio->channels, encoder->audio->frame_size, encoder->audio->sample_fmt, 0);
		encoder->postaudioBuffer = av_malloc(encoder->postaudioBufferSize);
		avcodec_fill_audio_frame(encoder->audioFrame, encoder->audio->channels, encoder->audio->sample_fmt, (const uint8_t*) encoder->postaudioBuffer, encoder->postaudioBufferSize, 0);

		if (encoder->audio->codec->id == AV_CODEC_ID_AAC &&
			(!strcasecmp(encoder->containerFormat, "mp4") ||
			!strcasecmp(encoder->containerFormat, "m4v") ||
			!strcasecmp(encoder->containerFormat, "mov"))) {
			// MP4 container doesn't support the raw ADTS AAC format that the encoder spits out
			encoder->absf = av_bitstream_filter_init("aac_adtstoasc");
		}
	}

	encoder->videoStream = avformat_new_stream(encoder->context, vcodec);
	encoder->video = encoder->videoStream->codec;
	encoder->video->bit_rate = encoder->videoBitrate;
	encoder->video->width = encoder->width;
	encoder->video->height = encoder->height;
	encoder->video->time_base = (AVRational) { VIDEO_TOTAL_LENGTH, GBA_ARM7TDMI_FREQUENCY };
	encoder->video->pix_fmt = encoder->pixFormat;
	encoder->video->gop_size = 60;
	encoder->video->max_b_frames = 3;
	if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) {
		encoder->video->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}
	if (strcmp(vcodec->name, "libx264") == 0) {
		// Try to adaptively figure out when you can use a slower encoder
		if (encoder->width * encoder->height > 1000000) {
			av_opt_set(encoder->video->priv_data, "preset", "superfast", 0);
		} else if (encoder->width * encoder->height > 500000) {
			av_opt_set(encoder->video->priv_data, "preset", "veryfast", 0);
		} else {
			av_opt_set(encoder->video->priv_data, "preset", "faster", 0);
		}
		av_opt_set(encoder->video->priv_data, "tune", "zerolatency", 0);
	}
	avcodec_open2(encoder->video, vcodec, 0);
#if LIBAVCODEC_VERSION_MAJOR >= 55
	encoder->videoFrame = av_frame_alloc();
#else
	encoder->videoFrame = avcodec_alloc_frame();
#endif
	encoder->videoFrame->format = encoder->video->pix_fmt;
	encoder->videoFrame->width = encoder->video->width;
	encoder->videoFrame->height = encoder->video->height;
	encoder->videoFrame->pts = 0;
	encoder->scaleContext = sws_getContext(VIDEO_HORIZONTAL_PIXELS, VIDEO_VERTICAL_PIXELS,
#ifndef USE_LIBAV
		AV_PIX_FMT_0BGR32,
#else
		AV_PIX_FMT_BGR32,
#endif
		encoder->videoFrame->width, encoder->videoFrame->height, encoder->video->pix_fmt,
		SWS_POINT, 0, 0, 0);
	av_image_alloc(encoder->videoFrame->data, encoder->videoFrame->linesize, encoder->video->width, encoder->video->height, encoder->video->pix_fmt, 32);

	avio_open(&encoder->context->pb, outfile, AVIO_FLAG_WRITE);
	avformat_write_header(encoder->context, 0);

	return true;
}
Example #20
void FeVideoImp::preload()
{
	{
		sf::Lock l( image_swap_mutex );

		if (rgba_buffer[0])
			av_freep(&rgba_buffer[0]);

		int ret = av_image_alloc(rgba_buffer, rgba_linesize,
				disptex_width, disptex_height,
				AV_PIX_FMT_RGBA, 1);
		if (ret < 0)
		{
			std::cerr << "Error allocating image during preload" << std::endl;
			return;
		}
	}

	bool keep_going = true;
	while ( keep_going )
	{
		AVPacket *packet = pop_packet();
		if ( packet == NULL )
		{
			if ( !m_parent->end_of_file() )
				m_parent->read_packet();
			else
				keep_going = false;
		}
		else
		{
			//
			// decompress packet and put it in our frame queue
			//
			int got_frame = 0;
#if (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT( 55, 45, 0 ))
			AVFrame *raw_frame = av_frame_alloc();
			codec_ctx->refcounted_frames = 1;
#else
			AVFrame *raw_frame = avcodec_alloc_frame();
#endif

			int len = avcodec_decode_video2( codec_ctx, raw_frame,
				&got_frame, packet );

			if ( len < 0 )
			{
				std::cerr << "Error decoding video" << std::endl;
				keep_going=false;
			}

			if ( got_frame )
			{
				if ( (codec_ctx->width & 0x7) || (codec_ctx->height & 0x7) )
					sws_flags |= SWS_ACCURATE_RND;

				sws_ctx = sws_getCachedContext( NULL,
					codec_ctx->width, codec_ctx->height, codec_ctx->pix_fmt,
					disptex_width, disptex_height, AV_PIX_FMT_RGBA,
					sws_flags, NULL, NULL, NULL );

				if ( !sws_ctx )
				{
					std::cerr << "Error allocating SwsContext during preload" << std::endl;
					free_frame( raw_frame );
					free_packet( packet );
					return;
				}

				sf::Lock l( image_swap_mutex );
				sws_scale( sws_ctx, raw_frame->data, raw_frame->linesize,
							0, codec_ctx->height, rgba_buffer,
							rgba_linesize );

				display_texture->update( rgba_buffer[0] );

				keep_going = false;
			}

			free_frame( raw_frame );
			free_packet( packet );
		}
	}
}
Example #21
static int encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) {
	MscEncoderContext * mscEncoderContext;
	MscCodecContext * mscContext;
	uint32_t arithBytesEncoded;
	PutBitContext pb;
	int mb_y, mb_x, value, lastNonZero, max, arithCoderIndex = -1, keyFrame;

	// initialize arithmetic encoder registers
	initialize_arithmetic_encoder();

	mscEncoderContext = avctx->priv_data;
	mscContext = &mscEncoderContext->mscContext;

	init_put_bits(&pb, mscEncoderContext->arithBuff, mscEncoderContext->arithBuffSize);

	keyFrame = isKeyFrame(avctx->frame_number);

	if (avctx->frame_number == 0) {
		av_image_alloc(mscContext->referenceFrame->data, mscContext->referenceFrame->linesize, frame->width, frame->height, frame->format, 128);
	}

	avctx->coded_frame->reference = 0;
	avctx->coded_frame->key_frame = 1;
	avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;

	int * qmatrix = keyFrame ? mscContext->q_intra_matrix : mscContext->q_non_intra_matrix;

	for (mb_x = 0; mb_x < mscContext->mb_width; mb_x++) {
		for (mb_y = 0; mb_y < mscContext->mb_height; mb_y++) {
			get_blocks(mscEncoderContext, frame, mb_x, mb_y, mscContext->block);

			if (!keyFrame) {
				get_blocks(mscEncoderContext, mscContext->referenceFrame, mb_x, mb_y, mscContext->tmpBlock);

				diff_blocks(mscContext->block, mscContext->tmpBlock);
			}

			for (int n = 0; n < 6; ++n) {

//				if (avctx->frame_number == 1 && mb_x == 0 && mb_y == 0) {
//					av_log(avctx, AV_LOG_INFO, "Block x=%d, y=%d, n=%d\n", mb_x, mb_y, n);
//					print_debug_block(avctx, mscContext->block[n]);
//				}

				mscContext->dsp.fdct(mscContext->block[n]);

//				if (avctx->frame_number == 0 && mb_x == 0 && mb_y == 0) {
//					av_log(avctx, AV_LOG_INFO, "DCT block x=%d, y=%d, n=%d\n", mb_x, mb_y, n);
//					print_debug_block(avctx, mscContext->block[n]);
//				}

				lastNonZero = quantize(mscContext->block[n], qmatrix, &max);

				av_assert1(lastNonZero < 64);

//				if (overflow) {
//					clip_coeffs(m, m->block[n], m->block_last_index[n]);
//					av_log(avctx, AV_LOG_WARNING, "Overflow detected, frame: %d, mb_x: %d, mb_y: %d, n: %d\n",
//							avctx->frame_number, mb_x, mb_y, n);
//				}

//				if (avctx->frame_number == 0 && mb_x == 3 && mb_y == 0) {
//					av_log(avctx, AV_LOG_INFO, "DCT quantized block x=%d, y=%d, n=%d\n", mb_x, mb_y, n);
//					print_debug_block(avctx, mscContext->block[n]);
//				}

				encode_arith_symbol(&mscContext->lastZeroCodingModel, &pb, lastNonZero);

				if (lastNonZero > 0) {
					arithCoderIndex = get_arith_model_index(max);

					encode_arith_symbol(&mscContext->arithModelIndexCodingModel, &pb, arithCoderIndex);
				}

				for (int i = 0; i <= lastNonZero; ++i) {
					int arithCoderBits = i == 0 ? ARITH_CODER_BITS : arithCoderIndex;

					value = mscContext->block[n][scantab[i]] + mscContext->arithModelAddValue[arithCoderBits];

			        encode_arith_symbol(&mscContext->arithModels[arithCoderBits], &pb, value);
				}

				dequantize(mscContext->block[n], mscContext, keyFrame);
			}

			if (keyFrame) {
				idct_put_block(mscContext, mscContext->referenceFrame, mb_x, mb_y);
			}
			else {
				idct_add_block(mscContext, mscContext->referenceFrame, mb_x, mb_y);
			}
		}
	}

	emms_c();

	// flush arithmetic encoder
	flush_arithmetic_encoder(&pb);
	flush_put_bits(&pb);

	arithBytesEncoded = pb.buf_ptr - pb.buf;

	// allocate packet
	if ((value = ff_alloc_packet(avpkt, arithBytesEncoded)) < 0) {
		return value;
	}

	avpkt->flags |= AV_PKT_FLAG_KEY;

	// store encoded data
	memcpy(avpkt->data, mscEncoderContext->arithBuff, arithBytesEncoded);
	*got_packet_ptr = 1;

	return 0;
}
Example #22
int yuv420P2rgb24(unsigned char *yuv_data, unsigned char *rgb24_data, int width, int height)
{
	//Parameters
//	FILE *src_file =fopen("pre.yuv", "rb");
//	if (src_file == NULL)
//	{
//        perror("open pre.yuv error\n");
//	}
	const int src_w=1328,src_h=height; // source width is hard-coded to 1328
	enum AVPixelFormat src_pixfmt=AV_PIX_FMT_YUV420P;

	int src_bpp=av_get_bits_per_pixel(av_pix_fmt_desc_get(src_pixfmt));

//	FILE *dst_file = fopen("sintel_1280x720_rgb24.rgb", "wb");
//    if (dst_file == NULL)
//	{
//        perror("open sintel_1280x720_rgb24.rgb error\n");
//	}
	const int dst_w=width,dst_h=height;
	enum AVPixelFormat dst_pixfmt=AV_PIX_FMT_RGB24;
	int dst_bpp=av_get_bits_per_pixel(av_pix_fmt_desc_get(dst_pixfmt));

	//Structures
	uint8_t *src_data[4];
	int src_linesize[4];

	uint8_t *dst_data[4];
	int dst_linesize[4];

	int rescale_method=SWS_BICUBIC;
	struct SwsContext *img_convert_ctx;
	uint8_t *temp_buffer=(uint8_t *)malloc(src_w*src_h*src_bpp/8);

	int frame_idx=0;
	int ret=0;
	ret= av_image_alloc(src_data, src_linesize,src_w, src_h, src_pixfmt, 1);
	if (ret< 0) {
		printf( "Could not allocate source image\n");
		return -1;
	}
	ret = av_image_alloc(dst_data, dst_linesize,dst_w, dst_h, dst_pixfmt, 1);
	if (ret< 0) {
		printf( "Could not allocate destination image\n");
		return -1;
	}
	//-----------------------------
	//Init Method 1
	img_convert_ctx =sws_alloc_context();
	//Show AVOption
	//av_opt_show2(img_convert_ctx,stdout,AV_OPT_FLAG_VIDEO_PARAM,0);
	//Set Value
	av_opt_set_int(img_convert_ctx,"sws_flags",SWS_BICUBIC|SWS_PRINT_INFO,0);
	av_opt_set_int(img_convert_ctx,"srcw",src_w,0);
	av_opt_set_int(img_convert_ctx,"srch",src_h,0);
	av_opt_set_int(img_convert_ctx,"src_format",src_pixfmt,0);
	//'0' for MPEG (Y:0-235);'1' for JPEG (Y:0-255)
	av_opt_set_int(img_convert_ctx,"src_range",1,0);
	av_opt_set_int(img_convert_ctx,"dstw",dst_w,0);
	av_opt_set_int(img_convert_ctx,"dsth",dst_h,0);
	av_opt_set_int(img_convert_ctx,"dst_format",dst_pixfmt,0);
	av_opt_set_int(img_convert_ctx,"dst_range",1,0);
	sws_init_context(img_convert_ctx,NULL,NULL);

	//Init Method 2
	//img_convert_ctx = sws_getContext(src_w, src_h,src_pixfmt, dst_w, dst_h, dst_pixfmt,
	//	rescale_method, NULL, NULL, NULL);
	//-----------------------------
	/*
	//Colorspace
	ret=sws_setColorspaceDetails(img_convert_ctx,sws_getCoefficients(SWS_CS_ITU601),0,
		sws_getCoefficients(SWS_CS_ITU709),0,
		 0, 1 << 16, 1 << 16);
	if (ret==-1) {
		printf( "Colorspace not support.\n");
		return -1;
	}
	*/
//	while(1)
//	{
//
        memcpy(temp_buffer, yuv_data, src_w*src_h*src_bpp/8);
//		if (fread(temp_buffer, 1, src_w*src_h*src_bpp/8, src_file) != src_w*src_h*src_bpp/8){
//			break;
//		}

		switch(src_pixfmt){
		case AV_PIX_FMT_GRAY8:{
			memcpy(src_data[0],temp_buffer,src_w*src_h);
			break;
							  }
		case AV_PIX_FMT_YUV420P:{
			memcpy(src_data[0],temp_buffer,src_w*src_h);                    //Y
			memcpy(src_data[1],temp_buffer+src_w*src_h,src_w*src_h/4);      //U
			memcpy(src_data[2],temp_buffer+src_w*src_h*5/4,src_w*src_h/4);  //V
			break;
								}
		case AV_PIX_FMT_YUV422P:{
			memcpy(src_data[0],temp_buffer,src_w*src_h);                    //Y
			memcpy(src_data[1],temp_buffer+src_w*src_h,src_w*src_h/2);      //U
			memcpy(src_data[2],temp_buffer+src_w*src_h*3/2,src_w*src_h/2);  //V
			break;
								}
		case AV_PIX_FMT_YUV444P:{
			memcpy(src_data[0],temp_buffer,src_w*src_h);                    //Y
			memcpy(src_data[1],temp_buffer+src_w*src_h,src_w*src_h);        //U
			memcpy(src_data[2],temp_buffer+src_w*src_h*2,src_w*src_h);      //V
			break;
								}
		case AV_PIX_FMT_YUYV422:{
			memcpy(src_data[0],temp_buffer,src_w*src_h*2);                  //Packed
			break;
								}
		case AV_PIX_FMT_RGB24:{
			memcpy(src_data[0],temp_buffer,src_w*src_h*3);                  //Packed
			break;
								}
		default:{
			printf("Not Support Input Pixel Format.\n");
			break;
							  }
		}

		sws_scale(img_convert_ctx, src_data, src_linesize, 0, src_h, dst_data, dst_linesize);
		frame_idx++;

		switch(dst_pixfmt){
		case AV_PIX_FMT_GRAY8:{
		    memcpy(rgb24_data, dst_data[0], dst_w*dst_h);
			break;
							  }
		case AV_PIX_FMT_YUV420P:{
		    memcpy(rgb24_data, dst_data[0], dst_w*dst_h);rgb24_data+=dst_w*dst_h;
		    memcpy(rgb24_data, dst_data[1], dst_w*dst_h/4);rgb24_data+=dst_w*dst_h/4;
		    memcpy(rgb24_data, dst_data[2], dst_w*dst_h/4);rgb24_data+=dst_w*dst_h/4;
			break;
								}
		case AV_PIX_FMT_YUV422P:{
//			fwrite(dst_data[0],1,dst_w*dst_h,dst_file);					//Y
//			fwrite(dst_data[1],1,dst_w*dst_h/2,dst_file);				//U
//			fwrite(dst_data[2],1,dst_w*dst_h/2,dst_file);				//V
			break;
								}
		case AV_PIX_FMT_YUV444P:{
//			fwrite(dst_data[0],1,dst_w*dst_h,dst_file);                 //Y
//			fwrite(dst_data[1],1,dst_w*dst_h,dst_file);                 //U
//			fwrite(dst_data[2],1,dst_w*dst_h,dst_file);                 //V
			break;
								}
		case AV_PIX_FMT_YUYV422:{
//			fwrite(dst_data[0],1,dst_w*dst_h*2,dst_file);               //Packed
			break;
								}
		case AV_PIX_FMT_RGB24:{
		    memcpy(rgb24_data, dst_data[0], dst_w*dst_h*3);
//			fwrite(dst_data[0],1,dst_w*dst_h*3,dst_file);               //Packed
			break;
							  }
		default:{
			printf("Not Support Output Pixel Format.\n");
			break;
							}
		}
//	}

	sws_freeContext(img_convert_ctx);

	free(temp_buffer);
//	fclose(dst_file);
	av_freep(&src_data[0]);
	av_freep(&dst_data[0]);

	return 0;

}
Example #23
int grabber::init()
{
	pFormatCtx = avformat_alloc_context();
#if USE_DSHOW
	//Use dshow
	//Need to install screen-capture-recorder
	//Website: http://sourceforge.net/projects/screencapturer/
	//AVDictionary* options = NULL;
	//av_dict_set(&options,"rtbufsize","1500M",0);
	AVInputFormat *ifmt=av_find_input_format("dshow");
	if(avformat_open_input(&pFormatCtx,"video=screen-capture-recorder",ifmt,NULL)!=0){
		printf("Couldn't open input stream.\n");
		return -1;
	}
#else

	//Use gdigrab
	AVDictionary* options = NULL;
	//Set some options
	//grabbing frame rate
	//av_dict_set(&options,"framerate","5",0);
	//The distance from the left edge of the screen or desktop
	//av_dict_set(&options,"offset_x","20",0);
	//The distance from the top edge of the screen or desktop
	//av_dict_set(&options,"offset_y","40",0);
	//Video frame size. The default is to capture the full screen
	//av_dict_set(&options,"video_size","640x480",0);
	AVInputFormat *ifmt=av_find_input_format("gdigrab");
	if(avformat_open_input(&pFormatCtx,"desktop",ifmt,&options)!=0){
		printf("Couldn't open input stream.\n");
		return -1;
	}
#endif
	if(avformat_find_stream_info(pFormatCtx,NULL)<0)
	{
		printf("Couldn't find stream information.\n");
		return -1;
	}
	
	int i = 0;
	for(; i<pFormatCtx->nb_streams; i++) 
	{
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
		{
			videoindex=i;
			break;
		}
	}
	if(videoindex==-1)
	{
		printf("Didn't find a video stream.\n");
		return -1;
	}
	pCodecCtx=pFormatCtx->streams[videoindex]->codec;
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL)
	{
		printf("Codec not found.\n");
		return -1;
	}

	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
	{
		printf("Could not open codec.\n");
		return -1;
	}

	{
		for (int i=0;i<10;i++)
		{
			AVFrame* frame = av_frame_alloc();
			frame->format = pCodecCtx->pix_fmt;
			frame->width = pCodecCtx->width;
			frame->height = pCodecCtx->height;
			av_image_alloc(frame->data, frame->linesize, frame->width, frame->height, (AVPixelFormat)frame->format, 32);
			free_fifo.push(frame);
		}
	}

	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 
									/*pCodecCtx->width, pCodecCtx->height*/1280, 720, PIX_FMT_YUV420P, 
									SWS_BICUBIC, NULL, NULL, NULL); 

	pEnc = new encoder(/*pCodecCtx->width, pCodecCtx->height*/1280, 720, m_user_data, m_push_data);
	if (-1 == pEnc->init())
	{
		printf("Could not open encoder.\n");
		return -1;
	}
	pEnc->startLoop();
	return 0 ;
}
Example #24
static int config_out_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = outlink->src->inputs[0];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
    TInterlaceContext *tinterlace = ctx->priv;
    int i;

    tinterlace->vsub = desc->log2_chroma_h;
    outlink->w = inlink->w;
    outlink->h = tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD ?
        inlink->h*2 : inlink->h;
    if (tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD)
        outlink->sample_aspect_ratio = av_mul_q(inlink->sample_aspect_ratio,
                                                av_make_q(2, 1));

    if (tinterlace->mode == MODE_PAD) {
        uint8_t black[4] = { 16, 128, 128, 16 };
        int i, ret;
        if (ff_fmt_is_in(outlink->format, full_scale_yuvj_pix_fmts))
            black[0] = black[3] = 0;
        ret = av_image_alloc(tinterlace->black_data, tinterlace->black_linesize,
                             outlink->w, outlink->h, outlink->format, 1);
        if (ret < 0)
            return ret;

        /* fill black picture with black */
        for (i = 0; i < 4 && tinterlace->black_data[i]; i++) {
            int h = i == 1 || i == 2 ? FF_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h) : outlink->h;
            memset(tinterlace->black_data[i], black[i],
                   tinterlace->black_linesize[i] * h);
        }
    }
    if ((tinterlace->flags & TINTERLACE_FLAG_VLPF)
            && !(tinterlace->mode == MODE_INTERLEAVE_TOP
              || tinterlace->mode == MODE_INTERLEAVE_BOTTOM)) {
        av_log(ctx, AV_LOG_WARNING, "low_pass_filter flag ignored with mode %d\n",
                tinterlace->mode);
        tinterlace->flags &= ~TINTERLACE_FLAG_VLPF;
    }
    tinterlace->preout_time_base = inlink->time_base;
    if (tinterlace->mode == MODE_INTERLACEX2) {
        tinterlace->preout_time_base.den *= 2;
        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){2,1});
        outlink->time_base  = av_mul_q(inlink->time_base , (AVRational){1,2});
    } else if (tinterlace->mode != MODE_PAD) {
        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){1,2});
        outlink->time_base  = av_mul_q(inlink->time_base , (AVRational){2,1});
    }

    for (i = 0; i<FF_ARRAY_ELEMS(standard_tbs); i++){
        if (!av_cmp_q(standard_tbs[i], outlink->time_base))
            break;
    }
    if (i == FF_ARRAY_ELEMS(standard_tbs) ||
        (tinterlace->flags & TINTERLACE_FLAG_EXACT_TB))
        outlink->time_base = tinterlace->preout_time_base;

    if (tinterlace->flags & TINTERLACE_FLAG_VLPF) {
        tinterlace->lowpass_line = lowpass_line_c;
        if (ARCH_X86)
            ff_tinterlace_init_x86(tinterlace);
    }

    av_log(ctx, AV_LOG_VERBOSE, "mode:%d filter:%s h:%d -> h:%d\n",
           tinterlace->mode, (tinterlace->flags & TINTERLACE_FLAG_VLPF) ? "on" : "off",
           inlink->h, outlink->h);

    return 0;
}
Example #25
/*
 * Video encoding example
 */
static void video_encode_example(const char *filename, int codec_id)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int i, ret, x, y, got_output;
    FILE *f;
    AVFrame *frame;
    AVPacket pkt;
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };

    printf("Encode video file %s\n", filename);

    /* find the mpeg1 video encoder */
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* frames per second */
    c->time_base.num = 1;
    c->time_base.den = 25;
    c->gop_size = 10; /* emit one intra frame every ten frames */
    c->max_b_frames=1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;

    if(codec_id == AV_CODEC_ID_H264)
        av_opt_set(c->priv_data, "preset", "slow", 0);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width  = c->width;
    frame->height = c->height;

    /* the image can be allocated by any means and av_image_alloc() is
     * just the most convenient way if av_malloc() is to be used */
    ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
                         c->pix_fmt, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate raw picture buffer\n");
        exit(1);
    }

    /* encode 1 second of video */
    for(i=0;i<25;i++) {
        av_init_packet(&pkt);
        pkt.data = NULL;    // packet data will be allocated by the encoder
        pkt.size = 0;

        fflush(stdout);
        /* prepare a dummy image */
        /* Y */
        for(y=0;y<c->height;y++) {
            for(x=0;x<c->width;x++) {
                frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
            }
        }

        /* Cb and Cr */
        for(y=0;y<c->height/2;y++) {
            for(x=0;x<c->width/2;x++) {
                frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
                frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
            }
        }

        frame->pts = i;

        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* get the delayed frames */
    for (got_output = 1; got_output; i++) {
        fflush(stdout);

        ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* add sequence end code to have a real mpeg file */
    fwrite(endcode, 1, sizeof(endcode), f);
    fclose(f);

    avcodec_close(c);
    av_free(c);
    av_freep(&frame->data[0]);
    avcodec_free_frame(&frame);
    printf("\n");
}
Example #26
/*
 * Video encoding example
 */
static void video_encode_example(const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int i, ret, x, y, got_output;
    FILE *f;
    AVFrame *picture;
    AVPacket pkt;
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };

    printf("Video encoding\n");

    /* find the mpeg1 video encoder */
    codec = avcodec_find_encoder(AV_CODEC_ID_MPEG1VIDEO);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    picture= avcodec_alloc_frame();

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* frames per second */
    c->time_base= (AVRational){1,25};
    c->gop_size = 10; /* emit one intra frame every ten frames */
    c->max_b_frames=1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "could not open %s\n", filename);
        exit(1);
    }

    ret = av_image_alloc(picture->data, picture->linesize, c->width, c->height,
                         c->pix_fmt, 32);
    if (ret < 0) {
        fprintf(stderr, "could not alloc raw picture buffer\n");
        exit(1);
    }
    picture->format = c->pix_fmt;
    picture->width  = c->width;
    picture->height = c->height;

    /* encode 1 second of video */
    for(i=0;i<25;i++) {
        av_init_packet(&pkt);
        pkt.data = NULL;    // packet data will be allocated by the encoder
        pkt.size = 0;

        fflush(stdout);
        /* prepare a dummy image */
        /* Y */
        for(y=0;y<c->height;y++) {
            for(x=0;x<c->width;x++) {
                picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
            }
        }

        /* Cb and Cr */
        for(y=0;y<c->height/2;y++) {
            for(x=0;x<c->width/2;x++) {
                picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
                picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5;
            }
        }

        picture->pts = i;

        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, picture, &got_output);
        if (ret < 0) {
            fprintf(stderr, "error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            printf("encoding frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* get the delayed frames */
    for (got_output = 1; got_output; i++) {
        fflush(stdout);

        ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            printf("encoding frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* add sequence end code to have a real mpeg file */
    fwrite(endcode, 1, sizeof(endcode), f);
    fclose(f);

    avcodec_close(c);
    av_free(c);
    av_freep(&picture->data[0]);
    avcodec_free_frame(&picture);
    printf("\n");
}
Example #27
void FileOut::encodeFrameTest() const {
   // dummy frame
   AVFrame * frame = av_frame_alloc();
   if (!frame) {
	fprintf(stderr, "Could not allocate video frame\n");
	exit(1);
   }
   frame->format = ccv.get()->pix_fmt;
   frame->width = ccv.get()->width;
   frame->height = ccv.get()->height;

   /* the image can be allocated by any means and av_image_alloc() is
   * just the most convenient way if av_malloc() is to be used */
   int got_output; // only used by the legacy avcodec_encode_video2() path kept in comments below
   int ret = av_image_alloc(frame->data, frame->linesize, ccv.get()->width, ccv.get()->height, ccv.get()->pix_fmt, 32);
   assert(ret >= 0);


   /* encode 1 second of video */
   AVPacket p;
   int i, x, y;
   auto c = ccv.get();

   for (i = 0; i < 100; i++) {
	av_init_packet(&p);
	p.data = NULL;    // packet data will be allocated by the encoder
	p.size = 0;

	fflush(stdout);
	/* prepare a dummy image */
	/* Y */
	for (y = 0; y < c->height; y++) {
	   for (x = 0; x < c->width; x++) {
		frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
	   }
	}

	/* Cb and Cr */
	for (y = 0; y < c->height / 2; y++) {
	   for (x = 0; x < c->width / 2; x++) {
		frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
		frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
	   }
	}

	//frame->pts = i;

	ret = avcodec_send_frame(ccv.get(), frame);
	assert(ret == 0);

	ret = avcodec_receive_packet(ccv.get(), &p);

	if(ret == AVERROR(EAGAIN)) {
	   cout << "AVERROR(EAGAIN) : output is not available right now - user must try to send input" << endl;
	}else if(ret == AVERROR_EOF) {
	   cout << "AVERROR_EOF: the encoder has been fully flushed, and there will be no more output packets" << endl;
	}else if (ret == AVERROR(EINVAL)) {
	   cout << "AVERROR(EINVAL) : codec not opened, or it is an encoder" << endl;
	} else if (ret == 0) {
	   cout << "Success - got packet" << endl;
	   printf("Write frame %3d (size=%5d)\n", i, p.size);
	   fwrite(p.data, 1, p.size, f);
	   av_packet_unref(&p);
	} else {
	   cout << "Other errors" << endl;
	}
	

	/* encode the image */
	/*ret = avcodec_encode_video2(c, &p, frame, &got_output);
	if (ret < 0) {
	   fprintf(stderr, "Error encoding frame\n");
	   exit(1);
	}

	if (got_output) {
	   printf("Write frame %3d (size=%5d)\n", i, p.size);
	   fwrite(p.data, 1, p.size, f);
	   av_packet_unref(&p);
	}*/
   }


   /* get the delayed frames (a full flush would loop on
    * avcodec_receive_packet() until it returns AVERROR_EOF) */
   ret = avcodec_send_frame(ccv.get(), nullptr);
   assert(ret == 0);

   ret = avcodec_receive_packet(ccv.get(), &p);
   if (ret == 0) {
	printf("Write frame %3d (size=%5d)\n", i, p.size);
	fwrite(p.data, 1, p.size, f);
	av_packet_unref(&p);
   }

   /* get the delayed frames */
   /*for (got_output = 1; got_output; i++) {
	fflush(stdout);

	ret = avcodec_encode_video2(c, &p, NULL, &got_output);
	if (ret < 0) {
	   fprintf(stderr, "Error encoding frame\n");
	   exit(1);
	}

	if (got_output) {
	   printf("Write frame %3d (size=%5d)\n", i, p.size);
	   fwrite(p.data, 1, p.size, f);
	   av_packet_unref(&p);
	}
   }*/

   /* add sequence end code to have a real MPEG file */
   uint8_t endcode[] = { 0, 0, 1, 0xb7 };
   fwrite(endcode, 1, sizeof(endcode), f);
   fclose(f);

   avcodec_close(c);
   av_free(c);
   av_freep(&frame->data[0]);
   av_frame_free(&frame);
   printf("\n");



}
Example #28
    bool GLFFmpegCamcorder::setup()
    {
        _codec = avcodec_find_encoder(AV_CODEC_ID_MPEG2VIDEO);
        if(_codec == nullptr)
        {
            getLog().postMessage(new Message(
                'E', false,
                "Couldn't find the codec for openGL video  recording",
                "GLFFmpegCamcorder")
            );
            return false;
        }

        _context = avcodec_alloc_context3(_codec);
        _context->bit_rate = _bitRate;
        _context->width = _frameWidth;
        _context->height = _frameHeight;
        _context->time_base = (AVRational){1, _frameRate};
        _context->gop_size = 10;
        _context->max_b_frames = 1;
        _context->pix_fmt = PIX_FMT_YUV420P;


        if(avcodec_open2(_context, _codec, nullptr) < 0)
        {
            getLog().postMessage(new Message(
                'E', false,
                "Couldn't open codec",
                "GLFFmpegCamcorder")
            );
            return false;
        }


        _srcFrame = avcodec_alloc_frame();
        if(_srcFrame == nullptr)
        {
            getLog().postMessage(new Message(
                'E', false,
                "Couldn't allocate frame",
                "GLFFmpegCamcorder")
            );
            return false;
        }

        _srcFrame->format = PIX_FMT_RGB24;
        _srcFrame->width = _context->width;
        _srcFrame->height = _context->height;
        if(av_image_alloc( _srcFrame->data,   _srcFrame->linesize,
                           _context->width,   _context->height,
                           PIX_FMT_RGB24, 32) < 0)
        {
            getLog().postMessage(new Message(
                'E', false,
                "Couldn't allocate raw picture buffer",
                "GLFFmpegCamcorder")
            );
            return false;
        }


        _dstFrame = avcodec_alloc_frame();
        if(_dstFrame == nullptr)
        {
            getLog().postMessage(new Message(
                'E', false,
                "Couldn't allocate frame",
                "GLFFmpegCamcorder")
            );
            return false;
        }

        _dstFrame->format = _context->pix_fmt;
        _dstFrame->width = _context->width;
        _dstFrame->height = _context->height;
        if(av_image_alloc( _dstFrame->data,   _dstFrame->linesize,
                           _context->width,   _context->height,
                           _context->pix_fmt, 32) < 0)
        {
            getLog().postMessage(new Message(
                'E', false,
                "Couldn't allocate raw picture buffer",
                "GLFFmpegCamcorder")
            );
            return false;
        }

        _swsContext = sws_getContext(
                    _frameWidth, _frameHeight, PIX_FMT_RGB24,
                    _frameWidth, _frameHeight, PIX_FMT_YUV420P,
                    SWS_BICUBIC, nullptr, nullptr, nullptr);

        _videoStream.open(_fileName.c_str());
        if(!_videoStream.is_open())
        {
            getLog().postMessage(new Message(
                'E', false,
                "Couldn't open video file",
                "GLFFmpegCamcorder")
            );
            return false;
        }

        _frameBuff = new unsigned char[_frameWidth*_frameHeight*3];

        return true;
    }
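setup() only prepares the RGB source frame, the YUV destination frame, and the sws context; the capture path that follows is not part of this example, but it would convert one frame into the other before encoding. A hedged sketch of that step, passing in explicitly what the class keeps as _swsContext/_srcFrame/_dstFrame:

/* convert one captured packed-RGB24 frame into the YUV420P frame the
 * encoder expects; parameters mirror the members allocated in setup() */
static void convert_frame(SwsContext *swsCtx, const AVFrame *src,
                          AVFrame *dst, int height)
{
    sws_scale(swsCtx, (const uint8_t * const *)src->data, src->linesize,
              0, height, dst->data, dst->linesize);
}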
Example #29
int ff_load_image(uint8_t *data[4], int linesize[4],
                  int *w, int *h, enum AVPixelFormat *pix_fmt,
                  const char *filename, void *log_ctx)
{
    AVInputFormat *iformat = NULL;
    AVFormatContext *format_ctx = NULL;
    AVCodec *codec;
    AVCodecContext *codec_ctx;
    AVFrame *frame;
    int frame_decoded, ret = 0;
    AVPacket pkt;

    av_init_packet(&pkt);

    av_register_all();

    iformat = av_find_input_format("image2");
    if ((ret = avformat_open_input(&format_ctx, filename, iformat, NULL)) < 0) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Failed to open input file '%s'\n", filename);
        return ret;
    }

    codec_ctx = format_ctx->streams[0]->codec;
    codec = avcodec_find_decoder(codec_ctx->codec_id);
    if (!codec) {
        av_log(log_ctx, AV_LOG_ERROR, "Failed to find codec\n");
        ret = AVERROR(EINVAL);
        goto end;
    }

    if ((ret = avcodec_open2(codec_ctx, codec, NULL)) < 0) {
        av_log(log_ctx, AV_LOG_ERROR, "Failed to open codec\n");
        goto end;
    }

    if (!(frame = avcodec_alloc_frame()) ) {
        av_log(log_ctx, AV_LOG_ERROR, "Failed to alloc frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    ret = av_read_frame(format_ctx, &pkt);
    if (ret < 0) {
        av_log(log_ctx, AV_LOG_ERROR, "Failed to read frame from file\n");
        goto end;
    }

    ret = avcodec_decode_video2(codec_ctx, frame, &frame_decoded, &pkt);
    if (ret < 0 || !frame_decoded) {
        av_log(log_ctx, AV_LOG_ERROR, "Failed to decode image from file\n");
        goto end;
    }
    ret = 0;

    *w       = frame->width;
    *h       = frame->height;
    *pix_fmt = frame->format;

    if ((ret = av_image_alloc(data, linesize, *w, *h, *pix_fmt, 16)) < 0)
        goto end;
    ret = 0;

    av_image_copy(data, linesize, (const uint8_t **)frame->data, frame->linesize, *pix_fmt, *w, *h);

end:
    av_free_packet(&pkt);
    avcodec_close(codec_ctx);
    avformat_close_input(&format_ctx);
    av_freep(&frame);

    if (ret < 0)
        av_log(log_ctx, AV_LOG_ERROR, "Error loading image file '%s'\n", filename);
    return ret;
}
Example #30
static int decode(AVCodecContext * avctx, void *outdata, int *outdata_size, AVPacket *avpkt) {
	AVFrame *frame = avctx->coded_frame;
	MscDecoderContext * mscDecoderContext;
	MscCodecContext * mscContext;
	GetBitContext gb;
	int lastNonZero, value, arithCoderIndex = -1, keyFrame;

	mscDecoderContext = avctx->priv_data;
	mscContext = &mscDecoderContext->mscContext;

	if (frame->data[0]) {
		avctx->release_buffer(avctx, frame);
	}

	frame->reference = 0;
	if (avctx->get_buffer(avctx, frame) < 0) {
		av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer.\n");
		return AVERROR(ENOMEM);
	}

	keyFrame = isKeyFrame(avctx->frame_number);

	if (avctx->frame_number == 0) {
		av_image_alloc(mscContext->referenceFrame->data, mscContext->referenceFrame->linesize, frame->width, frame->height, PIX_FMT_YUV420P, 128);
	}

	if (!keyFrame) {
		av_image_copy(frame->data, frame->linesize, mscContext->referenceFrame->data,
				mscContext->referenceFrame->linesize, PIX_FMT_YUV420P, frame->width, frame->height);
	}

	frame->key_frame = 1;
	frame->pict_type = AV_PICTURE_TYPE_I;

	// init encoded data bit buffer
	init_get_bits(&gb, avpkt->data, avpkt->size * 8);

	initialize_arithmetic_decoder(&gb);

	for (int mb_x = 0; mb_x < mscContext->mb_width; mb_x++) {
		for (int mb_y = 0; mb_y < mscContext->mb_height; mb_y++) {

			for (int n = 0; n < 6; ++n) {

				mscContext->dsp.clear_block(mscContext->block[n]);

				lastNonZero = decode_arith_symbol(&mscContext->lastZeroCodingModel, &gb);

				if (lastNonZero > 0) {
					arithCoderIndex = decode_arith_symbol(&mscContext->arithModelIndexCodingModel, &gb);
				}

				for (int i = 0; i <= lastNonZero; ++i) {
					int arithCoderBits = i == 0 ? ARITH_CODER_BITS : arithCoderIndex;

					value = decode_arith_symbol(&mscContext->arithModels[arithCoderBits], &gb);

					mscContext->block[n][scantab[i]] = value - mscContext->arithModelAddValue[arithCoderBits];
				}

//				if (avctx->frame_number == 0 && mb_x == 3 && mb_y == 0) {
//					av_log(avctx, AV_LOG_INFO, "Quantized x=%d, y=%d, n=%d\n", mb_x, mb_y, n);
//					print_debug_block(avctx, mscContext->block[n]);
//				}

				dequantize(mscContext->block[n], mscContext, keyFrame);

//				if (avctx->frame_number == 0 && mb_x == 0 && mb_y == 0) {
//					av_log(avctx, AV_LOG_INFO, "Dequantized x=%d, y=%d, n=%d\n", mb_x, mb_y, n);
//					print_debug_block(avctx, mscContext->block[n]);
//				}
			}

//			if (avctx->frame_number == 0 && mb_x == 0 && mb_y == 0) {
//				av_log(avctx, AV_LOG_INFO, "IDCT x=%d, y=%d, n=%d\n", mb_x, mb_y, 0);
//				print_debug_block(avctx, mscContext->block[0]);
//			}
			if (keyFrame) {
				idct_put_block(mscContext, frame, mb_x, mb_y);
			}
			else {
				idct_add_block(mscContext, frame, mb_x, mb_y);
			}

			copy_macroblock(frame, mscContext->referenceFrame, mb_x, mb_y);
		}
	}

	emms_c();

	*outdata_size = sizeof(AVFrame);
	*(AVFrame *) outdata = *frame;

	return avpkt->size;
}