Example #1
RasterRenderPrivate::Buffer::Buffer(AVPixelFormat format, QSize size) :
format(AV_PIX_FMT_NONE), size(size)
{
	memset(data, 0, sizeof(data[0]) * 4);
	if (av_image_fill_linesizes(width, format, size.width()) < 0)
		return;
	const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);
	if (!desc || desc->flags&PIX_FMT_HWACCEL)
		return;
	int i, p[4] = { 0 };
	for (i = 0; i < 4; i++)
		p[desc->comp[i].plane] = 1;
	lines[0] = size.height();
	int n = width[0] * lines[0];
	for (i = 1; i < 4 && p[i]; i++){
		int s = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
		lines[i] = (size.height() + (1 << s) - 1) >> s;
		n += width[i] * lines[i];
	}
	data[0] = new quint8[n];
	for (i = 1; i < 4 && p[i]; i++){
		data[i] = data[i - 1] + width[i - 1] * lines[i - 1];
	}
	this->format = format;
}
Example #2
File: vf_weave.c Project: timkingh/FFmpeg
static int config_props_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    WeaveContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int ret;

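    /* weave pairs two successive input frames into one double-height output
       frame, so the output time base doubles and the frame rate halves */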
    outlink->time_base.num = inlink->time_base.num * 2;
    outlink->time_base.den = inlink->time_base.den;
    outlink->frame_rate.num = inlink->frame_rate.num;
    outlink->frame_rate.den = inlink->frame_rate.den * 2;
    outlink->w = inlink->w;
    outlink->h = inlink->h * 2;

    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    return 0;
}
Example #3
File: ws.c Project: NeeMeese/mplayer-ce
// ----------------------------------------------------------------------------------------------
//    Put 'Image' to window.
// ----------------------------------------------------------------------------------------------
void wsConvert( wsTWindow * win,unsigned char * Image,unsigned int Size )
{
  const uint8_t *src[4] = { Image, NULL, NULL, NULL };
  int src_stride[4] = { 4 * win->xImage->width, 0, 0, 0 };
  uint8_t *dst[4] = { win->ImageData, NULL, NULL, NULL };
  int dst_stride[4];
  int i;
  sws_ctx = sws_getCachedContext(sws_ctx, win->xImage->width, win->xImage->height, PIX_FMT_RGB32,
                                          win->xImage->width, win->xImage->height, out_pix_fmt,
                                          SWS_POINT, NULL, NULL, NULL);
  av_image_fill_linesizes(dst_stride, out_pix_fmt, win->xImage->width);
  sws_scale(sws_ctx, src, src_stride, 0, win->xImage->height, dst, dst_stride);
  if (!wsNonNativeOrder) return;
  switch (win->xImage->bits_per_pixel) {
    case 32:
    {
      uint32_t *d = (uint32_t *) win->ImageData;
      for (i = 0; i < win->xImage->width * win->xImage->height; i++)
        d[i] = bswap_32(d[i]);
      break;
    }
    case 16:
    case 15:
    {
      uint16_t *d = (uint16_t *) win->ImageData;
      for (i = 0; i < win->xImage->width * win->xImage->height; i++)
        d[i] = bswap_16(d[i]);
      break;
    }
  }
}
Example #4
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext   *ctx = inlink->dst;
    FieldOrderContext *s   = ctx->priv;

    return av_image_fill_linesizes(s->line_size, inlink->format, inlink->w);
}
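
All of the examples here revolve around the same call, so a minimal, self-contained sketch of what av_image_fill_linesizes() computes may help: one byte stride per plane for a given pixel format and width, with no memory allocated. The format and width below are arbitrary illustration choices, not taken from any of the projects shown.

#include <stdio.h>
#include <libavutil/imgutils.h>

int main(void)
{
    int linesize[4];

    /* For AV_PIX_FMT_YUV420P at width 1920 this fills {1920, 960, 960, 0}:
       the chroma planes are horizontally subsampled by two, and unused
       planes are left at 0. */
    if (av_image_fill_linesizes(linesize, AV_PIX_FMT_YUV420P, 1920) < 0)
        return 1;

    for (int i = 0; i < 4; i++)
        printf("plane %d: linesize %d\n", i, linesize[i]);
    return 0;
}
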
Example #5
bool ImageConverter::prepareData()
{
    DPTR_D(ImageConverter);
    if (d.fmt_out == QTAV_PIX_FMT_C(NONE) || d.w_out <=0 || d.h_out <= 0)
        return false;
    AV_ENSURE(av_image_check_size(d.w_out, d.h_out, 0, NULL), false);
    const int nb_planes = qMax(av_pix_fmt_count_planes(d.fmt_out), 0);
    d.bits.resize(nb_planes);
    d.pitchs.resize(nb_planes);
    // alignment is 16: sws in FFmpeg requires 16-byte alignment; libav 10 uses 8
    const int kAlign = 16;
    AV_ENSURE(av_image_fill_linesizes((int*)d.pitchs.constData(), d.fmt_out, kAlign > 7 ? FFALIGN(d.w_out, 8) : d.w_out), false);
    for (int i = 0; i < d.pitchs.size(); ++i)
        d.pitchs[i] = FFALIGN(d.pitchs[i], kAlign);
    int s = av_image_fill_pointers((uint8_t**)d.bits.constData(), d.fmt_out, d.h_out, NULL, d.pitchs.constData());
    if (s < 0)
        return false;
    d.data_out.resize(s + kAlign-1);
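    // reserve kAlign-1 spare bytes so the start of the buffer can be rounded
    // up to the next kAlign boundary below; with the pitches already multiples
    // of kAlign, every plane pointer then ends up kAlign-aligned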
    const int offset = (kAlign - ((uintptr_t)d.data_out.constData() & (kAlign-1))) & (kAlign-1);
    AV_ENSURE(av_image_fill_pointers((uint8_t**)d.bits.constData(), d.fmt_out, d.h_out, (uint8_t*)d.data_out.constData()+offset, d.pitchs.constData()), false);
    // TODO: special formats
    //if (desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
       //    avpriv_set_systematic_pal2((uint32_t*)pointers[1], pix_fmt);
    d.update_data = false;
    for (int i = 0; i < d.pitchs.size(); ++i) {
        Q_ASSERT(d.pitchs[i]%kAlign == 0);
        Q_ASSERT(qintptr(d.bits[i])%kAlign == 0);
    }
    return true;
}
Example #6
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    NContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int ret;

    if ((ret = av_image_fill_linesizes(s->planewidth, inlink->format, inlink->w)) < 0)
        return ret;

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->buffer = av_malloc(3 * (s->planewidth[0] + 32));
    if (!s->buffer)
        return AVERROR(ENOMEM);

    if (!strcmp(ctx->filter->name, "erosion"))
        s->filter = erosion;
    else if (!strcmp(ctx->filter->name, "dilation"))
        s->filter = dilation;
    else if (!strcmp(ctx->filter->name, "deflate"))
        s->filter = deflate;
    else if (!strcmp(ctx->filter->name, "inflate"))
        s->filter = inflate;

    return 0;
}
Example #7
static void video_frame_cksum(AVBPrint *bp, AVFrame *frame)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
    int i, y;
    uint8_t *data;
    int linesize[5] = { 0 };

    av_bprintf(bp, ", %d x %d", frame->width, frame->height);
    if (!desc) {
        av_bprintf(bp, ", unknown");
        return;
    }
    if (av_image_fill_linesizes(linesize, frame->format, frame->width) < 0)
        return;
    av_bprintf(bp, ", %s", desc->name);
    for (i = 0; linesize[i]; i++) {
        unsigned cksum = 0;
        int h = frame->height;
        if ((i == 1 || i == 2) && desc->nb_components >= 3)
            h = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);
        data = frame->data[i];
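        /* hash only the linesize[i] payload bytes of each row (the tight
           strides from av_image_fill_linesizes), but advance by the frame's
           own, possibly padded, linesize */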
        for (y = 0; y < h; y++) {
            cksum = av_adler32_update(cksum, data, linesize[i]);
            data += frame->linesize[i];
        }
        av_bprintf(bp, ", 0x%08x", cksum);
    }
}
Example #8
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s = ctx->priv;
    AVRational time_base = ctx->inputs[0]->time_base;
    AVRational frame_rate = ctx->inputs[0]->frame_rate;
    AVFilterLink *inlink = ctx->inputs[0];
    int height = ctx->inputs[0]->h;
    int width = ctx->inputs[0]->w;
    FFFrameSyncIn *in;
    int i, ret;

    for (i = 1; i < s->nb_inputs; i++) {
        if (ctx->inputs[i]->h != height || ctx->inputs[i]->w != width) {
            av_log(ctx, AV_LOG_ERROR, "Input %d size (%dx%d) does not match input %d size (%dx%d).\n", i, ctx->inputs[i]->w, ctx->inputs[i]->h, 0, width, height);
            return AVERROR(EINVAL);
        }
    }

    s->desc = av_pix_fmt_desc_get(outlink->format);
    if (!s->desc)
        return AVERROR_BUG;
    s->nb_planes = av_pix_fmt_count_planes(outlink->format);
    s->depth = s->desc->comp[0].depth;

    outlink->w          = width;
    outlink->h          = height;
    outlink->time_base  = time_base;
    outlink->frame_rate = frame_rate;

    if ((ret = ff_framesync_init(&s->fs, ctx, s->nb_inputs)) < 0)
        return ret;

    in = s->fs.in;
    s->fs.opaque = s;
    s->fs.on_event = process_frame;

    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h);
    s->height[0] = s->height[3] = inlink->h;

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterLink *inlink = ctx->inputs[i];

        in[i].time_base = inlink->time_base;
        in[i].sync   = 1;
        in[i].before = EXT_STOP;
        in[i].after  = (s->duration == 1 || (s->duration == 2 && i == 0)) ? EXT_STOP : EXT_INFINITY;
    }

    return ff_framesync_configure(&s->fs);
}
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFilterLink *outlink = ctx->outputs[0];
    StackContext *s = fs->opaque;
    AVFrame **in = s->frames;
    AVFrame *out;
    int i, p, ret, offset[4] = { 0 };

    for (i = 0; i < s->nb_inputs; i++) {
        if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0)
            return ret;
    }

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterLink *inlink = ctx->inputs[i];
        int linesize[4];
        int height[4];

        if ((ret = av_image_fill_linesizes(linesize, inlink->format, inlink->w)) < 0) {
            av_frame_free(&out);
            return ret;
        }

        height[1] = height[2] = FF_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h);
        height[0] = height[3] = inlink->h;

        for (p = 0; p < s->nb_planes; p++) {
            if (s->is_vertical) {
                av_image_copy_plane(out->data[p] + offset[p] * out->linesize[p],
                                    out->linesize[p],
                                    in[i]->data[p],
                                    in[i]->linesize[p],
                                    linesize[p], height[p]);
                offset[p] += height[p];
            } else {
                av_image_copy_plane(out->data[p] + offset[p],
                                    out->linesize[p],
                                    in[i]->data[p],
                                    in[i]->linesize[p],
                                    linesize[p], height[p]);
                offset[p] += linesize[p];
            }
        }
    }

    return ff_filter_frame(outlink, out);
}
Example #10
static int get_video_buffer(AVFrame *frame, int align)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
    int ret, i;

    if (!desc)
        return AVERROR(EINVAL);

    if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
        return ret;

    if (!frame->linesize[0]) {
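        /* probe increasingly aligned widths (i = 1, 2, 4, ...) until the
           resulting linesize[0] is itself a multiple of 'align' */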
        for(i=1; i<=align; i+=i) {
            ret = av_image_fill_linesizes(frame->linesize, frame->format,
                                          FFALIGN(frame->width, i));
            if (ret < 0)
                return ret;
            if (!(frame->linesize[0] & (align-1)))
                break;
        }

        for (i = 0; i < 4 && frame->linesize[i]; i++)
            frame->linesize[i] = FFALIGN(frame->linesize[i], align);
    }

    for (i = 0; i < 4 && frame->linesize[i]; i++) {
        int h = FFALIGN(frame->height, 32);
        if (i == 1 || i == 2)
            h = FF_CEIL_RSHIFT(h, desc->log2_chroma_h);

        frame->buf[i] = av_buffer_alloc(frame->linesize[i] * h + 16 + 16/*STRIDE_ALIGN*/ - 1);
        if (!frame->buf[i])
            goto fail;

        frame->data[i] = frame->buf[i]->data;
    }
    if (desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL) {
        av_buffer_unref(&frame->buf[1]);
        frame->buf[1] = av_buffer_alloc(1024);
        if (!frame->buf[1])
            goto fail;
        frame->data[1] = frame->buf[1]->data;
    }

    frame->extended_data = frame->data;

    return 0;
fail:
    av_frame_unref(frame);
    return AVERROR(ENOMEM);
}
Example #11
bool VideoTransform::init( const Frame& srcFrame, const Frame& dstFrame )
{
	const VideoFrame& src = static_cast<const VideoFrame&>( srcFrame );
	const VideoFrame& dst = static_cast<const VideoFrame&>( dstFrame );

	const AVPixelFormat srcPixelFormat = src.desc().getPixelFormat();
	const AVPixelFormat dstPixelFormat = dst.desc().getPixelFormat();

	_imageConvertContext = sws_getCachedContext( _imageConvertContext,
		src.desc().getWidth(), src.desc().getHeight(), srcPixelFormat,
		dst.desc().getWidth(), dst.desc().getHeight(), dstPixelFormat,
		SWS_POINT, NULL, NULL, NULL);

	if( ! _imageConvertContext )
	{
		throw std::runtime_error( "unable to create color convert context" );
	}

	av_image_fill_linesizes( &_srcLineSize[0], srcPixelFormat, src.desc().getWidth() );
	av_image_fill_linesizes( &_dstLineSize[0], dstPixelFormat, dst.desc().getWidth() );

	const char* srcPixFmt;
	srcPixFmt = av_get_pix_fmt_name( srcPixelFormat );
	const char* dstPixFmt;
	dstPixFmt = av_get_pix_fmt_name( dstPixelFormat );

	LOG_DEBUG( "Video conversion from " << ( srcPixFmt != NULL ? srcPixFmt : "None" ) << " to " << ( dstPixFmt != NULL ? dstPixFmt : "None" ) )

	LOG_DEBUG( "Source, width = " << src.desc().getWidth() )
	LOG_DEBUG( "Source, height = " << src.desc().getHeight() )

	LOG_DEBUG( "Destination, width = " << dst.desc().getWidth() )
	LOG_DEBUG( "Destination, height = " << dst.desc().getHeight() )

	return true;
}
Example #12
static int config_input(AVFilterLink *inlink)
{
    FieldHintContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int ret;

    if ((ret = av_image_fill_linesizes(s->planewidth, inlink->format, inlink->w)) < 0)
        return ret;

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    return 0;
}
Example #13
static int decode_packet(display_stream *st, int width, int height)
{
    int ret = 0, got_frame = 0;

    ret = avcodec_decode_video2(st->context, st->frame, &got_frame, &st->packet);
    if (ret < 0) {
        fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
        return ret;
    }

	if(st->sws_ctx == NULL) {
		fprintf(stderr, "new stream. reinit sws\n");

		st->buffer = (uint8_t *)av_malloc(st->frame->linesize[0] * height * 4);
		st->dst_data[0] = st->buffer;
		st->dst_data[1] = NULL;
		st->dst_data[2] = NULL;
		st->dst_data[3] = NULL;

		st->sws_ctx = sws_getContext(
				st->frame->linesize[0],
				height, 
				PIX_FMT_YUV420P,
				st->frame->linesize[0],
				height, 
				PIX_FMT_RGBA, 
				SWS_BICUBIC, NULL, NULL, NULL);

		av_image_fill_linesizes(st->dst_linesize, PIX_FMT_RGBA, st->frame->linesize[0]);
	}

	if (got_frame) {
		sws_scale(st->sws_ctx, 
				(const uint8_t * const*)st->frame->data, st->frame->linesize, 0, height,
				st->dst_data, st->dst_linesize);

		uint8_t *pp = st->dst_data[0];
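		/* the RGBA strides were filled with width = st->frame->linesize[0],
		   so dst_linesize[0] == 4 * st->frame->linesize[0]; step the source
		   pointer by that stride and copy only the width * 4 visible bytes */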
		int  j = 0;
		for(j = 0; j < height; j++) {
			memcpy(st->out_frame + j * width * 4, pp + j * st->frame->linesize[0] * 4, width * 4);
		}
	}

	return 0;
}
Example #14
static int config_props(AVFilterLink *inlink)
{
    KerndeintContext *kerndeint = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int ret;

    kerndeint->is_packed_rgb = desc->flags & AV_PIX_FMT_FLAG_RGB;
    kerndeint->vsub = desc->log2_chroma_h;

    ret = av_image_alloc(kerndeint->tmp_data, kerndeint->tmp_linesize,
                         inlink->w, inlink->h, inlink->format, 16);
    if (ret < 0)
        return ret;
    memset(kerndeint->tmp_data[0], 0, ret);

    if ((ret = av_image_fill_linesizes(kerndeint->tmp_bwidth, inlink->format, inlink->w)) < 0)
        return ret;

    return 0;
}
Example #15
static int config_input(AVFilterLink *inlink)
{
    ConvolutionContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int ret;

    if ((ret = av_image_fill_linesizes(s->planewidth, inlink->format, inlink->w)) < 0)
        return ret;

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    s->bstride = s->planewidth[0] + 32;
    s->buffer = av_malloc(5 * s->bstride);
    if (!s->buffer)
        return AVERROR(ENOMEM);

    return 0;
}
Example #16
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AmplifyContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int ret;

    s->desc = av_pix_fmt_desc_get(outlink->format);
    if (!s->desc)
        return AVERROR_BUG;
    s->nb_planes = av_pix_fmt_count_planes(outlink->format);
    s->depth = s->desc->comp[0].depth;

    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h);
    s->height[0] = s->height[3] = inlink->h;

    return 0;
}
Example #17
File: mimage.cpp Project: zog-camera/zmbaq
std::shared_ptr<PictureHolder> MImage::create(MSize dim, int avpic_fmt, Lock_t&)
{
    auto ph = std::make_shared<PictureHolder>();
    int res = av_image_fill_linesizes(ph->stridesArray.data(), (enum AVPixelFormat)avpic_fmt, dim.width());
    if (res < 0)
        return nullptr;

    res = av_image_alloc(ph->dataSlicesArray.data(), ph->stridesArray.data(),
                         dim.width(), dim.height(),(enum AVPixelFormat)avpic_fmt, (int)sizeof(void*));
    if (res < 0)
    {
        return nullptr;
    }
    ph->onDestructCB = [](PictureHolder* picture) {
        auto lk = picture->getLocker();
        av_freep(picture->dataSlicesArray.data());
    };
    ph->dimension = dim;
    ph->format = avpic_fmt;

    pv->picture = ph;
    return ph;
}
Example #18
int SubtitleDecoder::subtitle_thread(void *arg)
{
    VideoState *is = (VideoState*)arg;
    Frame *sp;
    int got_subtitle;
    double pts;
    int i;

    for (;;) {
        if (!(sp = is->subpq().peek_writable()))
            return 0;

        if ((got_subtitle = is->subdec().decode_frame((AVFrame*) &sp->sub)) < 0)
            break;

        pts = 0;

        if (got_subtitle && sp->sub.format == 0) {
            if (sp->sub.pts != AV_NOPTS_VALUE)
                pts = sp->sub.pts / (double)AV_TIME_BASE;
            sp->pts = pts;
            sp->serial = is->subdec().pkt_serial;

            for (i = 0; i < sp->sub.num_rects; i++)
            {
                int in_w = sp->sub.rects[i]->w;
                int in_h = sp->sub.rects[i]->h;
                int subw = is->subdec().avctx->width  ? is->subdec().avctx->width  : is->decoders.viddec_width;
                int subh = is->subdec().avctx->height ? is->subdec().avctx->height : is->decoders.viddec_height;
                int out_w = is->decoders.viddec_width  ? in_w * is->decoders.viddec_width  / subw : in_w;
                int out_h = is->decoders.viddec_height ? in_h * is->decoders.viddec_height / subh : in_h;
                AVPicture newpic;

                // cannot use avpicture_alloc() here, as it is not compatible with avsubtitle_free()
                av_image_fill_linesizes(newpic.linesize, AV_PIX_FMT_YUVA420P, out_w);
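                // Y (data[0]) and alpha (data[3]) planes are full height;
                // the chroma planes (data[1], data[2]) are vertically
                // subsampled, hence (out_h + 1) / 2 rows each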
                newpic.data[0] = (uint8_t*) av_malloc(newpic.linesize[0] * out_h);
                newpic.data[3] = (uint8_t*) av_malloc(newpic.linesize[3] * out_h);
                newpic.data[1] = (uint8_t*) av_malloc(newpic.linesize[1] * ((out_h+1)/2));
                newpic.data[2] = (uint8_t*) av_malloc(newpic.linesize[2] * ((out_h+1)/2));

                is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
                    in_w, in_h, AV_PIX_FMT_PAL8, out_w, out_h,
                    AV_PIX_FMT_YUVA420P, sws_flags, NULL, NULL, NULL);
                if (!is->sub_convert_ctx || !newpic.data[0] || !newpic.data[3] ||
                    !newpic.data[1] || !newpic.data[2]
                ) {
                    av_log(NULL, AV_LOG_FATAL, "Cannot initialize the sub conversion context\n");
                    exit(1);
                }
                sws_scale(is->sub_convert_ctx,
                          (const uint8_t* const*)sp->sub.rects[i]->pict.data, sp->sub.rects[i]->pict.linesize,
                          0, in_h, newpic.data, newpic.linesize);

                av_free(sp->sub.rects[i]->pict.data[0]);
                av_free(sp->sub.rects[i]->pict.data[1]);
                sp->sub.rects[i]->pict = newpic;
                sp->sub.rects[i]->w = out_w;
                sp->sub.rects[i]->h = out_h;
                sp->sub.rects[i]->x = sp->sub.rects[i]->x * out_w / in_w;
                sp->sub.rects[i]->y = sp->sub.rects[i]->y * out_h / in_h;
            }

            /* now we can update the picture count */
            is->subpq().push();
        } else if (got_subtitle) {
            avsubtitle_free(&sp->sub);
        }
    }
    return 0;
}
Example #19
int
FFmpegH264Decoder::AllocateYUV420PVideoBuffer(AVCodecContext* aCodecContext,
                                              AVFrame* aFrame)
{
  // Older versions of ffmpeg require that edges be allocated *around* the
  // actual image.
  int edgeWidth = avcodec_get_edge_width();
  int decodeWidth = aCodecContext->width + edgeWidth * 2;
  int decodeHeight = aCodecContext->height + edgeWidth * 2;

  // Align width and height to possibly speed up decode.
  int stride_align[AV_NUM_DATA_POINTERS];
  avcodec_align_dimensions2(aCodecContext, &decodeWidth, &decodeHeight,
                            stride_align);

  // Get strides for each plane.
  av_image_fill_linesizes(aFrame->linesize, aCodecContext->pix_fmt,
                          decodeWidth);

  // Let FFmpeg set up its YUV plane pointers and tell us how much memory we
  // need.
  // Note that we're passing |nullptr| here as the base address as we haven't
  // allocated our image yet. We will adjust |aFrame->data| below.
  size_t allocSize =
    av_image_fill_pointers(aFrame->data, aCodecContext->pix_fmt, decodeHeight,
                           nullptr /* base address */, aFrame->linesize);

  nsRefPtr<Image> image =
    mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  PlanarYCbCrImage* ycbcr = reinterpret_cast<PlanarYCbCrImage*>(image.get());
  uint8_t* buffer = ycbcr->AllocateAndGetNewBuffer(allocSize);

  if (!buffer) {
    NS_WARNING("Failed to allocate buffer for FFmpeg video decoding");
    return -1;
  }

  // Now that we've allocated our image, we can add its address to the offsets
  // set by |av_image_fill_pointers| above. We also have to add |edgeWidth|
  // pixels of padding here.
  for (uint32_t i = 0; i < AV_NUM_DATA_POINTERS; i++) {
    // The C planes are half the resolution of the Y plane, so we need to halve
    // the edge width here.
    uint32_t planeEdgeWidth = edgeWidth / (i ? 2 : 1);

    // Add buffer offset, plus a horizontal bar |edgeWidth| pixels high at the
    // top of the frame, plus |edgeWidth| pixels from the left of the frame.
    aFrame->data[i] += reinterpret_cast<ptrdiff_t>(
      buffer + planeEdgeWidth * aFrame->linesize[i] + planeEdgeWidth);
  }

  // Unused, but needs to be non-zero to keep ffmpeg happy.
  aFrame->type = GECKO_FRAME_TYPE;

  aFrame->extended_data = aFrame->data;
  aFrame->width = aCodecContext->width;
  aFrame->height = aCodecContext->height;

  mozilla::layers::PlanarYCbCrData data;
  PlanarYCbCrDataFromAVFrame(data, aFrame);
  ycbcr->SetDataNoCopy(data);

  mCurrentImage.swap(image);

  return 0;
}
Example #20
static int config_input(AVFilterLink *inlink)
{
    double x0, x1, x2, x3, x4, x5, x6, x7, q;
    AVFilterContext *ctx = inlink->dst;
    PerspectiveContext *s = ctx->priv;
    double (*ref)[2] = s->ref;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    double values[VAR_VARS_NB] = { [VAR_W] = inlink->w, [VAR_H] = inlink->h };
    int h = inlink->h;
    int w = inlink->w;
    int x, y, i, j, ret;

    for (i = 0; i < 4; i++) {
        for (j = 0; j < 2; j++) {
            if (!s->expr_str[i][j])
                return AVERROR(EINVAL);
            ret = av_expr_parse_and_eval(&s->ref[i][j], s->expr_str[i][j],
                                         var_names, &values[0],
                                         NULL, NULL, NULL, NULL,
                                         0, 0, ctx);
            if (ret < 0)
                return ret;
        }
    }

    s->hsub = desc->log2_chroma_w;
    s->vsub = desc->log2_chroma_h;
    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->height[1] = s->height[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->height[0] = s->height[3] = inlink->h;

    s->pv = av_realloc_f(s->pv, w * h, 2 * sizeof(*s->pv));
    if (!s->pv)
        return AVERROR(ENOMEM);

    x6 = ((ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0]) *
          (ref[2][1] - ref[3][1]) -
         ( ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1]) *
          (ref[2][0] - ref[3][0])) * h;
    x7 = ((ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1]) *
          (ref[1][0] - ref[3][0]) -
         ( ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0]) *
          (ref[1][1] - ref[3][1])) * w;
    q =  ( ref[1][0] - ref[3][0]) * (ref[2][1] - ref[3][1]) -
         ( ref[2][0] - ref[3][0]) * (ref[1][1] - ref[3][1]);

    x0 = q * (ref[1][0] - ref[0][0]) * h + x6 * ref[1][0];
    x1 = q * (ref[2][0] - ref[0][0]) * w + x7 * ref[2][0];
    x2 = q *  ref[0][0] * w * h;
    x3 = q * (ref[1][1] - ref[0][1]) * h + x6 * ref[1][1];
    x4 = q * (ref[2][1] - ref[0][1]) * w + x7 * ref[2][1];
    x5 = q *  ref[0][1] * w * h;

    for (y = 0; y < h; y++){
        for (x = 0; x < w; x++){
            int u, v;

            u = (int)floor(SUB_PIXELS * (x0 * x + x1 * y + x2) /
                                        (x6 * x + x7 * y + q * w * h) + 0.5);
            v = (int)floor(SUB_PIXELS * (x3 * x + x4 * y + x5) /
                                        (x6 * x + x7 * y + q * w * h) + 0.5);

            s->pv[x + y * w][0] = u;
            s->pv[x + y * w][1] = v;
        }
    }

    for (i = 0; i < SUB_PIXELS; i++){
        double d = i / (double)SUB_PIXELS;
        double temp[4];
        double sum = 0;

        for (j = 0; j < 4; j++)
            temp[j] = get_coeff(j - d - 1);

        for (j = 0; j < 4; j++)
            sum += temp[j];

        for (j = 0; j < 4; j++)
            s->coeff[i][j] = (int)floor((1 << COEFF_BITS) * temp[j] / sum + 0.5);
    }

    return 0;
}
Example #21
int
FFmpegH264Decoder<LIBAV_VER>::AllocateYUV420PVideoBuffer(
  AVCodecContext* aCodecContext, AVFrame* aFrame)
{
  bool needAlign = aCodecContext->codec->capabilities & CODEC_CAP_DR1;
  int edgeWidth =  needAlign ? avcodec_get_edge_width() : 0;
  int decodeWidth = aCodecContext->width + edgeWidth * 2;
  // Make sure the decodeWidth is a multiple of 32, so a UV plane stride will be
  // a multiple of 16. FFmpeg uses SSE2 accelerated code to copy a frame line by
  // line.
  decodeWidth = (decodeWidth + 31) & ~31;
  int decodeHeight = aCodecContext->height + edgeWidth * 2;

  if (needAlign) {
    // Align width and height to account for CODEC_FLAG_EMU_EDGE.
    int stride_align[AV_NUM_DATA_POINTERS];
    avcodec_align_dimensions2(aCodecContext, &decodeWidth, &decodeHeight,
                              stride_align);
  }

  // Get strides for each plane.
  av_image_fill_linesizes(aFrame->linesize, aCodecContext->pix_fmt,
                          decodeWidth);

  // Let FFmpeg set up its YUV plane pointers and tell us how much memory we
  // need.
  // Note that we're passing |nullptr| here as the base address as we haven't
  // allocated our image yet. We will adjust |aFrame->data| below.
  size_t allocSize =
    av_image_fill_pointers(aFrame->data, aCodecContext->pix_fmt, decodeHeight,
                           nullptr /* base address */, aFrame->linesize);

  nsRefPtr<Image> image =
    mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image.get());
  uint8_t* buffer = ycbcr->AllocateAndGetNewBuffer(allocSize + 64);
  // FFmpeg requires a 16/32 bytes-aligned buffer, align it on 64 to be safe
  buffer = reinterpret_cast<uint8_t*>((reinterpret_cast<uintptr_t>(buffer) + 63) & ~63);

  if (!buffer) {
    NS_WARNING("Failed to allocate buffer for FFmpeg video decoding");
    return -1;
  }

  // Now that we've allocated our image, we can add its address to the offsets
  // set by |av_image_fill_pointers| above. We also have to add |edgeWidth|
  // pixels of padding here.
  for (uint32_t i = 0; i < AV_NUM_DATA_POINTERS; i++) {
    // The C planes are half the resolution of the Y plane, so we need to halve
    // the edge width here.
    uint32_t planeEdgeWidth = edgeWidth / (i ? 2 : 1);

    // Add buffer offset, plus a horizontal bar |edgeWidth| pixels high at the
    // top of the frame, plus |edgeWidth| pixels from the left of the frame.
    aFrame->data[i] += reinterpret_cast<ptrdiff_t>(
      buffer + planeEdgeWidth * aFrame->linesize[i] + planeEdgeWidth);
  }

  // Unused, but needs to be non-zero to keep ffmpeg happy.
  aFrame->type = GECKO_FRAME_TYPE;

  aFrame->extended_data = aFrame->data;
  aFrame->width = aCodecContext->width;
  aFrame->height = aCodecContext->height;

  aFrame->opaque = static_cast<void*>(image.forget().take());

  return 0;
}
Example #22
int32_t CEncoder::Initialize()
{
	if (g_enc_opt.m_VideoDisable == 0)
	{
		sprintf(video_codec_name, "%ls", g_enc_opt.m_VideoCodec);
		video_bit_rate = g_enc_opt.m_VideoBitrate;
		video_width = g_enc_opt.m_VideoWidth;
		video_height = g_enc_opt.m_VideoHeight;
	}

	if (g_enc_opt.m_AudioDisable == 0)
	{
		sprintf(audio_codec_name, "%ls", g_enc_opt.m_AudioCodec);
		audio_bit_rate = g_enc_opt.m_AudioBitrate;
		audio_sample_rate = g_enc_opt.m_AudioSampleRate;
		audio_channels = g_enc_opt.m_AudioChannels;
	}

	check_options();

	if (enc_open() != 0) return -1;

	if (InterlockedCompareExchange(&g_enc_opt.m_EncInfo, 1, 0) == 0)
	{
		if (g_enc_opt.m_VideoDisable == 0)
		{
			switch(frame_pix_fmt)
			{
			case AV_PIX_FMT_BGR0:
				g_enc_opt.m_EncPixFormat = AV_PIX_FMT_BGRA;
				break;

			default:
				g_enc_opt.m_EncPixFormat = frame_pix_fmt;
				break;
			}
			
			g_enc_opt.m_EncVideoPlanes = av_pix_fmt_count_planes(frame_pix_fmt);
			av_image_fill_linesizes(g_enc_opt.m_EncVideoStride, frame_pix_fmt, g_enc_opt.m_VideoWidth);

			const AVPixFmtDescriptor *dsc = av_pix_fmt_desc_get(frame_pix_fmt);
			g_enc_opt.m_EncVideoHeight[0] = g_enc_opt.m_VideoHeight;
			g_enc_opt.m_EncVideoChromaH = dsc->log2_chroma_h;
			for(int i = 1; i < g_enc_opt.m_EncVideoPlanes; i++)
			{
				g_enc_opt.m_EncVideoHeight[i] = g_enc_opt.m_VideoHeight >> dsc->log2_chroma_h;
			}
		}
		else
		{
			g_enc_opt.m_EncPixFormat = 0;
		}
		if (g_enc_opt.m_AudioDisable == 0)
		{
			g_enc_opt.m_EncAudioFormat = audio_sample_fmt;
			g_enc_opt.m_EncAudioSampleRate = audio_sample_rate;
			g_enc_opt.m_EncAudioChannels = audio_channels;
			g_enc_opt.m_EncAudioChannelLayout = audio_channel_layout;
			if (audio_channels > 1)
			{
				g_enc_opt.m_EncAudioIsPlanar = av_sample_fmt_is_planar(audio_sample_fmt);
			}
			else
			{
				g_enc_opt.m_EncAudioIsPlanar = 0;
			}

			if (g_enc_opt.m_EncAudioIsPlanar)
			{
				g_enc_opt.m_EncAudioPacketBytes = av_get_bytes_per_sample(audio_sample_fmt);
			}
			else
			{
				g_enc_opt.m_EncAudioPacketBytes = av_get_bytes_per_sample(audio_sample_fmt) * g_enc_opt.m_EncAudioChannels;
			}
		}
		else
		{
			g_enc_opt.m_EncAudioFormat = 0;
		}
	}

	return 0;
}
Example #23
File: decode.c Project: Rodeo314/tim-libav
static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
{
    FramePool *pool = avctx->internal->pool;
    int i, ret;

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO: {
        uint8_t *data[4];
        int linesize[4];
        int size[4] = { 0 };
        int w = frame->width;
        int h = frame->height;
        int tmpsize, unaligned;

        if (pool->format == frame->format &&
            pool->width == frame->width && pool->height == frame->height)
            return 0;

        avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);

        do {
            // NOTE: do not align linesizes individually, this breaks e.g. assumptions
            // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
            av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
            // increase alignment of w for next try (rhs gives the lowest bit set in w)
            w += w & ~(w - 1);

            unaligned = 0;
            for (i = 0; i < 4; i++)
                unaligned |= linesize[i] % pool->stride_align[i];
        } while (unaligned);

        tmpsize = av_image_fill_pointers(data, avctx->pix_fmt, h,
                                         NULL, linesize);
        if (tmpsize < 0)
            return -1;

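        /* each plane's buffer size is the gap to the next plane's pointer;
           the last plane takes whatever remains of the total size */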
        for (i = 0; i < 3 && data[i + 1]; i++)
            size[i] = data[i + 1] - data[i];
        size[i] = tmpsize - (data[i] - data[0]);

        for (i = 0; i < 4; i++) {
            av_buffer_pool_uninit(&pool->pools[i]);
            pool->linesize[i] = linesize[i];
            if (size[i]) {
                pool->pools[i] = av_buffer_pool_init(size[i] + 16, NULL);
                if (!pool->pools[i]) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
            }
        }
        pool->format = frame->format;
        pool->width  = frame->width;
        pool->height = frame->height;

        break;
        }
    case AVMEDIA_TYPE_AUDIO: {
        int ch     = av_get_channel_layout_nb_channels(frame->channel_layout);
        int planar = av_sample_fmt_is_planar(frame->format);
        int planes = planar ? ch : 1;

        if (pool->format == frame->format && pool->planes == planes &&
            pool->channels == ch && frame->nb_samples == pool->samples)
            return 0;

        av_buffer_pool_uninit(&pool->pools[0]);
        ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
                                         frame->nb_samples, frame->format, 0);
        if (ret < 0)
            goto fail;

        pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
        if (!pool->pools[0]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        pool->format     = frame->format;
        pool->planes     = planes;
        pool->channels   = ch;
        pool->samples = frame->nb_samples;
        break;
        }
    default: av_assert0(0);
    }
    return 0;
fail:
    for (i = 0; i < 4; i++)
        av_buffer_pool_uninit(&pool->pools[i]);
    pool->format = -1;
    pool->planes = pool->channels = pool->samples = 0;
    pool->width  = pool->height = 0;
    return ret;
}
Example #24
/* reads a complete image as is into image buffer */
static image *pngRead(skin_t *skin, const char *fname)
{
    int i;
    guiImage bmp;
    image *bf;
    char *filename = NULL;
    FILE *fp;

    if(!stricmp(fname, "NULL")) return 0;

    /* look for the file first as fname itself, then as skindir\fname.png */
    if(!(fp = fopen(fname, "rb")))
    {
        filename = calloc(1, strlen(skin->skindir) + strlen(fname) + 6);
        sprintf(filename, "%s\\%s.png", skin->skindir, fname);
        if(!(fp = fopen(filename, "rb")))
        {
            mp_msg(MSGT_GPLAYER, MSGL_ERR, "[png] cannot find image %s\n", filename);
            free(filename);
            return 0;
        }
    }
    fclose(fp);

    for (i=0; i < skin->imagecount; i++)
        if(!strcmp(fname, skin->images[i]->name))
        {
#ifdef DEBUG
            mp_msg(MSGT_GPLAYER, MSGL_DBG4, "[png] skinfile %s already exists\n", fname);
#endif
            free(filename);
            return skin->images[i];
        }
    (skin->imagecount)++;
    skin->images = realloc(skin->images, sizeof(image *) * skin->imagecount);
    bf = skin->images[(skin->imagecount) - 1] = calloc(1, sizeof(image));
    bf->name = strdup(fname);
    bpRead(filename ? filename : fname, &bmp);
    free(filename);
    bf->width = bmp.Width; bf->height = bmp.Height;

    bf->size = bf->width * bf->height * skin->desktopbpp / 8;
    if (skin->desktopbpp == 32)
      bf->data = bmp.Image;
    else {
      const uint8_t *src[4] = { bmp.Image, NULL, NULL, NULL};
      int src_stride[4] = { 4 * bmp.Width, 0, 0, 0 };
      uint8_t *dst[4] = { NULL, NULL, NULL, NULL };
      int dst_stride[4];
      enum PixelFormat out_pix_fmt;
      struct SwsContext *sws;
      if      (skin->desktopbpp == 16) out_pix_fmt = PIX_FMT_RGB555;
      else if (skin->desktopbpp == 24) out_pix_fmt = PIX_FMT_RGB24;
      av_image_fill_linesizes(dst_stride, out_pix_fmt, bmp.Width);
      sws = sws_getContext(bmp.Width, bmp.Height, PIX_FMT_RGB32,
                           bmp.Width, bmp.Height, out_pix_fmt,
                           SWS_POINT, NULL, NULL, NULL);
      bf->data = malloc(bf->size);
      dst[0] = bf->data;
      sws_scale(sws, src, src_stride, 0, bmp.Height, dst, dst_stride);
      sws_freeContext(sws);
      free(bmp.Image);
    }
    return bf;
}
Example #25
FFVideoFramePool *ff_video_frame_pool_init(AVBufferRef* (*alloc)(int size),
                                           int width,
                                           int height,
                                           enum AVPixelFormat format,
                                           int align)
{
    int i, ret;
    FFVideoFramePool *pool;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);

    if (!desc)
        return NULL;

    pool = av_mallocz(sizeof(FFVideoFramePool));
    if (!pool)
        return NULL;

    pool->width = width;
    pool->height = height;
    pool->format = format;
    pool->align = align;

    if ((ret = av_image_check_size(width, height, 0, NULL)) < 0) {
        goto fail;
    }

    if (!pool->linesize[0]) {
        for(i = 1; i <= align; i += i) {
            ret = av_image_fill_linesizes(pool->linesize, pool->format,
                                          FFALIGN(pool->width, i));
            if (ret < 0) {
                goto fail;
            }
            if (!(pool->linesize[0] & (pool->align - 1)))
                break;
        }

        for (i = 0; i < 4 && pool->linesize[i]; i++) {
            pool->linesize[i] = FFALIGN(pool->linesize[i], pool->align);
        }
    }

    for (i = 0; i < 4 && pool->linesize[i]; i++) {
        int h = FFALIGN(pool->height, 32);
        if (i == 1 || i == 2)
            h = FF_CEIL_RSHIFT(h, desc->log2_chroma_h);

        pool->pools[i] = av_buffer_pool_init(pool->linesize[i] * h + 16 + 16 - 1,
                                             alloc);
        if (!pool->pools[i])
            goto fail;
    }

    if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
        desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL) {
        pool->pools[1] = av_buffer_pool_init(AVPALETTE_SIZE, alloc);
        if (!pool->pools[1])
            goto fail;
    }

    return pool;

fail:
    ff_video_frame_pool_uninit(&pool);
    return NULL;
}