Code Example #1
File: ImageConverter.cpp Project: TanNgocDo/QtAV
bool ImageConverter::prepareData()
{
    DPTR_D(ImageConverter);
    if (d.fmt_out == QTAV_PIX_FMT_C(NONE) || d.w_out <= 0 || d.h_out <= 0)
        return false;
    AV_ENSURE(av_image_check_size(d.w_out, d.h_out, 0, NULL), false);
    const int nb_planes = qMax(av_pix_fmt_count_planes(d.fmt_out), 0);
    d.bits.resize(nb_planes);
    d.pitchs.resize(nb_planes);
    // Alignment is 16: sws in FFmpeg uses 16, libav 10 uses 8.
    const int kAlign = 16;
    AV_ENSURE(av_image_fill_linesizes((int*)d.pitchs.constData(), d.fmt_out, kAlign > 7 ? FFALIGN(d.w_out, 8) : d.w_out), false);
    for (int i = 0; i < d.pitchs.size(); ++i)
        d.pitchs[i] = FFALIGN(d.pitchs[i], kAlign);
    int s = av_image_fill_pointers((uint8_t**)d.bits.constData(), d.fmt_out, d.h_out, NULL, d.pitchs.constData());
    if (s < 0)
        return false;
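    // Over-allocate by kAlign-1 bytes so the start of the buffer can be
    // rounded up to the next kAlign-byte boundary below.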
    d.data_out.resize(s + kAlign-1);
    const int offset = (kAlign - ((uintptr_t)d.data_out.constData() & (kAlign-1))) & (kAlign-1);
    AV_ENSURE(av_image_fill_pointers((uint8_t**)d.bits.constData(), d.fmt_out, d.h_out, (uint8_t*)d.data_out.constData()+offset, d.pitchs.constData()), false);
    // TODO: special formats
    //if (desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
    //    avpriv_set_systematic_pal2((uint32_t*)pointers[1], pix_fmt);
    d.update_data = false;
    for (int i = 0; i < d.pitchs.size(); ++i) {
        Q_ASSERT(d.pitchs[i]%kAlign == 0);
        Q_ASSERT(qintptr(d.bits[i])%kAlign == 0);
    }
    return true;
}
Code Example #2
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    MaskedMergeContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int vsub, hsub;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    hsub = desc->log2_chroma_w;
    vsub = desc->log2_chroma_h;
    s->height[1] = s->height[2] = FF_CEIL_RSHIFT(inlink->h, vsub);
    s->height[0] = s->height[3] = inlink->h;
    s->width[1]  = s->width[2]  = FF_CEIL_RSHIFT(inlink->w, hsub);
    s->width[0]  = s->width[3]  = inlink->w;

    s->depth = desc->comp[0].depth;
    s->max = 1 << s->depth;
    s->half = s->max / 2;

    if (desc->comp[0].depth == 8)
        s->maskedmerge = maskedmerge8;
    else
        s->maskedmerge = maskedmerge16;

    return 0;
}
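Note: most of the config_input()/config_output() examples below repeat the plane-size arithmetic above: planes 1 and 2 are subsampled chroma, while planes 0 and 3 (luma and alpha) keep the full frame size. FF_CEIL_RSHIFT is the older internal spelling of the public AV_CEIL_RSHIFT macro, a right shift that rounds up (effectively (a + (1 << b) - 1) >> b), so odd luma dimensions still cover the last chroma sample: for yuv420p, a height of 1081 gives AV_CEIL_RSHIFT(1081, 1) = 541 chroma rows. A minimal sketch of the shared logic, with an illustrative helper name that does not appear in FFmpeg:

/* plane_sizes() is a hypothetical helper; the pattern itself is taken
 * from the filters in this list. */
#include <libavutil/common.h>
#include <libavutil/pixdesc.h>

static void plane_sizes(enum AVPixelFormat fmt, int w, int h,
                        int width[4], int height[4])
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
    /* AV_CEIL_RSHIFT(a, b) == (a + (1 << b) - 1) >> b */
    width[1]  = width[2]  = AV_CEIL_RSHIFT(w, desc->log2_chroma_w);
    width[0]  = width[3]  = w;
    height[1] = height[2] = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);
    height[0] = height[3] = h;
}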
Code Example #3
File: vf_midequalizer.c Project: DeHackEd/FFmpeg
static int config_input0(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    MidEqualizerContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int vsub, hsub;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    hsub = desc->log2_chroma_w;
    vsub = desc->log2_chroma_h;

    s->height[0][0] = s->height[0][3] = inlink->h;
    s->width[0][0]  = s->width[0][3]  = inlink->w;
    s->height[0][1] = s->height[0][2] = AV_CEIL_RSHIFT(inlink->h, vsub);
    s->width[0][1]  = s->width[0][2]  = AV_CEIL_RSHIFT(inlink->w, hsub);

    s->histogram_size = 1 << desc->comp[0].depth;

    s->histogram[0] = av_calloc(s->histogram_size, sizeof(float));
    s->histogram[1] = av_calloc(s->histogram_size, sizeof(float));
    s->cchange      = av_calloc(s->histogram_size, sizeof(unsigned));
    if (!s->histogram[0] || !s->histogram[1] || !s->cchange)
        return AVERROR(ENOMEM);

    if (s->histogram_size == 256) {
        s->midequalizer = midequalizer8;
    } else {
        s->midequalizer = midequalizer16;
    }

    return 0;
}
Code Example #4
File: vf_hysteresis.c Project: DeHackEd/FFmpeg
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    HysteresisContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int vsub, hsub;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    hsub = desc->log2_chroma_w;
    vsub = desc->log2_chroma_h;
    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
    s->height[0] = s->height[3] = inlink->h;
    s->width[1]  = s->width[2]  = AV_CEIL_RSHIFT(inlink->w, hsub);
    s->width[0]  = s->width[3]  = inlink->w;

    s->depth = desc->comp[0].depth;

    if (desc->comp[0].depth == 8)
        s->hysteresis = hysteresis8;
    else
        s->hysteresis = hysteresis16;

    s->map = av_calloc(inlink->w, inlink->h * sizeof (*s->map));
    if (!s->map)
        return AVERROR(ENOMEM);

    s->xy = av_calloc(inlink->w, inlink->h * sizeof(*s->xy));
    if (!s->xy)
        return AVERROR(ENOMEM);

    return 0;
}
Code Example #5
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    RemapContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->nb_components = desc->nb_components;

    if (desc->comp[0].depth == 8) {
        if (s->nb_planes > 1 || s->nb_components == 1) {
            s->remap = remap_planar;
        } else {
            s->remap = remap_packed;
        }
    } else {
        if (s->nb_planes > 1 || s->nb_components == 1) {
            s->remap = remap_planar16;
        } else {
            s->remap = remap_packed16;
        }
    }

    s->step = av_get_padded_bits_per_pixel(desc) >> 3;
    return 0;
}
Code Example #6
File: vf_neighbor.c Project: 0day-ci/FFmpeg
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    NContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int ret;

    if ((ret = av_image_fill_linesizes(s->planewidth, inlink->format, inlink->w)) < 0)
        return ret;

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->buffer = av_malloc(3 * (s->planewidth[0] + 32));
    if (!s->buffer)
        return AVERROR(ENOMEM);

    if (!strcmp(ctx->filter->name, "erosion"))
        s->filter = erosion;
    else if (!strcmp(ctx->filter->name, "dilation"))
        s->filter = dilation;
    else if (!strcmp(ctx->filter->name, "deflate"))
        s->filter = deflate;
    else if (!strcmp(ctx->filter->name, "inflate"))
        s->filter = inflate;

    return 0;
}
Code Example #7
File: vf_displace.c Project: DeHackEd/FFmpeg
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    DisplaceContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int vsub, hsub;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->nb_components = desc->nb_components;

    if (s->nb_planes > 1 || s->nb_components == 1)
        s->displace = displace_planar;
    else
        s->displace = displace_packed;

    if (!(desc->flags & AV_PIX_FMT_FLAG_RGB)) {
        s->blank[1] = s->blank[2] = 128;
        s->blank[0] = 16;
    }

    s->step = av_get_padded_bits_per_pixel(desc) >> 3;
    hsub = desc->log2_chroma_w;
    vsub = desc->log2_chroma_h;
    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
    s->height[0] = s->height[3] = inlink->h;
    s->width[1]  = s->width[2]  = AV_CEIL_RSHIFT(inlink->w, hsub);
    s->width[0]  = s->width[3]  = inlink->w;

    return 0;
}
Code Example #8
File: vf_weave.c Project: timkingh/FFmpeg
static int config_props_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    WeaveContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int ret;

    outlink->time_base.num = inlink->time_base.num * 2;
    outlink->time_base.den = inlink->time_base.den;
    outlink->frame_rate.num = inlink->frame_rate.num;
    outlink->frame_rate.den = inlink->frame_rate.den * 2;
    outlink->w = inlink->w;
    outlink->h = inlink->h * 2;

    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    return 0;
}
Code Example #9
File: vf_convolution.c Project: LongChair/FFmpeg
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ConvolutionContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int p;

    s->depth = desc->comp[0].depth;

    s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0] = s->planewidth[3] = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->nb_threads = ff_filter_get_nb_threads(ctx);
    s->bptrs = av_calloc(s->nb_threads, sizeof(*s->bptrs));
    if (!s->bptrs)
        return AVERROR(ENOMEM);

    s->bstride = s->planewidth[0] + 64;
    s->bpc = (s->depth + 7) / 8;
    s->buffer = av_malloc_array(7 * s->bstride * s->nb_threads, s->bpc);
    if (!s->buffer)
        return AVERROR(ENOMEM);

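    /* Slice the shared scratch buffer into one 7-line region per thread,
     * each line s->bstride samples of s->bpc bytes. */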
    for (p = 0; p < s->nb_threads; p++) {
        s->bptrs[p] = s->buffer + 7 * s->bstride * s->bpc * p;
    }

    if (!strcmp(ctx->filter->name, "convolution")) {
        if (s->depth > 8) {
            for (p = 0; p < s->nb_planes; p++) {
                if (s->size[p] == 3)
                    s->filter[p] = filter16_3x3;
                else if (s->size[p] == 5)
                    s->filter[p] = filter16_5x5;
                else if (s->size[p] == 7)
                    s->filter[p] = filter16_7x7;
            }
        }
    } else if (!strcmp(ctx->filter->name, "prewitt")) {
        if (s->depth > 8)
            for (p = 0; p < s->nb_planes; p++)
                s->filter[p] = filter16_prewitt;
    } else if (!strcmp(ctx->filter->name, "roberts")) {
        if (s->depth > 8)
            for (p = 0; p < s->nb_planes; p++)
                s->filter[p] = filter16_roberts;
    } else if (!strcmp(ctx->filter->name, "sobel")) {
        if (s->depth > 8)
            for (p = 0; p < s->nb_planes; p++)
                s->filter[p] = filter16_sobel;
    }

    return 0;
}
Code Example #10
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    StackContext *s = ctx->priv;
    AVRational time_base = ctx->inputs[0]->time_base;
    AVRational frame_rate = ctx->inputs[0]->frame_rate;
    int height = ctx->inputs[0]->h;
    int width = ctx->inputs[0]->w;
    FFFrameSyncIn *in;
    int i, ret;

    if (s->is_vertical) {
        for (i = 1; i < s->nb_inputs; i++) {
            if (ctx->inputs[i]->w != width) {
                av_log(ctx, AV_LOG_ERROR, "Input %d width %d does not match input %d width %d.\n", i, ctx->inputs[i]->w, 0, width);
                return AVERROR(EINVAL);
            }
            height += ctx->inputs[i]->h;
        }
    } else {
        for (i = 1; i < s->nb_inputs; i++) {
            if (ctx->inputs[i]->h != height) {
                av_log(ctx, AV_LOG_ERROR, "Input %d height %d does not match input %d height %d.\n", i, ctx->inputs[i]->h, 0, height);
                return AVERROR(EINVAL);
            }
            width += ctx->inputs[i]->w;
        }
    }

    s->desc = av_pix_fmt_desc_get(outlink->format);
    if (!s->desc)
        return AVERROR_BUG;
    s->nb_planes = av_pix_fmt_count_planes(outlink->format);

    outlink->w          = width;
    outlink->h          = height;
    outlink->time_base  = time_base;
    outlink->frame_rate = frame_rate;

    if ((ret = ff_framesync_init(&s->fs, ctx, s->nb_inputs)) < 0)
        return ret;

    in = s->fs.in;
    s->fs.opaque = s;
    s->fs.on_event = process_frame;

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterLink *inlink = ctx->inputs[i];

        in[i].time_base = inlink->time_base;
        in[i].sync   = 1;
        in[i].before = EXT_STOP;
        in[i].after  = EXT_INFINITY;
    }

    return ff_framesync_configure(&s->fs);
}
Code Example #11
File: hwcontext_dxva2.c Project: mstorsjo/libav
static int dxva2_map_frame(AVHWFramesContext *ctx, AVFrame *dst, const AVFrame *src,
                           int flags)
{
    IDirect3DSurface9 *surface = (IDirect3DSurface9*)src->data[3];
    DXVA2Mapping      *map;
    D3DSURFACE_DESC    surfaceDesc;
    D3DLOCKED_RECT     LockedRect;
    HRESULT            hr;
    int i, err, nb_planes;
    int lock_flags = 0;

    nb_planes = av_pix_fmt_count_planes(dst->format);

    hr = IDirect3DSurface9_GetDesc(surface, &surfaceDesc);
    if (FAILED(hr)) {
        av_log(ctx, AV_LOG_ERROR, "Error getting a surface description\n");
        return AVERROR_UNKNOWN;
    }

    if (!(flags & AV_HWFRAME_MAP_WRITE))
        lock_flags |= D3DLOCK_READONLY;
    if (flags & AV_HWFRAME_MAP_OVERWRITE)
        lock_flags |= D3DLOCK_DISCARD;

    hr = IDirect3DSurface9_LockRect(surface, &LockedRect, NULL, lock_flags);
    if (FAILED(hr)) {
        av_log(ctx, AV_LOG_ERROR, "Unable to lock DXVA2 surface\n");
        return AVERROR_UNKNOWN;
    }

    map = av_mallocz(sizeof(*map));
    if (!map) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    err = ff_hwframe_map_create(src->hw_frames_ctx, dst, src,
                                dxva2_unmap_frame, map);
    if (err < 0) {
        av_freep(&map);
        goto fail;
    }

    for (i = 0; i < nb_planes; i++)
        dst->linesize[i] = LockedRect.Pitch;

    av_image_fill_pointers(dst->data, dst->format, surfaceDesc.Height,
                           (uint8_t*)LockedRect.pBits, dst->linesize);

    if (dst->format == AV_PIX_FMT_PAL8)
        dst->data[1] = (uint8_t*)map->palette_dummy;

    return 0;
fail:
    IDirect3DSurface9_UnlockRect(surface);
    return err;
}
Code Example #12
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s = ctx->priv;
    AVRational time_base = ctx->inputs[0]->time_base;
    AVRational frame_rate = ctx->inputs[0]->frame_rate;
    AVFilterLink *inlink = ctx->inputs[0];
    int height = ctx->inputs[0]->h;
    int width = ctx->inputs[0]->w;
    FFFrameSyncIn *in;
    int i, ret;

    for (i = 1; i < s->nb_inputs; i++) {
        if (ctx->inputs[i]->h != height || ctx->inputs[i]->w != width) {
            av_log(ctx, AV_LOG_ERROR, "Input %d size (%dx%d) does not match input %d size (%dx%d).\n", i, ctx->inputs[i]->w, ctx->inputs[i]->h, 0, width, height);
            return AVERROR(EINVAL);
        }
    }

    s->desc = av_pix_fmt_desc_get(outlink->format);
    if (!s->desc)
        return AVERROR_BUG;
    s->nb_planes = av_pix_fmt_count_planes(outlink->format);
    s->depth = s->desc->comp[0].depth;

    outlink->w          = width;
    outlink->h          = height;
    outlink->time_base  = time_base;
    outlink->frame_rate = frame_rate;

    if ((ret = ff_framesync_init(&s->fs, ctx, s->nb_inputs)) < 0)
        return ret;

    in = s->fs.in;
    s->fs.opaque = s;
    s->fs.on_event = process_frame;

    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h);
    s->height[0] = s->height[3] = inlink->h;

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterLink *inlink = ctx->inputs[i];

        in[i].time_base = inlink->time_base;
        in[i].sync   = 1;
        in[i].before = EXT_STOP;
        in[i].after  = (s->duration == 1 || (s->duration == 2 && i == 0)) ? EXT_STOP : EXT_INFINITY;
    }

    return ff_framesync_configure(&s->fs);
}
Code Example #13
File: VideoFormat.cpp Project: sweatball/QtAV
void init() {
    if (pixfmt_ff == QTAV_PIX_FMT_C(NONE)) {
        qWarning("Invalid pixel format");
        return;
    }
    planes = qMax(av_pix_fmt_count_planes(pixfmt_ff), 0);
    bpps.resize(planes);
    bpps_pad.resize(planes);
    pixdesc = const_cast<AVPixFmtDescriptor*>(av_pix_fmt_desc_get(pixfmt_ff));
    if (!pixdesc)
        return;
    initBpp();
}
Code Example #14
File: vf_removegrain.c Project: 0day-ci/FFmpeg
static int config_input(AVFilterLink *inlink)
{
    RemoveGrainContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int i;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planewidth[1]  = s->planewidth[2]  = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;

    for (i = 0; i < s->nb_planes; i++) {
        switch (s->mode[i]) {
        case 1:  s->rg[i] = mode01;   break;
        case 2:  s->rg[i] = mode02;   break;
        case 3:  s->rg[i] = mode03;   break;
        case 4:  s->rg[i] = mode04;   break;
        case 5:  s->rg[i] = mode05;   break;
        case 6:  s->rg[i] = mode06;   break;
        case 7:  s->rg[i] = mode07;   break;
        case 8:  s->rg[i] = mode08;   break;
        case 9:  s->rg[i] = mode09;   break;
        case 10: s->rg[i] = mode10;   break;
        case 11: s->rg[i] = mode1112; break;
        case 12: s->rg[i] = mode1112; break;
        case 13: s->skip_odd = 1;
                 s->rg[i] = mode1314; break;
        case 14: s->skip_even = 1;
                 s->rg[i] = mode1314; break;
        case 15: s->skip_odd = 1;
                 s->rg[i] = mode1516; break;
        case 16: s->skip_even = 1;
                 s->rg[i] = mode1516; break;
        case 17: s->rg[i] = mode17;   break;
        case 18: s->rg[i] = mode18;   break;
        case 19: s->rg[i] = mode19;   break;
        case 20: s->rg[i] = mode20;   break;
        case 21: s->rg[i] = mode21;   break;
        case 22: s->rg[i] = mode22;   break;
        case 23: s->rg[i] = mode23;   break;
        case 24: s->rg[i] = mode24;   break;
        }
    }

    if (ARCH_X86)
        ff_removegrain_init_x86(s);

    return 0;
}
Code Example #15
static inline int copy_field
(
    lw_log_handler_t *lhp,
    AVFrame          *dst,
    AVFrame          *src,
    int               line_offset
)
{
    /* Check if the destination is writable. */
    if( av_frame_is_writable( dst ) == 0 )
    {
        /* The destination is NOT writable, so allocate new buffers and copy the data. */
        av_frame_unref( dst );
        if( av_frame_ref( dst, src ) < 0 )
        {
            if( lhp->show_log )
                lhp->show_log( lhp, LW_LOG_ERROR, "Failed to reference a video frame.\n" );
            return -1;
        }
        if( av_frame_make_writable( dst ) < 0 )
        {
            if( lhp->show_log )
                lhp->show_log( lhp, LW_LOG_ERROR, "Failed to make a video frame writable.\n" );
            return -1;
        }
        /* For direct rendering, the destination can not know
         * whether the value at the address held by the opaque pointer is valid or not.
         * Anyway, the opaque pointer for direct rendering shall be set to NULL. */
        dst->opaque = NULL;
    }
    else
    {
        /* The destination is writable. Copy field data from the source. */
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get( (enum AVPixelFormat)dst->format );
        int number_of_planes = av_pix_fmt_count_planes( (enum AVPixelFormat)dst->format );
        int height           = MIN( dst->height, src->height );
        for( int i = 0; i < number_of_planes; i++ )
        {
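            /* Planes 1 and 2 are chroma: shift by log2_chroma_h, plus one
             * extra bit because only every other line (one field) is copied. */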
            int r_shift = 1 + ((i == 1 || i == 2) ? desc->log2_chroma_h : 0);
            int field_height = (height >> r_shift) + (line_offset == 0 && (height & 1) ? 1 : 0);
            av_image_copy_plane( dst->data[i] + dst->linesize[i] * line_offset, 2 * dst->linesize[i],
                                 src->data[i] + src->linesize[i] * line_offset, 2 * src->linesize[i],
                                 MIN( dst->linesize[i], src->linesize[i] ),
                                 field_height );
        }
    }
    /* Treat this frame as interlaced. */
    dst->interlaced_frame = 1;
    return 0;
}
Code Example #16
File: VideoFormat.cpp Project: Developer-Tools/QtAV
void init() {
    // TODO: what if other formats not supported by ffmpeg? give attributes in QtAV?
    if (pixfmt_ff == QTAV_PIX_FMT_C(NONE)) {
        qWarning("Invalid pixel format");
        return;
    }
    planes = qMax(av_pix_fmt_count_planes(pixfmt_ff), 0);
    bpps.reserve(planes);
    channels.reserve(planes);
    bpps.resize(planes);
    channels.resize(planes);
    pixdesc = const_cast<AVPixFmtDescriptor*>(av_pix_fmt_desc_get(pixfmt_ff));
    if (!pixdesc)
        return;
    initBpp();
}
Code Example #17
File: vf_fieldhint.c Project: ZhaoliangGuo/FFmpeg
static int config_input(AVFilterLink *inlink)
{
    FieldHintContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int ret;

    if ((ret = av_image_fill_linesizes(s->planewidth, inlink->format, inlink->w)) < 0)
        return ret;

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    return 0;
}
Code Example #18
File: VideoFormat.cpp Project: rockyhuo/QtAV
void init() {
    // FIXME: hack for invalid ffmpeg formats
    if (pixfmt == VideoFormat::Format_VYUY) {
        pixfmt_ff = QTAV_PIX_FMT_C(UYVY422);
    }
    // TODO: what if other formats not supported by ffmpeg? give attributes in QtAV?
    if (pixfmt_ff == QTAV_PIX_FMT_C(NONE)) {
        qWarning("Invalid pixel format");
        return;
    }
    planes = qMax(av_pix_fmt_count_planes(pixfmt_ff), 0);
    bpps.resize(planes);
    bpps_pad.resize(planes);
    pixdesc = const_cast<AVPixFmtDescriptor*>(av_pix_fmt_desc_get(pixfmt_ff));
    if (!pixdesc)
        return;
    initBpp();
}
Code Example #19
File: vf_midequalizer.c Project: DeHackEd/FFmpeg
static int config_input1(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    MidEqualizerContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int vsub, hsub;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    hsub = desc->log2_chroma_w;
    vsub = desc->log2_chroma_h;

    s->height[1][0] = s->height[1][3] = inlink->h;
    s->width[1][0]  = s->width[1][3]  = inlink->w;
    s->height[1][1] = s->height[1][2] = AV_CEIL_RSHIFT(inlink->h, vsub);
    s->width[1][1]  = s->width[1][2]  = AV_CEIL_RSHIFT(inlink->w, hsub);

    return 0;
}
Code Example #20
File: vf_shuffleplanes.c Project: DeHackEd/FFmpeg
static av_cold int shuffleplanes_config_input(AVFilterLink *inlink)
{
    AVFilterContext    *ctx = inlink->dst;
    ShufflePlanesContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc;
    int used[4] = { 0 };
    int i;

    s->copy   = 0;
    s->planes = av_pix_fmt_count_planes(inlink->format);
    desc      = av_pix_fmt_desc_get(inlink->format);

    for (i = 0; i < s->planes; i++) {
        if (s->map[i] >= s->planes) {
            av_log(ctx, AV_LOG_ERROR,
                   "Non-existing input plane #%d mapped to output plane #%d.\n",
                   s->map[i], i);
            return AVERROR(EINVAL);
        }

        if ((desc->log2_chroma_h || desc->log2_chroma_w) &&
            (i == 1 || i == 2) != (s->map[i] == 1 || s->map[i] == 2)) {
            av_log(ctx, AV_LOG_ERROR,
                   "Cannot map between a subsampled chroma plane and a luma "
                   "or alpha plane.\n");
            return AVERROR(EINVAL);
        }

        if ((desc->flags & AV_PIX_FMT_FLAG_PAL ||
             desc->flags & FF_PSEUDOPAL) &&
            (i == 1) != (s->map[i] == 1)) {
            av_log(ctx, AV_LOG_ERROR,
                   "Cannot map between a palette plane and a data plane.\n");
            return AVERROR(EINVAL);
        }
        if (used[s->map[i]])
            s->copy = 1;
        used[s->map[i]]++;
    }

    return 0;
}
Code Example #21
File: vf_swaprect.c Project: Hero2000/CainCamera
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SwapRectContext *s = ctx->priv;

    if (!s->w  || !s->h  ||
        !s->x1 || !s->y1 ||
        !s->x2 || !s->y2)
        return AVERROR(EINVAL);

    s->desc = av_pix_fmt_desc_get(inlink->format);
    av_image_fill_max_pixsteps(s->pixsteps, NULL, s->desc);
    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    s->temp = av_malloc_array(inlink->w, s->pixsteps[0]);
    if (!s->temp)
        return AVERROR(ENOMEM);

    return 0;
}
Code Example #22
File: vf_amplify.c Project: Kagami/ffmpeg-hevc-accel
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AmplifyContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int ret;

    s->desc = av_pix_fmt_desc_get(outlink->format);
    if (!s->desc)
        return AVERROR_BUG;
    s->nb_planes = av_pix_fmt_count_planes(outlink->format);
    s->depth = s->desc->comp[0].depth;

    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h);
    s->height[0] = s->height[3] = inlink->h;

    return 0;
}
Code Example #23
File: vf_convolution.c Project: 0day-ci/FFmpeg
static int config_input(AVFilterLink *inlink)
{
    ConvolutionContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int ret;

    if ((ret = av_image_fill_linesizes(s->planewidth, inlink->format, inlink->w)) < 0)
        return ret;

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    s->bstride = s->planewidth[0] + 32;
    s->buffer = av_malloc(5 * s->bstride);
    if (!s->buffer)
        return AVERROR(ENOMEM);

    return 0;
}
Code Example #24
File: frame.c Project: venkatarajasekhar/Qt
static int frame_copy_video(AVFrame *dst, const AVFrame *src)
{
    const uint8_t *src_data[4];
    int i, planes;

    if (dst->width  != src->width ||
        dst->height != src->height)
        return AVERROR(EINVAL);

    planes = av_pix_fmt_count_planes(dst->format);
    for (i = 0; i < planes; i++)
        if (!dst->data[i] || !src->data[i])
            return AVERROR(EINVAL);

    memcpy(src_data, src->data, sizeof(src_data));
    av_image_copy(dst->data, dst->linesize,
                  src_data, src->linesize,
                  dst->format, dst->width, dst->height);

    return 0;
}
Code Example #25
File: vf_threshold.c Project: DeHackEd/FFmpeg
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ThresholdContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int vsub, hsub;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    hsub = desc->log2_chroma_w;
    vsub = desc->log2_chroma_h;
    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
    s->height[0] = s->height[3] = inlink->h;
    s->width[1]  = s->width[2]  = AV_CEIL_RSHIFT(inlink->w, hsub);
    s->width[0]  = s->width[3]  = inlink->w;
    s->depth = desc->comp[0].depth;

    ff_threshold_init(s);

    return 0;
}
Code Example #26
static int config_props_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SeparateFieldsContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    if (inlink->h & 1) {
        av_log(ctx, AV_LOG_ERROR, "height must be even\n");
        return AVERROR_INVALIDDATA;
    }

    outlink->time_base.num = inlink->time_base.num;
    outlink->time_base.den = inlink->time_base.den * 2;
    outlink->frame_rate.num = inlink->frame_rate.num * 2;
    outlink->frame_rate.den = inlink->frame_rate.den;
    outlink->w = inlink->w;
    outlink->h = inlink->h / 2;

    return 0;
}
Code Example #27
File: vf_pullup.c Project: Bjelijah/EcamTurnH265
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    PullupContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int mp = s->metric_plane;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    if (mp + 1 > s->nb_planes) {
        av_log(ctx, AV_LOG_ERROR, "input format does not have such plane\n");
        return AVERROR(EINVAL);
    }

    s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planewidth[1]  = s->planewidth[2]  = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;

    s->metric_w      = (s->planewidth[mp]  - ((s->junk_left + s->junk_right)  << 3)) >> 3;
    s->metric_h      = (s->planeheight[mp] - ((s->junk_top  + s->junk_bottom) << 1)) >> 3;
    s->metric_offset = (s->junk_left << 3) + (s->junk_top << 1) * s->planewidth[mp];
    s->metric_length = s->metric_w * s->metric_h;

    av_log(ctx, AV_LOG_DEBUG, "w: %d h: %d\n", s->metric_w, s->metric_h);
    av_log(ctx, AV_LOG_DEBUG, "offset: %d length: %d\n", s->metric_offset, s->metric_length);

    s->head = make_field_queue(s, 8);
    if (!s->head)
        return AVERROR(ENOMEM);

    s->diff = diff_c;
    s->comb = comb_c;
    s->var  = var_c;

    if (ARCH_X86)
        ff_pullup_init_x86(s);
    return 0;
}
Code Example #28
File: vf_gblur.c Project: DeHackEd/FFmpeg
static int config_input(AVFilterLink *inlink)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    GBlurContext *s = inlink->dst->priv;

    s->depth = desc->comp[0].depth;
    s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0] = s->planewidth[3] = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    s->buffer = av_malloc_array(inlink->w, inlink->h * sizeof(*s->buffer));
    if (!s->buffer)
        return AVERROR(ENOMEM);

    if (s->sigmaV < 0) {
        s->sigmaV = s->sigma;
    }

    return 0;
}
Code Example #29
File: tests.cpp Project: FFMS/ffms2
bool CheckFrame(const FFMS_Frame *Frame, const FFMS_FrameInfo *info, const TestFrameData *Data) {
    EQ_CHECK(info->PTS, Data->PTS);
    EQ_CHECK(!!info->KeyFrame, Data->Keyframe);
    EQ_CHECK(!!Frame->KeyFrame, Data->Keyframe);
    EQ_CHECK(Frame->EncodedWidth, Data->Width);
    EQ_CHECK(Frame->EncodedHeight, Data->Height);

    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get((AVPixelFormat) Frame->ConvertedPixelFormat);
    NULL_CHECK(desc);

    EXPECT_STREQ(desc->name, Data->PixelFormat);
    if (!!strcmp(desc->name, Data->PixelFormat))
        return false;

    struct AVSHA *sha = av_sha_alloc();
    NULL_CHECK(sha);

    int ret = av_sha_init(sha, 256);
    EQ_CHECK(ret, 0);

    for (int i = 0; i < av_pix_fmt_count_planes((AVPixelFormat) Frame->ConvertedPixelFormat); i++) {
        const int subh = i == 0 ? 0 : desc->log2_chroma_h;
        const int subw = i == 0 ? 0 : desc->log2_chroma_w;
        for (int y = 0; y < Frame->EncodedHeight >> subh; y++)
            av_sha_update(sha, Frame->Data[i] + y * Frame->Linesize[i], Frame->EncodedWidth >> subw);
    }

    uint8_t digest[32];
    av_sha_final(sha, &digest[0]);
    av_free(sha);

    bool ok;

    EXPECT_TRUE((ok = !memcmp(&Data->SHA256[0], &digest[0], 32)));

    return ok;
}
Code Example #30
File: vf_transpose.c Project: jsebechlebsky/FFmpeg
static int config_props_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    TransContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const AVPixFmtDescriptor *desc_out = av_pix_fmt_desc_get(outlink->format);
    const AVPixFmtDescriptor *desc_in  = av_pix_fmt_desc_get(inlink->format);

    if (s->dir & 4) {
        av_log(ctx, AV_LOG_WARNING,
               "dir values greater than 3 are deprecated, use the passthrough option instead\n");
        s->dir &= 3;
        s->passthrough = TRANSPOSE_PT_TYPE_LANDSCAPE;
    }

    if ((inlink->w >= inlink->h && s->passthrough == TRANSPOSE_PT_TYPE_LANDSCAPE) ||
        (inlink->w <= inlink->h && s->passthrough == TRANSPOSE_PT_TYPE_PORTRAIT)) {
        av_log(ctx, AV_LOG_VERBOSE,
               "w:%d h:%d -> w:%d h:%d (passthrough mode)\n",
               inlink->w, inlink->h, inlink->w, inlink->h);
        return 0;
    } else {
        s->passthrough = TRANSPOSE_PT_TYPE_NONE;
    }

    s->hsub = desc_in->log2_chroma_w;
    s->vsub = desc_in->log2_chroma_h;
    s->planes = av_pix_fmt_count_planes(outlink->format);

    av_assert0(desc_in->nb_components == desc_out->nb_components);

    av_image_fill_max_pixsteps(s->pixsteps, NULL, desc_out);

    outlink->w = inlink->h;
    outlink->h = inlink->w;

    if (inlink->sample_aspect_ratio.num)
        outlink->sample_aspect_ratio = av_div_q((AVRational) { 1, 1 },
                                                inlink->sample_aspect_ratio);
    else
        outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;

    switch (s->pixsteps[0]) {
    case 1: s->transpose_block = transpose_block_8_c;
            s->transpose_8x8   = transpose_8x8_8_c;  break;
    case 2: s->transpose_block = transpose_block_16_c;
            s->transpose_8x8   = transpose_8x8_16_c; break;
    case 3: s->transpose_block = transpose_block_24_c;
            s->transpose_8x8   = transpose_8x8_24_c; break;
    case 4: s->transpose_block = transpose_block_32_c;
            s->transpose_8x8   = transpose_8x8_32_c; break;
    case 6: s->transpose_block = transpose_block_48_c;
            s->transpose_8x8   = transpose_8x8_48_c; break;
    case 8: s->transpose_block = transpose_block_64_c;
            s->transpose_8x8   = transpose_8x8_64_c; break;
    }

    av_log(ctx, AV_LOG_VERBOSE,
           "w:%d h:%d dir:%d -> w:%d h:%d rotation:%s vflip:%d\n",
           inlink->w, inlink->h, s->dir, outlink->w, outlink->h,
           s->dir == 1 || s->dir == 3 ? "clockwise" : "counterclockwise",
           s->dir == 0 || s->dir == 3);
    return 0;
}
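Note: to close, a self-contained sketch (a hypothetical test program, not from any of the projects above) of the call every example here shares. av_pix_fmt_count_planes() returns the number of distinct data planes, or a negative AVERROR for an invalid format, which is why several examples clamp the result with qMax(..., 0) or compare it against a requested plane index:

#include <stdio.h>
#include <libavutil/pixdesc.h>

int main(void)
{
    static const enum AVPixelFormat fmts[] = {
        AV_PIX_FMT_YUV420P, /* 3 planes: Y, U, V */
        AV_PIX_FMT_NV12,    /* 2 planes: Y + interleaved UV */
        AV_PIX_FMT_RGB24,   /* 1 packed plane */
        AV_PIX_FMT_GBRAP,   /* 4 planes: G, B, R, A */
    };
    for (size_t i = 0; i < sizeof(fmts) / sizeof(fmts[0]); i++)
        printf("%-8s -> %d plane(s)\n",
               av_pix_fmt_desc_get(fmts[i])->name,
               av_pix_fmt_count_planes(fmts[i]));
    return 0;
}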