Example #1
static int filter_frame(AVFilterLink *link, AVFrame *picref)
{
    AVFilterContext *ctx = link->dst;
    IDETContext *idet = ctx->priv;
    AVDictionary **metadata = avpriv_frame_get_metadatap(picref);

    /* slide the three-frame window: prev <- cur <- next <- new frame */
    if (idet->prev)
        av_frame_free(&idet->prev);
    idet->prev = idet->cur;
    idet->cur  = idet->next;
    idet->next = picref;

    if (!idet->cur)
        return 0;

    if (!idet->prev)
        idet->prev = av_frame_clone(idet->cur);

    if (!idet->csp)
        idet->csp = av_pix_fmt_desc_get(link->format);
    if (idet->csp->comp[0].depth_minus1 / 8 == 1){ /* 9-16 bit input */
        idet->filter_line = (ff_idet_filter_func)ff_idet_filter_line_c_16bit;
        if (ARCH_X86)
            ff_idet_init_x86(idet, 1);
    }

    filter(ctx);

    av_dict_set_int(metadata, "lavfi.idet.tff", idet->prestat[TFF], 0);
    av_dict_set_int(metadata, "lavfi.idet.bff", idet->prestat[BFF], 0);
    av_dict_set_int(metadata, "lavfi.idet.progressive", idet->prestat[PROGRESSIVE], 0);
    av_dict_set_int(metadata, "lavfi.idet.undetermined", idet->prestat[UNDETERMINED], 0);

    return ff_filter_frame(ctx->outputs[0], av_frame_clone(idet->cur));
}
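All of these snippets rely on av_frame_clone() creating a new AVFrame that references the same underlying buffers rather than copying pixel data, so a filter can keep one reference (idet->cur here) while handing an independent reference downstream. A minimal standalone sketch of that ownership pattern (keep_and_forward is a hypothetical helper, not part of the filter):

#include <libavutil/error.h>
#include <libavutil/frame.h>

static int keep_and_forward(AVFrame *src, AVFrame **kept, AVFrame **forwarded)
{
    *kept = src;                      /* retain the original reference */
    *forwarded = av_frame_clone(src); /* new AVFrame, shared buffers   */
    if (!*forwarded) {
        av_frame_free(kept);
        return AVERROR(ENOMEM);
    }
    return 0;
}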
Example #2
static int filter_frame(AVFilterLink *link, AVFrame *picref)
{
    AVFilterContext *ctx = link->dst;
    IDETContext *idet = ctx->priv;

    if (idet->prev)
        av_frame_free(&idet->prev);
    idet->prev = idet->cur;
    idet->cur  = idet->next;
    idet->next = picref;

    if (!idet->cur)
        return 0;

    if (!idet->prev)
        idet->prev = av_frame_clone(idet->cur);

    if (!idet->csp)
        idet->csp = av_pix_fmt_desc_get(link->format);
    if (idet->csp->comp[0].depth_minus1 / 8 == 1){
        idet->filter_line = (ff_idet_filter_func)ff_idet_filter_line_c_16bit;
        if (ARCH_X86)
            ff_idet_init_x86(idet, 1);
    }

    filter(ctx);

    return ff_filter_frame(ctx->outputs[0], av_frame_clone(idet->cur));
}
Example #3
static int push_frame(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    int64_t pts, duration;
    int ret;

    AVFrame *out = av_frame_clone(s->frames[s->current_frame]);

    if (!out)
        return AVERROR(ENOMEM);
    out->pts += s->duration - s->start_pts;
    if (out->pkt_duration)
        duration = out->pkt_duration;
    else
        duration = av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base);
    pts = out->pts + duration;
    ret = ff_filter_frame(outlink, out);
    s->current_frame++;

    if (s->current_frame >= s->nb_frames) {
        s->duration = pts;
        s->current_frame = 0;

        if (s->loop > 0)
            s->loop--;
    }

    return ret;
}
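The duration fallback above rescales one frame period, 1/frame_rate, into the output link's time base. A tiny self-contained check of that arithmetic (25 fps and a millisecond time base are assumed values for illustration):

#include <inttypes.h>
#include <stdio.h>
#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

int main(void)
{
    AVRational frame_rate = { 25, 1 };   /* assumed: 25 fps        */
    AVRational time_base  = { 1, 1000 }; /* assumed: 1 ms per tick */
    /* one frame period (1/25 s) expressed in time_base units */
    int64_t duration = av_rescale_q(1, av_inv_q(frame_rate), time_base);
    printf("duration = %" PRId64 " ticks\n", duration); /* prints 40 */
    return 0;
}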
Example #4
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    int64_t duration;
    int ret = 0;

    if (inlink->frame_count_out >= s->start && s->size > 0 && s->loop != 0) {
        if (s->nb_frames < s->size) {
            if (!s->nb_frames)
                s->start_pts = frame->pts;
            s->frames[s->nb_frames] = av_frame_clone(frame);
            if (!s->frames[s->nb_frames]) {
                av_frame_free(&frame);
                return AVERROR(ENOMEM);
            }
            s->nb_frames++;
            if (frame->pkt_duration)
                duration = frame->pkt_duration;
            else
                duration = av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base);
            s->duration = frame->pts + duration;
            ret = ff_filter_frame(outlink, frame);
        } else {
            av_frame_free(&frame);
            ret = push_frame(ctx);
        }
    } else {
        frame->pts += s->duration;
        ret = ff_filter_frame(outlink, frame);
    }

    return ret;
}
Example #5
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext    *ctx = inlink->dst;
    ShuffleFramesContext *s = ctx->priv;
    int ret = 0;

    if (s->in_frames < s->nb_frames) {
        s->frames[s->in_frames] = frame;
        s->pts[s->in_frames] = frame->pts;
        s->in_frames++;
        ret = 0;
    }

    if (s->in_frames == s->nb_frames) {
        int n, x;

        for (n = 0; n < s->nb_frames; n++) {
            AVFrame *out;

            x = s->map[n];
            out = av_frame_clone(s->frames[x]);
            if (!out)
                return AVERROR(ENOMEM);
            out->pts = s->pts[n];
            ret = ff_filter_frame(ctx->outputs[0], out);
            s->in_frames--;
        }

        for (n = 0; n < s->nb_frames; n++)
            av_frame_free(&s->frames[n]);
    }

    return ret;
}
Example #6
int libavsmash_video_find_first_valid_frame
(
    libavsmash_video_decode_handler_t *vdhp
)
{
    codec_configuration_t *config = &vdhp->config;
    for( uint32_t i = 1; i <= vdhp->sample_count + get_decoder_delay( config->ctx ); i++ )
    {
        AVPacket pkt = { 0 };
        get_sample( vdhp->root, vdhp->track_id, i, config, &pkt );
        av_frame_unref( vdhp->frame_buffer );
        int got_picture;
        if( decode_video_packet( config->ctx, vdhp->frame_buffer, &got_picture, &pkt ) >= 0 && got_picture )
        {
            vdhp->first_valid_frame_number = i - MIN( get_decoder_delay( config->ctx ), config->delay_count );
            if( vdhp->first_valid_frame_number > 1 || vdhp->sample_count == 1 )
            {
                vdhp->first_valid_frame = av_frame_clone( vdhp->frame_buffer );
                if( !vdhp->first_valid_frame )
                    return -1;
                av_frame_unref( vdhp->frame_buffer );
            }
            break;
        }
        else if( pkt.data )
            ++ config->delay_count;
    }
    return 0;
}
Example #7
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    StreamSelectContext *s = fs->opaque;
    AVFrame **in = s->frames;
    int i, j, ret = 0;

    for (i = 0; i < ctx->nb_inputs; i++) {
        if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0)
            return ret;
    }

    for (j = 0; j < ctx->nb_inputs; j++) {
        for (i = 0; i < s->nb_map; i++) {
            if (s->map[i] == j) {
                AVFrame *out;

                if (s->is_audio && s->last_pts[j] == in[j]->pts &&
                    ctx->outputs[i]->frame_count_in > 0)
                    continue;
                out = av_frame_clone(in[j]);
                if (!out)
                    return AVERROR(ENOMEM);

                out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, ctx->outputs[i]->time_base);
                s->last_pts[j] = in[j]->pts;
                ret = ff_filter_frame(ctx->outputs[i], out);
                if (ret < 0)
                    return ret;
            }
        }
    }

    return ret;
}
Example #8
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
    AVFilterContext *ctx = inlink->dst;
    SeparateFieldsContext *sf = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *second;
    int i, ret;

    inpicref->height = outlink->h;
    inpicref->interlaced_frame = 0;

    second = av_frame_clone(inpicref);
    if (!second)
        return AVERROR(ENOMEM);

    for (i = 0; i < sf->nb_planes; i++) {
        if (!inpicref->top_field_first)
            inpicref->data[i] = inpicref->data[i] + inpicref->linesize[i];
        else
            second->data[i] = second->data[i] + second->linesize[i];
        inpicref->linesize[i] *= 2;
        second->linesize[i]   *= 2;
    }

    inpicref->pts = sf->frame_count++ * sf->ts_unit;
    second->pts   = sf->frame_count++ * sf->ts_unit;

    ret = ff_filter_frame(outlink, inpicref);
    if (ret < 0)
        return ret;
    return ff_filter_frame(outlink, second);
}
Example #9
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    int i, ret = 0;

    for (i = 0; i < ctx->nb_outputs; i++) {
        AVFrame *buf_out = av_frame_clone(buf);

        if (!buf_out) {
            ret = AVERROR(ENOMEM);
            break;
        }

        buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[i];
        buf_out->channel_layout =
            av_channel_layout_extract_channel(buf->channel_layout, i);
        av_frame_set_channels(buf_out, 1);

        ret = ff_filter_frame(ctx->outputs[i], buf_out);
        if (ret < 0)
            break;
    }
    av_frame_free(&buf);
    return ret;
}
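The per-output loop above depends on av_channel_layout_extract_channel() returning the mask of the i-th channel in a layout (the older uint64_t channel-layout API). A short standalone illustration using a stereo layout:

#include <stdio.h>
#include <libavutil/channel_layout.h>

int main(void)
{
    uint64_t layout = AV_CH_LAYOUT_STEREO;
    for (int i = 0; i < 2; i++) {
        uint64_t ch = av_channel_layout_extract_channel(layout, i);
        printf("output %d -> %s\n", i, av_get_channel_name(ch)); /* FL, FR */
    }
    return 0;
}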
Example #10
static int request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    IDETContext *idet = ctx->priv;

    do {
        int ret;

        if (idet->eof)
            return AVERROR_EOF;

        ret = ff_request_frame(link->src->inputs[0]);

        if (ret == AVERROR_EOF && idet->cur) {
            AVFrame *next = av_frame_clone(idet->next);

            if (!next)
                return AVERROR(ENOMEM);

            filter_frame(link->src->inputs[0], next);
            idet->eof = 1;
        } else if (ret < 0) {
            return ret;
        }
    } while (!idet->cur);

    return 0;
}
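Several of these request_frame() callbacks (this one, and Examples 13, 23, 26 and 30) share the same EOF flush idiom: when the upstream input reports AVERROR_EOF but the filter still holds buffered frames, it clones its newest buffered frame and pushes the clone back through its own frame callback to drain pending output. A condensed sketch of the idiom (flush_on_eof and filter_cb are hypothetical stand-ins for each filter's state and its static filter_frame()):

#include <libavfilter/avfilter.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>

static int flush_on_eof(AVFilterLink *inlink, AVFrame *last_frame, int *eof,
                        int (*filter_cb)(AVFilterLink *, AVFrame *))
{
    AVFrame *next = av_frame_clone(last_frame); /* re-feed a copy */
    if (!next)
        return AVERROR(ENOMEM);
    *eof = 1;                 /* next request_frame() returns AVERROR_EOF */
    return filter_cb(inlink, next);
}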
Example #11
static int push_frame(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    int64_t pts;
    int ret;

    AVFrame *out = av_frame_clone(s->frames[s->current_frame]);

    if (!out)
        return AVERROR(ENOMEM);
    out->pts += s->duration - s->start_pts;
    pts = out->pts + av_frame_get_pkt_duration(out);
    ret = ff_filter_frame(outlink, out);
    s->current_frame++;

    if (s->current_frame >= s->nb_frames) {
        s->duration = pts;
        s->current_frame = 0;

        if (s->loop > 0)
            s->loop--;
    }

    return ret;
}
Example #12
static double get_scene_score(AVFilterContext *ctx, AVFrame *frame)
{
    double ret = 0;
    SelectContext *select = ctx->priv;
    AVFrame *prev_picref = select->prev_picref;

    if (prev_picref &&
        frame->height == prev_picref->height &&
        frame->width  == prev_picref->width) {
        int x, y, nb_sad = 0;
        int64_t sad = 0;
        double mafd, diff;
        uint8_t *p1 =      frame->data[0];
        uint8_t *p2 = prev_picref->data[0];
        const int p1_linesize =       frame->linesize[0];
        const int p2_linesize = prev_picref->linesize[0];

        for (y = 0; y < frame->height - 7; y += 8) {
            for (x = 0; x < frame->width*3 - 7; x += 8) {
                sad += select->sad(p1 + x, p1_linesize, p2 + x, p2_linesize);
                nb_sad += 8 * 8;
            }
            p1 += 8 * p1_linesize;
            p2 += 8 * p2_linesize;
        }
        emms_c();
        mafd = nb_sad ? (double)sad / nb_sad : 0;
        diff = fabs(mafd - select->prev_mafd);
        ret  = av_clipf(FFMIN(mafd, diff) / 100., 0, 1);
        select->prev_mafd = mafd;
    }
    /* release the previous frame reference, even when the size check
     * above failed, before storing a clone of the current frame */
    av_frame_free(&select->prev_picref);
    select->prev_picref = av_frame_clone(frame);
    return ret;
}
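select->sad() above is a DSP callback returning the sum of absolute differences over an 8x8 block; mafd then averages that sum per pixel, and the scene score is the clipped change against the previous frame's value. A plain-C stand-in for such a callback (sad_8x8 is a hypothetical reference implementation, not the filter's optimized routine):

#include <stdint.h>
#include <stdlib.h>

static int64_t sad_8x8(const uint8_t *p1, int linesize1,
                       const uint8_t *p2, int linesize2)
{
    int64_t sad = 0;
    for (int y = 0; y < 8; y++) {
        for (int x = 0; x < 8; x++)
            sad += abs(p1[x] - p2[x]);
        p1 += linesize1; /* advance one line in each picture */
        p2 += linesize2;
    }
    return sad;
}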
Example #13
int ff_yadif_request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    YADIFContext *yadif = ctx->priv;
    int ret;

    if (yadif->frame_pending) {
        return_frame(ctx, 1);
        return 0;
    }

    if (yadif->eof)
        return AVERROR_EOF;

    ret  = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && yadif->cur) {
        AVFrame *next = av_frame_clone(yadif->next);

        if (!next)
            return AVERROR(ENOMEM);

        /* extrapolate one frame period past the last real frame:
         * pts(next) + (pts(next) - pts(cur)) */
        next->pts = yadif->next->pts * 2 - yadif->cur->pts;

        ff_yadif_filter_frame(ctx->inputs[0], next);
        yadif->eof = 1;
    } else if (ret < 0) {
        return ret;
    }

    return 0;
}
Example #14
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    DisplaceContext *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *in, *xpic, *ypic;
    int ret;

    if ((ret = ff_framesync_get_frame(&s->fs, 0, &in,   0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 1, &xpic, 0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 2, &ypic, 0)) < 0)
        return ret;

    if (ctx->is_disabled) {
        out = av_frame_clone(in);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, in);

        s->displace(s, in, xpic, ypic, out);
    }
    out->pts = av_rescale_q(in->pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}
Example #15
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowVolumeContext *s = ctx->priv;
    int c, i, j, k;
    double values[VAR_VARS_NB];

    if (!s->out || s->out->width  != outlink->w ||
                   s->out->height != outlink->h) {
        av_frame_free(&s->out);
        s->out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!s->out) {
            av_frame_free(&insamples);
            return AVERROR(ENOMEM);
        }

        for (i = 0; i < outlink->h; i++)
            memset(s->out->data[0] + i * s->out->linesize[0], 0, outlink->w * 4);
    }
    s->out->pts = insamples->pts;

    /* fade previous contents: subtract s->f from each RGBA byte, clamping at 0 */
    for (j = 0; j < outlink->h; j++) {
        uint8_t *dst = s->out->data[0] + j * s->out->linesize[0];
        for (k = 0; k < s->w; k++) {
            dst[k * 4 + 0] = FFMAX(dst[k * 4 + 0] - s->f, 0);
            dst[k * 4 + 1] = FFMAX(dst[k * 4 + 1] - s->f, 0);
            dst[k * 4 + 2] = FFMAX(dst[k * 4 + 2] - s->f, 0);
            dst[k * 4 + 3] = FFMAX(dst[k * 4 + 3] - s->f, 0);
        }
    }

    for (c = 0; c < inlink->channels; c++) {
        float *src = (float *)insamples->extended_data[c];
        float max = 0;
        int color;

        for (i = 0; i < insamples->nb_samples; i++)
            max = FFMAX(max, src[i]);

        max = av_clipf(max, 0, 1);
        values[VAR_VOLUME] = 20.0 * log(max) / M_LN10;
        color = av_expr_eval(s->c_expr, values, NULL);

        for (j = 0; j < s->h; j++) {
            uint8_t *dst = s->out->data[0] + (c * s->h + c * s->b + j) * s->out->linesize[0];

            for (k = 0; k < s->w * max; k++)
                AV_WN32A(dst + k * 4, color);
        }

        if (s->h >= 8 && s->draw_text)
            drawtext(s->out, 2, c * (s->h + s->b) + (s->h - 8) / 2,
                     av_get_channel_name(av_channel_layout_extract_channel(insamples->channel_layout, c)));
    }

    av_frame_free(&insamples);

    return ff_filter_frame(outlink, av_frame_clone(s->out));
}
Example #16
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    HysteresisContext *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *base, *alt;
    int ret;

    if ((ret = ff_framesync_get_frame(&s->fs, 0, &base, 0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 1, &alt,  0)) < 0)
        return ret;

    if (ctx->is_disabled) {
        out = av_frame_clone(base);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        int p;

        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, base);

        for (p = 0; p < s->nb_planes; p++) {
            if (!((1 << p) & s->planes)) {
                av_image_copy_plane(out->data[p], out->linesize[p], base->data[p], base->linesize[p],
                                    s->width[p], s->height[p]);
                continue;
            } else {
                int y;

                for (y = 0; y < s->height[p]; y++) {
                    memset(out->data[p] + y * out->linesize[p], 0, s->width[p]);
                }
            }

            s->index = -1;
            memset(s->map, 0, s->width[0] * s->height[0]);
            memset(s->xy, 0, s->width[0] * s->height[0] * 4);

            s->hysteresis(s, base->data[p], alt->data[p],
                          out->data[p],
                          base->linesize[p], alt->linesize[p],
                          out->linesize[p],
                          s->width[p], s->height[p]);
        }
    }
    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}
Example #17
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    InterlaceContext *s = ctx->priv;
    AVFrame *out;
    int tff, ret;

    av_frame_free(&s->cur);
    s->cur  = s->next;
    s->next = buf;

    /* we need at least two frames */
    if (!s->cur || !s->next)
        return 0;

    if (s->cur->interlaced_frame) {
        av_log(ctx, AV_LOG_WARNING,
               "video is already interlaced, adjusting framerate only\n");
        out = av_frame_clone(s->cur);
        if (!out)
            return AVERROR(ENOMEM);
        out->pts /= 2;  // adjust pts to new framerate
        ret = ff_filter_frame(outlink, out);
        s->got_output = 1;
        return ret;
    }

    tff = (s->scan == MODE_TFF);
    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);

    av_frame_copy_props(out, s->cur);
    out->interlaced_frame = 1;
    out->top_field_first  = tff;
    out->pts             /= 2;  // adjust pts to new framerate

    /* copy upper/lower field from cur */
    copy_picture_field(s, s->cur, out, inlink, tff ? FIELD_UPPER : FIELD_LOWER, s->lowpass);
    av_frame_free(&s->cur);

    /* copy lower/upper field from next */
    copy_picture_field(s, s->next, out, inlink, tff ? FIELD_LOWER : FIELD_UPPER, s->lowpass);
    av_frame_free(&s->next);

    ret = ff_filter_frame(outlink, out);
    s->got_output = 1;

    return ret;
}
Example #18
/* Write a frame to the output */
static int write_frame(AVFilterContext *ctx, FPSContext *s, AVFilterLink *outlink, int *again)
{
    AVFrame *frame;

    av_assert1(s->frames_count == 2 || (s->status && s->frames_count == 1));

    /* We haven't yet determined the pts of the first frame */
    if (s->next_pts == AV_NOPTS_VALUE) {
        if (s->frames[0]->pts != AV_NOPTS_VALUE) {
            s->next_pts = s->frames[0]->pts;
            av_log(ctx, AV_LOG_VERBOSE, "Set first pts to %"PRId64"\n", s->next_pts);
        } else {
            av_log(ctx, AV_LOG_WARNING, "Discarding initial frame(s) with no "
                   "timestamp.\n");
            frame = shift_frame(ctx, s);
            av_frame_free(&frame);
            *again = 1;
            return 0;
        }
    }

    /* There are two conditions where we want to drop a frame:
     * - If we have two buffered frames and the second frame is acceptable
     *   as the next output frame, then drop the first buffered frame.
     * - If we have status (EOF) set, drop frames when we hit the
     *   status timestamp. */
    if ((s->frames_count == 2 && s->frames[1]->pts <= s->next_pts) ||
        (s->status            && s->status_pts     <= s->next_pts)) {

        frame = shift_frame(ctx, s);
        av_frame_free(&frame);
        *again = 1;
        return 0;

    /* Output a copy of the first buffered frame */
    } else {
        frame = av_frame_clone(s->frames[0]);
        if (!frame)
            return AVERROR(ENOMEM);
        // Make sure Closed Captions will not be duplicated
        av_frame_remove_side_data(s->frames[0], AV_FRAME_DATA_A53_CC);
        frame->pts = s->next_pts++;

        av_log(ctx, AV_LOG_DEBUG, "Writing frame with pts %"PRId64" to pts %"PRId64"\n",
               s->frames[0]->pts, frame->pts);
        s->cur_frame_out++;

        return ff_filter_frame(outlink, frame);
    }
}
Example #19
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    ThresholdContext *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *in, *threshold, *min, *max;
    int ret;

    if ((ret = ff_framesync_get_frame(&s->fs, 0, &in,        0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 1, &threshold, 0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 2, &min,       0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 3, &max,       0)) < 0)
        return ret;

    if (ctx->is_disabled) {
        out = av_frame_clone(in);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        int p;

        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, in);

        for (p = 0; p < s->nb_planes; p++) {
            if (!(s->planes & (1 << p))) {
                av_image_copy_plane(out->data[p], out->linesize[p],
                                    in->data[p], in->linesize[p],
                                    s->width[p] * s->bpc,
                                    s->height[p]);
                continue;
            }
            s->threshold(in->data[p], threshold->data[p],
                         min->data[p], max->data[p],
                         out->data[p],
                         in->linesize[p], threshold->linesize[p],
                         min->linesize[p], max->linesize[p],
                         out->linesize[p],
                         s->width[p], s->height[p]);
        }
    }

    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}
Example #20
static int ndi_write_video_packet(AVFormatContext *avctx, AVStream *st, AVPacket *pkt)
{
    struct NDIContext *ctx = avctx->priv_data;
    AVFrame *avframe, *tmp = (AVFrame *)pkt->data;

    if (tmp->format != AV_PIX_FMT_UYVY422 && tmp->format != AV_PIX_FMT_BGRA &&
        tmp->format != AV_PIX_FMT_BGR0 && tmp->format != AV_PIX_FMT_RGBA &&
        tmp->format != AV_PIX_FMT_RGB0) {
        av_log(avctx, AV_LOG_ERROR, "Got a frame with invalid pixel format.\n");
        return AVERROR(EINVAL);
    }

    if (tmp->linesize[0] < 0) {
        av_log(avctx, AV_LOG_ERROR, "Got a frame with negative linesize.\n");
        return AVERROR(EINVAL);
    }

    if (tmp->width  != ctx->video->xres ||
        tmp->height != ctx->video->yres) {
        av_log(avctx, AV_LOG_ERROR, "Got a frame with invalid dimension.\n");
        av_log(avctx, AV_LOG_ERROR, "tmp->width=%d, tmp->height=%d, ctx->video->xres=%d, ctx->video->yres=%d\n",
            tmp->width, tmp->height, ctx->video->xres, ctx->video->yres);
        return AVERROR(EINVAL);
    }

    avframe = av_frame_clone(tmp);
    if (!avframe)
        return AVERROR(ENOMEM);

    ctx->video->timecode = av_rescale_q(pkt->pts, st->time_base, NDI_TIME_BASE_Q);

    ctx->video->line_stride_in_bytes = avframe->linesize[0];
    ctx->video->p_data = (void *)(avframe->data[0]);

    av_log(avctx, AV_LOG_DEBUG, "%s: pkt->pts=%"PRId64", timecode=%"PRId64", st->time_base=%d/%d\n",
        __func__, pkt->pts, ctx->video->timecode, st->time_base.num, st->time_base.den);

    /* asynchronous for one frame, but will block if a second frame
        is given before the first one has been sent */
    NDIlib_send_send_video_async(ctx->ndi_send, ctx->video);

    /* by the time this call returns, NDI has finished with the frame sent
     * on the previous call, so release it and keep the new clone alive */
    av_frame_free(&ctx->last_avframe);
    ctx->last_avframe = avframe;

    return 0;
}
Example #21
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    MidEqualizerContext *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *in0, *in1;
    int ret;

    if ((ret = ff_framesync_get_frame(&s->fs, 0, &in0, 0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 1, &in1, 0)) < 0)
        return ret;

    if (ctx->is_disabled) {
        out = av_frame_clone(in0);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        int p;

        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, in0);

        for (p = 0; p < s->nb_planes; p++) {
            if (!((1 << p) & s->planes)) {
                av_image_copy_plane(out->data[p], out->linesize[p], in0->data[p], in0->linesize[p],
                                    s->width[0][p] * (1 + (s->histogram_size > 256)), s->height[0][p]);
                continue;
            }

            s->midequalizer(in0->data[p], in1->data[p],
                            out->data[p],
                            in0->linesize[p], in1->linesize[p],
                            out->linesize[p],
                            s->width[0][p], s->height[0][p],
                            s->width[1][p], s->height[1][p],
                            s->histogram[0], s->histogram[1],
                            s->cchange, s->histogram_size);
        }
    }
    out->pts = av_rescale_q(in0->pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}
Example #22
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    MaskedMergeContext *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *base, *overlay, *mask;
    int ret;

    if ((ret = ff_framesync_get_frame(&s->fs, 0, &base,    0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 1, &overlay, 0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 2, &mask,    0)) < 0)
        return ret;

    if (ctx->is_disabled) {
        out = av_frame_clone(base);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        int p;

        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, base);

        for (p = 0; p < s->nb_planes; p++) {
            if (!((1 << p) & s->planes)) {
                av_image_copy_plane(out->data[p], out->linesize[p], base->data[p], base->linesize[p],
                                    s->width[p], s->height[p]);
                continue;
            }

            s->maskedmerge(base->data[p], base->linesize[p],
                           overlay->data[p], overlay->linesize[p],
                           mask->data[p], mask->linesize[p],
                           out->data[p], out->linesize[p],
                           s->width[p], s->height[p],
                           s->max, s->half, s->depth);
        }
    }
    out->pts = av_rescale_q(base->pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}
Example #23
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ATADenoiseContext *s = ctx->priv;
    int ret = 0;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && !ctx->is_disabled && s->available) {
        AVFrame *buf = av_frame_clone(ff_bufqueue_peek(&s->q, s->available));
        if (!buf)
            return AVERROR(ENOMEM);

        ret = filter_frame(ctx->inputs[0], buf);
        s->available--;
    }

    return ret;
}
Example #24
File: split.c Project: AVLeo/libav
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    int i, ret = 0;

    for (i = 0; i < ctx->nb_outputs; i++) {
        AVFrame *buf_out = av_frame_clone(frame);
        if (!buf_out) {
            ret = AVERROR(ENOMEM);
            break;
        }

        ret = ff_filter_frame(ctx->outputs[i], buf_out);
        if (ret < 0)
            break;
    }
    av_frame_free(&frame);
    return ret;
}
Example #25
// packets must be decoded in order
// indata must be inlen + FF_INPUT_BUFFER_PADDING_SIZE in length
int nv_avc_decode(unsigned char* indata, int inlen) {
	int err;
	int got_pic = 0;

	pkt.data = indata;
	pkt.size = inlen;

	while (pkt.size > 0) {
		got_pic = 0;
		err = avcodec_decode_video2(
			decoder_ctx,
			dec_frame,
			&got_pic,
			&pkt);
		if (err < 0) {
			__android_log_write(ANDROID_LOG_ERROR, "NVAVCDEC",
				"Decode failed");
			got_pic = 0;
			break;
		}

		// avcodec_decode_video2() returned the number of bytes consumed
		pkt.size -= err;
		pkt.data += err;
	}
	
	// Only copy the picture at the end of decoding the packet
	if (got_pic) {
		pthread_mutex_lock(&mutex);

		// Only clone this frame if the last frame was taken.
		// This saves on extra copies for frames that don't get
		// rendered.
		if (yuv_frame == NULL) {
			// Clone a new frame
			yuv_frame = av_frame_clone(dec_frame);
		}

		pthread_mutex_unlock(&mutex);
	}

	return err < 0 ? err : 0;
}
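This snippet uses the pre-3.1 avcodec_decode_video2() API, whose return value is the number of bytes consumed. For comparison, a hedged sketch of the same "clone only when the consumer took the previous frame" idea with the newer send/receive API (decode_and_stash is a hypothetical helper; the mutex handling is omitted):

#include <libavcodec/avcodec.h>

static int decode_and_stash(AVCodecContext *decoder_ctx, AVPacket *pkt,
                            AVFrame *dec_frame, AVFrame **yuv_frame)
{
    int err = avcodec_send_packet(decoder_ctx, pkt);
    if (err < 0)
        return err;

    while ((err = avcodec_receive_frame(decoder_ctx, dec_frame)) >= 0) {
        if (!*yuv_frame)                           /* last frame was taken */
            *yuv_frame = av_frame_clone(dec_frame);
        av_frame_unref(dec_frame);
    }
    return (err == AVERROR(EAGAIN) || err == AVERROR_EOF) ? 0 : err;
}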
Example #26
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FieldHintContext *s = ctx->priv;
    int ret;

    if (s->eof)
        return AVERROR_EOF;

    ret = ff_request_frame(ctx->inputs[0]);
    if (ret == AVERROR_EOF && s->frame[2]) {
        AVFrame *next = av_frame_clone(s->frame[2]);
        if (!next)
            return AVERROR(ENOMEM);
        ret = filter_frame(ctx->inputs[0], next);
        s->eof = 1;
    }

    return ret;
}
Example #27
static void process(ID3ASFilterContext *context, AVFrame *frame, AVRational timebase)
{
  codec_t *this = context->priv_data;
  
  for (int i = 0; i < context->num_downstream_filters; i++) {
    
    frame_entry_t *frame_entry = (frame_entry_t *) malloc(sizeof(frame_entry_t));
    frame_entry->timebase = timebase;
    frame_entry->frame = av_frame_clone(frame);
    frame_entry->exit_thread = 0;
    
    ADD_TO_QUEUE(context, this->threads[i].inbound_frame_queue, frame_entry);
  }
  
  if (sync_mode) {
    for (int i = 0; i < context->num_downstream_filters; i++) {
      /* pthread_cond_wait() must be called with the mutex held */
      pthread_mutex_lock(&this->threads[i].complete_mutex);
      pthread_cond_wait(&this->threads[i].complete, &this->threads[i].complete_mutex);
      pthread_mutex_unlock(&this->threads[i].complete_mutex);
    }
  }
}
Example #28
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
    AVFilterContext *ctx = inlink->dst;
    SeparateFieldsContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int ret;

    inpicref->height = outlink->h;
    inpicref->interlaced_frame = 0;

    if (!s->second) {
        goto clone;
    } else {
        AVFrame *second = s->second;

        extract_field(second, s->nb_planes, second->top_field_first);

        if (second->pts != AV_NOPTS_VALUE &&
            inpicref->pts != AV_NOPTS_VALUE)
            second->pts += inpicref->pts;
        else
            second->pts = AV_NOPTS_VALUE;

        ret = ff_filter_frame(outlink, second);
        if (ret < 0)
            return ret;
clone:
        s->second = av_frame_clone(inpicref);
        if (!s->second)
            return AVERROR(ENOMEM);
    }

    extract_field(inpicref, s->nb_planes, !inpicref->top_field_first);

    if (inpicref->pts != AV_NOPTS_VALUE)
        inpicref->pts *= 2;

    return ff_filter_frame(outlink, inpicref);
}
Example #29
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    ThresholdContext *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *in, *threshold, *min, *max;
    ThreadData td;
    int ret;

    if ((ret = ff_framesync_get_frame(&s->fs, 0, &in,        0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 1, &threshold, 0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 2, &min,       0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 3, &max,       0)) < 0)
        return ret;

    if (ctx->is_disabled) {
        out = av_frame_clone(in);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, in);

        td.out = out;
        td.in = in;
        td.threshold = threshold;
        td.min = min;
        td.max = max;
        ctx->internal->execute(ctx, filter_slice, &td, NULL,
                               FFMIN(s->height[2], ff_filter_get_nb_threads(ctx)));
    }

    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}
Example #30
static int request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    IDETContext *idet = ctx->priv;
    int ret;

    if (idet->eof)
        return AVERROR_EOF;

    ret = ff_request_frame(link->src->inputs[0]);

    if (ret == AVERROR_EOF && idet->cur && !idet->analyze_interlaced_flag_done) {
        AVFrame *next = av_frame_clone(idet->next);

        if (!next)
            return AVERROR(ENOMEM);

        ret = filter_frame(link->src->inputs[0], next);
        idet->eof = 1;
    }

    return ret;
}