Code example #1
File: vf_yadif.c  Project: aznrice/android-libav
static void filter(AVFilterContext *ctx, AVFrame *dstpic,
                   int parity, int tff)
{
    YADIFContext *yadif = ctx->priv;
    int y, i;

    for (i = 0; i < yadif->csp->nb_components; i++) {
        int w = dstpic->width;
        int h = dstpic->height;
        int refs = yadif->cur->linesize[i];
        int df = (yadif->csp->comp[i].depth_minus1 + 8) / 8; /* bytes per sample */
        int l_edge, l_edge_pix;

        if (i == 1 || i == 2) {
        /* Why is this not part of the per-plane description thing? */
            w >>= yadif->csp->log2_chroma_w;
            h >>= yadif->csp->log2_chroma_h;
        }

        /* filtering reads 3 pixels to the left/right; to avoid invalid reads,
         * we need to call the c variant which avoids this for border pixels
         */
        l_edge     = yadif->req_align; /* left margin in bytes handled by filter_edges */
        l_edge_pix = l_edge / df;      /* same margin expressed in pixels */

        for (y = 0; y < h; y++) {
            /* lines of the field being reconstructed are filtered; lines
             * of the other field are copied through unchanged below */
            if ((y ^ parity) & 1) {
                uint8_t *prev = &yadif->prev->data[i][y * refs];
                uint8_t *cur  = &yadif->cur ->data[i][y * refs];
                uint8_t *next = &yadif->next->data[i][y * refs];
                uint8_t *dst  = &dstpic->data[i][y * dstpic->linesize[i]];
                /* next to the top/bottom border force mode 2, which skips
                 * the check that would read two more lines above/below */
                int     mode  = y == 1 || y + 2 == h ? 2 : yadif->mode;
                if (yadif->req_align) {
                    yadif->filter_line(dst + l_edge, prev + l_edge, cur + l_edge,
                                       next + l_edge, w - l_edge_pix - 3,
                                       y + 1 < h ? refs : -refs,
                                       y ? -refs : refs,
                                       parity ^ tff, mode);
                    yadif->filter_edges(dst, prev, cur, next, w,
                                         y + 1 < h ? refs : -refs,
                                         y ? -refs : refs,
                                         parity ^ tff, mode, l_edge_pix);
                } else {
                    yadif->filter_line(dst, prev, cur, next, w,
                                       y + 1 < h ? refs : -refs,
                                       y ? -refs : refs,
                                       parity ^ tff, mode);
                }
            } else {
                memcpy(&dstpic->data[i][y * dstpic->linesize[i]],
                       &yadif->cur->data[i][y * refs], w * df);
            }
        }
    }
}
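
For reference, a sketch of the two callback prototypes that the calls above imply; the typedef names and parameter names below are assumptions inferred from the call sites, not copied from the project's headers:

#include <stdint.h>

/* Sketch only: shapes inferred from how filter_line/filter_edges are
 * invoked above.  prefs/mrefs are byte offsets to the line below/above,
 * already mirrored at the frame borders by the caller. */
typedef void (*yadif_filter_line_fn)(uint8_t *dst, uint8_t *prev,
                                     uint8_t *cur, uint8_t *next,
                                     int w, int prefs, int mrefs,
                                     int parity, int mode);

/* filter_edges gets the full-width pointers plus, as its last argument,
 * the number of left-border pixels (l_edge_pix above) that filter_line
 * skipped and that it must cover with plain C code. */
typedef void (*yadif_filter_edges_fn)(uint8_t *dst, uint8_t *prev,
                                      uint8_t *cur, uint8_t *next,
                                      int w, int prefs, int mrefs,
                                      int parity, int mode, int l_edge_pix);
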
Code example #2
File: vf_yadif.c  Project: 10045125/xuggle-xuggler
static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic,
                   int parity, int tff)
{
    YADIFContext *yadif = ctx->priv;
    int y, i;

    for (i = 0; i < yadif->csp->nb_components; i++) {
        int w = dstpic->video->w;
        int h = dstpic->video->h;
        int refs = yadif->cur->linesize[i];
        int df = (yadif->csp->comp[i].depth_minus1 + 8) / 8;

        if (i == 1 || i == 2) {
        /* Why is this not part of the per-plane description thing? */
            w >>= yadif->csp->log2_chroma_w;
            h >>= yadif->csp->log2_chroma_h;
        }

        for (y = 0; y < h; y++) {
            if ((y ^ parity) & 1) {
                uint8_t *prev = &yadif->prev->data[i][y * refs];
                uint8_t *cur  = &yadif->cur ->data[i][y * refs];
                uint8_t *next = &yadif->next->data[i][y * refs];
                uint8_t *dst  = &dstpic->data[i][y * dstpic->linesize[i]];
                int     mode  = y == 1 || y + 2 == h ? 2 : yadif->mode;
                yadif->filter_line(dst, prev, cur, next, w,
                                   y + 1 < h ? refs : -refs,
                                   y ? -refs : refs,
                                   parity ^ tff, mode);
            } else {
                memcpy(&dstpic->data[i][y*dstpic->linesize[i]],
                       &yadif->cur->data[i][y*refs], w*df);
            }
        }
    }
}
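
As a side note, a minimal standalone sketch of what the per-plane w, h and df values computed above come out to for one concrete pixel format; AV_PIX_FMT_YUV420P10 and the 1920x1080 geometry are arbitrary example inputs, and depth_minus1 matches the older libavutil API these examples were written against (newer releases renamed the field to depth):

#include <stdio.h>
#include <libavutil/pixdesc.h>

/* Illustration only: derives per-plane width/height and bytes per
 * sample the same way the loop above does. */
int main(void)
{
    const AVPixFmtDescriptor *csp = av_pix_fmt_desc_get(AV_PIX_FMT_YUV420P10);
    int width = 1920, height = 1080, i;

    for (i = 0; i < csp->nb_components; i++) {
        int w  = width;
        int h  = height;
        int df = (csp->comp[i].depth_minus1 + 8) / 8; /* 10 bit -> 2 bytes */

        if (i == 1 || i == 2) { /* chroma planes are subsampled */
            w >>= csp->log2_chroma_w;
            h >>= csp->log2_chroma_h;
        }
        printf("plane %d: %dx%d, %d byte(s) per sample\n", i, w, h, df);
    }
    return 0;
}
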
Code example #3
File: vf_yadif.c  Project: xyxdasnjss/FFmpeg
static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic,
                   int parity, int tff)
{
    YADIFContext *yadif = ctx->priv;
    int y, i;

    for (i = 0; i < yadif->csp->nb_components; i++) {
        int w = dstpic->video->w;
        int h = dstpic->video->h;
        int refs = yadif->cur->linesize[i];
        int absrefs = FFABS(refs);
        int df = (yadif->csp->comp[i].depth_minus1 + 8) / 8;

        if (i == 1 || i == 2) {
        /* Why is this not part of the per-plane description thing? */
            w >>= yadif->csp->log2_chroma_w;
            h >>= yadif->csp->log2_chroma_h;
        }

        /* (re)allocate a temporary buffer large enough for the current
         * line plus two lines above and below, with 64 bytes of headroom
         * on each side; it is used for the border rows further down */
        if (yadif->temp_line_size < absrefs) {
            av_free(yadif->temp_line);
            yadif->temp_line = av_mallocz(2*64 + 5*absrefs);
            yadif->temp_line_size = absrefs;
        }

        for (y = 0; y < h; y++) {
            if ((y ^ parity) & 1) {
                uint8_t *prev = &yadif->prev->data[i][y * refs];
                uint8_t *cur  = &yadif->cur ->data[i][y * refs];
                uint8_t *next = &yadif->next->data[i][y * refs];
                uint8_t *dst  = &dstpic->data[i][y * dstpic->linesize[i]];
                int     mode  = y == 1 || y + 2 == h ? 2 : yadif->mode;
                /* offsets to the lines below/above, mirrored at the borders */
                int     prefs = y + 1 < h ? refs : -refs;
                int     mrefs = y ? -refs : refs;

                /* near the top/bottom border, work on a padded copy of the
                 * lines around the current one so that reads one pixel past
                 * the line ends and up to two lines away stay inside
                 * allocated memory */
                if (y <= 1 || y + 2 >= h) {
                    uint8_t *tmp = yadif->temp_line + 64 + 2*absrefs;
                    if (mode < 2)
                        memcpy(tmp + 2*mrefs, cur + 2*mrefs, w*df);
                    memcpy(tmp + mrefs, cur + mrefs, w*df);
                    memcpy(tmp,         cur,         w*df);
                    if (prefs != mrefs) {
                        memcpy(tmp + prefs, cur + prefs, w*df);
                        if (mode < 2)
                            memcpy(tmp + 2*prefs, cur + 2*prefs, w*df);
                    }
                    cur = tmp;
                }

                yadif->filter_line(dst, prev, cur, next, w,
                                   prefs, mrefs,
                                   parity ^ tff, mode);
            } else {
                memcpy(&dstpic->data[i][y * dstpic->linesize[i]],
                       &yadif->cur->data[i][y * refs], w * df);
            }
        }
    }
}
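
Finally, a hypothetical sketch of how filter() might be driven per output field; the wrapper name output_one_field, the is_second flag and the parity expression are illustrative assumptions, not taken from the file above:

/* Hypothetical caller, for illustration only: assumed to sit in the
 * same file as filter() above.  tff is the top-field-first flag of the
 * source; is_second selects which of the two output fields of the
 * current input frame is being reconstructed (field-rate modes). */
static void output_one_field(AVFilterContext *ctx, AVFilterBufferRef *out,
                             int is_second, int tff)
{
    /* parity picks which lines ((y ^ parity) & 1 above) are rebuilt by
     * interpolation rather than copied through from the current frame */
    filter(ctx, out, tff ^ !is_second, tff);
}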