Example #1
static int dxva2_transfer_data(AVHWFramesContext *ctx, AVFrame *dst,
                               const AVFrame *src)
{
    IDirect3DSurface9 *surface;
    D3DSURFACE_DESC    surfaceDesc;
    D3DLOCKED_RECT     LockedRect;
    HRESULT            hr;

    int download = !!src->hw_frames_ctx;
    int bytes_per_component;

    switch (ctx->sw_format) {
        case AV_PIX_FMT_NV12:
            bytes_per_component = 1;
            break;
        case AV_PIX_FMT_P010:
            bytes_per_component = 2;
            break;
        default:
            av_assert0(0);
    }

    surface = (IDirect3DSurface9*)(download ? src->data[3] : dst->data[3]);

    hr = IDirect3DSurface9_GetDesc(surface, &surfaceDesc);
    if (FAILED(hr)) {
        av_log(ctx, AV_LOG_ERROR, "Error getting a surface description\n");
        return AVERROR_UNKNOWN;
    }

    hr = IDirect3DSurface9_LockRect(surface, &LockedRect, NULL,
                                    download ? D3DLOCK_READONLY : D3DLOCK_DISCARD);
    if (FAILED(hr)) {
        av_log(ctx, AV_LOG_ERROR, "Unable to lock DXVA2 surface\n");
        return AVERROR_UNKNOWN;
    }

    if (download) {
        av_image_copy_plane(dst->data[0], dst->linesize[0],
                            (uint8_t*)LockedRect.pBits, LockedRect.Pitch,
                            src->width * bytes_per_component, src->height);
        av_image_copy_plane(dst->data[1], dst->linesize[1],
                            (uint8_t*)LockedRect.pBits + LockedRect.Pitch * surfaceDesc.Height,
                            LockedRect.Pitch, src->width * bytes_per_component, src->height / 2);
    } else {
        av_image_copy_plane((uint8_t*)LockedRect.pBits, LockedRect.Pitch,
                            src->data[0], src->linesize[0],
                            src->width * bytes_per_component, src->height);
        av_image_copy_plane((uint8_t*)LockedRect.pBits + LockedRect.Pitch * surfaceDesc.Height,
                            LockedRect.Pitch, src->data[1], src->linesize[1],
                            src->width * bytes_per_component, src->height / 2);
    }

    IDirect3DSurface9_UnlockRect(surface);

    return 0;
}
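The two copies in each branch rely on the DXVA2 NV12/P010 surface layout: a locked surface is one contiguous buffer holding surfaceDesc.Height rows of luma, each LockedRect.Pitch bytes apart, immediately followed by half as many rows of interleaved chroma at the same pitch. A minimal sketch of that pointer math (the helper name is hypothetical):

/* Sketch: locate the planes inside a locked NV12/P010 surface.
 * The interleaved CbCr plane starts Pitch * Height bytes after the luma. */
static void get_nv12_planes(const D3DLOCKED_RECT *rect,
                            const D3DSURFACE_DESC *desc,
                            uint8_t **luma, uint8_t **chroma)
{
    *luma   = (uint8_t *)rect->pBits;
    *chroma = *luma + (size_t)rect->Pitch * desc->Height;
}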
Example #2
static int dxva2_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
    LPDIRECT3DSURFACE9 surface = (LPDIRECT3DSURFACE9)frame->data[3];
    HwAccelContext    *hac     = s->opaque;
    DXVA2Context      *ctx     = hac->hwaccel_ctx;
    D3DSURFACE_DESC    surfaceDesc;
    D3DLOCKED_RECT     LockedRect;
    HRESULT            hr;
    int                ret;
    int                i;

    IDirect3DSurface9_GetDesc(surface, &surfaceDesc);

    ctx->tmp_frame->width  = frame->width;
    ctx->tmp_frame->height = frame->height;
    ctx->tmp_frame->format = AV_PIX_FMT_NV12;

    ret = av_frame_get_buffer(ctx->tmp_frame, 32);
    if (ret < 0)
        return ret;

    hr = IDirect3DSurface9_LockRect(surface, &LockedRect, NULL, D3DLOCK_READONLY);
    if (FAILED(hr)) {
        av_log(NULL, AV_LOG_ERROR, "Unable to lock DXVA2 surface\n");
        return AVERROR_UNKNOWN;
    }

    av_image_copy_plane(ctx->tmp_frame->data[0], ctx->tmp_frame->linesize[0],
                        (uint8_t*)LockedRect.pBits,
                        LockedRect.Pitch, frame->width, frame->height);

    av_image_copy_plane(ctx->tmp_frame->data[1], ctx->tmp_frame->linesize[1],
                        (uint8_t*)LockedRect.pBits + LockedRect.Pitch * surfaceDesc.Height,
                        LockedRect.Pitch, frame->width, frame->height / 2);

    IDirect3DSurface9_UnlockRect(surface);
    for (i = 0; i < ctx->num_surfaces; i++) {
        if (ctx->surfaces[i] == surface) {
            break;
        }
    }
    av_log(NULL, AV_LOG_DEBUG, "dxva2_retrieve_data:%d\n", i);
    ret = av_frame_copy_props(ctx->tmp_frame, frame);
    if (ret < 0)
        goto fail;

    av_frame_unref(frame);
    av_frame_move_ref(frame, ctx->tmp_frame);

    return 0;
fail:
    av_frame_unref(ctx->tmp_frame);
    return ret;
}
Example #3
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    WeaveContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int i;

    if (!s->prev) {
        s->prev = in;
        return 0;
    }

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        av_frame_free(&s->prev);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    for (i = 0; i < s->nb_planes; i++) {
        if (s->double_weave && !(inlink->frame_count_out & 1)) {
            av_image_copy_plane(out->data[i] + out->linesize[i] * s->first_field,
                                out->linesize[i] * 2,
                                in->data[i], in->linesize[i],
                                s->linesize[i], s->planeheight[i]);
            av_image_copy_plane(out->data[i] + out->linesize[i] * !s->first_field,
                                out->linesize[i] * 2,
                                s->prev->data[i], s->prev->linesize[i],
                                s->linesize[i], s->planeheight[i]);
        } else {
            av_image_copy_plane(out->data[i] + out->linesize[i] * !s->first_field,
                                out->linesize[i] * 2,
                                in->data[i], in->linesize[i],
                                s->linesize[i], s->planeheight[i]);
            av_image_copy_plane(out->data[i] + out->linesize[i] * s->first_field,
                                out->linesize[i] * 2,
                                s->prev->data[i], s->prev->linesize[i],
                                s->linesize[i], s->planeheight[i]);
        }
    }

    out->pts = s->double_weave ? s->prev->pts : in->pts / 2;
    out->interlaced_frame = 1;
    out->top_field_first = !s->first_field;

    if (!s->double_weave)
        av_frame_free(&in);
    av_frame_free(&s->prev);
    if (s->double_weave)
        s->prev = in;
    return ff_filter_frame(outlink, out);
}
Example #4
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFilterLink *outlink = ctx->outputs[0];
    StackContext *s = fs->opaque;
    AVFrame **in = s->frames;
    AVFrame *out;
    int i, p, ret, offset[4] = { 0 };

    for (i = 0; i < s->nb_inputs; i++) {
        if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0)
            return ret;
    }

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterLink *inlink = ctx->inputs[i];
        int linesize[4];
        int height[4];

        if ((ret = av_image_fill_linesizes(linesize, inlink->format, inlink->w)) < 0) {
            av_frame_free(&out);
            return ret;
        }

        height[1] = height[2] = FF_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h);
        height[0] = height[3] = inlink->h;

        for (p = 0; p < s->nb_planes; p++) {
            if (s->is_vertical) {
                av_image_copy_plane(out->data[p] + offset[p] * out->linesize[p],
                                    out->linesize[p],
                                    in[i]->data[p],
                                    in[i]->linesize[p],
                                    linesize[p], height[p]);
                offset[p] += height[p];
            } else {
                av_image_copy_plane(out->data[p] + offset[p],
                                    out->linesize[p],
                                    in[i]->data[p],
                                    in[i]->linesize[p],
                                    linesize[p], height[p]);
                offset[p] += linesize[p];
            }
        }
    }

    return ff_filter_frame(outlink, out);
}
Example #5
static int nvenc_copy_frame(NV_ENC_LOCK_INPUT_BUFFER *in, const AVFrame *frame)
{
    uint8_t *buf = in->bufferDataPtr;
    int off      = frame->height * in->pitch;

    switch (frame->format) {
    case AV_PIX_FMT_YUV420P:
        av_image_copy_plane(buf, in->pitch,
                            frame->data[0], frame->linesize[0],
                            frame->width, frame->height);
        buf += off;

        av_image_copy_plane(buf, in->pitch >> 1,
                            frame->data[2], frame->linesize[2],
                            frame->width >> 1, frame->height >> 1);

        buf += off >> 2;

        av_image_copy_plane(buf, in->pitch >> 1,
                            frame->data[1], frame->linesize[1],
                            frame->width >> 1, frame->height >> 1);
        break;
    case AV_PIX_FMT_NV12:
        av_image_copy_plane(buf, in->pitch,
                            frame->data[0], frame->linesize[0],
                            frame->width, frame->height);
        buf += off;

        av_image_copy_plane(buf, in->pitch,
                            frame->data[1], frame->linesize[1],
                            frame->width, frame->height >> 1);
        break;
    case AV_PIX_FMT_YUV444P:
        av_image_copy_plane(buf, in->pitch,
                            frame->data[0], frame->linesize[0],
                            frame->width, frame->height);
        buf += off;

        av_image_copy_plane(buf, in->pitch,
                            frame->data[1], frame->linesize[1],
                            frame->width, frame->height);
        buf += off;

        av_image_copy_plane(buf, in->pitch,
                            frame->data[2], frame->linesize[2],
                            frame->width, frame->height);
        break;
    default:
        return AVERROR_BUG;
    }

    return 0;
}
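A quick check of the YUV420P offsets above, assuming a hypothetical 1920x1080 frame with a 2048-byte pitch: the luma plane spans height * pitch bytes, and each half-resolution chroma plane at pitch >> 1 spans a quarter of that, which is why the code steps by off and then by off >> 2. Note that the V plane (frame->data[2]) is written before the U plane, i.e. the locked buffer uses YV12 plane order:

/* Sketch: plane offsets in the locked buffer for the YUV420P case.
 * Numbers are illustrative only. */
static void yuv420p_offsets_example(void)
{
    int pitch  = 2048, height = 1080;
    int off    = height * pitch;               /* luma plane: 2211840 bytes         */
    int c_size = (pitch >> 1) * (height >> 1); /* chroma plane: 552960 == off >> 2  */
    /* Buffer layout: Y at 0, V at off, U at off + c_size. */
    (void)off; (void)c_size;
}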
Example #6
static void draw_frame(AVFilterContext *ctx,
                       AVFrame *main_buf,
                       AVFrame *alpha_buf)
{
    AlphaMergeContext *merge = ctx->priv;
    int h = main_buf->height;

    if (merge->is_packed_rgb) {
        int x, y;
        uint8_t *pin, *pout;
        for (y = 0; y < h; y++) {
            pin = alpha_buf->data[0] + y * alpha_buf->linesize[0];
            pout = main_buf->data[0] + y * main_buf->linesize[0] + merge->rgba_map[A];
            for (x = 0; x < main_buf->width; x++) {
                *pout = *pin;
                pin += 1;
                pout += 4;
            }
        }
    } else {
        const int main_linesize = main_buf->linesize[A];
        const int alpha_linesize = alpha_buf->linesize[Y];
        av_image_copy_plane(main_buf->data[A], main_linesize,
                            alpha_buf->data[Y], alpha_linesize,
                            FFMIN(main_linesize, alpha_linesize), alpha_buf->height);
    }
}
Example #7
/**
 * Blur an image plane using a mask.
 *
 * This function processes an entire plane. Pixels outside of the logo are
 * copied to the output without change, and pixels inside the logo have the
 * de-blurring function applied.
 *
 * @param mask           set of precomputed blur masks, indexed by distance
 * @param src_data       the image to have its logo removed
 * @param src_linesize   how far apart (in memory) two consecutive lines of
 *                       the source image are
 * @param dst_data       where the output image will be stored
 * @param dst_linesize   same as src_linesize, but for the destination image
 * @param mask_data      the image that stores the distance to the edge of
 *                       the logo for each pixel
 * @param mask_linesize  same as src_linesize, but for the mask image
 * @param w              width of the image, the same for source and destination
 * @param h              height of the image, the same for source and destination
 * @param direct         if non-zero, source and destination are the same, and
 *                       a lot of time can be saved by not copying pixels that
 *                       haven't changed
 * @param bbox           bounding box of the logo: the smallest and largest x-
 *                       and y-coordinates that contain at least one logo pixel
 */
static void blur_image(int ***mask,
                       const uint8_t *src_data,  int src_linesize,
                             uint8_t *dst_data,  int dst_linesize,
                       const uint8_t *mask_data, int mask_linesize,
                       int w, int h, int direct,
                       FFBoundingBox *bbox)
{
    int x, y;
    uint8_t *dst_line;
    const uint8_t *src_line;

    if (!direct)
        av_image_copy_plane(dst_data, dst_linesize, src_data, src_linesize, w, h);

    for (y = bbox->y1; y <= bbox->y2; y++) {
        src_line = src_data + src_linesize * y;
        dst_line = dst_data + dst_linesize * y;

        for (x = bbox->x1; x <= bbox->x2; x++) {
            if (mask_data[y * mask_linesize + x]) {
                /* Only process if we are in the mask. */
                dst_line[x] = blur_pixel(mask,
                                         mask_data, mask_linesize,
                                         dst_data, dst_linesize,
                                         w, h, x, y);
            } else {
                /* Else just copy the data. */
                if (!direct)
                    dst_line[x] = src_line[x];
            }
        }
    }
}
Example #8
static int load_mask(uint8_t **mask, int *w, int *h,
                     const char *filename, void *log_ctx)
{
    int ret;
    enum PixelFormat pix_fmt;
    uint8_t *src_data[4], *gray_data[4];
    int src_linesize[4], gray_linesize[4];

    /* load image from file */
    if ((ret = ff_load_image(src_data, src_linesize, w, h, &pix_fmt, filename, log_ctx)) < 0)
        return ret;

    /* convert the image to GRAY8 */
    if ((ret = ff_scale_image(gray_data, gray_linesize, *w, *h, PIX_FMT_GRAY8,
                              src_data, src_linesize, *w, *h, pix_fmt,
                              log_ctx)) < 0)
        goto end;

    /* copy mask to a newly allocated array */
    *mask = av_malloc(*w * *h);
    if (!*mask) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    av_image_copy_plane(*mask, *w, gray_data[0], gray_linesize[0], *w, *h);

end:
    av_free(src_data[0]);
    av_free(gray_data[0]);
    return ret;
}
Example #9
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    ConvolutionContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int plane;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    for (plane = 0; plane < s->nb_planes; plane++) {
        ThreadData td;

        if (s->copy[plane]) {
            av_image_copy_plane(out->data[plane], out->linesize[plane],
                                in->data[plane], in->linesize[plane],
                                s->planewidth[plane] * s->bpc,
                                s->planeheight[plane]);
            continue;
        }

        td.in = in;
        td.out = out;
        td.plane = plane;
        ctx->internal->execute(ctx, s->filter[plane], &td, NULL, FFMIN(s->planeheight[plane], s->nb_threads));
    }

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
Example #10
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    ConvolutionContext *s = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;
    int plane;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    for (plane = 0; plane < s->nb_planes; plane++) {
        if (s->copy[plane]) {
            av_image_copy_plane(out->data[plane], out->linesize[plane],
                                in->data[plane], in->linesize[plane],
                                s->planewidth[plane],
                                s->planeheight[plane]);
            continue;
        }

        s->filter[plane](s, in, out, plane);
    }

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
Example #11
static int android_render_rgb_on_rgb(ANativeWindow_Buffer *out_buffer, const SDL_VoutOverlay *overlay, int bpp)
{
    // SDLTRACE("SDL_VoutAndroid: android_render_rgb_on_rgb(%p)", overlay);
    assert(overlay->format == SDL_FCC_RV16);
    assert(overlay->planes == 1);

    int min_height = CAPMIN(out_buffer->height, overlay->h);
    int dst_stride = out_buffer->stride;
    int src_line_size = overlay->pitches[0];
    int dst_line_size = dst_stride * bpp / 8;

    uint8_t *dst_pixels = out_buffer->bits;
    const uint8_t *src_pixels = overlay->pixels[0];

    if (dst_line_size == src_line_size) {
        int plane_size = src_line_size * min_height;
        // ALOGE("android_render_rgb_on_rgb (pix-match) %p %p %d", dst_pixels, src_pixels, plane_size);
        memcpy(dst_pixels, src_pixels, plane_size);
    } else {
        // TODO: 9 padding
        int bytewidth = CAPMIN(dst_line_size, src_line_size);

        // ALOGE("android_render_rgb_on_rgb (pix-mismatch) %p %d %p %d %d %d", dst_pixels, dst_line_size, src_pixels, src_line_size, bytewidth, min_height);
        av_image_copy_plane(dst_pixels, dst_line_size, src_pixels, src_line_size, bytewidth, min_height);
    }

    return 0;
}
Example #12
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    NContext *s = ctx->priv;
    AVFrame *out;
    int plane, y;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    for (plane = 0; plane < s->nb_planes; plane++) {
        const int threshold = s->threshold[plane];

        if (threshold) {
            const uint8_t *src = in->data[plane];
            uint8_t *dst = out->data[plane];
            int stride = in->linesize[plane];
            int height = s->planeheight[plane];
            int width  = s->planewidth[plane];
            uint8_t *p0 = s->buffer + 16;
            uint8_t *p1 = p0 + s->planewidth[0];
            uint8_t *p2 = p1 + s->planewidth[0];
            uint8_t *orig = p0, *end = p2;

            line_copy8(p0, src + stride, width, 1);
            line_copy8(p1, src, width, 1);

            for (y = 0; y < height; y++) {
                const uint8_t *coordinates[] = { p0 - 1, p0, p0 + 1,
                                                 p1 - 1,     p1 + 1,
                                                 p2 - 1, p2, p2 + 1};
                src += stride * (y < height - 1 ? 1 : -1);
                line_copy8(p2, src, width, 1);

                s->filter(dst, p1, width, threshold, coordinates, s->coordinates);

                p0 = p1;
                p1 = p2;
                p2 = (p2 == end) ? orig: p2 + s->planewidth[0];
                dst += out->linesize[plane];
            }
        } else {
            av_image_copy_plane(out->data[plane], out->linesize[plane],
                                in->data[plane], in->linesize[plane],
                                s->planewidth[plane], s->planeheight[plane]);
        }
    }

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
Example #13
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    HysteresisContext *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *base, *alt;
    int ret;

    if ((ret = ff_framesync_get_frame(&s->fs, 0, &base, 0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 1, &alt,  0)) < 0)
        return ret;

    if (ctx->is_disabled) {
        out = av_frame_clone(base);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        int p;

        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, base);

        for (p = 0; p < s->nb_planes; p++) {
            if (!((1 << p) & s->planes)) {
                av_image_copy_plane(out->data[p], out->linesize[p], base->data[p], base->linesize[p],
                                    s->width[p], s->height[p]);
                continue;
            } else {
                int y;

                for (y = 0; y < s->height[p]; y++) {
                    memset(out->data[p] + y * out->linesize[p], 0, s->width[p]);
                }
            }

            s->index = -1;
            memset(s->map, 0, s->width[0] * s->height[0]);
            memset(s->xy, 0, s->width[0] * s->height[0] * 4);

            s->hysteresis(s, base->data[p], alt->data[p],
                          out->data[p],
                          base->linesize[p], alt->linesize[p],
                          out->linesize[p],
                          s->width[p], s->height[p]);
        }
    }
    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}
Example #14
/**
 * Copy picture field from src to dst.
 *
 * @param src_field copy from upper, lower field or both
 * @param interleave leave a padding line between each copied line
 * @param dst_field copy to upper or lower field,
 *        only meaningful when interleave is selected
 * @param flags context flags
 */
static inline
void copy_picture_field(TInterlaceContext *tinterlace,
                        uint8_t *dst[4], int dst_linesize[4],
                        const uint8_t *src[4], int src_linesize[4],
                        enum AVPixelFormat format, int w, int src_h,
                        int src_field, int interleave, int dst_field,
                        int flags)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);
    int plane, vsub = desc->log2_chroma_h;
    int k = src_field == FIELD_UPPER_AND_LOWER ? 1 : 2;
    int h;

    for (plane = 0; plane < desc->nb_components; plane++) {
        int lines = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(src_h, vsub) : src_h;
        int cols  = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(    w, desc->log2_chroma_w) : w;
        int linesize = av_image_get_linesize(format, w, plane);
        uint8_t *dstp = dst[plane];
        const uint8_t *srcp = src[plane];

        if (linesize < 0)
            return;

        lines = (lines + (src_field == FIELD_UPPER)) / k;
        if (src_field == FIELD_LOWER)
            srcp += src_linesize[plane];
        if (interleave && dst_field == FIELD_LOWER)
            dstp += dst_linesize[plane];
        if (flags & TINTERLACE_FLAG_VLPF) {
            // Low-pass filtering is required when creating an interlaced destination from
            // a progressive source which contains high-frequency vertical detail.
            // Filtering will reduce interlace 'twitter' and Moire patterning.
            int srcp_linesize = src_linesize[plane] * k;
            int dstp_linesize = dst_linesize[plane] * (interleave ? 2 : 1);
            for (h = lines; h > 0; h--) {
                const uint8_t *srcp_above = srcp - src_linesize[plane];
                const uint8_t *srcp_below = srcp + src_linesize[plane];
                if (h == lines) srcp_above = srcp; // there is no line above
                if (h == 1) srcp_below = srcp;     // there is no line below

                tinterlace->lowpass_line(dstp, cols, srcp, srcp_above, srcp_below);
                dstp += dstp_linesize;
                srcp += srcp_linesize;
            }
        } else {
            av_image_copy_plane(dstp, dst_linesize[plane] * (interleave ? 2 : 1),
                            srcp, src_linesize[plane]*k, linesize, lines);
        }
    }
}
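In the non-filtered branch above, passing src_linesize[plane] * k (k == 2 when a single field is selected) makes av_image_copy_plane() read every second line, and doubling the destination stride interleaves the result. The same trick in isolation, as a minimal sketch for an 8-bit single-plane image (function name hypothetical):

/* Sketch: copy the top field of a gray8 image into the top-field
 * lines of dst; the doubled strides skip the opposite field. */
static void copy_top_field(uint8_t *dst, int dst_linesize,
                           const uint8_t *src, int src_linesize,
                           int width, int height)
{
    av_image_copy_plane(dst, dst_linesize * 2,
                        src, src_linesize * 2,
                        width, (height + 1) / 2);
}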
Example #15
/**
 * Copy picture field from src to dst.
 *
 * @param src_field copy from upper, lower field or both
 * @param interleave leave a padding line between each copied line
 * @param dst_field copy to upper or lower field,
 *        only meaningful when interleave is selected
 * @param flags context flags
 */
static inline
void copy_picture_field(TInterlaceContext *tinterlace,
                        uint8_t *dst[4], int dst_linesize[4],
                        const uint8_t *src[4], int src_linesize[4],
                        enum AVPixelFormat format, int w, int src_h,
                        int src_field, int interleave, int dst_field,
                        int flags)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);
    int hsub = desc->log2_chroma_w;
    int plane, vsub = desc->log2_chroma_h;
    int k = src_field == FIELD_UPPER_AND_LOWER ? 1 : 2;
    int h;

    for (plane = 0; plane < desc->nb_components; plane++) {
        int lines = plane == 1 || plane == 2 ? AV_CEIL_RSHIFT(src_h, vsub) : src_h;
        int cols  = plane == 1 || plane == 2 ? AV_CEIL_RSHIFT(    w, hsub) : w;
        uint8_t *dstp = dst[plane];
        const uint8_t *srcp = src[plane];
        int srcp_linesize = src_linesize[plane] * k;
        int dstp_linesize = dst_linesize[plane] * (interleave ? 2 : 1);
        int clip_max = (1 << tinterlace->csp->comp[plane].depth) - 1;

        lines = (lines + (src_field == FIELD_UPPER)) / k;
        if (src_field == FIELD_LOWER)
            srcp += src_linesize[plane];
        if (interleave && dst_field == FIELD_LOWER)
            dstp += dst_linesize[plane];
        // Low-pass filtering is required when creating an interlaced destination from
        // a progressive source which contains high-frequency vertical detail.
        // Filtering will reduce interlace 'twitter' and Moire patterning.
        if (flags & (TINTERLACE_FLAG_VLPF | TINTERLACE_FLAG_CVLPF)) {
            int x = !!(flags & TINTERLACE_FLAG_CVLPF);
            for (h = lines; h > 0; h--) {
                ptrdiff_t pref = src_linesize[plane];
                ptrdiff_t mref = -pref;
                if (h >= (lines - x))  mref = 0; // there is no line above
                else if (h <= (1 + x)) pref = 0; // there is no line below

                tinterlace->lowpass_line(dstp, cols, srcp, mref, pref, clip_max);
                dstp += dstp_linesize;
                srcp += srcp_linesize;
            }
        } else {
            if (tinterlace->csp->comp[plane].depth > 8)
                cols *= 2;
            av_image_copy_plane(dstp, dstp_linesize, srcp, srcp_linesize, cols, lines);
        }
    }
}
Example #16
static inline int copy_field
(
    lw_log_handler_t *lhp,
    AVFrame          *dst,
    AVFrame          *src,
    int               line_offset
)
{
    /* Check if the destination is writable. */
    if( av_frame_is_writable( dst ) == 0 )
    {
        /* The destination is NOT writable, so allocate new buffers and copy the data. */
        av_frame_unref( dst );
        if( av_frame_ref( dst, src ) < 0 )
        {
            if( lhp->show_log )
                lhp->show_log( lhp, LW_LOG_ERROR, "Failed to reference a video frame.\n" );
            return -1;
        }
        if( av_frame_make_writable( dst ) < 0 )
        {
            if( lhp->show_log )
                lhp->show_log( lhp, LW_LOG_ERROR, "Failed to make a video frame writable.\n" );
            return -1;
        }
        /* For direct rendering, the destination can not know
         * whether the value at the address held by the opaque pointer is valid or not.
         * Anyway, the opaque pointer for direct rendering shall be set to NULL. */
        dst->opaque = NULL;
    }
    else
    {
        /* The destination is writable. Copy field data from the source. */
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get( (enum AVPixelFormat)dst->format );
        int number_of_planes = av_pix_fmt_count_planes( (enum AVPixelFormat)dst->format );
        int height           = MIN( dst->height, src->height );
        for( int i = 0; i < number_of_planes; i++ )
        {
            int r_shift = 1 + ((i == 1 || i == 2) ? desc->log2_chroma_h : 0);
            int field_height = (height >> r_shift) + (line_offset == 0 && (height & 1) ? 1 : 0);
            av_image_copy_plane( dst->data[i] + dst->linesize[i] * line_offset, 2 * dst->linesize[i],
                                 src->data[i] + src->linesize[i] * line_offset, 2 * src->linesize[i],
                                 MIN( dst->linesize[i], src->linesize[i] ),
                                 field_height );
        }
    }
    /* Treat this frame as interlaced. */
    dst->interlaced_frame = 1;
    return 0;
}
Example #17
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    ThresholdContext *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *in, *threshold, *min, *max;
    int ret;

    if ((ret = ff_framesync_get_frame(&s->fs, 0, &in,        0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 1, &threshold, 0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 2, &min,       0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 3, &max,       0)) < 0)
        return ret;

    if (ctx->is_disabled) {
        out = av_frame_clone(in);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        int p;

        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, in);

        for (p = 0; p < s->nb_planes; p++) {
            if (!(s->planes & (1 << p))) {
                av_image_copy_plane(out->data[p], out->linesize[p],
                                    in->data[p], in->linesize[p],
                                    s->width[p] * s->bpc,
                                    s->height[p]);
                continue;
            }
            s->threshold(in->data[p], threshold->data[p],
                         min->data[p], max->data[p],
                         out->data[p],
                         in->linesize[p], threshold->linesize[p],
                         min->linesize[p], max->linesize[p],
                         out->linesize[p],
                         s->width[p], s->height[p]);
        }
    }

    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}
Example #18
static void copy_picture_field(AVFrame *src_frame, AVFrame *dst_frame,
                               AVFilterLink *inlink, enum FieldType field_type,
                               int lowpass)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int vsub = desc->log2_chroma_h;
    int plane, i, j;

    for (plane = 0; plane < desc->nb_components; plane++) {
        int lines = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;
        int linesize = av_image_get_linesize(inlink->format, inlink->w, plane);
        uint8_t *dstp = dst_frame->data[plane];
        const uint8_t *srcp = src_frame->data[plane];

        av_assert0(linesize >= 0);

        lines = (lines + (field_type == FIELD_UPPER)) / 2;
        if (field_type == FIELD_LOWER)
            srcp += src_frame->linesize[plane];
        if (field_type == FIELD_LOWER)
            dstp += dst_frame->linesize[plane];
        if (lowpass) {
            int srcp_linesize = src_frame->linesize[plane] * 2;
            int dstp_linesize = dst_frame->linesize[plane] * 2;
            for (j = lines; j > 0; j--) {
                const uint8_t *srcp_above = srcp - src_frame->linesize[plane];
                const uint8_t *srcp_below = srcp + src_frame->linesize[plane];
                if (j == lines)
                    srcp_above = srcp; // there is no line above
                if (j == 1)
                    srcp_below = srcp; // there is no line below
                for (i = 0; i < linesize; i++) {
                    // this calculation is an integer representation of
                    // '0.5 * current + 0.25 * above + 0.25 * below'
                    // '1 +' is for rounding.
                    dstp[i] = (1 + srcp[i] + srcp[i] + srcp_above[i] + srcp_below[i]) >> 2;
                }
                dstp += dstp_linesize;
                srcp += srcp_linesize;
            }
        } else {
            av_image_copy_plane(dstp, dst_frame->linesize[plane] * 2,
                                srcp, src_frame->linesize[plane] * 2,
                                linesize, lines);
        }
    }
}
Example #19
static int android_render_yv12_on_yv12(ANativeWindow_Buffer *out_buffer, const SDL_VoutOverlay *overlay)
{
    // SDLTRACE("SDL_VoutAndroid: android_render_yv12_on_yv12(%p)", overlay);
    assert(overlay->format == SDL_FCC_YV12);
    assert(overlay->planes == 3);

    int min_height = CAPMIN(out_buffer->height, overlay->h);
    int dst_y_stride = out_buffer->stride;
    int dst_c_stride = CAPALIGN(out_buffer->stride / 2, 16);
    int dst_y_size = dst_y_stride * out_buffer->height;
    int dst_c_size = dst_c_stride * out_buffer->height / 2;

    // ALOGE("stride:%d/%d, size:%d/%d", dst_y_stride, dst_c_stride, dst_y_size, dst_c_size);

    uint8_t *dst_pixels_array[] = {
        out_buffer->bits,
        out_buffer->bits + dst_y_size,
        out_buffer->bits + dst_y_size + dst_c_size,
    };
    int dst_line_height[] = { min_height, min_height / 2, min_height / 2 };
    int dst_line_size_array[] = { dst_y_stride, dst_c_stride, dst_c_stride };

    for (int i = 0; i < 3; ++i) {
        int dst_line_size = dst_line_size_array[i];
        int src_line_size = overlay->pitches[i];
        int line_height = dst_line_height[i];
        uint8_t *dst_pixels = dst_pixels_array[i];
        const uint8_t *src_pixels = overlay->pixels[i];

        if (dst_line_size == src_line_size) {
            int plane_size = src_line_size * line_height;

            // ALOGE("sdl_image_copy_plane %p %p %d", dst_pixels, src_pixels, dst_plane_size);
            memcpy(dst_pixels, src_pixels, plane_size);
        } else {
            // TODO: 9 padding
            int bytewidth = CAPMIN(dst_line_size, src_line_size);

            // ALOGE("av_image_copy_plane %p %d %p %d %d %d", dst_pixels, dst_line_size, src_pixels, src_line_size, bytewidth, line_height);
            av_image_copy_plane(dst_pixels, dst_line_size, src_pixels, src_line_size, bytewidth, line_height);
        }
    }

    return 0;
}
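The chroma stride computation follows the Android YV12 buffer convention, where the chroma stride is the luma stride halved and aligned up to 16 (hence CAPALIGN(out_buffer->stride / 2, 16)). For example, with stride = 1280 and height = 720: dst_y_size = 1280 * 720 = 921600, dst_c_stride = CAPALIGN(640, 16) = 640, and dst_c_size = 640 * 360 = 230400 bytes per chroma plane.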
Example #20
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    MidEqualizerContext *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *in0, *in1;
    int ret;

    if ((ret = ff_framesync_get_frame(&s->fs, 0, &in0, 0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 1, &in1, 0)) < 0)
        return ret;

    if (ctx->is_disabled) {
        out = av_frame_clone(in0);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        int p;

        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, in0);

        for (p = 0; p < s->nb_planes; p++) {
            if (!((1 << p) & s->planes)) {
                av_image_copy_plane(out->data[p], out->linesize[p], in0->data[p], in0->linesize[p],
                                    s->width[0][p] * (1 + (s->histogram_size > 256)), s->height[0][p]);
                continue;
            }

            s->midequalizer(in0->data[p], in1->data[p],
                            out->data[p],
                            in0->linesize[p], in1->linesize[p],
                            out->linesize[p],
                            s->width[0][p], s->height[0][p],
                            s->width[1][p], s->height[1][p],
                            s->histogram[0], s->histogram[1],
                            s->cchange, s->histogram_size);
        }
    }
    out->pts = av_rescale_q(in0->pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}
Example #21
static void copy_picture_field(InterlaceContext *s,
                               AVFrame *src_frame, AVFrame *dst_frame,
                               AVFilterLink *inlink, enum FieldType field_type,
                               int lowpass)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int hsub = desc->log2_chroma_w;
    int vsub = desc->log2_chroma_h;
    int plane, j;

    for (plane = 0; plane < desc->nb_components; plane++) {
        int cols  = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->w, hsub) : inlink->w;
        int lines = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;
        uint8_t *dstp = dst_frame->data[plane];
        const uint8_t *srcp = src_frame->data[plane];

        av_assert0(cols >= 0 || lines >= 0);

        lines = (lines + (field_type == FIELD_UPPER)) / 2;
        if (field_type == FIELD_LOWER) {
            srcp += src_frame->linesize[plane];
            dstp += dst_frame->linesize[plane];
        }
        if (lowpass) {
            int srcp_linesize = src_frame->linesize[plane] * 2;
            int dstp_linesize = dst_frame->linesize[plane] * 2;
            for (j = lines; j > 0; j--) {
                const uint8_t *srcp_above = srcp - src_frame->linesize[plane];
                const uint8_t *srcp_below = srcp + src_frame->linesize[plane];
                if (j == lines)
                    srcp_above = srcp; // there is no line above
                if (j == 1)
                    srcp_below = srcp; // there is no line below
                s->lowpass_line(dstp, cols, srcp, srcp_above, srcp_below);
                dstp += dstp_linesize;
                srcp += srcp_linesize;
            }
        } else {
            av_image_copy_plane(dstp, dst_frame->linesize[plane] * 2,
                                srcp, src_frame->linesize[plane] * 2,
                                cols, lines);
        }
    }
}
Example #22
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    MaskedMergeContext *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *base, *overlay, *mask;
    int ret;

    if ((ret = ff_framesync_get_frame(&s->fs, 0, &base,    0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 1, &overlay, 0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 2, &mask,    0)) < 0)
        return ret;

    if (ctx->is_disabled) {
        out = av_frame_clone(base);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        int p;

        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, base);

        for (p = 0; p < s->nb_planes; p++) {
            if (!((1 << p) & s->planes)) {
                av_image_copy_plane(out->data[p], out->linesize[p], base->data[p], base->linesize[p],
                                    s->width[p], s->height[p]);
                continue;
            }

            s->maskedmerge(base->data[p], base->linesize[p],
                           overlay->data[p], overlay->linesize[p],
                           mask->data[p], mask->linesize[p],
                           out->data[p], out->linesize[p],
                           s->width[p], s->height[p],
                           s->max, s->half, s->depth);
        }
    }
    out->pts = av_rescale_q(base->pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}
Example #23
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    RemoveGrainContext *s = ctx->priv;
    ThreadData td;
    AVFrame *out;
    int i;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    for (i = 0; i < s->nb_planes; i++) {
        uint8_t *dst = out->data[i];
        uint8_t *src = in->data[i];

        if (s->mode[i] == 0) {
            av_image_copy_plane(dst, out->linesize[i],
                                src, in->linesize[i],
                                s->planewidth[i], s->planeheight[i]);
            continue;
        }

        memcpy(dst, src, s->planewidth[i]);

        td.in = in; td.out = out; td.plane = i;
        ctx->internal->execute(ctx, filter_slice, &td, NULL,
                               FFMIN(s->planeheight[i], ctx->graph->nb_threads));

        src = in->data[i] + (s->planeheight[i] - 1) * in->linesize[i];
        dst = out->data[i] + (s->planeheight[i] - 1) * out->linesize[i];
        memcpy(dst, src, s->planewidth[i]);
    }

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
Example #24
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ThresholdContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *min = td->min;
    AVFrame *max = td->max;
    AVFrame *threshold = td->threshold;
    AVFrame *in = td->in;
    AVFrame *out = td->out;

    for (int p = 0; p < s->nb_planes; p++) {
        const int h = s->height[p];
        const int slice_start = (h * jobnr) / nb_jobs;
        const int slice_end = (h * (jobnr+1)) / nb_jobs;

        if (!(s->planes & (1 << p))) {
            av_image_copy_plane(out->data[p] + slice_start * out->linesize[p],
                                out->linesize[p],
                                in->data[p] + slice_start * in->linesize[p],
                                in->linesize[p],
                                s->width[p] * s->bpc,
                                slice_end - slice_start);
            continue;
        }
        s->threshold(in->data[p] + slice_start * in->linesize[p],
                     threshold->data[p] + slice_start * threshold->linesize[p],
                     min->data[p] + slice_start * min->linesize[p],
                     max->data[p] + slice_start * max->linesize[p],
                     out->data[p] + slice_start * out->linesize[p],
                     in->linesize[p], threshold->linesize[p],
                     min->linesize[p], max->linesize[p],
                     out->linesize[p],
                     s->width[p], slice_end - slice_start);
    }

    return 0;
}
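The slice bounds (h * jobnr) / nb_jobs and (h * (jobnr + 1)) / nb_jobs partition the plane's rows exactly: with h = 100 and nb_jobs = 3, the jobs cover [0, 33), [33, 66) and [66, 100), so every row is processed exactly once even when nb_jobs does not divide h.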
Example #25
static av_cold int init(AVCodecContext *avctx)
{
	CHDContext* priv;
	BC_STATUS ret;
	BC_INFO_CRYSTAL version;
	BC_INPUT_FORMAT format =
	{
		.FGTEnable   = FALSE,
		.Progressive = TRUE,
		.OptFlags    = 0x80000000 | vdecFrameRate59_94 | 0x40,
		.width       = avctx->width,
		.height      = avctx->height,
	};

	BC_MEDIA_SUBTYPE subtype;

	uint32_t mode = DTS_PLAYBACK_MODE |
	                DTS_LOAD_FILE_PLAY_FW |
	                DTS_SKIP_TX_CHK_CPB |
	                DTS_PLAYBACK_DROP_RPT_MODE |
	                DTS_SINGLE_THREADED_MODE |
	                DTS_DFLT_RESOLUTION(vdecRESOLUTION_1080p23_976);

	av_log(avctx, AV_LOG_VERBOSE, "CrystalHD Init for %s\n",
	       avctx->codec->name);

	avctx->pix_fmt = PIX_FMT_YUYV422;

	/* Initialize the library */
	priv               = avctx->priv_data;
	priv->avctx        = avctx;
	priv->is_nal       = avctx->extradata_size > 0 && *(avctx->extradata) == 1;
	priv->last_picture = -1;
	priv->decode_wait  = BASE_WAIT;

	subtype = id2subtype(priv, avctx->codec->id);
	switch (subtype)
	{
	case BC_MSUBTYPE_AVC1:
	{
		uint8_t *dummy_p;
		int dummy_int;
		AVBitStreamFilterContext *bsfc;

		uint32_t orig_data_size = avctx->extradata_size;
		uint8_t *orig_data = av_malloc(orig_data_size);
		if (!orig_data)
		{
			av_log(avctx, AV_LOG_ERROR,
			       "Failed to allocate copy of extradata\n");
			return AVERROR(ENOMEM);
		}
		memcpy(orig_data, avctx->extradata, orig_data_size);


		bsfc = av_bitstream_filter_init("h264_mp4toannexb");
		if (!bsfc)
		{
			av_log(avctx, AV_LOG_ERROR,
			       "Cannot open the h264_mp4toannexb BSF!\n");
			av_free(orig_data);
			return AVERROR_BSF_NOT_FOUND;
		}
		av_bitstream_filter_filter(bsfc, avctx, NULL, &dummy_p,
		                           &dummy_int, NULL, 0, 0);
		av_bitstream_filter_close(bsfc);

		priv->sps_pps_buf     = avctx->extradata;
		priv->sps_pps_size    = avctx->extradata_size;
		avctx->extradata      = orig_data;
		avctx->extradata_size = orig_data_size;

		format.pMetaData   = priv->sps_pps_buf;
		format.metaDataSz  = priv->sps_pps_size;
		format.startCodeSz = (avctx->extradata[4] & 0x03) + 1;
	}
	break;
	case BC_MSUBTYPE_H264:
		format.startCodeSz = 4;
		// Fall-through
	case BC_MSUBTYPE_VC1:
	case BC_MSUBTYPE_WVC1:
	case BC_MSUBTYPE_WMV3:
	case BC_MSUBTYPE_WMVA:
	case BC_MSUBTYPE_MPEG2VIDEO:
	case BC_MSUBTYPE_DIVX:
	case BC_MSUBTYPE_DIVX311:
		format.pMetaData  = avctx->extradata;
		format.metaDataSz = avctx->extradata_size;
		break;
	default:
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: Unknown codec name\n");
		return AVERROR(EINVAL);
	}
	format.mSubtype = subtype;

	/* Get a decoder instance */
	av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: starting up\n");
	// Initialize the Link and Decoder devices
	ret = DtsDeviceOpen(&priv->dev, mode);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: DtsDeviceOpen failed\n");
		goto fail;
	}

	ret = DtsCrystalHDVersion(priv->dev, &version);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_VERBOSE,
		       "CrystalHD: DtsCrystalHDVersion failed\n");
		goto fail;
	}
	priv->is_70012 = version.device == 0;

	if (priv->is_70012 &&
	        (subtype == BC_MSUBTYPE_DIVX || subtype == BC_MSUBTYPE_DIVX311))
	{
		av_log(avctx, AV_LOG_VERBOSE,
		       "CrystalHD: BCM70012 doesn't support MPEG4-ASP/DivX/Xvid\n");
		goto fail;
	}

	ret = DtsSetInputFormat(priv->dev, &format);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: SetInputFormat failed\n");
		goto fail;
	}

	ret = DtsOpenDecoder(priv->dev, BC_STREAM_TYPE_ES);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsOpenDecoder failed\n");
		goto fail;
	}

	ret = DtsSetColorSpace(priv->dev, OUTPUT_MODE422_YUY2);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsSetColorSpace failed\n");
		goto fail;
	}
	ret = DtsStartDecoder(priv->dev);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartDecoder failed\n");
		goto fail;
	}
	ret = DtsStartCapture(priv->dev);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartCapture failed\n");
		goto fail;
	}

	av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Init complete.\n");

	return 0;

fail:
	uninit(avctx);
	return -1;
}


/*
 * The CrystalHD doesn't report interlaced H.264 content in a way that allows
 * us to distinguish between specific cases that require different handling.
 * So, for now, we have to hard-code the behaviour we want.
 *
 * The default behaviour is to assume MBAFF with input and output fieldpairs.
 *
 * Define ASSUME_PAFF_OVER_MBAFF to treat input as PAFF with separate input
 * and output fields.
 *
 * Define ASSUME_TWO_INPUTS_ONE_OUTPUT to treat input as separate fields but
 * output as a single fieldpair.
 *
 * Define both to mess up your playback.
 */
#define ASSUME_PAFF_OVER_MBAFF 0
#define ASSUME_TWO_INPUTS_ONE_OUTPUT 0
static inline CopyRet copy_frame(AVCodecContext *avctx,
                                 BC_DTS_PROC_OUT *output,
                                 void *data, int *data_size,
                                 uint8_t second_field)
{
	BC_STATUS ret;
	BC_DTS_STATUS decoder_status;
	uint8_t is_paff;
	uint8_t next_frame_same;
	uint8_t interlaced;

	CHDContext *priv = avctx->priv_data;

	uint8_t bottom_field = (output->PicInfo.flags & VDEC_FLAG_BOTTOMFIELD) ==
	                       VDEC_FLAG_BOTTOMFIELD;
	uint8_t bottom_first = !!(output->PicInfo.flags & VDEC_FLAG_BOTTOM_FIRST);

	int width    = output->PicInfo.width;
	int height   = output->PicInfo.height;
	int bwidth;
	uint8_t *src = output->Ybuff;
	int sStride;
	uint8_t *dst;
	int dStride;

	ret = DtsGetDriverStatus(priv->dev, &decoder_status);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_ERROR,
		       "CrystalHD: GetDriverStatus failed: %u\n", ret);
		return RET_ERROR;
	}

	is_paff           = ASSUME_PAFF_OVER_MBAFF ||
	                    !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC);
	next_frame_same   = output->PicInfo.picture_number ==
	                    (decoder_status.picNumFlags & ~0x40000000);
	interlaced        = ((output->PicInfo.flags &
	                      VDEC_FLAG_INTERLACED_SRC) && is_paff) ||
	                    next_frame_same || bottom_field || second_field;

	av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: next_frame_same: %u | %u | %u\n",
	       next_frame_same, output->PicInfo.picture_number,
	       decoder_status.picNumFlags & ~0x40000000);

	if (priv->pic.data[0] && !priv->need_second_field)
		avctx->release_buffer(avctx, &priv->pic);

	priv->need_second_field = interlaced && !priv->need_second_field;

	priv->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
	                         FF_BUFFER_HINTS_REUSABLE;
	if (!priv->pic.data[0])
	{
		if (avctx->get_buffer(avctx, &priv->pic) < 0)
		{
			av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
			return RET_ERROR;
		}
	}

	bwidth = av_image_get_linesize(avctx->pix_fmt, width, 0);
	if (priv->is_70012)
	{
		int pStride;

		if (width <= 720)
			pStride = 720;
		else if (width <= 1280)
			pStride = 1280;
		else
			pStride = 1920;
		sStride = av_image_get_linesize(avctx->pix_fmt, pStride, 0);
	}
	else
	{
		sStride = bwidth;
	}

	dStride = priv->pic.linesize[0];
	dst     = priv->pic.data[0];

	av_log(priv->avctx, AV_LOG_VERBOSE, "CrystalHD: Copying out frame\n");

	if (interlaced)
	{
		int dY = 0;
		int sY = 0;

		height /= 2;
		if (bottom_field)
		{
			av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: bottom field\n");
			dY = 1;
		}
		else
		{
			av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: top field\n");
			dY = 0;
		}

		for (sY = 0; sY < height; dY++, sY++)
		{
			memcpy(&(dst[dY * dStride]), &(src[sY * sStride]), bwidth);
			dY++;
		}
	}
	else
	{
		av_image_copy_plane(dst, dStride, src, sStride, bwidth, height);
	}

	priv->pic.interlaced_frame = interlaced;
	if (interlaced)
		priv->pic.top_field_first = !bottom_first;

	if (output->PicInfo.timeStamp != 0)
	{
		priv->pic.pkt_pts = opaque_list_pop(priv, output->PicInfo.timeStamp);
		av_log(avctx, AV_LOG_VERBOSE, "output \"pts\": %"PRIu64"\n",
		       priv->pic.pkt_pts);
	}

	if (!priv->need_second_field)
	{
		*data_size       = sizeof(AVFrame);
		*(AVFrame *)data = priv->pic;
	}

	if (ASSUME_TWO_INPUTS_ONE_OUTPUT &&
	        output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC)
	{
		av_log(priv->avctx, AV_LOG_VERBOSE, "Fieldpair from two packets.\n");
		return RET_SKIP_NEXT_COPY;
	}

	return RET_OK;
}
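Note the interlaced copy loop above: dY is incremented both in the loop header and in the loop body, so the destination pointer advances two lines for every source line, writing one field and leaving the lines of the other field untouched (the starting offset dY = 1 selects the bottom field).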
Example #26
static int dds_decode(AVCodecContext *avctx, void *data,
                      int *got_frame, AVPacket *avpkt)
{
    DDSContext *ctx = avctx->priv_data;
    GetByteContext *gbc = &ctx->gbc;
    AVFrame *frame = data;
    int mipmap;
    int ret;

    ff_texturedsp_init(&ctx->texdsp);
    bytestream2_init(gbc, avpkt->data, avpkt->size);

    if (bytestream2_get_bytes_left(gbc) < 128) {
        av_log(avctx, AV_LOG_ERROR, "Frame is too small (%d).\n",
               bytestream2_get_bytes_left(gbc));
        return AVERROR_INVALIDDATA;
    }

    if (bytestream2_get_le32(gbc) != MKTAG('D', 'D', 'S', ' ') ||
            bytestream2_get_le32(gbc) != 124) { // header size
        av_log(avctx, AV_LOG_ERROR, "Invalid DDS header.\n");
        return AVERROR_INVALIDDATA;
    }

    bytestream2_skip(gbc, 4); // flags

    avctx->height = bytestream2_get_le32(gbc);
    avctx->width  = bytestream2_get_le32(gbc);
    ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid image size %dx%d.\n",
               avctx->width, avctx->height);
        return ret;
    }

    /* Since codec is based on 4x4 blocks, size is aligned to 4. */
    avctx->coded_width  = FFALIGN(avctx->width,  TEXTURE_BLOCK_W);
    avctx->coded_height = FFALIGN(avctx->height, TEXTURE_BLOCK_H);

    bytestream2_skip(gbc, 4); // pitch
    bytestream2_skip(gbc, 4); // depth
    mipmap = bytestream2_get_le32(gbc);
    if (mipmap != 0)
        av_log(avctx, AV_LOG_VERBOSE, "Found %d mipmaps (ignored).\n", mipmap);

    /* Extract pixel format information, considering additional elements
     * in reserved1 and reserved2. */
    ret = parse_pixel_format(avctx);
    if (ret < 0)
        return ret;

    ret = ff_get_buffer(avctx, frame, 0);
    if (ret < 0)
        return ret;

    if (ctx->compressed) {
        int size = (avctx->coded_height / TEXTURE_BLOCK_H) *
                   (avctx->coded_width / TEXTURE_BLOCK_W) * ctx->tex_ratio;
        ctx->slice_count = av_clip(avctx->thread_count, 1,
                                   avctx->coded_height / TEXTURE_BLOCK_H);

        if (bytestream2_get_bytes_left(gbc) < size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Compressed Buffer is too small (%d < %d).\n",
                   bytestream2_get_bytes_left(gbc), size);
            return AVERROR_INVALIDDATA;
        }

        /* Use the decompress function on the texture, one block per thread. */
        ctx->tex_data = gbc->buffer;
        avctx->execute2(avctx, decompress_texture_thread, frame, NULL, ctx->slice_count);
    } else {
        int linesize = av_image_get_linesize(avctx->pix_fmt, frame->width, 0);

        if (ctx->paletted) {
            int i;
            /* Use the first 1024 bytes as palette, then copy the rest. */
            bytestream2_get_buffer(gbc, frame->data[1], 256 * 4);
            for (i = 0; i < 256; i++)
                AV_WN32(frame->data[1] + i*4,
                        (frame->data[1][2+i*4]<<0)+
                        (frame->data[1][1+i*4]<<8)+
                        (frame->data[1][0+i*4]<<16)+
                        (frame->data[1][3+i*4]<<24)
                       );

            frame->palette_has_changed = 1;
        }

        if (bytestream2_get_bytes_left(gbc) < frame->height * linesize) {
            av_log(avctx, AV_LOG_ERROR, "Buffer is too small (%d < %d).\n",
                   bytestream2_get_bytes_left(gbc), frame->height * linesize);
            return AVERROR_INVALIDDATA;
        }

        av_image_copy_plane(frame->data[0], frame->linesize[0],
                            gbc->buffer, linesize,
                            linesize, frame->height);
    }

    /* Run any post processing here if needed. */
    if (avctx->pix_fmt == AV_PIX_FMT_BGRA ||
            avctx->pix_fmt == AV_PIX_FMT_RGBA ||
            avctx->pix_fmt == AV_PIX_FMT_RGB0 ||
            avctx->pix_fmt == AV_PIX_FMT_BGR0 ||
            avctx->pix_fmt == AV_PIX_FMT_YA8)
        run_postproc(avctx, frame);

    /* Frame is ready to be output. */
    frame->pict_type = AV_PICTURE_TYPE_I;
    frame->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}
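In the uncompressed path, the DDS payload is tightly packed, so the same linesize value serves as both the source stride and the byte width of each row passed to av_image_copy_plane(); only the destination honours frame->linesize[0], which may include alignment padding.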
Example #27
/**
 * Apply a simple delogo algorithm to the image in src and put the
 * result in dst.
 *
 * The algorithm is only applied to the region specified by the logo
 * parameters.
 *
 * @param w      width of the input image
 * @param h      height of the input image
 * @param logo_x x coordinate of the top left corner of the logo region
 * @param logo_y y coordinate of the top left corner of the logo region
 * @param logo_w width of the logo
 * @param logo_h height of the logo
 * @param band   the size of the band around the processed area
 * @param show   show a rectangle around the processed area, useful for
 *               parameters tweaking
 * @param direct if non-zero perform in-place processing
 */
static void apply_delogo(uint8_t *dst, int dst_linesize,
                         uint8_t *src, int src_linesize,
                         int w, int h,
                         int logo_x, int logo_y, int logo_w, int logo_h,
                         int band, int show, int direct)
{
    int x, y;
    int interp, dist;
    uint8_t *xdst, *xsrc;

    uint8_t *topleft, *botleft, *topright;
    int xclipl, xclipr, yclipt, yclipb;
    int logo_x1, logo_x2, logo_y1, logo_y2;

    xclipl = FFMAX(-logo_x, 0);
    xclipr = FFMAX(logo_x+logo_w-w, 0);
    yclipt = FFMAX(-logo_y, 0);
    yclipb = FFMAX(logo_y+logo_h-h, 0);

    logo_x1 = logo_x + xclipl;
    logo_x2 = logo_x + logo_w - xclipr;
    logo_y1 = logo_y + yclipt;
    logo_y2 = logo_y + logo_h - yclipb;

    topleft  = src+logo_y1     * src_linesize+logo_x1;
    topright = src+logo_y1     * src_linesize+logo_x2-1;
    botleft  = src+(logo_y2-1) * src_linesize+logo_x1;

    if (!direct)
        av_image_copy_plane(dst, dst_linesize, src, src_linesize, w, h);

    dst += (logo_y1+1)*dst_linesize;
    src += (logo_y1+1)*src_linesize;

    for (y = logo_y1+1; y < logo_y2-1; y++) {
        for (x = logo_x1+1,
             xdst = dst+logo_x1+1,
             xsrc = src+logo_x1+1; x < logo_x2-1; x++, xdst++, xsrc++) {
            interp = (topleft[src_linesize*(y-logo_y  -yclipt)]   +
                      topleft[src_linesize*(y-logo_y-1-yclipt)]   +
                      topleft[src_linesize*(y-logo_y+1-yclipt)])  * (logo_w-(x-logo_x))/logo_w
                   + (topright[src_linesize*(y-logo_y-yclipt)]    +
                      topright[src_linesize*(y-logo_y-1-yclipt)]  +
                      topright[src_linesize*(y-logo_y+1-yclipt)]) * (x-logo_x)/logo_w
                   + (topleft[x-logo_x-xclipl]                    +
                      topleft[x-logo_x-1-xclipl]                  +
                      topleft[x-logo_x+1-xclipl])                 * (logo_h-(y-logo_y))/logo_h
                   + (botleft[x-logo_x-xclipl]                    +
                      botleft[x-logo_x-1-xclipl]                  +
                      botleft[x-logo_x+1-xclipl])                 * (y-logo_y)/logo_h;
            interp /= 6;

            if (y >= logo_y+band && y < logo_y+logo_h-band &&
                x >= logo_x+band && x < logo_x+logo_w-band) {
                *xdst = interp;
            } else {
                dist = 0;
                if      (x < logo_x+band)
                    dist = FFMAX(dist, logo_x-x+band);
                else if (x >= logo_x+logo_w-band)
                    dist = FFMAX(dist, x-(logo_x+logo_w-1-band));

                if      (y < logo_y+band)
                    dist = FFMAX(dist, logo_y-y+band);
                else if (y >= logo_y+logo_h-band)
                    dist = FFMAX(dist, y-(logo_y+logo_h-1-band));

                *xdst = (*xsrc*dist + interp*(band-dist))/band;
                if (show && (dist == band-1))
                    *xdst = 0;
            }
        }

        dst += dst_linesize;
        src += src_linesize;
    }
}
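The else-branch above cross-fades linearly across the band: dist increases toward the outer border of the processed rectangle, so the output fades from the pure interpolated value at the inner edge of the band to the untouched source pixel at the border. A minimal sketch of that blend in isolation (hypothetical helper):

#include <stdint.h>

/* Linear cross-fade used at the edge of the delogo band: dist == band
 * keeps the source pixel, dist == 0 yields the interpolated value. */
static uint8_t band_blend(uint8_t src, uint8_t interp, int dist, int band)
{
    return (uint8_t)((src * dist + interp * (band - dist)) / band);
}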
Example #28
static av_cold int init(AVCodecContext *avctx)
{
    CHDContext* priv;
    BC_STATUS ret;
    BC_INFO_CRYSTAL version;
    BC_INPUT_FORMAT format = {
        .FGTEnable   = FALSE,
        .Progressive = TRUE,
        .OptFlags    = 0x80000000 | vdecFrameRate59_94 | 0x40,
        .width       = avctx->width,
        .height      = avctx->height,
    };

    BC_MEDIA_SUBTYPE subtype;

    uint32_t mode = DTS_PLAYBACK_MODE |
                    DTS_LOAD_FILE_PLAY_FW |
                    DTS_SKIP_TX_CHK_CPB |
                    DTS_PLAYBACK_DROP_RPT_MODE |
                    DTS_SINGLE_THREADED_MODE |
                    DTS_DFLT_RESOLUTION(vdecRESOLUTION_1080p23_976);

    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD Init for %s\n",
           avctx->codec->name);

    avctx->pix_fmt = PIX_FMT_YUYV422;

    /* Initialize the library */
    priv               = avctx->priv_data;
    priv->avctx        = avctx;
    priv->is_nal       = avctx->extradata_size > 0 && *(avctx->extradata) == 1;
    priv->last_picture = -1;
    priv->decode_wait  = BASE_WAIT;

    subtype = id2subtype(priv, avctx->codec->id);
    switch (subtype) {
    case BC_MSUBTYPE_AVC1:
        {
            uint8_t *dummy_p;
            int dummy_int;

            priv->bsfc = av_bitstream_filter_init("h264_mp4toannexb");
            if (!priv->bsfc) {
                av_log(avctx, AV_LOG_ERROR,
                       "Cannot open the h264_mp4toannexb BSF!\n");
                return AVERROR_BSF_NOT_FOUND;
            }
            av_bitstream_filter_filter(priv->bsfc, avctx, NULL, &dummy_p,
                                       &dummy_int, NULL, 0, 0);
        }
        subtype = BC_MSUBTYPE_H264;
        // Fall-through
    case BC_MSUBTYPE_H264:
        format.startCodeSz = 4;
        // Fall-through
    case BC_MSUBTYPE_VC1:
    case BC_MSUBTYPE_WVC1:
    case BC_MSUBTYPE_WMV3:
    case BC_MSUBTYPE_WMVA:
    case BC_MSUBTYPE_MPEG2VIDEO:
    case BC_MSUBTYPE_DIVX:
    case BC_MSUBTYPE_DIVX311:
        format.pMetaData  = avctx->extradata;
        format.metaDataSz = avctx->extradata_size;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: Unknown codec name\n");
        return AVERROR(EINVAL);
    }
    format.mSubtype = subtype;

    if (priv->sWidth) {
        format.bEnableScaling = 1;
        format.ScalingParams.sWidth = priv->sWidth;
    }

    /* Get a decoder instance */
    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: starting up\n");
    // Initialize the Link and Decoder devices
    ret = DtsDeviceOpen(&priv->dev, mode);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: DtsDeviceOpen failed\n");
        goto fail;
    }

    ret = DtsCrystalHDVersion(priv->dev, &version);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_VERBOSE,
               "CrystalHD: DtsCrystalHDVersion failed\n");
        goto fail;
    }
    priv->is_70012 = version.device == 0;

    if (priv->is_70012 &&
        (subtype == BC_MSUBTYPE_DIVX || subtype == BC_MSUBTYPE_DIVX311)) {
        av_log(avctx, AV_LOG_VERBOSE,
               "CrystalHD: BCM70012 doesn't support MPEG4-ASP/DivX/Xvid\n");
        goto fail;
    }

    ret = DtsSetInputFormat(priv->dev, &format);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: SetInputFormat failed\n");
        goto fail;
    }

    ret = DtsOpenDecoder(priv->dev, BC_STREAM_TYPE_ES);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsOpenDecoder failed\n");
        goto fail;
    }

    ret = DtsSetColorSpace(priv->dev, OUTPUT_MODE422_YUY2);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsSetColorSpace failed\n");
        goto fail;
    }
    ret = DtsStartDecoder(priv->dev);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartDecoder failed\n");
        goto fail;
    }
    ret = DtsStartCapture(priv->dev);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartCapture failed\n");
        goto fail;
    }

    if (avctx->codec->id == CODEC_ID_H264) {
        priv->parser = av_parser_init(avctx->codec->id);
        if (!priv->parser)
            av_log(avctx, AV_LOG_WARNING,
                   "Cannot open the h.264 parser! Interlaced h.264 content "
                   "will not be detected reliably.\n");
        else
            priv->parser->flags = PARSER_FLAG_COMPLETE_FRAMES;
    }
    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Init complete.\n");

    return 0;

 fail:
    uninit(avctx);
    return -1;
}
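Every Dts call in init() above follows the same check-and-bail shape, funnelling all failures through the single fail: label so uninit() runs exactly once. A stripped-down sketch of that idiom, with hypothetical open_device/start_device/close_device stand-ins for the Dts API:

#include <stdio.h>

/* Hypothetical stand-ins for the DtsXxx calls; 0 means success. */
static int open_device(void)   { return 0; }
static int start_device(void)  { return 0; }
static void close_device(void) { puts("cleanup"); }

static int init_like(void)
{
    if (open_device() != 0)
        goto fail;          /* bail to the single cleanup point */
    if (start_device() != 0)
        goto fail;
    return 0;

fail:
    close_device();         /* mirrors the uninit(avctx) call above */
    return -1;
}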


static inline CopyRet copy_frame(AVCodecContext *avctx,
                                 BC_DTS_PROC_OUT *output,
                                 void *data, int *data_size)
{
    BC_STATUS ret;
    BC_DTS_STATUS decoder_status;
    uint8_t trust_interlaced;
    uint8_t interlaced;

    CHDContext *priv = avctx->priv_data;
    int64_t pkt_pts  = AV_NOPTS_VALUE;
    uint8_t pic_type = 0;

    uint8_t bottom_field = (output->PicInfo.flags & VDEC_FLAG_BOTTOMFIELD) ==
                           VDEC_FLAG_BOTTOMFIELD;
    uint8_t bottom_first = !!(output->PicInfo.flags & VDEC_FLAG_BOTTOM_FIRST);

    int width    = output->PicInfo.width;
    int height   = output->PicInfo.height;
    int bwidth;
    uint8_t *src = output->Ybuff;
    int sStride;
    uint8_t *dst;
    int dStride;

    if (output->PicInfo.timeStamp != 0) {
        OpaqueList *node = opaque_list_pop(priv, output->PicInfo.timeStamp);
        if (node) {
            pkt_pts = node->reordered_opaque;
            pic_type = node->pic_type;
            av_free(node);
        } else {
            /*
             * We will encounter a situation where a timestamp cannot be
             * popped if a second field is being returned. In this case,
             * each field has the same timestamp and the first one will
             * cause it to be popped. To keep subsequent calculations
             * simple, pic_type should be set to a FIELD value - it
             * doesn't matter which, but I chose BOTTOM.
             */
            pic_type = PICT_BOTTOM_FIELD;
        }
        av_log(avctx, AV_LOG_VERBOSE, "output \"pts\": %"PRIu64"\n",
               output->PicInfo.timeStamp);
        av_log(avctx, AV_LOG_VERBOSE, "output picture type %d\n",
               pic_type);
    }

    ret = DtsGetDriverStatus(priv->dev, &decoder_status);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR,
               "CrystalHD: GetDriverStatus failed: %u\n", ret);
        return RET_ERROR;
    }

    /*
     * For most content, we can trust the interlaced flag returned
     * by the hardware, but sometimes we can't. These are the
     * conditions under which we can trust the flag:
     *
     * 1) It's not h.264 content
     * 2) The UNKNOWN_SRC flag is not set
     * 3) We know we're expecting a second field
     * 4) The hardware reports this picture and the next picture
     *    have the same picture number.
     *
     * Note that there can still be interlaced content that will
     * fail this check, if the hardware hasn't decoded the next
     * picture or if there is a corruption in the stream. (In either
     * case a 0 will be returned for the next picture number)
     */
    trust_interlaced = avctx->codec->id != CODEC_ID_H264 ||
                       !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ||
                       priv->need_second_field ||
                       (decoder_status.picNumFlags & ~0x40000000) ==
                       output->PicInfo.picture_number;

    /*
     * If we got a false negative for trust_interlaced on the first field,
     * we will realise our mistake here when we see that the picture number is that
     * of the previous picture. We cannot recover the frame and should discard the
     * second field to keep the correct number of output frames.
     */
    if (output->PicInfo.picture_number == priv->last_picture && !priv->need_second_field) {
        av_log(avctx, AV_LOG_WARNING,
               "Incorrectly guessed progressive frame. Discarding second field\n");
        /* Returning without providing a picture. */
        return RET_OK;
    }

    interlaced = (output->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC) &&
                 trust_interlaced;

    if (!trust_interlaced && (decoder_status.picNumFlags & ~0x40000000) == 0) {
        av_log(avctx, AV_LOG_VERBOSE,
               "Next picture number unknown. Assuming progressive frame.\n");
    }

    av_log(avctx, AV_LOG_VERBOSE, "Interlaced state: %d | trust_interlaced %d\n",
           interlaced, trust_interlaced);

    if (priv->pic.data[0] && !priv->need_second_field)
        avctx->release_buffer(avctx, &priv->pic);

    priv->need_second_field = interlaced && !priv->need_second_field;

    priv->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
                             FF_BUFFER_HINTS_REUSABLE;
    if (!priv->pic.data[0]) {
        if (avctx->get_buffer(avctx, &priv->pic) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return RET_ERROR;
        }
    }

    bwidth = av_image_get_linesize(avctx->pix_fmt, width, 0);
    if (priv->is_70012) {
        int pStride;

        if (width <= 720)
            pStride = 720;
        else if (width <= 1280)
            pStride = 1280;
        else
            pStride = 1920;
        sStride = av_image_get_linesize(avctx->pix_fmt, pStride, 0);
    } else {
        sStride = bwidth;
    }

    dStride = priv->pic.linesize[0];
    dst     = priv->pic.data[0];

    av_log(priv->avctx, AV_LOG_VERBOSE, "CrystalHD: Copying out frame\n");

    if (interlaced) {
        int dY = 0;
        int sY = 0;

        height /= 2;
        if (bottom_field) {
            av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: bottom field\n");
            dY = 1;
        } else {
            av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: top field\n");
            dY = 0;
        }

        for (sY = 0; sY < height; dY += 2, sY++)
            memcpy(&(dst[dY * dStride]), &(src[sY * sStride]), bwidth);
    } else {
        av_image_copy_plane(dst, dStride, src, sStride, bwidth, height);
    }

    priv->pic.interlaced_frame = interlaced;
    if (interlaced)
        priv->pic.top_field_first = !bottom_first;

    priv->pic.pkt_pts = pkt_pts;

    if (!priv->need_second_field) {
        *data_size       = sizeof(AVFrame);
        *(AVFrame *)data = priv->pic;
    }

    /*
     * Two types of PAFF content have been observed. One form causes the
     * hardware to return a field pair and the other individual fields,
     * even though the input is always individual fields. We must skip
     * copying on the next decode() call to maintain pipeline length in
     * the first case.
     */
    if (!interlaced && (output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) &&
        (pic_type == PICT_TOP_FIELD || pic_type == PICT_BOTTOM_FIELD)) {
        av_log(priv->avctx, AV_LOG_VERBOSE, "Fieldpair from two packets.\n");
        return RET_SKIP_NEXT_COPY;
    }

    /*
     * Testing has shown that in all cases where we don't want to return the
     * full frame immediately, VDEC_FLAG_UNKNOWN_SRC is set.
     */
    return priv->need_second_field &&
           !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ?
           RET_COPY_NEXT_FIELD : RET_OK;
}
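The interlaced branch above deposits one decoded field into every second destination row by stepping dY twice per source row. The same interleave as a self-contained sketch (hypothetical helper; field selects top or bottom):

#include <stdint.h>
#include <string.h>

/* Copy one field (height source rows) into alternating destination rows,
 * as in the interlaced branch above. field = 0 for top, 1 for bottom. */
static void copy_field(uint8_t *dst, int dst_stride,
                       const uint8_t *src, int src_stride,
                       int bwidth, int height, int field)
{
    for (int sy = 0, dy = field; sy < height; sy++, dy += 2)
        memcpy(dst + dy * dst_stride, src + sy * src_stride, bwidth);
}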
Example #29
/**
 * Apply a simple delogo algorithm to the image in src and put the
 * result in dst.
 *
 * The algorithm is only applied to the region specified by the logo
 * parameters.
 *
 * @param w      width of the input image
 * @param h      height of the input image
 * @param sar    sample aspect ratio of the input image
 * @param logo_x x coordinate of the top left corner of the logo region
 * @param logo_y y coordinate of the top left corner of the logo region
 * @param logo_w width of the logo
 * @param logo_h height of the logo
 * @param band   the size of the band around the processed area
 * @param show   show a rectangle around the processed area, useful for
 *               parameters tweaking
 * @param direct if non-zero perform in-place processing
 */
static void apply_delogo(uint8_t *dst, int dst_linesize,
                         uint8_t *src, int src_linesize,
                         int w, int h, AVRational sar,
                         int logo_x, int logo_y, int logo_w, int logo_h,
                         unsigned int band, int show, int direct)
{
    int x, y;
    uint64_t interp, weightl, weightr, weightt, weightb;
    uint8_t *xdst, *xsrc;

    uint8_t *topleft, *botleft, *topright;
    unsigned int left_sample, right_sample;
    int xclipl, xclipr, yclipt, yclipb;
    int logo_x1, logo_x2, logo_y1, logo_y2;

    xclipl = FFMAX(-logo_x, 0);
    xclipr = FFMAX(logo_x+logo_w-w, 0);
    yclipt = FFMAX(-logo_y, 0);
    yclipb = FFMAX(logo_y+logo_h-h, 0);

    logo_x1 = logo_x + xclipl;
    logo_x2 = logo_x + logo_w - xclipr;
    logo_y1 = logo_y + yclipt;
    logo_y2 = logo_y + logo_h - yclipb;

    topleft  = src+logo_y1     * src_linesize+logo_x1;
    topright = src+logo_y1     * src_linesize+logo_x2-1;
    botleft  = src+(logo_y2-1) * src_linesize+logo_x1;

    if (!direct)
        av_image_copy_plane(dst, dst_linesize, src, src_linesize, w, h);

    dst += (logo_y1 + 1) * dst_linesize;
    src += (logo_y1 + 1) * src_linesize;

    for (y = logo_y1+1; y < logo_y2-1; y++) {
        left_sample = topleft[src_linesize*(y-logo_y1)]   +
                      topleft[src_linesize*(y-logo_y1-1)] +
                      topleft[src_linesize*(y-logo_y1+1)];
        right_sample = topright[src_linesize*(y-logo_y1)]   +
                       topright[src_linesize*(y-logo_y1-1)] +
                       topright[src_linesize*(y-logo_y1+1)];

        for (x = logo_x1+1,
             xdst = dst+logo_x1+1,
             xsrc = src+logo_x1+1; x < logo_x2-1; x++, xdst++, xsrc++) {

            /* Weighted interpolation based on relative distances, taking SAR into account */
            weightl = (uint64_t)              (logo_x2-1-x) * (y-logo_y1) * (logo_y2-1-y) * sar.den;
            weightr = (uint64_t)(x-logo_x1)                 * (y-logo_y1) * (logo_y2-1-y) * sar.den;
            weightt = (uint64_t)(x-logo_x1) * (logo_x2-1-x)               * (logo_y2-1-y) * sar.num;
            weightb = (uint64_t)(x-logo_x1) * (logo_x2-1-x) * (y-logo_y1)                 * sar.num;

            interp =
                left_sample * weightl
                +
                right_sample * weightr
                +
                (topleft[x-logo_x1]    +
                 topleft[x-logo_x1-1]  +
                 topleft[x-logo_x1+1]) * weightt
                +
                (botleft[x-logo_x1]    +
                 botleft[x-logo_x1-1]  +
                 botleft[x-logo_x1+1]) * weightb;
            interp /= (weightl + weightr + weightt + weightb) * 3U;

            if (y >= logo_y+band && y < logo_y+logo_h-band &&
                x >= logo_x+band && x < logo_x+logo_w-band) {
                *xdst = interp;
            } else {
                unsigned dist = 0;

                if      (x < logo_x+band)
                    dist = FFMAX(dist, logo_x-x+band);
                else if (x >= logo_x+logo_w-band)
                    dist = FFMAX(dist, x-(logo_x+logo_w-1-band));

                if      (y < logo_y+band)
                    dist = FFMAX(dist, logo_y-y+band);
                else if (y >= logo_y+logo_h-band)
                    dist = FFMAX(dist, y-(logo_y+logo_h-1-band));

                *xdst = (*xsrc*dist + interp*(band-dist))/band;
                if (show && (dist == band-1))
                    *xdst = 0;
            }
        }

        dst += dst_linesize;
        src += src_linesize;
    }
}
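Relative to Example #27, this version replaces the fixed divide-by-6 average with per-border weights that also fold in the sample aspect ratio, so the interpolation behaves isotropically in display space. A minimal sketch of just the weight computation (hypothetical helper; sar_num/sar_den stand in for the AVRational fields):

#include <stdint.h>

/* Distance-based weights for the left, right, top and bottom borders,
 * scaled by the sample aspect ratio as in the loop above. (x1,y1) and
 * (x2,y2) bound the clipped logo rectangle. */
static void border_weights(int x, int y,
                           int x1, int x2, int y1, int y2,
                           int sar_num, int sar_den,
                           uint64_t w[4] /* left,right,top,bottom */)
{
    w[0] = (uint64_t)(x2 - 1 - x) * (y - y1) * (y2 - 1 - y) * sar_den;
    w[1] = (uint64_t)(x - x1)     * (y - y1) * (y2 - 1 - y) * sar_den;
    w[2] = (uint64_t)(x - x1) * (x2 - 1 - x) * (y2 - 1 - y) * sar_num;
    w[3] = (uint64_t)(x - x1) * (x2 - 1 - x) * (y - y1)     * sar_num;
}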
Example #30
static int screenpresso_decode_frame(AVCodecContext *avctx, void *data,
                                     int *got_frame, AVPacket *avpkt)
{
    ScreenpressoContext *ctx = avctx->priv_data;
    AVFrame *frame = data;
    uLongf length = ctx->inflated_size;
    int keyframe;
    int ret;

    /* Size check */
    if (avpkt->size < 3) {
        av_log(avctx, AV_LOG_ERROR, "Packet too small (%d)\n", avpkt->size);
        return AVERROR_INVALIDDATA;
    }

    /* Basic sanity check, but not really harmful */
    if ((avpkt->data[0] != 0x73 && avpkt->data[0] != 0x72) ||
        avpkt->data[1] != 8) { // bpp probably
        av_log(avctx, AV_LOG_WARNING, "Unknown header 0x%02X%02X\n",
               avpkt->data[0], avpkt->data[1]);
    }
    keyframe = (avpkt->data[0] == 0x73);

    /* Inflate the frame after the 2 byte header */
    ret = uncompress(ctx->inflated_buf, &length,
                     avpkt->data + 2, avpkt->size - 2);
    if (ret) {
        av_log(avctx, AV_LOG_ERROR, "Deflate error %d.\n", ret);
        return AVERROR_UNKNOWN;
    }

    ret = ff_reget_buffer(avctx, ctx->current);
    if (ret < 0)
        return ret;

    /* When a keyframe is found, copy it (flipped) */
    if (keyframe)
        av_image_copy_plane(ctx->current->data[0] +
                            ctx->current->linesize[0] * (avctx->height - 1),
                            -1 * ctx->current->linesize[0],
                            ctx->inflated_buf, avctx->width * 3,
                            avctx->width * 3, avctx->height);
    /* Otherwise sum the delta on top of the current frame */
    else
        sum_delta_flipped(ctx->current->data[0], ctx->current->linesize[0],
                          ctx->inflated_buf, avctx->width * 3,
                          avctx->width * 3, avctx->height);

    /* Frame is ready to be output */
    ret = av_frame_ref(frame, ctx->current);
    if (ret < 0)
        return ret;

    /* Usual properties */
    if (keyframe) {
        frame->pict_type = AV_PICTURE_TYPE_I;
        frame->key_frame = 1;
    } else {
        frame->pict_type = AV_PICTURE_TYPE_P;
    }
    *got_frame = 1;

    return 0;
}
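The keyframe path above flips the image by pointing the destination at its last row and passing a negative linesize to av_image_copy_plane, so successive rows are written upwards. The same trick written out as a plain row loop, as a minimal sketch (hypothetical helper):

#include <stdint.h>
#include <string.h>

/* Copy src into dst upside down by starting at dst's last row and
 * stepping with a negative stride, as in the keyframe branch above. */
static void copy_flipped(uint8_t *dst, int dst_stride,
                         const uint8_t *src, int src_stride,
                         int bwidth, int height)
{
    dst += (height - 1) * dst_stride;  /* last destination row */
    for (int y = 0; y < height; y++) {
        memcpy(dst, src, bwidth);
        dst -= dst_stride;             /* move up one row */
        src += src_stride;
    }
}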