Example #1
static void set_format_params(struct AVCodecContext *avctx, enum PixelFormat fmt){
    int imgfmt;
    if (fmt == PIX_FMT_NONE)
        return;
    imgfmt = pixfmt2imgfmt(fmt);
    if (IMGFMT_IS_HWACCEL(imgfmt)) {
        sh_video_t *sh     = avctx->opaque;
        vd_ffmpeg_ctx *ctx = sh->context;
        ctx->do_dr1    = 1;
        ctx->nonref_dr = 0;
        ctx->do_slices = 1;
        // HACK: FFmpeg thread handling is a major mess and
        // hinders any attempt to decide on hwaccel after the
        // codec is open. We really want this to change, so
        // just beat it until it's dead
        avctx->thread_count    = 1;
        avctx->active_thread_type = 0;
        avctx->get_buffer      = get_buffer;
        avctx->release_buffer  = release_buffer;
        avctx->reget_buffer    = get_buffer;
        avctx->draw_horiz_band = draw_slice;
        mp_msg(MSGT_DECVIDEO, MSGL_INFO, MSGTR_MPCODECS_XVMCAcceleratedMPEG2);
        avctx->slice_flags = SLICE_FLAG_CODED_ORDER|SLICE_FLAG_ALLOW_FIELD;
    } else {
        avctx->slice_flags &= ~(SLICE_FLAG_CODED_ORDER|SLICE_FLAG_ALLOW_FIELD);
    }
}
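A minimal sketch (hypothetical, not the actual MPlayer get_format callback) of how a libavcodec get_format callback could walk the formats offered by the decoder and apply the hwaccel setup from set_format_params() to the one it picks:

/* Hypothetical sketch only: pick the first hardware-accelerated format
 * and run the hwaccel-specific setup above on it. */
static enum PixelFormat get_format(struct AVCodecContext *avctx,
                                   const enum PixelFormat *fmt)
{
    enum PixelFormat selected = PIX_FMT_NONE;
    for (int i = 0; fmt[i] != PIX_FMT_NONE; i++) {
        int imgfmt = pixfmt2imgfmt(fmt[i]);
        if (IMGFMT_IS_HWACCEL(imgfmt)) {
            selected = fmt[i];
            break;
        }
    }
    if (selected == PIX_FMT_NONE)
        selected = fmt[0];      /* fall back to the decoder's first choice */
    set_format_params(avctx, selected);
    return selected;
}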
Example #2
static int pixfmt2imgfmt2(enum AVPixelFormat fmt, enum AVCodecID cid)
{
    if (fmt == AV_PIX_FMT_VDPAU)
        switch (cid) {
        case AV_CODEC_ID_H264:       return IMGFMT_VDPAU_H264;
        case AV_CODEC_ID_MPEG1VIDEO: return IMGFMT_VDPAU_MPEG1;
        case AV_CODEC_ID_MPEG2VIDEO: return IMGFMT_VDPAU_MPEG2;
        case AV_CODEC_ID_MPEG4:      return IMGFMT_VDPAU_MPEG4;
        case AV_CODEC_ID_WMV3:       return IMGFMT_VDPAU_WMV3;
        case AV_CODEC_ID_VC1:        return IMGFMT_VDPAU_VC1;
        }
    return pixfmt2imgfmt(fmt);
}
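A hedged usage sketch: a hypothetical helper (both the helper and the query_format callback are assumptions, not project code) that resolves the codec-specific VDPAU surface format via pixfmt2imgfmt2() and asks the video output whether it can display it:

/* Hypothetical helper: return nonzero if the assumed query_format callback
 * accepts the VDPAU image format resolved for the current codec. */
static int vo_supports_vdpau(struct AVCodecContext *avctx,
                             int (*query_format)(int imgfmt))
{
    int imgfmt = pixfmt2imgfmt2(AV_PIX_FMT_VDPAU, avctx->codec_id);
    return query_format(imgfmt) > 0;
}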
Example #3
static int get_encoder_format(struct AVCodec *codec, int srcfmt, bool highdepth)
{
    const enum AVPixelFormat *pix_fmts = codec->pix_fmts;
    int current = 0;
    for (int n = 0; pix_fmts && pix_fmts[n] != AV_PIX_FMT_NONE; n++) {
        int fmt = pixfmt2imgfmt(pix_fmts[n]);
        if (!fmt)
            continue;
        // Ignore RGB formats deeper than 32 bits per pixel
        // (i.e. more than 8 bits per component).
        if (!highdepth && IMGFMT_RGB_DEPTH(fmt) > 32)
            continue;
        current = current ? mp_imgfmt_select_best(current, fmt, srcfmt) : fmt;
    }
    return current;
}
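A possible call site, sketched with assumed names (choose_target_format and the IMGFMT_420P fallback are illustrations, not taken from the original code): pick the pixel format to convert decoded frames to before opening the encoder.

/* Hypothetical call site: srcfmt is the format of the decoded frames. */
static int choose_target_format(struct AVCodec *codec, int srcfmt)
{
    int target = get_encoder_format(codec, srcfmt, false /* highdepth */);
    return target ? target : IMGFMT_420P;   /* assumed fallback format */
}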
Example #4
enum AVPixelFormat imgfmt2pixfmt(int fmt)
{
    if (fmt == IMGFMT_NONE)
        return AV_PIX_FMT_NONE;

    if (fmt >= IMGFMT_AVPIXFMT_START && fmt < IMGFMT_AVPIXFMT_END) {
        enum AVPixelFormat pixfmt = fmt - IMGFMT_AVPIXFMT_START;
        // Avoid duplicate format - each format must be unique.
        int mpfmt = pixfmt2imgfmt(pixfmt);
        if (mpfmt == fmt)
            return pixfmt;
        return AV_PIX_FMT_NONE;
    }

    for (int i = 0; conversion_map[i].fmt; i++) {
        if (conversion_map[i].fmt == fmt)
            return conversion_map[i].pix_fmt;
    }
    return AV_PIX_FMT_NONE;
}
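A small hypothetical sanity check (not part of the project) that exercises the round trip between the two mappings; aliased or duplicate formats may legitimately report a non-exact round trip, which is what the duplicate check above is guarding against:

/* Hypothetical check: does imgfmt2pixfmt() undo pixfmt2imgfmt() exactly? */
static int roundtrip_is_exact(enum AVPixelFormat pf)
{
    int mpfmt = pixfmt2imgfmt(pf);
    return mpfmt && imgfmt2pixfmt(mpfmt) == pf;
}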
Example #5
File: vf_lavfi.c  Project: 0p1pp1/mplayer
static int config(struct vf_instance *vf, int w, int h, int dw, int dh,
                        unsigned flags, unsigned fmt)
{
    int ret;
    AVFilterLink *out;
    AVRational iar, dar;

    av_reduce(&iar.num, &iar.den, w, h, INT_MAX);
    av_reduce(&dar.num, &dar.den, dw, dh, INT_MAX);
    vf->priv->in_pixfmt = imgfmt2pixfmt(fmt);
    vf->priv->in_imgfmt = fmt;
    vf->priv->in_w = w;
    vf->priv->in_h = h;
    vf->priv->in_sar = av_div_q(dar, iar);
    ret = avfilter_graph_config(vf->priv->graph, NULL);
    if (ret < 0)
        return 0;
    out = vf->priv->out->inputs[0];
    vf->priv->out_w = out->w;
    vf->priv->out_h = out->h;
    vf->priv->out_pixfmt = out->format;
    vf->priv->out_imgfmt = pixfmt2imgfmt(out->format);
    vf->priv->out_sar = out->sample_aspect_ratio;
    if (vf->priv->out_sar.num != vf->priv->in_sar.num ||
        vf->priv->out_sar.den != vf->priv->in_sar.den ||
        out->w != w || out->h != h) {
        av_reduce(&iar.num, &iar.den, out->w, out->h, INT_MAX);
        dar = av_mul_q(iar, out->sample_aspect_ratio);
        if (av_cmp_q(dar, iar) >= 0) {
            dh = out->h;
            dw = av_rescale(dh, dar.num, dar.den);
        } else {
            dw = out->w;
            dh = av_rescale(dw, dar.den, dar.num);
        }
    }
    return vf_next_config(vf, out->w, out->h, dw, dh, flags, fmt);
}
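A worked example of the aspect-ratio arithmetic used above, with assumed input sizes: a 720x576 picture displayed at 1024x576 yields the familiar PAL widescreen sample aspect ratio of 64:45.

#include <limits.h>
#include <libavutil/rational.h>

/* Illustrative only: SAR = DAR / IAR for assumed 720x576 -> 1024x576. */
static AVRational pal_widescreen_sar(void)
{
    AVRational iar, dar;
    av_reduce(&iar.num, &iar.den, 720, 576, INT_MAX);    /* image aspect 5:4    */
    av_reduce(&dar.num, &dar.den, 1024, 576, INT_MAX);   /* display aspect 16:9 */
    return av_div_q(dar, iar);                           /* SAR = 64:45         */
}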
Example #6
File: vf_lavfi.c  Project: 0p1pp1/mplayer
static int mpsink_query_formats(AVFilterContext *ctx)
{
    struct mpsink_priv *c = ctx->priv;
    struct vf_instance *vf = c->vf;
    AVFilterFormats *all;
    enum AVPixelFormat *sup;
    unsigned i, nsup = 0;
    int ifmt;

    all = avfilter_all_formats(AVMEDIA_TYPE_VIDEO);
    sup = av_mallocz(sizeof(*sup) * (all->format_count + 1));
    if (!sup)
        return AVERROR(errno);
    for(i = 0; i < all->format_count; i++) {
        ifmt = pixfmt2imgfmt(all->formats[i]);
        if (vf->next->query_format(vf->next, ifmt) > 0)
            sup[nsup++] = all->formats[i];
    }
    sup[nsup++] = AV_PIX_FMT_NONE;
    avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(sup));
    av_free(sup);
    return 0;
}
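For illustration, a hypothetical vf->next->query_format() implementation of the kind the loop above probes; the image-format and capability names are standard MPlayer identifiers, but this function itself is not part of vf_lavfi.c:

/* Hypothetical downstream filter: accept two planar YUV formats,
 * reject everything else. */
static int example_query_format(struct vf_instance *vf, unsigned int fmt)
{
    switch (fmt) {
    case IMGFMT_YV12:
    case IMGFMT_I420:
        return VFCAP_CSP_SUPPORTED;
    default:
        return 0;
    }
}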