static void set_format_params(struct AVCodecContext *avctx, enum PixelFormat fmt){ int imgfmt; if (fmt == PIX_FMT_NONE) return; imgfmt = pixfmt2imgfmt(fmt); if (IMGFMT_IS_HWACCEL(imgfmt)) { sh_video_t *sh = avctx->opaque; vd_ffmpeg_ctx *ctx = sh->context; ctx->do_dr1 = 1; ctx->nonref_dr = 0; ctx->do_slices = 1; // HACK: FFmpeg thread handling is a major mess and // hinders any attempt to decide on hwaccel after the // codec is open. We really want this to change, so // just beat it until it's dead avctx->thread_count = 1; avctx->active_thread_type = 0; avctx->get_buffer = get_buffer; avctx->release_buffer = release_buffer; avctx->reget_buffer = get_buffer; avctx->draw_horiz_band = draw_slice; mp_msg(MSGT_DECVIDEO, MSGL_INFO, MSGTR_MPCODECS_XVMCAcceleratedMPEG2); avctx->slice_flags = SLICE_FLAG_CODED_ORDER|SLICE_FLAG_ALLOW_FIELD; } else { avctx->slice_flags &= ~(SLICE_FLAG_CODED_ORDER|SLICE_FLAG_ALLOW_FIELD); } }
static void set_format_params(struct AVCodecContext *avctx, enum AVPixelFormat fmt) { sh_video_t *sh = avctx->opaque; vd_ffmpeg_ctx *ctx = sh->context; int imgfmt; if (fmt == PIX_FMT_NONE) return; ctx->use_hwaccel = fmt == AV_PIX_FMT_VDPAU; imgfmt = pixfmt2imgfmt2(fmt, avctx->codec_id); if (IMGFMT_IS_HWACCEL(imgfmt)) { ctx->do_dr1 = 1; ctx->nonref_dr = 0; avctx->get_buffer = get_buffer; avctx->release_buffer = release_buffer; avctx->reget_buffer = get_buffer; mp_msg(MSGT_DECVIDEO, MSGL_INFO, MSGTR_MPCODECS_XVMCAcceleratedMPEG2); if (ctx->use_hwaccel) { avctx->draw_horiz_band = NULL; avctx->slice_flags = 0; ctx->do_slices = 0; } else { avctx->draw_horiz_band = draw_slice; avctx->slice_flags = SLICE_FLAG_CODED_ORDER|SLICE_FLAG_ALLOW_FIELD; ctx->do_slices = 1; } } else { set_dr_slice_settings(avctx, avctx->codec); } }
/**
 * Forward a GET_IMAGE request to the video output driver.
 * Only done when direct rendering is enabled or the format is
 * hardware-accelerated (where GET_IMAGE is mandatory), and only
 * after the VO has been configured.
 */
static void get_image(struct vf_instance *vf, mp_image_t *mpi)
{
    if (!vo_config_count)
        return;
    if (!vo_directrendering && !IMGFMT_IS_HWACCEL(mpi->imgfmt))
        return;
    video_out->control(VOCTRL_GET_IMAGE, mpi);
}
static int query_format(struct vf_instance *vf, unsigned int fmt) { if (!IMGFMT_IS_HWACCEL(fmt) && imgfmt2pixfmt(fmt) != AV_PIX_FMT_NONE) { if (sws_isSupportedInput(imgfmt2pixfmt(fmt)) < 1) return 0; unsigned int best = find_best_out(vf, fmt); int flags; if (!best) return 0; // no matching out-fmt flags = vf_next_query_format(vf, best); if (!(flags & (VFCAP_CSP_SUPPORTED | VFCAP_CSP_SUPPORTED_BY_HW))) return 0; if (fmt != best) flags &= ~VFCAP_CSP_SUPPORTED_BY_HW; return flags; } return 0; // nomatching in-fmt }
/**
 * Negotiate a common output colorspace between the codec and the filter
 * chain / VO, then configure the whole chain.
 *
 * Tries the codec's output formats (preferred_outfmt first), inserting
 * conversion filters (scale, palette, lavc) and retrying via the
 * csp_again label as needed. Also applies flip autodetection, aspect
 * ratio / -x -y / -xy display-size computation, and initial gamma/eq
 * settings.
 *
 * Returns 1 on success, 0 on failure (sh->vf_initialized is set to -1
 * on a hard failure).
 */
int mpcodecs_config_vo(sh_video_t *sh, int w, int h, unsigned int preferred_outfmt)
{
    int i, j;
    int only_preferred = 1;     // first pass: test only preferred_outfmt
    unsigned int out_fmt = 0;
    int screen_size_x = 0;      //SCREEN_SIZE_X;
    int screen_size_y = 0;      //SCREEN_SIZE_Y;
    vf_instance_t *vf = sh->vfilter, *sc = NULL;    // sc = scale filter we insert ourselves
    int palette = 0;            // 0: unused, 1: try palette filter, -1: already tried
    int vocfg_flags = 0;

    if (w)
        sh->disp_w = w;
    if (h)
        sh->disp_h = h;

    if (!sh->disp_w || !sh->disp_h)
        return 0;

    mp_msg(MSGT_DECVIDEO, MSGL_V,
           "VDec: vo config request - %d x %d (preferred colorspace: %s)\n", w, h,
           vo_format_name(preferred_outfmt));

//    if(!vf) return 1; // temp hack

    if (get_video_quality_max(sh) <= 0 && divx_quality) {
        // user wants postprocess but no pp filter yet:
        sh->vfilter = vf = vf_open_filter(vf, "pp", NULL);
    }

    // check if libvo and codec has common outfmt (no conversion):
  csp_again:
    // re-entered after every modification of the filter chain

    if (mp_msg_test(MSGT_DECVIDEO, MSGL_V)) {
        vf_instance_t *f = vf;
        mp_msg(MSGT_DECVIDEO, MSGL_V, "Trying filter chain:");
        for (f = vf; f; f = f->next)
            mp_msg(MSGT_DECVIDEO, MSGL_V, " %s", f->info->name);
        mp_msg(MSGT_DECVIDEO, MSGL_V, "\n");
    }

    j = -1;
    // Two passes in one loop: while only_preferred is set only
    // preferred_outfmt is considered; when i reaches CODECS_MAX_OUTFMT
    // it is reset to 0 and all outfmts are tried.
    for (i = 0; only_preferred || i < CODECS_MAX_OUTFMT; i++) {
        int flags;
        if (i == CODECS_MAX_OUTFMT) {
            i = 0;
            only_preferred = 0;
        }
        out_fmt = sh->codec->outfmt[i];
        if (only_preferred && out_fmt != preferred_outfmt)
            continue;
        if (out_fmt == (unsigned int) 0xFFFFFFFF)   // empty outfmt slot
            continue;
        // check (query) if codec really support this outfmt...
        sh->outfmtidx = i;      // pass index to the control() function this way
        if (mpvdec->control(sh, VDCTRL_QUERY_FORMAT, &out_fmt) == CONTROL_FALSE) {
            mp_msg(MSGT_CPLAYER, MSGL_DBG2,
                   "vo_debug: codec query_format(%s) returned FALSE\n",
                   vo_format_name(out_fmt));
            continue;
        }
        flags = vf->query_format(vf, out_fmt);
        mp_msg(MSGT_CPLAYER, MSGL_DBG2,
               "vo_debug: query(%s) returned 0x%X (i=%d) \n",
               vo_format_name(out_fmt), flags, i);
        // Prefer hw-supported formats; remember the first sw-supported one.
        if ((flags & VFCAP_CSP_SUPPORTED_BY_HW)
            || (flags & VFCAP_CSP_SUPPORTED && j < 0)) {
            j = i;
            vo_flags = flags;
            if (flags & VFCAP_CSP_SUPPORTED_BY_HW)
                break;
        } else if (!palette
                   && !(flags & (VFCAP_CSP_SUPPORTED_BY_HW | VFCAP_CSP_SUPPORTED))
                   && (out_fmt == IMGFMT_RGB8 || out_fmt == IMGFMT_BGR8)) {
            // 8bpp paletted output was rejected; remember to try -vf palette
            palette = 1;
        }
    }

    if (j < 0 && !IMGFMT_IS_HWACCEL(preferred_outfmt)) {
        // TODO: no match - we should use conversion...
        if (strcmp(vf->info->name, "scale") && palette != -1) {
            // insert a scale filter and retry the whole negotiation
            mp_msg(MSGT_DECVIDEO, MSGL_INFO, MSGTR_CouldNotFindColorspace);
            sc = vf = vf_open_filter(vf, "scale", NULL);
            goto csp_again;
        } else if (palette == 1) {
            mp_msg(MSGT_DECVIDEO, MSGL_V, "vd: Trying -vf palette...\n");
            palette = -1;
            vf = vf_open_filter(vf, "palette", NULL);
            goto csp_again;
        } else {
            // sws failed, if the last filter (vf_vo) support MPEGPES try to append vf_lavc
            vf_instance_t *vo, *vp = NULL, *ve, *vpp = NULL;
            // Remove the scale filter if we added it ourselves
            if (vf == sc) {
                ve = vf;
                vf = vf->next;
                vf_uninit_filter(ve);
            }
            // Find the last filter (vf_vo); vp/vpp track the one and two
            // filters before it.
            for (vo = vf; vo->next; vo = vo->next) {
                vpp = vp;
                vp = vo;
            }
            if (vo->query_format(vo, IMGFMT_MPEGPES)
                && (!vp || (vp && strcmp(vp->info->name, "lavc")))) {
                // append an MPEG encoder (vf_lavc) right before vf_vo
                ve = vf_open_filter(vo, "lavc", NULL);
                if (vp)
                    vp->next = ve;
                else
                    vf = ve;
                goto csp_again;
            }
            // lavc was already tried and did not help: remove it again
            if (vp && !strcmp(vp->info->name, "lavc")) {
                if (vpp)
                    vpp->next = vo;
                else
                    vf = vo;
                vf_uninit_filter(vp);
            }
        }
    }
    if (j < 0) {
        mp_msg(MSGT_CPLAYER, MSGL_WARN, MSGTR_VOincompCodec);
        sh->vf_initialized = -1;
        return 0;               // failed
    }
    out_fmt = sh->codec->outfmt[j];
    mp_msg(MSGT_CPLAYER, MSGL_V, "VDec: using %s as output csp (no %d)\n",
           vo_format_name(out_fmt), j);
    sh->outfmtidx = j;
    sh->vfilter = vf;

    // autodetect flipping
    if (flip == -1) {
        flip = 0;
        if (sh->codec->outflags[j] & CODECS_FLAG_FLIP)
            if (!(sh->codec->outflags[j] & CODECS_FLAG_NOFLIP))
                flip = 1;
    }
    if (vo_flags & VFCAP_FLIPPED)
        flip ^= 1;
    flip ^= sh->flipped_input;
    if (flip && !(vo_flags & VFCAP_FLIP)) {
        // we need to flip, but no flipping filter avail.
        vf_add_before_vo(&vf, "flip", NULL);
        sh->vfilter = vf;
    }
    // time to do aspect ratio corrections...

    if (movie_aspect > -1.0)
        sh->aspect = movie_aspect;      // cmdline overrides autodetect
    else if (sh->stream_aspect != 0.0)
        sh->aspect = sh->stream_aspect;
    else
        sh->aspect = sh->original_aspect;

    if (opt_screen_size_x || opt_screen_size_y) {
        // explicit -x / -y given on the command line
        screen_size_x = opt_screen_size_x;
        screen_size_y = opt_screen_size_y;
        if (!vidmode) {
            if (!screen_size_x)
                screen_size_x = SCREEN_SIZE_X;
            if (!screen_size_y)
                screen_size_y = SCREEN_SIZE_Y;
            // values <= 8 are treated as scale factors, not pixel sizes
            if (screen_size_x <= 8)
                screen_size_x *= sh->disp_w;
            if (screen_size_y <= 8)
                screen_size_y *= sh->disp_h;
        }
    } else {
        // check source format aspect, calculate prescale ::atmos
        screen_size_x = sh->disp_w;
        screen_size_y = sh->disp_h;
        if (screen_size_xy >= 0.001) {
            if (screen_size_xy <= 8) {
                // -xy means x+y scale
                screen_size_x *= screen_size_xy;
                screen_size_y *= screen_size_xy;
            } else {
                // -xy means forced width while keeping correct aspect
                screen_size_x = screen_size_xy;
                screen_size_y = screen_size_xy * sh->disp_h / sh->disp_w;
            }
        }
        if (sh->aspect >= 0.01) {
            int w;
            mp_msg(MSGT_CPLAYER, MSGL_INFO, MSGTR_MovieAspectIsSet, sh->aspect);
            mp_msg(MSGT_IDENTIFY, MSGL_INFO, "ID_VIDEO_ASPECT=%1.4f\n", sh->aspect);
            w = (int) ((float) screen_size_y * sh->aspect);
            w += w % 2;         // round
            // we don't like horizontal downscale || user forced width:
            if (w < screen_size_x || screen_size_xy > 8) {
                screen_size_y = (int) ((float) screen_size_x * (1.0 / sh->aspect));
                screen_size_y += screen_size_y % 2;     // round
            } else
                screen_size_x = w;      // keep new width
        } else {
            mp_msg(MSGT_CPLAYER, MSGL_INFO, MSGTR_MovieAspectUndefined);
        }
    }

    vocfg_flags = (fullscreen ? VOFLAG_FULLSCREEN : 0)
        | (vidmode ? VOFLAG_MODESWITCHING : 0)
        | (softzoom ? VOFLAG_SWSCALE : 0)
        | (flip ? VOFLAG_FLIPPING : 0);

    // Time to config libvo!
    mp_msg(MSGT_CPLAYER, MSGL_V, "VO Config (%dx%d->%dx%d,flags=%d,'%s',0x%X)\n",
           sh->disp_w, sh->disp_h, screen_size_x, screen_size_y, vocfg_flags,
           "MPlayer", out_fmt);

    vf->w = sh->disp_w;
    vf->h = sh->disp_h;

    if (vf_config_wrapper
        (vf, sh->disp_w, sh->disp_h, screen_size_x, screen_size_y, vocfg_flags,
         out_fmt) == 0) {
        //                      "MPlayer",out_fmt)){
        mp_msg(MSGT_CPLAYER, MSGL_WARN, MSGTR_CannotInitVO);
        sh->vf_initialized = -1;
        return 0;
    }

    sh->vf_initialized = 1;

    // apply command-line equalizer settings (1000 == "not set")
    if (vo_gamma_gamma != 1000)
        set_video_colors(sh, "gamma", vo_gamma_gamma);
    if (vo_gamma_brightness != 1000)
        set_video_colors(sh, "brightness", vo_gamma_brightness);
    if (vo_gamma_contrast != 1000)
        set_video_colors(sh, "contrast", vo_gamma_contrast);
    if (vo_gamma_saturation != 1000)
        set_video_colors(sh, "saturation", vo_gamma_saturation);
    if (vo_gamma_hue != 1000)
        set_video_colors(sh, "hue", vo_gamma_hue);

    return 1;
}
/**
 * Set the image format of mpi and derive the format-dependent fields:
 * bpp, num_planes, planar/YUV/swapped flags and chroma subsampling.
 * Compressed and hwaccel formats get bpp=0 (no pixel data to manage).
 * Unknown formats are reported and also get bpp=0.
 */
void mp_image_setfmt(mp_image_t* mpi,unsigned int out_fmt){
    mpi->flags&=~(MP_IMGFLAG_PLANAR|MP_IMGFLAG_YUV|MP_IMGFLAG_SWAPPED);
    mpi->imgfmt=out_fmt;
    // compressed formats
    if(out_fmt == IMGFMT_MPEGPES ||
       IMGFMT_IS_HWACCEL(out_fmt)){
        mpi->bpp=0;
        return;
    }
    mpi->num_planes=1;
    if (IMGFMT_IS_RGB(out_fmt)) {
        // depths < 8 stay as-is unless bit 128 marks a padded format
        if (IMGFMT_RGB_DEPTH(out_fmt) < 8 && !(out_fmt&128))
            mpi->bpp = IMGFMT_RGB_DEPTH(out_fmt);
        else
            mpi->bpp=(IMGFMT_RGB_DEPTH(out_fmt)+7)&(~7);    // round up to whole bytes
        return;
    }
    if (IMGFMT_IS_BGR(out_fmt)) {
        if (IMGFMT_BGR_DEPTH(out_fmt) < 8 && !(out_fmt&128))
            mpi->bpp = IMGFMT_BGR_DEPTH(out_fmt);
        else
            mpi->bpp=(IMGFMT_BGR_DEPTH(out_fmt)+7)&(~7);    // round up to whole bytes
        mpi->flags|=MP_IMGFLAG_SWAPPED;
        return;
    }
    // everything else is treated as YUV
    mpi->flags|=MP_IMGFLAG_YUV;
    mpi->num_planes=3;
    if (mp_get_chroma_shift(out_fmt, NULL, NULL, NULL)) {
        // planar YUV: derive bpp and chroma plane dimensions from the shifts
        mpi->flags|=MP_IMGFLAG_PLANAR;
        mpi->bpp = mp_get_chroma_shift(out_fmt, &mpi->chroma_x_shift, &mpi->chroma_y_shift, NULL);
        mpi->chroma_width = mpi->width >> mpi->chroma_x_shift;
        mpi->chroma_height = mpi->height >> mpi->chroma_y_shift;
    }
    switch(out_fmt){
    case IMGFMT_I420:
    case IMGFMT_IYUV:
        mpi->flags|=MP_IMGFLAG_SWAPPED;
        /* fallthrough */
    case IMGFMT_YV12:
        return;
    case IMGFMT_420A:
    case IMGFMT_IF09:
        mpi->num_planes=4;
        /* fallthrough */
    case IMGFMT_YVU9:
    case IMGFMT_444P:
    case IMGFMT_422P:
    case IMGFMT_411P:
    case IMGFMT_440P:
    case IMGFMT_444P16_LE:
    case IMGFMT_444P16_BE:
    case IMGFMT_444P10_LE:
    case IMGFMT_444P10_BE:
    case IMGFMT_444P9_LE:
    case IMGFMT_444P9_BE:
    case IMGFMT_422P16_LE:
    case IMGFMT_422P16_BE:
    case IMGFMT_422P10_LE:
    case IMGFMT_422P10_BE:
    case IMGFMT_420P16_LE:
    case IMGFMT_420P16_BE:
    case IMGFMT_420P10_LE:
    case IMGFMT_420P10_BE:
    case IMGFMT_420P9_LE:
    case IMGFMT_420P9_BE:
        return;
    case IMGFMT_Y800:
    case IMGFMT_Y8:
        /* they're planar ones, but for easier handling use them as packed */
        mpi->flags&=~MP_IMGFLAG_PLANAR;
        mpi->num_planes=1;
        return;
    case IMGFMT_UYVY:
        mpi->flags|=MP_IMGFLAG_SWAPPED;
        /* fallthrough */
    case IMGFMT_YUY2:
        mpi->bpp=16;
        mpi->num_planes=1;
        return;
    case IMGFMT_NV12:
        mpi->flags|=MP_IMGFLAG_SWAPPED;
        /* fallthrough */
    case IMGFMT_NV21:
        // semi-planar: one luma plane plus one interleaved chroma plane
        mpi->flags|=MP_IMGFLAG_PLANAR;
        mpi->bpp=12;
        mpi->num_planes=2;
        mpi->chroma_width=(mpi->width>>0);
        mpi->chroma_height=(mpi->height>>1);
        mpi->chroma_x_shift=0;
        mpi->chroma_y_shift=1;
        return;
    }
    mp_msg(MSGT_DECVIDEO,MSGL_WARN,"mp_image: unknown out_fmt: 0x%X\n",out_fmt);
    mpi->bpp=0;
}
/**
 * Accept fmt only when it is a software format that swscale can read
 * and a usable output format exists downstream.
 * Returns 1 if the format is accepted, 0 otherwise.
 */
static int query_format(struct vf_instance *vf, unsigned int fmt)
{
    // hwaccel surfaces carry no pixel data swscale could read
    if (IMGFMT_IS_HWACCEL(fmt))
        return 0;
    if (sws_isSupportedInput(imgfmt2pixfmt(fmt)) < 1)
        return 0;
    return find_best_out(vf, fmt) ? 1 : 0;
}
/**
 * Pass the format query down the chain for software formats;
 * hardware-accelerated surfaces cannot pass through this filter.
 */
static int query_format(struct vf_instance *vf, unsigned int fmt)
{
    if (IMGFMT_IS_HWACCEL(fmt))
        return 0;
    return vf_next_query_format(vf, fmt);
}
/**
 * Report support for every software format; hardware-accelerated
 * surfaces are rejected.
 */
static int query_format(struct vo *vo, uint32_t format)
{
    return IMGFMT_IS_HWACCEL(format) ? 0 : VFCAP_CSP_SUPPORTED;
}