/** @brief Query if movie colorspace is supported by the HW.
 * @return 0 on failure, device capabilities (not probed
 * currently) on success.
 */
static int query_format(uint32_t movie_fmt)
{
    for (int idx = 0; idx < DISPLAY_FORMAT_TABLE_ENTRIES; idx++) {
        if (fmt_table[idx].mplayer_fmt != movie_fmt)
            continue;

        /* Test conversion from Movie colorspace to
         * display's target colorspace. */
        if (FAILED(IDirect3D9_CheckDeviceFormatConversion(
                       priv->d3d_handle, D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL,
                       fmt_table[idx].fourcc, priv->desktop_fmt))) {
            mp_msg(MSGT_VO, MSGL_V,
                   "<vo_direct3d>Rejected image format: %s\n",
                   vo_format_name(fmt_table[idx].mplayer_fmt));
            return 0;
        }

        priv->movie_src_fmt = fmt_table[idx].fourcc;
        mp_msg(MSGT_VO, MSGL_V, "<vo_direct3d>Accepted image format: %s\n",
               vo_format_name(fmt_table[idx].mplayer_fmt));
        return VFCAP_CSP_SUPPORTED | VFCAP_CSP_SUPPORTED_BY_HW
               | VFCAP_OSD | VFCAP_HWSCALE_UP | VFCAP_HWSCALE_DOWN;
    }

    /* Format not present in the table at all. */
    return 0;
}
static int config(struct vf_instance *vf, int width, int height, int d_width, int d_height, unsigned int flags, unsigned int outfmt) { if ((width <= 0) || (height <= 0) || (d_width <= 0) || (d_height <= 0)) { mp_msg(MSGT_CPLAYER, MSGL_ERR, "VO: invalid dimensions!\n"); return 0; } const vo_info_t *info = video_out->driver->info; mp_msg(MSGT_CPLAYER, MSGL_INFO, "VO: [%s] %dx%d => %dx%d %s %s%s\n", info->short_name, width, height, d_width, d_height, vo_format_name(outfmt), (flags & VOFLAG_FULLSCREEN) ? " [fs]" : "", (flags & VOFLAG_FLIPPING) ? " [flip]" : ""); mp_msg(MSGT_CPLAYER, MSGL_V, "VO: Description: %s\n", info->name); mp_msg(MSGT_CPLAYER, MSGL_V, "VO: Author: %s\n", info->author); if (info->comment && strlen(info->comment) > 0) mp_msg(MSGT_CPLAYER, MSGL_V, "VO: Comment: %s\n", info->comment); if (vo_config(video_out, width, height, d_width, d_height, flags, outfmt)) return 0; return 1; }
/* Report whether this VO accepts the given image format, and with which
 * capability flags.  BGR formats matching the screen depth additionally
 * get the "supported by HW" bit. */
static int query_format(uint32_t format)
{
    mp_msg(MSGT_VO, MSGL_DBG2,
           "vo_x11: query_format was called: %x (%s)\n", format,
           vo_format_name(format));

    if (IMGFMT_IS_BGR(format)) {
        if (IMGFMT_BGR_DEPTH(format) <= 8)
            return 0;           // TODO 8bpp not yet fully implemented
        int caps = VFCAP_CSP_SUPPORTED | VFCAP_OSD | VFCAP_SWSCALE |
                   VFCAP_FLIP | VFCAP_ACCEPT_STRIDE;
        if (IMGFMT_BGR_DEPTH(format) == vo_depthonscreen)
            caps |= VFCAP_CSP_SUPPORTED_BY_HW;
        return caps;
    }

    switch (format) {
    case IMGFMT_I420:
    case IMGFMT_IYUV:
    case IMGFMT_YV12:
        return VFCAP_CSP_SUPPORTED | VFCAP_OSD | VFCAP_SWSCALE |
               VFCAP_ACCEPT_STRIDE;
    default:
        return 0;
    }
}
/* Fill in bit depth, image size and compression of a BITMAPINFOHEADER
 * for the requested output image format.  On an unsupported format an
 * error is logged and the header is left untouched. */
static void set_csp(BITMAPINFOHEADER *o_bih, unsigned int outfmt)
{
    int is_yuv = 0;

    switch (outfmt) {
    /* planar YUV formats */
    case IMGFMT_YV12:
    case IMGFMT_I420:
    case IMGFMT_IYUV:
        o_bih->biBitCount = 12;
        is_yuv = 1;
        break;
    case IMGFMT_YVU9:
    case IMGFMT_IF09:
        o_bih->biBitCount = 9;
        is_yuv = 1;
        break;
    /* packed YUV formats */
    case IMGFMT_YUY2:
    case IMGFMT_UYVY:
    case IMGFMT_YVYU:
        o_bih->biBitCount = 16;
        is_yuv = 1;
        break;
    /* RGB/BGR formats */
    case IMGFMT_RGB8:
    case IMGFMT_BGR8:
        o_bih->biBitCount = 8;
        break;
    case IMGFMT_RGB15:
    case IMGFMT_RGB16:
    case IMGFMT_BGR15:
    case IMGFMT_BGR16:
        o_bih->biBitCount = 16;
        break;
    case IMGFMT_RGB24:
    case IMGFMT_BGR24:
        o_bih->biBitCount = 24;
        break;
    case IMGFMT_RGB32:
    case IMGFMT_BGR32:
        o_bih->biBitCount = 32;
        break;
    default:
        mp_msg(MSGT_WIN32, MSGL_ERR, "Unsupported image format: %s\n",
               vo_format_name(outfmt));
        return;
    }

    /* biHeight may be negative (top-down bitmap), hence abs(). */
    o_bih->biSizeImage =
        abs(o_bih->biWidth * o_bih->biHeight * (o_bih->biBitCount / 8));

    // Note: we cannot rely on sh->outfmtidx here, it's undefined at this stage!!!
    //if (yuv && !(sh->codec->outflags[sh->outfmtidx] & CODECS_FLAG_YUVHACK))
    o_bih->biCompression = is_yuv ? outfmt : 0;
}
/* Push new decoder output parameters through the filter chain and the VO,
 * then (re)apply the user's video equalizer settings.
 *
 * probe_only: if true, a filter-reconfig failure will NOT trigger the
 * hardware-decoding fallback path.
 * On failure, d_video->vfilter->initialized is left <= 0 so callers can
 * detect that no usable output configuration exists. */
static void reconfig_video(struct MPContext *mpctx,
                           const struct mp_image_params *params,
                           bool probe_only)
{
    struct MPOpts *opts = mpctx->opts;
    struct dec_video *d_video = mpctx->d_video;

    d_video->decoder_output = *params;

    set_allowed_vo_formats(d_video->vfilter, mpctx->video_out);

    // The event should happen _after_ filter and VO reconfig. Since we don't
    // have any fine grained locking, this is just as good.
    mp_notify(mpctx, MPV_EVENT_VIDEO_RECONFIG, NULL);

    if (video_reconfig_filters(d_video, params) < 0) {
        // Most video filters don't work with hardware decoding, so this
        // might be the reason filter reconfig failed.
        if (!probe_only &&
            video_vd_control(d_video, VDCTRL_FORCE_HWDEC_FALLBACK, NULL)
                == CONTROL_OK)
        {
            // Fallback active; decoder will return software format next
            // time. Don't abort video decoding.
            d_video->vfilter->initialized = 0;
        }
        return;
    }

    // Filter chain not (yet) usable -> nothing to hand to the VO.
    if (d_video->vfilter->initialized < 1)
        return;

    struct mp_image_params p = d_video->vfilter->output_params;
    const struct vo_driver *info = mpctx->video_out->driver;
    MP_INFO(mpctx, "VO: [%s] %dx%d => %dx%d %s\n",
            info->name, p.w, p.h, p.d_w, p.d_h, vo_format_name(p.imgfmt));
    MP_VERBOSE(mpctx, "VO: Description: %s\n", info->description);

    int r = vo_reconfig(mpctx->video_out, &p, 0);
    if (r < 0)
        d_video->vfilter->initialized = -1;

    if (r >= 0) {
        // 1000 is the sentinel for "option not set by the user".
        if (opts->gamma_gamma != 1000)
            video_set_colors(d_video, "gamma", opts->gamma_gamma);
        if (opts->gamma_brightness != 1000)
            video_set_colors(d_video, "brightness", opts->gamma_brightness);
        if (opts->gamma_contrast != 1000)
            video_set_colors(d_video, "contrast", opts->gamma_contrast);
        if (opts->gamma_saturation != 1000)
            video_set_colors(d_video, "saturation", opts->gamma_saturation);
        if (opts->gamma_hue != 1000)
            video_set_colors(d_video, "hue", opts->gamma_hue);
    }
}
/* Pick the best output format the downstream filter/VO accepts.
 *
 * Candidate order (driven by the i/j state machine below):
 *   1. j < 0:  the input format itself (no conversion),
 *   2. i < 0:  the entries of preferred_conversions[] matching in_format,
 *   3. i >= 0: the generic outfmt_list[] preference table,
 * and as a last resort every format in [IMGFMT_START, IMGFMT_END).
 * A format supported directly by hardware wins immediately; otherwise the
 * first format supported via conversion is remembered. */
static unsigned int find_best_out(vf_instance_t *vf, int in_format)
{
    unsigned int best = 0;
    int i = -1;     // index into outfmt_list once the preferred pass is done
    int j = -1;     // index into preferred_conversions; -1 = try in_format first
    int format = 0;

    // find the best outfmt:
    while (1) {
        int ret;
        if (j < 0) {
            format = in_format;
            j = 0;
        } else if (i < 0) {
            // skip preferred-conversion entries for other input formats
            while (preferred_conversions[j][0] &&
                   preferred_conversions[j][0] != in_format)
                j++;
            format = preferred_conversions[j++][1];
            // switch to standard list
            if (!format)
                i = 0;
        }
        if (i >= 0)
            format = outfmt_list[i++];
        if (!format)
            break;

        ret = check_outfmt(vf, format);

        mp_msg(MSGT_VFILTER, MSGL_DBG2, "scale: query(%s) -> %d\n",
               vo_format_name(format), ret & 3);
        if (ret & VFCAP_CSP_SUPPORTED_BY_HW) {
            best = format; // no conversion -> bingo!
            break;
        }
        if (ret & VFCAP_CSP_SUPPORTED && !best)
            best = format; // best with conversion
    }
    if (!best) {
        // Try anything else. outfmt_list is just a list of preferred formats.
        for (int cur = IMGFMT_START; cur < IMGFMT_END; cur++) {
            int ret = check_outfmt(vf, cur);
            if (ret & VFCAP_CSP_SUPPORTED_BY_HW) {
                best = cur; // no conversion -> bingo!
                break;
            }
            if (ret & VFCAP_CSP_SUPPORTED && !best)
                best = cur; // best with conversion
        }
    }
    return best;
}
enum AVPixelFormat imgfmt2pixfmt(int fmt) { int i; enum AVPixelFormat pix_fmt; for (i = 0; conversion_map[i].fmt; i++) if (conversion_map[i].fmt == fmt) break; pix_fmt = conversion_map[i].pix_fmt; if (pix_fmt == PIX_FMT_NONE) mp_msg(MSGT_GLOBAL, MSGL_ERR, "Unsupported format %s\n", vo_format_name(fmt)); return pix_fmt; }
static unsigned int find_best(struct vf_instance *vf){ unsigned int best=0; int ret; const unsigned int* p=bgr_list; while(*p){ ret=vf->next->query_format(vf->next,*p); mp_msg(MSGT_VFILTER,MSGL_V,"[%s] query(%s) -> %d\n",vf->info->name,vo_format_name(*p),ret&3); if(ret&VFCAP_CSP_SUPPORTED_BY_HW){ best=*p; break;} // no conversion -> bingo! if(ret&VFCAP_CSP_SUPPORTED && !best) best=*p; // best with conversion ++p; } return best; }
static unsigned int find_best(struct vf_instance_s* vf, unsigned int fmt){ unsigned int best=0; int ret; unsigned int* p; if(fmt==IMGFMT_BGR8) p=bgr_list; else if(fmt==IMGFMT_RGB8) p=rgb_list; else return 0; while(*p){ ret=vf->next->query_format(vf->next,*p); mp_msg(MSGT_VFILTER,MSGL_DBG2,"[%s] query(%s) -> %d\n",vf->info->name,vo_format_name(*p),ret&3); if(ret&VFCAP_CSP_SUPPORTED_BY_HW){ best=*p; break;} // no conversion -> bingo! if(ret&VFCAP_CSP_SUPPORTED && !best) best=*p; // best with conversion ++p; } return best; }
static int reconfig(struct vf_instance *vf, struct mp_image_params *p, int flags) { if (p->w <= 0 || p->h <= 0 || p->d_w <= 0 || p->d_h <= 0) { mp_msg(MSGT_CPLAYER, MSGL_ERR, "VO: invalid dimensions!\n"); return -1; } const struct vo_driver *info = video_out->driver; mp_msg(MSGT_CPLAYER, MSGL_INFO, "VO: [%s] %dx%d => %dx%d %s %s\n", info->name, p->w, p->h, p->d_w, p->d_h, vo_format_name(p->imgfmt), (flags & VOFLAG_FLIPPING) ? " [flip]" : ""); mp_msg(MSGT_CPLAYER, MSGL_V, "VO: Description: %s\n", info->description); return vo_reconfig(video_out, p, flags); }
static int config(struct vf_instance_s* vf, int width, int height, int d_width, int d_height, unsigned int flags, unsigned int outfmt){ if ((width <= 0) || (height <= 0) || (d_width <= 0) || (d_height <= 0)) { mp_msg(MSGT_CPLAYER, MSGL_ERR, "VO: invalid dimensions!\n"); return 0; } if(video_out->info) { const vo_info_t *info = video_out->info; mp_msg(MSGT_CPLAYER,MSGL_INFO,"VO: [%s] %dx%d => %dx%d %s %s%s%s%s\n",info->short_name, width, height, d_width, d_height, vo_format_name(outfmt), (flags&VOFLAG_FULLSCREEN)?" [fs]":"", (flags&VOFLAG_MODESWITCHING)?" [vm]":"", (flags&VOFLAG_SWSCALE)?" [zoom]":"", (flags&VOFLAG_FLIPPING)?" [flip]":""); mp_msg(MSGT_CPLAYER,MSGL_V,"VO: Description: %s\n",info->name); mp_msg(MSGT_CPLAYER,MSGL_V,"VO: Author: %s\n", info->author); if(info->comment && strlen(info->comment) > 0) mp_msg(MSGT_CPLAYER,MSGL_V,"VO: Comment: %s\n", info->comment); } // save vo's stride capability for the wanted colorspace: vf->default_caps=query_format(vf,outfmt); if(config_video_out(video_out,width,height,d_width,d_height,flags,"MPlayer",outfmt)) return 0; #ifdef USE_ASS if (vf->priv->ass_priv) ass_configure(vf->priv->ass_priv, width, height, !!(vf->default_caps & VFCAP_EOSD_UNSCALED)); #endif ++vo_config_count; return 1; }
/* Pick the best output format the downstream filter accepts.
 *
 * Candidate order (driven by the i/j state machine below):
 *   1. j == -2: the 16-bit-normalized variant of in_format, if any,
 *   2. j == -1: the input format itself (no conversion),
 *   3. i < 0:   the entries of preferred_conversions[] matching in_format,
 *   4. i >= 0:  the generic outfmt_list[] preference table.
 * A format supported directly by hardware wins immediately; otherwise the
 * first format supported via conversion is remembered. */
static unsigned int find_best_out(vf_instance_t *vf, int in_format){
    unsigned int best = 0;
    int i = -1;     // index into outfmt_list once the preferred pass is done
    int normalized_format = normalize_yuvp16(in_format);
    // start at -2 when a normalized variant exists, so it is tried first
    int j = normalized_format ? -2 : -1;
    int format = 0;

    // find the best outfmt:
    while (1) {
        int ret;
        if (j < 0) {
            format = j == -1 && normalized_format ? normalized_format
                                                  : in_format;
            j++;
        } else if (i < 0) {
            // skip preferred-conversion entries for other input formats
            while (preferred_conversions[j][0] &&
                   preferred_conversions[j][0] != in_format)
                j++;
            format = preferred_conversions[j++][1];
            // switch to standard list
            if (!format)
                i = 0;
        }
        if (i >= 0)
            format = outfmt_list[i++];
        if (!format)
            break;

        ret = vf_next_query_format(vf, format);

        mp_msg(MSGT_VFILTER, MSGL_DBG2, "scale: query(%s) -> %d\n",
               vo_format_name(format), ret & 3);
        if (ret & VFCAP_CSP_SUPPORTED_BY_HW) {
            best = format; // no conversion -> bingo!
            break;
        }
        if (ret & VFCAP_CSP_SUPPORTED && !best)
            best = format; // best with conversion
    }
    return best;
}
static int config(struct vf_instance *vf, int width, int height, int d_width, int d_height, unsigned int flags, unsigned int outfmt) { if ((width <= 0) || (height <= 0) || (d_width <= 0) || (d_height <= 0)) { mp_msg(MSGT_CPLAYER, MSGL_ERR, "VO: invalid dimensions!\n"); return 0; } const vo_info_t *info = video_out->driver->info; mp_msg(MSGT_CPLAYER, MSGL_INFO, "VO: [%s] %dx%d => %dx%d %s %s%s%s%s\n", info->short_name, width, height, d_width, d_height, vo_format_name(outfmt), (flags & VOFLAG_FULLSCREEN) ? " [fs]" : "", (flags & VOFLAG_MODESWITCHING) ? " [vm]" : "", (flags & VOFLAG_SWSCALE) ? " [zoom]" : "", (flags & VOFLAG_FLIPPING) ? " [flip]" : ""); mp_msg(MSGT_CPLAYER, MSGL_V, "VO: Description: %s\n", info->name); mp_msg(MSGT_CPLAYER, MSGL_V, "VO: Author: %s\n", info->author); if (info->comment && strlen(info->comment) > 0) mp_msg(MSGT_CPLAYER, MSGL_V, "VO: Comment: %s\n", info->comment); // save vo's stride capability for the wanted colorspace: vf->default_caps = query_format(vf, outfmt); vf->draw_slice = (vf->default_caps & VOCAP_NOSLICES) ? NULL : draw_slice; if (vo_config(video_out, width, height, d_width, d_height, flags, outfmt)) return 0; vf->priv->scale_ratio = (double) d_width / d_height * height / width; return 1; }
/* Negotiate an output colorspace between codec, filter chain and VO and
 * configure the whole video output path (filters + libvo), including
 * flip autodetection, aspect/prescale computation and equalizer setup.
 *
 * outfmts: candidate output formats, 0xFFFFFFFF-terminated; falls back
 *          to the codec's own outfmt table when NULL or when the codec
 *          table is marked as authoritative.
 * Returns 1 on success, 0 on failure.  sh->vf_initialized is set to 1
 * on success and -1 on a hard failure. */
int mpcodecs_config_vo(sh_video_t *sh, int w, int h,
                       const unsigned int *outfmts,
                       unsigned int preferred_outfmt)
{
    struct MPOpts *opts = sh->opts;
    int j;
    unsigned int out_fmt = 0;
    int screen_size_x = 0;
    int screen_size_y = 0;
    vf_instance_t *vf = sh->vfilter;
    int vocfg_flags = 0;

    // 0 means "keep the previously known display size".
    if (w)
        sh->disp_w = w;
    if (h)
        sh->disp_h = h;

    mp_msg(MSGT_DECVIDEO, MSGL_V,
           "VIDEO: %dx%d %5.3f fps %5.1f kbps (%4.1f kB/s)\n",
           sh->disp_w, sh->disp_h, sh->fps, sh->i_bps * 0.008,
           sh->i_bps / 1000.0);

    if (!sh->disp_w || !sh->disp_h)
        return 0;

    mp_msg(MSGT_DECVIDEO, MSGL_V,
           "VDec: vo config request - %d x %d (preferred colorspace: %s)\n",
           w, h, vo_format_name(preferred_outfmt));

    if (get_video_quality_max(sh) <= 0 && divx_quality) {
        // user wants postprocess but no pp filter yet:
        sh->vfilter = vf = vf_open_filter(opts, vf, "pp", NULL);
    }

    if (!outfmts || sh->codec->outfmt[0] != 0xffffffff)
        outfmts = sh->codec->outfmt;

    // check if libvo and codec has common outfmt (no conversion):
    // (re-entered via goto after inserting a scale filter)
  csp_again:

    if (mp_msg_test(MSGT_DECVIDEO, MSGL_V)) {
        mp_msg(MSGT_DECVIDEO, MSGL_V, "Trying filter chain:");
        for (vf_instance_t *f = vf; f; f = f->next)
            mp_msg(MSGT_DECVIDEO, MSGL_V, " %s", f->info->name);
        mp_msg(MSGT_DECVIDEO, MSGL_V, "\n");
    }

    // j remembers the best candidate found so far (-1 = none yet)
    j = -1;
    for (int i = 0; i < CODECS_MAX_OUTFMT; i++) {
        int flags;
        out_fmt = outfmts[i];
        if (out_fmt == (unsigned int) 0xFFFFFFFF)   // table terminator
            break;
        flags = vf->query_format(vf, out_fmt);
        mp_msg(MSGT_CPLAYER, MSGL_DBG2,
               "vo_debug: query(%s) returned 0x%X (i=%d) \n",
               vo_format_name(out_fmt), flags, i);
        if ((flags & VFCAP_CSP_SUPPORTED_BY_HW)
            || (flags & VFCAP_CSP_SUPPORTED && j < 0)) {
            // check (query) if codec really support this outfmt...
            sh->outfmtidx = j; // pass index to the control() function this way
            if (sh->vd_driver->control(sh, VDCTRL_QUERY_FORMAT, &out_fmt)
                    == CONTROL_FALSE) {
                mp_msg(MSGT_CPLAYER, MSGL_DBG2,
                       "vo_debug: codec query_format(%s) returned FALSE\n",
                       vo_format_name(out_fmt));
                continue;
            }
            j = i;
            sh->output_flags = flags;
            // direct HW support can't be beaten -> stop searching
            if (flags & VFCAP_CSP_SUPPORTED_BY_HW)
                break;
        }
    }
    if (j < 0) {
        // TODO: no match - we should use conversion...
        if (strcmp(vf->info->name, "scale")) {
            mp_tmsg(MSGT_DECVIDEO, MSGL_INFO,
                    "Could not find matching colorspace - retrying with -vf scale...\n");
            vf = vf_open_filter(opts, vf, "scale", NULL);
            goto csp_again;
        }
        mp_tmsg(MSGT_CPLAYER, MSGL_WARN,
                "The selected video_out device is incompatible with this codec.\n"
                "Try appending the scale filter to your filter list,\n"
                "e.g. -vf spp,scale instead of -vf spp.\n");
        sh->vf_initialized = -1;
        return 0;               // failed
    }
    out_fmt = outfmts[j];
    sh->outfmt = out_fmt;
    mp_msg(MSGT_CPLAYER, MSGL_V, "VDec: using %s as output csp (no %d)\n",
           vo_format_name(out_fmt), j);
    sh->outfmtidx = j;
    sh->vfilter = vf;

    // autodetect flipping
    if (opts->flip == -1) {     // -1 = "not set by user"
        opts->flip = 0;
        if (sh->codec->outflags[j] & CODECS_FLAG_FLIP)
            if (!(sh->codec->outflags[j] & CODECS_FLAG_NOFLIP))
                opts->flip = 1;
    }
    if (opts->flip && !(sh->output_flags & VFCAP_FLIP)) {
        // we need to flip, but no flipping filter avail.
        vf_add_before_vo(&vf, "flip", NULL);
        sh->vfilter = vf;
    }

    // time to do aspect ratio corrections...

    if (opts->movie_aspect > -1.0)
        sh->aspect = opts->movie_aspect;        // cmdline overrides autodetect
    else if (sh->stream_aspect != 0.0)
        sh->aspect = sh->stream_aspect;

    if (opts->screen_size_x || opts->screen_size_y) {
        screen_size_x = opts->screen_size_x;
        screen_size_y = opts->screen_size_y;
        if (!opts->vidmode) {
            if (!screen_size_x)
                screen_size_x = 1;
            if (!screen_size_y)
                screen_size_y = 1;
            // values <= 8 are scale factors, not pixel sizes
            if (screen_size_x <= 8)
                screen_size_x *= sh->disp_w;
            if (screen_size_y <= 8)
                screen_size_y *= sh->disp_h;
        }
    } else {
        // check source format aspect, calculate prescale ::atmos
        screen_size_x = sh->disp_w;
        screen_size_y = sh->disp_h;
        if (opts->screen_size_xy >= 0.001) {
            if (opts->screen_size_xy <= 8) {
                // -xy means x+y scale
                screen_size_x *= opts->screen_size_xy;
                screen_size_y *= opts->screen_size_xy;
            } else {
                // -xy means forced width while keeping correct aspect
                screen_size_x = opts->screen_size_xy;
                screen_size_y = opts->screen_size_xy * sh->disp_h / sh->disp_w;
            }
        }
        if (sh->aspect > 0.01) {
            mp_msg(MSGT_IDENTIFY, MSGL_INFO, "ID_VIDEO_ASPECT=%1.4f\n",
                   sh->aspect);
            int w = screen_size_y * sh->aspect;
            int h = screen_size_y;
            // we don't like horizontal downscale || user forced width:
            if (w < screen_size_x || opts->screen_size_xy > 8) {
                w = screen_size_x;
                h = screen_size_x / sh->aspect;
            }
            // ignore sub-4-pixel differences to avoid useless rescales
            if (abs(screen_size_x - w) >= 4 || abs(screen_size_y - h) >= 4) {
                screen_size_x = w;
                screen_size_y = h;
                mp_tmsg(MSGT_CPLAYER, MSGL_V, "Aspect ratio is %.2f:1 - "
                        "scaling to correct movie aspect.\n", sh->aspect);
            }
        } else {
            mp_tmsg(MSGT_CPLAYER, MSGL_V,
                    "Movie-Aspect is undefined - no prescaling applied.\n");
        }
    }

    vocfg_flags = (opts->fullscreen ? VOFLAG_FULLSCREEN : 0)
                  | (opts->vidmode ? VOFLAG_MODESWITCHING : 0)
                  | (opts->flip ? VOFLAG_FLIPPING : 0);

    // Time to config libvo!
    mp_msg(MSGT_CPLAYER, MSGL_V, "VO Config (%dx%d->%dx%d,flags=%d,0x%X)\n",
           sh->disp_w, sh->disp_h, screen_size_x, screen_size_y, vocfg_flags,
           out_fmt);

    vf->w = sh->disp_w;
    vf->h = sh->disp_h;

    if (vf_config_wrapper(vf, sh->disp_w, sh->disp_h, screen_size_x,
                          screen_size_y, vocfg_flags, out_fmt) == 0) {
        mp_tmsg(MSGT_CPLAYER, MSGL_WARN,
                "FATAL: Cannot initialize video driver.\n");
        sh->vf_initialized = -1;
        return 0;
    }

    sh->vf_initialized = 1;

    set_video_colorspace(sh);

    // 1000 is the sentinel for "equalizer option not set by the user".
    if (opts->vo_gamma_gamma != 1000)
        set_video_colors(sh, "gamma", opts->vo_gamma_gamma);
    if (opts->vo_gamma_brightness != 1000)
        set_video_colors(sh, "brightness", opts->vo_gamma_brightness);
    if (opts->vo_gamma_contrast != 1000)
        set_video_colors(sh, "contrast", opts->vo_gamma_contrast);
    if (opts->vo_gamma_saturation != 1000)
        set_video_colors(sh, "saturation", opts->vo_gamma_saturation);
    if (opts->vo_gamma_hue != 1000)
        set_video_colors(sh, "hue", opts->vo_gamma_hue);

    return 1;
}
/* Configure the GGI display: negotiate a video mode, optionally create a
 * memory backbuffer for non-truecolor targets, and record the source
 * format/geometry in ggi_conf.  Returns 0 on success, -1 on failure. */
static int config(uint32_t width, uint32_t height, uint32_t d_width,
                  uint32_t d_height, uint32_t flags, char *title,
                  uint32_t format)
{
    ggi_mode mode = {
        1,                      /* frames */
        {width, height},        /* visible */
        {GGI_AUTO, GGI_AUTO},   /* virt */
        {GGI_AUTO, GGI_AUTO},   /* size */
        GT_AUTO,                /* graphtype */
        {GGI_AUTO, GGI_AUTO}    /* dots per pixel */
    };

    set_graphtype(format, &mode);

#if 0
    printf("[ggi] mode: ");
    ggiPrintMode(&mode);
    printf("\n");
#endif

    // let GGI adjust the mode to something the target supports, then apply it
    ggiCheckMode(ggi_conf.vis, &mode);

    if (ggiSetMode(ggi_conf.vis, &mode) < 0) {
        mp_msg(MSGT_VO, MSGL_ERR, "[ggi] unable to set display mode\n");
        return (-1);
    }
    // read back what was actually granted
    if (ggiGetMode(ggi_conf.vis, &mode) < 0) {
        mp_msg(MSGT_VO, MSGL_ERR, "[ggi] unable to get display mode\n");
        return (-1);
    }
    if ((mode.graphtype == GT_INVALID)
        || (mode.graphtype == GT_AUTO)) {
        mp_msg(MSGT_VO, MSGL_ERR, "[ggi] not supported depth/bpp\n");
        return (-1);
    }

#if 0
    printf("[ggi] mode: ");
    ggiPrintMode(&mode);
    printf("\n");
#endif

#ifdef HAVE_GGIWMH
    ggiWmhSetTitle(ggi_conf.vis, title);
    if (vo_ontop)
        window_ontop();
#endif

    ggiSetFlags(ggi_conf.vis, GGIFLAG_ASYNC);

    if (GT_SCHEME(mode.graphtype) == GT_PALETTE)
        ggiSetColorfulPalette(ggi_conf.vis);

    // non-truecolor target: draw into a 32-bit memory visual and let GGI
    // convert on blit
    if (GT_SCHEME(mode.graphtype) != GT_TRUECOLOR) {
        ggi_mode drawmode;

        ggi_conf.drawvis = ggiOpen("display-memory", NULL);
        if (ggi_conf.drawvis == NULL) {
            mp_msg(MSGT_VO, MSGL_ERR,
                   "[ggi] unable to get backbuffer for conversion\n");
            return -1;
        }
        memcpy(&drawmode, &mode, sizeof(ggi_mode));
        drawmode.graphtype = GT_32BIT;
        drawmode.size.x = GGI_AUTO;
        drawmode.size.y = GGI_AUTO;
        ggiCheckMode(ggi_conf.drawvis, &drawmode);
        if (ggiSetMode(ggi_conf.drawvis, &drawmode) < 0) {
            mp_msg(MSGT_VO, MSGL_ERR,
                   "[ggi] unable to set backbuffer mode\n");
            return -1;
        }
        mode.graphtype = drawmode.graphtype;

        ggiSetFlags(ggi_conf.drawvis, GGIFLAG_ASYNC);
    }

    // publish the resulting screen/window geometry in the global vo_* state
    vo_depthonscreen = GT_DEPTH(mode.graphtype);
    vo_screenwidth = mode.virt.x;
    vo_screenheight = mode.virt.y;

    vo_dwidth = width;
    vo_dheight = height;
    vo_dbpp = GT_SIZE(mode.graphtype);

    /* calculate top, left corner */
    vo_dx = (vo_screenwidth - vo_dwidth) / 2;
    vo_dy = (vo_screenheight - vo_dheight) / 2;

    ggi_conf.srcwidth = width;
    ggi_conf.srcheight = height;
    ggi_conf.srcformat = format;

    ggi_conf.voflags = flags;

    if (IMGFMT_IS_RGB(ggi_conf.srcformat)) {
        ggi_conf.srcdepth = IMGFMT_RGB_DEPTH(ggi_conf.srcformat);
    } else if (IMGFMT_IS_BGR(ggi_conf.srcformat)) {
        ggi_conf.srcdepth = IMGFMT_BGR_DEPTH(ggi_conf.srcformat);
    } else {
        mp_msg(MSGT_VO, MSGL_FATAL, "[ggi] Unknown image format: %s\n",
               vo_format_name(ggi_conf.srcformat));
        return (-1);
    }

    mp_msg(MSGT_VO, MSGL_INFO, "[ggi] input: %dx%dx%d, output: %dx%dx%d\n",
           ggi_conf.srcwidth, ggi_conf.srcheight, ggi_conf.srcdepth,
           mode.virt.x, mode.virt.y, vo_dbpp);

    // round depth up to whole bytes
    ggi_conf.srcbpp = (ggi_conf.srcdepth + 7) / 8;

    ggi_conf.flushregion.x1 = vo_dx;
    ggi_conf.flushregion.y1 = vo_dy;
    ggi_conf.flushregion.x2 = vo_dwidth;
    ggi_conf.flushregion.y2 = vo_dheight;

    return (0);
}
// init driver
/* Initialize the XAnim binary-codec wrapper: allocate the private
 * context, load the codec DLL, query it with the stream's parameters and
 * set up the XA_DEC_INFO block.  Returns 1 on success, 0 on failure.
 * NOTE(review): priv and codec_hdr.anim_hdr are not freed on the early
 * failure paths, and the anim_hdr malloc is unchecked — presumably the
 * process aborts playback on failure anyway; confirm before relying on
 * repeated init calls. */
static int init(sh_video_t *sh)
{
    vd_xanim_ctx *priv;
    char dll[1024];
    XA_CODEC_HDR codec_hdr;
    int i;

    priv = malloc(sizeof(vd_xanim_ctx));
    if (!priv)
        return 0;
    sh->context = priv;
    memset(priv, 0, sizeof(vd_xanim_ctx));

    // negotiate YV12 output with the VO before touching the codec
    if(!mpcodecs_config_vo(sh,sh->disp_w,sh->disp_h,IMGFMT_YV12))
        return 0;

    priv->iq_func = NULL;
    priv->dec_func = NULL;

    for (i=0; i < XA_CLOSE_FUNCS; i++)
        xa_close_func[i] = NULL;

    snprintf(dll, 1024, "%s/%s", codec_path, sh->codec->dll);
    if (xacodec_load(sh, dll) == 0)
        return 0;

    codec_hdr.xapi_rev = XAVID_API_REV;
    codec_hdr.anim_hdr = malloc(4096);
    codec_hdr.description = sh->codec->info;
    // XAnim expects the fourcc byte-swapped relative to the BIH field
    codec_hdr.compression = bswap_32(sh->bih->biCompression);
    codec_hdr.decoder = NULL;
    codec_hdr.x = sh->bih->biWidth;  /* ->disp_w */
    codec_hdr.y = sh->bih->biHeight; /* ->disp_h */
    /* extra fields to store palette */
    codec_hdr.avi_ctab_flag = 0;
    codec_hdr.avi_read_ext = NULL;
    codec_hdr.extra = NULL;

    // map the negotiated output colorspace to the codec's depth field
    switch(sh->codec->outfmt[sh->outfmtidx])
    {
    case IMGFMT_BGR32:
        codec_hdr.depth = 32;
        break;
    case IMGFMT_BGR24:
        codec_hdr.depth = 24;
        break;
    case IMGFMT_IYUV:
    case IMGFMT_I420:
    case IMGFMT_YV12:
        codec_hdr.depth = 12;
        break;
    case IMGFMT_YVU9:
        codec_hdr.depth = 9;
        break;
    default:
        mp_msg(MSGT_DECVIDEO, MSGL_FATAL,
               "xacodec: not supported image out format (%s)\n",
               vo_format_name(sh->codec->outfmt[sh->outfmtidx]));
        return 0;
    }
    mp_msg(MSGT_DECVIDEO, MSGL_INFO,
           "xacodec: querying for input %dx%d %dbit [fourcc: %4x] (%s)...\n",
           codec_hdr.x, codec_hdr.y, codec_hdr.depth, codec_hdr.compression,
           codec_hdr.description);

    if (xacodec_query(sh, &codec_hdr) == 0)
        return 0;

//    free(codec_hdr.anim_hdr);

    priv->decinfo = malloc(sizeof(XA_DEC_INFO));
    if (priv->decinfo == NULL)
    {
        mp_msg(MSGT_DECVIDEO, MSGL_FATAL,
               "xacodec: memory allocation error: %s\n", strerror(errno));
        return 0;
    }
    priv->decinfo->cmd = 0;
    priv->decinfo->skip_flag = 0;
    priv->decinfo->imagex = priv->decinfo->xe = codec_hdr.x;
    priv->decinfo->imagey = priv->decinfo->ye = codec_hdr.y;
    priv->decinfo->imaged = codec_hdr.depth;
    priv->decinfo->chdr = NULL;
    priv->decinfo->map_flag = 0; /* xaFALSE */
    priv->decinfo->map = NULL;
    priv->decinfo->xs = priv->decinfo->ys = 0;
    priv->decinfo->special = 0;
    priv->decinfo->extra = codec_hdr.extra;
    mp_msg(MSGT_DECVIDEO, MSGL_DBG2,
           "decinfo->extra, filled by codec: %p [%s]\n",
           &priv->decinfo->extra, (char *)priv->decinfo->extra);

    return 1;
}
static int config(struct vf_instance *vf, int width, int height, int d_width, int d_height, unsigned int flags, unsigned int outfmt){ unsigned int best=find_best_out(vf, outfmt); int vo_flags; int int_sws_flags=0; int round_w=0, round_h=0; int i; SwsFilter *srcFilter, *dstFilter; enum PixelFormat dfmt, sfmt; if(!best){ mp_msg(MSGT_VFILTER,MSGL_WARN,"SwScale: no supported outfmt found :(\n"); return 0; } sfmt = imgfmt2pixfmt(outfmt); if (outfmt == IMGFMT_RGB8 || outfmt == IMGFMT_BGR8) sfmt = PIX_FMT_PAL8; dfmt = imgfmt2pixfmt(best); vo_flags=vf->next->query_format(vf->next,best); // scaling to dwidth*d_height, if all these TRUE: // - option -zoom // - no other sw/hw up/down scaling avail. // - we're after postproc // - user didn't set w:h if(!(vo_flags&VFCAP_POSTPROC) && (flags&4) && vf->priv->w<0 && vf->priv->h<0){ // -zoom int x=(vo_flags&VFCAP_SWSCALE) ? 0 : 1; if(d_width<width || d_height<height){ // downscale! if(vo_flags&VFCAP_HWSCALE_DOWN) x=0; } else { // upscale: if(vo_flags&VFCAP_HWSCALE_UP) x=0; } if(x){ // user wants sw scaling! (-zoom) vf->priv->w=d_width; vf->priv->h=d_height; } } if(vf->priv->noup){ if((vf->priv->w > width) + (vf->priv->h > height) >= vf->priv->noup){ vf->priv->w= width; vf->priv->h= height; } } if (vf->priv->w <= -8) { vf->priv->w += 8; round_w = 1; } if (vf->priv->h <= -8) { vf->priv->h += 8; round_h = 1; } if (vf->priv->w < -3 || vf->priv->h < -3 || (vf->priv->w < -1 && vf->priv->h < -1)) { // TODO: establish a direct connection to the user's brain // and find out what the heck he thinks MPlayer should do // with this nonsense. 
mp_msg(MSGT_VFILTER, MSGL_ERR, "SwScale: EUSERBROKEN Check your parameters, they make no sense!\n"); return 0; } if (vf->priv->w == -1) vf->priv->w = width; if (vf->priv->w == 0) vf->priv->w = d_width; if (vf->priv->h == -1) vf->priv->h = height; if (vf->priv->h == 0) vf->priv->h = d_height; if (vf->priv->w == -3) vf->priv->w = vf->priv->h * width / height; if (vf->priv->w == -2) vf->priv->w = vf->priv->h * d_width / d_height; if (vf->priv->h == -3) vf->priv->h = vf->priv->w * height / width; if (vf->priv->h == -2) vf->priv->h = vf->priv->w * d_height / d_width; if (round_w) vf->priv->w = ((vf->priv->w + 8) / 16) * 16; if (round_h) vf->priv->h = ((vf->priv->h + 8) / 16) * 16; // calculate the missing parameters: switch(best) { case IMGFMT_YV12: /* YV12 needs w & h rounded to 2 */ case IMGFMT_I420: case IMGFMT_IYUV: case IMGFMT_NV12: case IMGFMT_NV21: vf->priv->h = (vf->priv->h + 1) & ~1; case IMGFMT_YUY2: /* YUY2 needs w rounded to 2 */ case IMGFMT_UYVY: vf->priv->w = (vf->priv->w + 1) & ~1; } mp_msg(MSGT_VFILTER,MSGL_DBG2,"SwScale: scaling %dx%d %s to %dx%d %s \n", width,height,vo_format_name(outfmt), vf->priv->w,vf->priv->h,vo_format_name(best)); // free old ctx: if(vf->priv->ctx) sws_freeContext(vf->priv->ctx); if(vf->priv->ctx2)sws_freeContext(vf->priv->ctx2); // new swscaler: sws_getFlagsAndFilterFromCmdLine(&int_sws_flags, &srcFilter, &dstFilter); int_sws_flags|= vf->priv->v_chr_drop << SWS_SRC_V_CHR_DROP_SHIFT; int_sws_flags|= vf->priv->accurate_rnd * SWS_ACCURATE_RND; vf->priv->ctx=sws_getContext(width, height >> vf->priv->interlaced, sfmt, vf->priv->w, vf->priv->h >> vf->priv->interlaced, dfmt, int_sws_flags | get_sws_cpuflags(), srcFilter, dstFilter, vf->priv->param); if(vf->priv->interlaced){ vf->priv->ctx2=sws_getContext(width, height >> 1, sfmt, vf->priv->w, vf->priv->h >> 1, dfmt, int_sws_flags | get_sws_cpuflags(), srcFilter, dstFilter, vf->priv->param); }
/* Render the OSD into the current X image buffer, backing up the
 * overwritten pixels so they can be restored later. */
static void draw_osd(struct vo *vo, struct osd_state *osd)
{
    struct priv *p = vo->priv;

    struct mp_image img = get_x_buffer(p);

    struct mp_osd_res res = {
        .w = img.w,
        .h = img.h,
        .display_par = vo->monitor_par,
        .video_par = vo->aspdat.par,
    };

    osd_draw_on_image_bk(osd, res, osd->vo_pts, 0, p->osd_backup, &img);
}

/* Return a copy of the current frame with the OSD pixels restored
 * (i.e. a clean screenshot without on-screen display). */
static mp_image_t *get_screenshot(struct vo *vo)
{
    struct priv *p = vo->priv;

    struct mp_image img = get_x_buffer(p);
    struct mp_image *res = alloc_mpi(img.w, img.h, img.imgfmt);
    copy_mpi(res, &img);
    mp_draw_sub_backup_restore(p->osd_backup, res);

    return res;
}

/* Undo the OSD overdraw in place so the frame can be redrawn. */
static int redraw_frame(struct vo *vo)
{
    struct priv *p = vo->priv;

    struct mp_image img = get_x_buffer(p);
    mp_draw_sub_backup_restore(p->osd_backup, &img);

    return true;
}

/* Push the finished image to the X server. */
static void flip_page(struct vo *vo)
{
    struct priv *p = vo->priv;
    Display_Image(p, p->myximage, p->ImageData);
    XSync(vo->x11->display, False);
}

/* Scale/convert a slice of the source frame into the X image buffer,
 * recreating the swscale context (and the X image) first if the window
 * size changed while zooming is enabled.  Always returns 0. */
static int draw_slice(struct vo *vo, uint8_t *src[], int stride[], int w,
                      int h, int x, int y)
{
    struct priv *p = vo->priv;
    uint8_t *dst[MP_MAX_PLANES] = {NULL};
    int dstStride[MP_MAX_PLANES] = {0};

    if ((p->old_vo_dwidth != vo->dwidth || p->old_vo_dheight != vo->dheight)
        /*&& y==0 */ && p->zoomFlag)
    {
        int newW = vo->dwidth;
        int newH = vo->dheight;
        struct SwsContext *oldContext = p->swsContext;

        p->old_vo_dwidth = vo->dwidth;
        p->old_vo_dheight = vo->dheight;

        if (vo_fs)
            aspect(vo, &newW, &newH, A_ZOOM);
        if (sws_flags == 0)
            newW &= (~31); // not needed but, if the user wants the FAST_BILINEAR SCALER, then its needed

        p->swsContext = sws_getContextFromCmdLine(p->srcW, p->srcH,
                                                  p->in_format, newW, newH,
                                                  p->out_format);
        if (p->swsContext) {
            p->image_width = (newW + 7) & (~7);
            p->image_height = newH;

            freeMyXImage(p);
            getMyXImage(p);
            sws_freeContext(oldContext);
        } else
            // keep the old context if creating the new one failed
            p->swsContext = oldContext;
        p->dst_width = newW;
    }

    dstStride[0] = p->image_width * ((p->bpp + 7) / 8);
    dst[0] = p->ImageData;
    if (p->Flip_Flag) {
        // render bottom-up by starting at the last line with negative stride
        dst[0] += dstStride[0] * (p->image_height - 1);
        dstStride[0] = -dstStride[0];
    }
    sws_scale(p->swsContext, (const uint8_t **)src, stride, y, h, dst,
              dstStride);
    // the frame content changed, so the OSD backup is stale now
    mp_draw_sub_backup_reset(p->osd_backup);
    return 0;
}

/* Report which image formats this VO accepts and with what capabilities. */
static int query_format(struct vo *vo, uint32_t format)
{
    mp_msg(MSGT_VO, MSGL_DBG2,
           "vo_x11: query_format was called: %x (%s)\n", format,
           vo_format_name(format));
    if (IMGFMT_IS_BGR(format)) {
        if (IMGFMT_BGR_DEPTH(format) <= 8)
            return 0;           // TODO 8bpp not yet fully implemented
        if (IMGFMT_BGR_DEPTH(format) == vo->x11->depthonscreen)
            return VFCAP_CSP_SUPPORTED | VFCAP_CSP_SUPPORTED_BY_HW |
                   VFCAP_OSD | VFCAP_FLIP |
                   VFCAP_ACCEPT_STRIDE;
        else
            return VFCAP_CSP_SUPPORTED | VFCAP_OSD | VFCAP_FLIP |
                   VFCAP_ACCEPT_STRIDE;
    }

    switch (format) {
    case IMGFMT_I420:
    case IMGFMT_IYUV:
    case IMGFMT_YV12:
        return VFCAP_CSP_SUPPORTED | VFCAP_OSD | VFCAP_ACCEPT_STRIDE;
    }
    return 0;
}
/* Debug helper: hex-dump an AM_MEDIA_TYPE structure and its format blob,
 * then pretty-print the subtype/formattype GUIDs and, when the format is
 * WAVEFORMATEX or VIDEOINFOHEADER, the interesting audio/video fields.
 * All output goes through the Debug/mp_msg MSGL_DBG4 channel. */
void DisplayMediaType(const char * label,const AM_MEDIA_TYPE* pmt){
    WAVEFORMATEX* pWF;
    VIDEOINFOHEADER* Vhdr;
    int i;
    GUID* iid;

    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"=======================\n");
    if(label){
        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"AM_MEDIA_TYPE: %s\n",label);
    }else
        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"AM_MEDIA_TYPE:\n");
    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"-(Ptr:%p)--------\n",pmt);

    // raw hex dump of the structure itself, 8 bytes per line
    for(i=0;i<sizeof(AM_MEDIA_TYPE);i++){
        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"%02x ",(BYTE)((BYTE*)pmt)[i]);
        if((i+1)%8==0) Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"\n");
    }
    if((i)%8!=0) Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"\n");

    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"-(Ptr:%p)--(%lu)--\n",
                 pmt->pbFormat,pmt->cbFormat);

    // raw hex dump of the attached format block
    for(i=0;i<pmt->cbFormat;i++){
        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"%02x ",(BYTE)pmt->pbFormat[i]);
        if((i+1)%8==0) Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"\n");
    }
    if((i)%8!=0) Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"\n");
    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"-----------------------\n");

    // print the two GUIDs in the canonical 8-4-4-2x2-6x2 form
    iid=(GUID*)&(pmt->subtype);
    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"Subtype:     %08x-%04x-%04x-%02x%02x-"
                 "%02x%02x%02x%02x%02x%02x\n",
                 iid->f1,  iid->f2,  iid->f3,
                 (unsigned char)iid->f4[1], (unsigned char)iid->f4[0],
                 (unsigned char)iid->f4[2], (unsigned char)iid->f4[3],
                 (unsigned char)iid->f4[4], (unsigned char)iid->f4[5],
                 (unsigned char)iid->f4[6], (unsigned char)iid->f4[7]);

    iid=(GUID*)&(pmt->formattype);
    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"Format type: %08x-%04x-%04x-%02x%02x-"
                 "%02x%02x%02x%02x%02x%02x\n",
                 iid->f1,  iid->f2,  iid->f3,
                 (unsigned char)iid->f4[1], (unsigned char)iid->f4[0],
                 (unsigned char)iid->f4[2], (unsigned char)iid->f4[3],
                 (unsigned char)iid->f4[4], (unsigned char)iid->f4[5],
                 (unsigned char)iid->f4[6], (unsigned char)iid->f4[7]);

    // audio format block
    if(pmt && memcmp(&pmt->formattype,&FORMAT_WaveFormatEx,16)==0 && pmt->pbFormat){
        pWF=(WAVEFORMATEX*)pmt->pbFormat;
        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"PMT: nChannels %d\n",pWF->nChannels);
        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"PMT: nSamplesPerSec %ld\n",pWF->nSamplesPerSec);
        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"PMT: wBitsPerSample %d\n",pWF->wBitsPerSample);
        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"PMT: nBlockAlign %d\n",pWF->nBlockAlign);
        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"PMT: nAvgBytesPerSec %ld\n",pWF->nAvgBytesPerSec);
        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"PMT: SampleSize %ld\n",pmt->lSampleSize);
    }

    // video format block
    if(pmt && memcmp(&pmt->formattype,&FORMAT_VideoInfo,16)==0 && pmt->pbFormat){
        Vhdr=(VIDEOINFOHEADER*)pmt->pbFormat;
        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"Vhdr: dwBitRate %ld\n",Vhdr->dwBitRate);
        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"Vhdr: biWidth %ld\n",Vhdr->bmiHeader.biWidth);
        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"Vhdr: biHeight %ld\n",Vhdr->bmiHeader.biHeight);
        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"Vhdr: biSizeImage %ld\n",Vhdr->bmiHeader.biSizeImage);
        Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"Vhdr: biBitCount %d\n",Vhdr->bmiHeader.biBitCount);
        if(Vhdr->bmiHeader.biCompression){
            Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"Vhdr: biComression 0x%08lx (%s)\n",Vhdr->bmiHeader.biCompression,vo_format_name(Vhdr->bmiHeader.biCompression));
        }else
            Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"Vhdr: biComression 0x00000000\n");
    }
    Debug mp_msg(MSGT_LOADER,MSGL_DBG4,"=======================\n");
}
/**
 * MEncoder libavcodec video-encoder filter config().
 *
 * Copies every lavc_param_* command-line option into the global
 * lavc_venc_context, sets up rate control / two-pass stats files,
 * then opens the encoder with avcodec_open().
 *
 * Returns 1 on success, 0 on failure (bad option value, missing pass-1
 * stats file, codec open failure).  Heavy use of module-level globals
 * (mux_v, lavc_venc_context, stats_file, lavc_param_*) - the statement
 * order below matters and must not be shuffled.
 */
static int config(struct vf_instance_s* vf,
        int width, int height, int d_width, int d_height,
        unsigned int flags, unsigned int outfmt){
    int size, i;
    void *p;

    /* Patch the muxer's BITMAPINFOHEADER to the encode dimensions. */
    mux_v->bih->biWidth=width;
    mux_v->bih->biHeight=height;
    mux_v->bih->biSizeImage=mux_v->bih->biWidth*mux_v->bih->biHeight*(mux_v->bih->biBitCount/8);

    mp_msg(MSGT_MENCODER, MSGL_INFO,"videocodec: libavcodec (%dx%d fourcc=%x [%.4s])\n",
        mux_v->bih->biWidth, mux_v->bih->biHeight, mux_v->bih->biCompression,
        (char *)&mux_v->bih->biCompression);

    lavc_venc_context->width = width;
    lavc_venc_context->height = height;

    /* vbitrate: >16000 is taken as bits/s, 0..16000 as kbits/s. */
    if (lavc_param_vbitrate > 16000) /* != -1 */
        lavc_venc_context->bit_rate = lavc_param_vbitrate;
    else if (lavc_param_vbitrate >= 0) /* != -1 */
        lavc_venc_context->bit_rate = lavc_param_vbitrate*1000;
    else
        lavc_venc_context->bit_rate = 800000; /* default */
    mux_v->avg_rate= lavc_venc_context->bit_rate;

    lavc_venc_context->bit_rate_tolerance= lavc_param_vrate_tolerance*1000;
    lavc_venc_context->time_base= (AVRational){mux_v->h.dwScale, mux_v->h.dwRate};

    /* Quantizer limits; l* values are QP scaled into lambda units. */
    lavc_venc_context->qmin= lavc_param_vqmin;
    lavc_venc_context->qmax= lavc_param_vqmax;
    lavc_venc_context->mb_qmin= lavc_param_mb_qmin;
    lavc_venc_context->mb_qmax= lavc_param_mb_qmax;
    lavc_venc_context->lmin= (int)(FF_QP2LAMBDA * lavc_param_lmin + 0.5);
    lavc_venc_context->lmax= (int)(FF_QP2LAMBDA * lavc_param_lmax + 0.5);
    lavc_venc_context->mb_lmin= (int)(FF_QP2LAMBDA * lavc_param_mb_lmin + 0.5);
    lavc_venc_context->mb_lmax= (int)(FF_QP2LAMBDA * lavc_param_mb_lmax + 0.5);
    lavc_venc_context->max_qdiff= lavc_param_vqdiff;
    lavc_venc_context->qcompress= lavc_param_vqcompress;
    lavc_venc_context->qblur= lavc_param_vqblur;
    lavc_venc_context->max_b_frames= lavc_param_vmax_b_frames;
    lavc_venc_context->b_quant_factor= lavc_param_vb_qfactor;
    lavc_venc_context->rc_strategy= lavc_param_vrc_strategy;
    lavc_venc_context->b_frame_strategy= lavc_param_vb_strategy;
    lavc_venc_context->b_quant_offset= (int)(FF_QP2LAMBDA * lavc_param_vb_qoffset + 0.5);
    lavc_venc_context->luma_elim_threshold= lavc_param_luma_elim_threshold;
    lavc_venc_context->chroma_elim_threshold= lavc_param_chroma_elim_threshold;
    lavc_venc_context->rtp_payload_size= lavc_param_packet_size;
    if(lavc_param_packet_size )lavc_venc_context->rtp_mode=1;
    lavc_venc_context->strict_std_compliance= lavc_param_strict;
    lavc_venc_context->i_quant_factor= lavc_param_vi_qfactor;
    lavc_venc_context->i_quant_offset= (int)(FF_QP2LAMBDA * lavc_param_vi_qoffset + 0.5);

    /* Rate-control tuning knobs. */
    lavc_venc_context->rc_qsquish= lavc_param_rc_qsquish;
    lavc_venc_context->rc_qmod_amp= lavc_param_rc_qmod_amp;
    lavc_venc_context->rc_qmod_freq= lavc_param_rc_qmod_freq;
    lavc_venc_context->rc_eq= lavc_param_rc_eq;

    /* Max/min rate and VBV buffer, mirrored into the muxer (kbit -> bit). */
    mux_v->max_rate=
    lavc_venc_context->rc_max_rate= lavc_param_rc_max_rate*1000;
    lavc_venc_context->rc_min_rate= lavc_param_rc_min_rate*1000;
    mux_v->vbv_size=
    lavc_venc_context->rc_buffer_size= lavc_param_rc_buffer_size*1000;
    lavc_venc_context->rc_initial_buffer_occupancy=
            lavc_venc_context->rc_buffer_size *
            lavc_param_rc_initial_buffer_occupancy;
    lavc_venc_context->rc_buffer_aggressivity= lavc_param_rc_buffer_aggressivity;
    lavc_venc_context->rc_initial_cplx= lavc_param_rc_initial_cplx;
    lavc_venc_context->debug= lavc_param_debug;
    lavc_venc_context->last_predictor_count= lavc_param_last_pred;
    lavc_venc_context->pre_me= lavc_param_pre_me;
    lavc_venc_context->me_pre_cmp= lavc_param_me_pre_cmp;
    lavc_venc_context->pre_dia_size= lavc_param_pre_dia_size;
    lavc_venc_context->me_subpel_quality= lavc_param_me_subpel_quality;
    lavc_venc_context->me_range= lavc_param_me_range;
    lavc_venc_context->intra_quant_bias= lavc_param_ibias;
    lavc_venc_context->inter_quant_bias= lavc_param_pbias;
    lavc_venc_context->coder_type= lavc_param_coder;
    lavc_venc_context->context_model= lavc_param_context;
    lavc_venc_context->scenechange_threshold= lavc_param_sc_threshold;
    lavc_venc_context->noise_reduction= lavc_param_noise_reduction;
    lavc_venc_context->quantizer_noise_shaping= lavc_param_qns;
    lavc_venc_context->inter_threshold= lavc_param_inter_threshold;
    lavc_venc_context->nsse_weight= lavc_param_nssew;
    lavc_venc_context->frame_skip_threshold= lavc_param_skip_threshold;
    lavc_venc_context->frame_skip_factor= lavc_param_skip_factor;
    lavc_venc_context->frame_skip_exp= lavc_param_skip_exp;
    lavc_venc_context->frame_skip_cmp= lavc_param_skip_cmp;

    /* User-supplied custom intra matrix: 64 comma-separated ints.
     * Note: strsep() consumes/destroys the option string. */
    if (lavc_param_intra_matrix)
    {
        char *tmp;

        lavc_venc_context->intra_matrix =
            av_malloc(sizeof(*lavc_venc_context->intra_matrix)*64);

        i = 0;
        while ((tmp = strsep(&lavc_param_intra_matrix, ",")) && (i < 64))
        {
            if (!tmp || (tmp && !strlen(tmp)))
                break;
            lavc_venc_context->intra_matrix[i++] = atoi(tmp);
        }

        /* Anything other than exactly 64 entries: discard the matrix. */
        if (i != 64)
            av_freep(&lavc_venc_context->intra_matrix);
        else
            mp_msg(MSGT_MENCODER, MSGL_V, "Using user specified intra matrix\n");
    }
    /* Same parsing for the inter matrix. */
    if (lavc_param_inter_matrix)
    {
        char *tmp;

        lavc_venc_context->inter_matrix =
            av_malloc(sizeof(*lavc_venc_context->inter_matrix)*64);

        i = 0;
        while ((tmp = strsep(&lavc_param_inter_matrix, ",")) && (i < 64))
        {
            if (!tmp || (tmp && !strlen(tmp)))
                break;
            lavc_venc_context->inter_matrix[i++] = atoi(tmp);
        }

        if (i != 64)
            av_freep(&lavc_venc_context->inter_matrix);
        else
            mp_msg(MSGT_MENCODER, MSGL_V, "Using user specified inter matrix\n");
    }

    /* Parse vrc_override: "start,end,q[/start,end,q...]".
     * q>0 means fixed qscale, q<0 means quality factor -q/100. */
    p= lavc_param_rc_override_string;
    for(i=0; p; i++){
        int start, end, q;
        int e=sscanf(p, "%d,%d,%d", &start, &end, &q);
        if(e!=3){
            mp_msg(MSGT_MENCODER,MSGL_ERR,"error parsing vrc_q\n");
            return 0;
        }
        lavc_venc_context->rc_override=
            realloc(lavc_venc_context->rc_override, sizeof(RcOverride)*(i+1));
        lavc_venc_context->rc_override[i].start_frame= start;
        lavc_venc_context->rc_override[i].end_frame = end;
        if(q>0){
            lavc_venc_context->rc_override[i].qscale= q;
            lavc_venc_context->rc_override[i].quality_factor= 1.0;
        }
        else{
            lavc_venc_context->rc_override[i].qscale= 0;
            lavc_venc_context->rc_override[i].quality_factor= -q/100.0;
        }
        p= strchr(p, '/');
        if(p) p++;
    }
    lavc_venc_context->rc_override_count=i;

    lavc_venc_context->mpeg_quant=lavc_param_mpeg_quant;

    lavc_venc_context->dct_algo= lavc_param_fdct;
    lavc_venc_context->idct_algo= lavc_param_idct;

    /* Psychovisual masking weights. */
    lavc_venc_context->lumi_masking= lavc_param_lumi_masking;
    lavc_venc_context->temporal_cplx_masking= lavc_param_temporal_cplx_masking;
    lavc_venc_context->spatial_cplx_masking= lavc_param_spatial_cplx_masking;
    lavc_venc_context->p_masking= lavc_param_p_masking;
    lavc_venc_context->dark_masking= lavc_param_dark_masking;
    lavc_venc_context->border_masking = lavc_param_border_masking;

    /* Aspect: either "w/h" pixel aspect or a float DAR string. */
    if (lavc_param_aspect != NULL)
    {
        int par_width, par_height, e;
        float ratio=0;

        e= sscanf (lavc_param_aspect, "%d/%d", &par_width, &par_height);
        if(e==2){
            if(par_height)
                ratio= (float)par_width / (float)par_height;
        }else{
            e= sscanf (lavc_param_aspect, "%f", &ratio);
        }

        if (e && ratio > 0.1 && ratio < 10.0) {
            /* Convert display aspect to sample aspect for the codec. */
            lavc_venc_context->sample_aspect_ratio= av_d2q(ratio * height / width, 255);
            mp_dbg(MSGT_MENCODER, MSGL_DBG2, "sample_aspect_ratio: %d/%d\n",
                lavc_venc_context->sample_aspect_ratio.num,
                lavc_venc_context->sample_aspect_ratio.den);
            mux_v->aspect = ratio;
            mp_dbg(MSGT_MENCODER, MSGL_DBG2, "aspect_ratio: %f\n", ratio);
        } else {
            /* NOTE(review): error reported via mp_dbg, not mp_msg - it may be
             * invisible in non-debug builds; verify against the msg API. */
            mp_dbg(MSGT_MENCODER, MSGL_ERR, "aspect ratio: cannot parse \"%s\"\n", lavc_param_aspect);
            return 0;
        }
    }
    else if (lavc_param_autoaspect) {
        /* Derive the aspect from the display vs coded dimensions. */
        lavc_venc_context->sample_aspect_ratio = av_d2q((float)d_width/d_height*height / width, 255);
        mux_v->aspect = (float)d_width/d_height;
    }

    /* keyframe interval */
    if (lavc_param_keyint >= 0) /* != -1 */
        lavc_venc_context->gop_size = lavc_param_keyint;
    else
        lavc_venc_context->gop_size = 250; /* default */

    lavc_venc_context->flags = 0;
    if (lavc_param_mb_decision)
    {
        mp_msg(MSGT_MENCODER, MSGL_INFO, MSGTR_MPCODECS_HighQualityEncodingSelected);
        lavc_venc_context->mb_decision= lavc_param_mb_decision;
    }

    lavc_venc_context->me_cmp= lavc_param_me_cmp;
    lavc_venc_context->me_sub_cmp= lavc_param_me_sub_cmp;
    lavc_venc_context->mb_cmp= lavc_param_mb_cmp;
#ifdef FF_CMP_VSAD
    lavc_venc_context->ildct_cmp= lavc_param_ildct_cmp;
#endif
    lavc_venc_context->dia_size= lavc_param_dia_size;

    /* Each lavc_param_* below holds either 0 or the matching CODEC_FLAG_*
     * bit (set up by the option table), so plain OR accumulates them. */
    lavc_venc_context->flags|= lavc_param_qpel;
    lavc_venc_context->flags|= lavc_param_trell;
    lavc_venc_context->flags|= lavc_param_lowdelay;
    lavc_venc_context->flags|= lavc_param_bit_exact;
    lavc_venc_context->flags|= lavc_param_aic;
    lavc_venc_context->flags|= lavc_param_aiv;
    lavc_venc_context->flags|= lavc_param_umv;
    lavc_venc_context->flags|= lavc_param_obmc;
    lavc_venc_context->flags|= lavc_param_loop;
    lavc_venc_context->flags|= lavc_param_v4mv ? CODEC_FLAG_4MV : 0;
    lavc_venc_context->flags|= lavc_param_data_partitioning;
    lavc_venc_context->flags|= lavc_param_cbp;
    lavc_venc_context->flags|= lavc_param_mv0;
    lavc_venc_context->flags|= lavc_param_qp_rd;
    lavc_venc_context->flags|= lavc_param_ss;
    lavc_venc_context->flags|= lavc_param_alt;
    lavc_venc_context->flags|= lavc_param_ilme;
#ifdef CODEC_FLAG_CLOSED_GOP
    lavc_venc_context->flags|= lavc_param_closed_gop;
#endif
    if(lavc_param_gray) lavc_venc_context->flags|= CODEC_FLAG_GRAY;
    if(lavc_param_normalize_aqp) lavc_venc_context->flags|= CODEC_FLAG_NORMALIZE_AQP;
    if(lavc_param_interlaced_dct) lavc_venc_context->flags|= CODEC_FLAG_INTERLACED_DCT;
    lavc_venc_context->flags|= lavc_param_psnr;
    lavc_venc_context->intra_dc_precision = lavc_param_dc_precision - 8;
    lavc_venc_context->prediction_method= lavc_param_prediction_method;
    lavc_venc_context->brd_scale = lavc_param_brd_scale;
    lavc_venc_context->bidir_refine = lavc_param_bidir_refine;
    lavc_venc_context->scenechange_factor = lavc_param_sc_factor;
    if((lavc_param_video_global_header&1)
       /*|| (video_global_header==0 && (oc->oformat->flags & AVFMT_GLOBALHEADER))*/){
        lavc_venc_context->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    if(lavc_param_video_global_header&2){
        lavc_venc_context->flags2 |= CODEC_FLAG2_LOCAL_HEADER;
    }
    lavc_venc_context->mv0_threshold = lavc_param_mv0_threshold;
    lavc_venc_context->refs = lavc_param_refs;
    lavc_venc_context->b_sensitivity = lavc_param_b_sensitivity;
    lavc_venc_context->level = lavc_param_level;

    /* Map the requested MPlayer image format onto a lavc pixel format. */
    mux_v->imgfmt = lavc_param_format;
    switch(lavc_param_format)
    {
        case IMGFMT_YV12:
            lavc_venc_context->pix_fmt = PIX_FMT_YUV420P;
            break;
        case IMGFMT_422P:
            lavc_venc_context->pix_fmt = PIX_FMT_YUV422P;
            break;
        case IMGFMT_444P:
            lavc_venc_context->pix_fmt = PIX_FMT_YUV444P;
            break;
        case IMGFMT_411P:
            lavc_venc_context->pix_fmt = PIX_FMT_YUV411P;
            break;
        case IMGFMT_YVU9:
            lavc_venc_context->pix_fmt = PIX_FMT_YUV410P;
            break;
        case IMGFMT_BGR32:
            lavc_venc_context->pix_fmt = PIX_FMT_RGB32;
            break;
        default:
            mp_msg(MSGT_MENCODER,MSGL_ERR,"%s is not a supported format\n", vo_format_name(lavc_param_format));
            return 0;
    }

    /* Two-pass setup: only on the first config() call (stats_file unset).
     * Pass 2/3 loads the pass-1 stats into stats_in; pass 1 opens the
     * stats file for writing (optionally with "turbo" fast settings). */
    if(!stats_file) {
    /* lavc internal 2pass bitrate control */
    switch(lavc_param_vpass){
    case 2:
    case 3:
        lavc_venc_context->flags|= CODEC_FLAG_PASS2;
        stats_file= fopen(passtmpfile, "rb");
        if(stats_file==NULL){
            mp_msg(MSGT_MENCODER,MSGL_ERR,"2pass failed: filename=%s\n", passtmpfile);
            return 0;
        }
        fseek(stats_file, 0, SEEK_END);
        size= ftell(stats_file);
        fseek(stats_file, 0, SEEK_SET);

        lavc_venc_context->stats_in= av_malloc(size + 1);
        lavc_venc_context->stats_in[size]=0;

        /* NOTE(review): on fread failure stats_file is left open (leak on
         * this error path) - confirm whether callers clean it up. */
        if(fread(lavc_venc_context->stats_in, size, 1, stats_file)<1){
            mp_msg(MSGT_MENCODER,MSGL_ERR,"2pass failed: reading from filename=%s\n", passtmpfile);
            return 0;
        }
        if(lavc_param_vpass == 2)
            break;
        else
            fclose(stats_file);
            /* fall through: pass 3 also writes a new stats file */
    case 1:
        lavc_venc_context->flags|= CODEC_FLAG_PASS1;
        stats_file= fopen(passtmpfile, "wb");
        if(stats_file==NULL){
            mp_msg(MSGT_MENCODER,MSGL_ERR,"2pass failed: filename=%s\n", passtmpfile);
            return 0;
        }
        if(lavc_param_turbo && (lavc_param_vpass == 1)) {
            /* uses SAD comparison functions instead of other hungrier */
            lavc_venc_context->me_pre_cmp = 0;
            lavc_venc_context->me_cmp = 0;
            lavc_venc_context->me_sub_cmp = 0;
            lavc_venc_context->mb_cmp = 2;

            /* Disables diamond motion estimation */
            lavc_venc_context->pre_dia_size = 0;
            lavc_venc_context->dia_size = 1;

            lavc_venc_context->quantizer_noise_shaping = 0; // qns=0
            lavc_venc_context->noise_reduction = 0; // nr=0
            lavc_venc_context->mb_decision = 0; // mbd=0 ("realtime" encoding)

            /* Strip the expensive flags for a fast first pass. */
            lavc_venc_context->flags &= ~CODEC_FLAG_QPEL;
            lavc_venc_context->flags &= ~CODEC_FLAG_4MV;
            lavc_venc_context->flags &= ~CODEC_FLAG_TRELLIS_QUANT;
            lavc_venc_context->flags &= ~CODEC_FLAG_CBP_RD;
            lavc_venc_context->flags &= ~CODEC_FLAG_QP_RD;
            lavc_venc_context->flags &= ~CODEC_FLAG_MV0;
        }
        break;
    }
    }

    lavc_venc_context->me_method = ME_ZERO+lavc_param_vme;

    /* fixed qscale :p */
    if (lavc_param_vqscale >= 0.0)
    {
        mp_msg(MSGT_MENCODER, MSGL_INFO, MSGTR_MPCODECS_UsingConstantQscale, lavc_param_vqscale);
        lavc_venc_context->flags |= CODEC_FLAG_QSCALE;
        lavc_venc_context->global_quality=
        vf->priv->pic->quality = (int)(FF_QP2LAMBDA * lavc_param_vqscale + 0.5);
    }

    if(lavc_param_threads > 1)
        avcodec_thread_init(lavc_venc_context, lavc_param_threads);

    if (avcodec_open(lavc_venc_context, vf->priv->codec) != 0) {
        mp_msg(MSGT_MENCODER,MSGL_ERR,MSGTR_CantOpenCodec);
        return 0;
    }

    if (lavc_venc_context->codec->encode == NULL) {
        mp_msg(MSGT_MENCODER,MSGL_ERR,"avcodec init failed (ctx->codec->encode == NULL)!\n");
        return 0;
    }

    /* free second pass buffer, its not needed anymore */
    av_freep(&lavc_venc_context->stats_in);

    if(lavc_venc_context->bits_per_sample)
        mux_v->bih->biBitCount= lavc_venc_context->bits_per_sample;
    /* Append codec extradata (global headers) after the BITMAPINFOHEADER. */
    if(lavc_venc_context->extradata_size){
        mux_v->bih= realloc(mux_v->bih, sizeof(BITMAPINFOHEADER) + lavc_venc_context->extradata_size);
        memcpy(mux_v->bih + 1, lavc_venc_context->extradata, lavc_venc_context->extradata_size);
        mux_v->bih->biSize= sizeof(BITMAPINFOHEADER) + lavc_venc_context->extradata_size;
    }

    /* B-frames delay the output by one frame. */
    mux_v->decoder_delay = lavc_venc_context->max_b_frames ? 1 : 0;

    return 1;
}
// init driver
/**
 * QuickTime video decoder init.
 *
 * On non-native builds, loads QuickTime.qts/qtmlClient.dll and resolves the
 * required entry points into global function pointers. Then builds (or
 * copies) an ImageDescription for the stream, configures the VO, wraps a
 * static mplayer image buffer in a GWorld and starts a decompression
 * sequence. Returns 1 on success, 0 on any failure.
 */
static int init(sh_video_t *sh){
    OSErr result = 1;
    /* Extradata lives directly after the BITMAPINFOHEADER. */
    int extradata_size = sh->bih ? sh->bih->biSize - sizeof(*sh->bih) : 0;
    void *extradata = sh->bih + 1;

    if (!sh->ImageDesc)
        mp_msg(MSGT_DECVIDEO,MSGL_ERR,"sh->ImageDesc not set, try -demuxer mov if this fails.\n");

#ifndef CONFIG_QUICKTIME
#ifdef WIN32_LOADER
    Setup_LDT_Keeper();
#endif

    //preload quicktime.qts to avoid the problems caused by the hardcoded path inside the dll
    qtime_qts = LoadLibraryA("QuickTime.qts");
    if(!qtime_qts){
        mp_msg(MSGT_DECVIDEO,MSGL_ERR,"unable to load QuickTime.qts\n" );
        return 0;
    }

    handler = LoadLibraryA("qtmlClient.dll");
    if(!handler){
        mp_msg(MSGT_DECVIDEO,MSGL_ERR,"unable to load qtmlClient.dll\n");
        return 0;
    }

    /* Resolve every QuickTime entry point we use into global fn pointers. */
    InitializeQTML = (OSErr (*)(long))GetProcAddress(handler, "InitializeQTML");
    EnterMovies = (OSErr (*)(void))GetProcAddress(handler, "EnterMovies");
    ExitMovies = (void (*)(void))GetProcAddress(handler, "ExitMovies");
    DecompressSequenceBegin = (OSErr (*)(ImageSequence*,ImageDescriptionHandle,CGrafPtr,void *,const Rect *,MatrixRecordPtr,short,RgnHandle,CodecFlags,CodecQ,DecompressorComponent))GetProcAddress(handler, "DecompressSequenceBegin");
    DecompressSequenceFrameS = (OSErr (*)(ImageSequence,Ptr,long,CodecFlags,CodecFlags*,ICMCompletionProcRecordPtr))GetProcAddress(handler, "DecompressSequenceFrameS");
    GetGWorldPixMap = (PixMapHandle (*)(GWorldPtr))GetProcAddress(handler, "GetGWorldPixMap");
    QTNewGWorldFromPtr = (OSErr(*)(GWorldPtr *,OSType,const Rect *,CTabHandle,void*,GWorldFlags,void *,long))GetProcAddress(handler, "QTNewGWorldFromPtr");
    NewHandleClear = (OSErr(*)(Size))GetProcAddress(handler, "NewHandleClear");
    DisposeHandle = (void (*)(Handle))GetProcAddress(handler, "DisposeHandle");
    DisposeGWorld = (void (*)(GWorldPtr))GetProcAddress(handler, "DisposeGWorld");
    CDSequenceEnd = (OSErr (*)(ImageSequence))GetProcAddress(handler, "CDSequenceEnd");

    if(!InitializeQTML || !EnterMovies || !DecompressSequenceBegin || !DecompressSequenceFrameS){
        mp_msg(MSGT_DECVIDEO,MSGL_ERR,"invalid qtmlClient.dll!\n");
        return 0;
    }

    result=InitializeQTML(kInitializeQTMLDisableDirectSound |
                          kInitializeQTMLUseGDIFlag |
                          kInitializeQTMLDisableDDClippers);
    mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"InitializeQTML returned %d\n",result);
#endif /* CONFIG_QUICKTIME */

    result=EnterMovies();
    mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"EnterMovies returned %d\n",result);

    //make a yuy2 gworld
    OutBufferRect.top=0;
    OutBufferRect.left=0;
    OutBufferRect.right=sh->disp_w;
    OutBufferRect.bottom=sh->disp_h;

    //Fill the imagedescription for our SVQ3 frame
    //we can probably get this from Demuxer
    /* Accept the extradata as a ready-made ImageDescription only if it is
     * big enough and its self-declared size fits inside the extradata. */
    if (!sh->ImageDesc && extradata_size >= sizeof(ImageDescription) &&
        ((ImageDescription *)extradata)->idSize <= extradata_size)
        sh->ImageDesc = extradata;
    if (sh->ImageDesc) {
        mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"ImageDescription size: %d\n",((ImageDescription*)(sh->ImageDesc))->idSize);
        framedescHandle=(ImageDescriptionHandle)NewHandleClear(((ImageDescription*)(sh->ImageDesc))->idSize);
        memcpy(*framedescHandle,sh->ImageDesc,((ImageDescription*)(sh->ImageDesc))->idSize);
    } else {
        // assume extradata consists only of the atoms, build the other parts
        ImageDescription *idesc;
        int size = sizeof(*idesc) + extradata_size;
        mp_msg(MSGT_DECVIDEO, MSGL_V, "Generating a ImageDescription\n");
        framedescHandle=(ImageDescriptionHandle)NewHandleClear(size);
        idesc = *framedescHandle;
        memcpy(idesc + 1, extradata, extradata_size);
        idesc->idSize = size;
        idesc->width = sh->disp_w;
        idesc->height = sh->disp_h;
    }
    dump_ImageDescription(*framedescHandle);

    /* Stamp the stream fourcc (big-endian) into the description. */
    (**framedescHandle).cType = bswap_32(sh->format);
    sh->context = (void *)kYUVSPixelFormat;
    {
        /* Translate the negotiated MPlayer colorspace to a QT pixel format. */
        int imgfmt = sh->codec->outfmt[sh->outfmtidx];
        int qt_imgfmt;
        switch(imgfmt)
        {
        case IMGFMT_YUY2:
            qt_imgfmt = kYUVSPixelFormat;
            break;
        case IMGFMT_YVU9:
            qt_imgfmt = 0x73797639; //kYVU9PixelFormat;
            break;
        case IMGFMT_YV12:
            qt_imgfmt = 0x79343230;
            break;
        case IMGFMT_UYVY:
            qt_imgfmt = k2vuyPixelFormat;
            break;
        case IMGFMT_YVYU:
            qt_imgfmt = kYVYU422PixelFormat;
            imgfmt = IMGFMT_YUY2;
            break;
        case IMGFMT_RGB16:
            qt_imgfmt = k16LE555PixelFormat;
            break;
        case IMGFMT_BGR24:
            qt_imgfmt = k24BGRPixelFormat;
            break;
        case IMGFMT_BGR32:
            qt_imgfmt = k32BGRAPixelFormat;
            break;
        case IMGFMT_RGB32:
            qt_imgfmt = k32RGBAPixelFormat;
            break;
        default:
            mp_msg(MSGT_DECVIDEO,MSGL_ERR,"Unknown requested csp\n");
            return 0;
        }
        mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"imgfmt: %s qt_imgfmt: %.4s\n", vo_format_name(imgfmt), (char *)&qt_imgfmt);
        sh->context = (void *)qt_imgfmt;
        if(!mpcodecs_config_vo(sh,sh->disp_w,sh->disp_h,imgfmt)) return 0;
    }

    /* Decode straight into a static, preserved mplayer image. */
    mpi=mpcodecs_get_image(sh, MP_IMGTYPE_STATIC, MP_IMGFLAG_PRESERVE,
                           sh->disp_w, sh->disp_h);
    if(!mpi) return 0;

    result = QTNewGWorldFromPtr(
        &OutBufferGWorld,
        (OSType)sh->context,
        &OutBufferRect,   //we should benchmark if yvu9 is faster for svq3, too
        0,
        0,
        0,
        mpi->planes[0],
        mpi->stride[0]);
    if (result) {
        mp_msg(MSGT_DECVIDEO,MSGL_ERR,"QTNewGWorldFromPtr result=%d\n",result);
        return 0;
    }

    result = DecompressSequenceBegin(&imageSeq, framedescHandle, (CGrafPtr)OutBufferGWorld,
                                     NULL, NULL, NULL, srcCopy, NULL, 0,
                                     codecNormalQuality, 0);
    if(result) {
        mp_msg(MSGT_DECVIDEO,MSGL_ERR,"DecompressSequenceBegin result=%d\n",result);
        return 0;
    }

    return 1;
}
int video_reconfig_filters(struct dec_video *d_video, const struct mp_image_params *params) { struct MPOpts *opts = d_video->opts; struct mp_image_params p = *params; struct sh_video *sh = d_video->header->video; MP_VERBOSE(d_video, "VIDEO: %dx%d %5.3f fps %5.1f kbps (%4.1f kB/s)\n", p.w, p.h, sh->fps, sh->i_bps * 0.008, sh->i_bps / 1000.0); MP_VERBOSE(d_video, "VDec: vo config request - %d x %d (%s)\n", p.w, p.h, vo_format_name(p.imgfmt)); float decoder_aspect = p.d_w / (float)p.d_h; if (d_video->initial_decoder_aspect == 0) d_video->initial_decoder_aspect = decoder_aspect; // We normally prefer the container aspect, unless the decoder aspect // changes at least once. if (d_video->initial_decoder_aspect == decoder_aspect) { if (sh->aspect > 0) vf_set_dar(&p.d_w, &p.d_h, p.w, p.h, sh->aspect); } else { // Even if the aspect switches back, don't use container aspect again. d_video->initial_decoder_aspect = -1; } float force_aspect = opts->movie_aspect; if (force_aspect > -1.0 && d_video->stream_aspect != 0.0) force_aspect = d_video->stream_aspect; if (force_aspect > 0) vf_set_dar(&p.d_w, &p.d_h, p.w, p.h, force_aspect); if (abs(p.d_w - p.w) >= 4 || abs(p.d_h - p.h) >= 4) { MP_VERBOSE(d_video, "Aspect ratio is %.2f:1 - " "scaling to correct movie aspect.\n", sh->aspect); MP_SMODE(d_video, "ID_VIDEO_ASPECT=%1.4f\n", sh->aspect); } else { p.d_w = p.w; p.d_h = p.h; } // Apply user overrides if (opts->requested_colorspace != MP_CSP_AUTO) p.colorspace = opts->requested_colorspace; if (opts->requested_input_range != MP_CSP_LEVELS_AUTO) p.colorlevels = opts->requested_input_range; p.outputlevels = opts->requested_output_range; // Detect colorspace from resolution. // Make sure the user-overrides are consistent (no RGB csp for YUV, etc.). mp_image_params_guess_csp(&p); // Time to config libvo! 
MP_VERBOSE(d_video, "VO Config (%dx%d->%dx%d,0x%X)\n", p.w, p.h, p.d_w, p.d_h, p.imgfmt); if (vf_reconfig(d_video->vfilter, &p) < 0) { MP_WARN(d_video, "FATAL: Cannot initialize video driver.\n"); return -1; } d_video->vf_input = p; return 0; }
// init driver
/**
 * QuickTime component-based video decoder init.
 *
 * Loads the QuickTime DLLs (unless built against native QuickTime),
 * resolves the component-manager entry points, locates an image
 * decompressor component ('imdc') matching the stream fourcc, opens it,
 * builds the ImageDescription and configures the VO.
 * Returns 1 on success, 0 on failure.
 */
static int init(sh_video_t *sh){
#ifndef CONFIG_QUICKTIME
    long result = 1;
#endif
    ComponentResult cres;
    ComponentDescription desc;
    Component prev=NULL;
    CodecInfo cinfo;	// for ImageCodecGetCodecInfo()
    ImageSubCodecDecompressCapabilities icap; // for ImageCodecInitialize()

    codec_initialized = 0;
#ifdef CONFIG_QUICKTIME
    EnterMovies();
#else

#ifdef WIN32_LOADER
    Setup_LDT_Keeper();
#endif

    //preload quicktime.qts to avoid the problems caused by the hardcoded path inside the dll
    qtime_qts = LoadLibraryA("QuickTime.qts");
    if(!qtime_qts){
        mp_msg(MSGT_DECVIDEO,MSGL_ERR,"unable to load QuickTime.qts\n" );
        return 0;
    }

    handler = LoadLibraryA("qtmlClient.dll");
    if(!handler){
        mp_msg(MSGT_DECVIDEO,MSGL_ERR,"unable to load qtmlClient.dll\n");
        return 0;
    }

    /* Resolve all component-manager/codec entry points we need. */
    InitializeQTML = (OSErr (*)(long))GetProcAddress(handler, "InitializeQTML");
    EnterMovies = (OSErr (*)(void))GetProcAddress(handler, "EnterMovies");
    FindNextComponent = (Component (*)(Component,ComponentDescription*))GetProcAddress(handler, "FindNextComponent");
    CountComponents = (long (*)(ComponentDescription*))GetProcAddress(handler, "CountComponents");
    GetComponentInfo = (OSErr (*)(Component,ComponentDescription*,Handle,Handle,Handle))GetProcAddress(handler, "GetComponentInfo");
    OpenComponent = (ComponentInstance (*)(Component))GetProcAddress(handler, "OpenComponent");
    ImageCodecInitialize = (ComponentResult (*)(ComponentInstance,ImageSubCodecDecompressCapabilities *))GetProcAddress(handler, "ImageCodecInitialize");
    ImageCodecGetCodecInfo = (ComponentResult (*)(ComponentInstance,CodecInfo *))GetProcAddress(handler, "ImageCodecGetCodecInfo");
    ImageCodecBeginBand = (ComponentResult (*)(ComponentInstance,CodecDecompressParams *,ImageSubCodecDecompressRecord *,long))GetProcAddress(handler, "ImageCodecBeginBand");
    ImageCodecPreDecompress = (ComponentResult (*)(ComponentInstance,CodecDecompressParams *))GetProcAddress(handler, "ImageCodecPreDecompress");
    ImageCodecBandDecompress = (ComponentResult (*)(ComponentInstance,CodecDecompressParams *))GetProcAddress(handler, "ImageCodecBandDecompress");
    GetGWorldPixMap = (PixMapHandle (*)(GWorldPtr))GetProcAddress(handler, "GetGWorldPixMap");
    QTNewGWorldFromPtr = (OSErr(*)(GWorldPtr *,OSType,const Rect *,CTabHandle,void*,GWorldFlags,void *,long))GetProcAddress(handler, "QTNewGWorldFromPtr");
    NewHandleClear = (OSErr(*)(Size))GetProcAddress(handler, "NewHandleClear");
    //     = GetProcAddress(handler, "");

    if(!InitializeQTML || !EnterMovies || !FindNextComponent || !ImageCodecBandDecompress){
        mp_msg(MSGT_DECVIDEO,MSGL_ERR,"invalid qtmlClient.dll!\n");
        return 0;
    }

    /* 6+16 = DirectSound disabled + GDI flag (see InitializeQTML docs). */
    result=InitializeQTML(6+16);
//    result=InitializeQTML(0);
    mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"InitializeQTML returned %li\n",result);
//    result=EnterMovies();
//    printf("EnterMovies->%d\n",result);
#endif /* CONFIG_QUICKTIME */

#if 0
    /* Disabled debugging aid: enumerate every registered component.
     * (Left byte-identical; note MSGL_DGB2 here is a typo in dead code.) */
    memset(&desc,0,sizeof(desc));
    while((prev=FindNextComponent(prev,&desc))){
	ComponentDescription desc2;
	unsigned char* c1=&desc2.componentType;
	unsigned char* c2=&desc2.componentSubType;
	memset(&desc2,0,sizeof(desc2));
//	printf("juhee %p (%p)\n",prev,&desc);
	GetComponentInfo(prev,&desc2,NULL,NULL,NULL);
	mp_msg(MSGT_DECVIDEO,MSGL_DGB2,"DESC: %c%c%c%c/%c%c%c%c [0x%X/0x%X] 0x%X\n",
	    c1[3],c1[2],c1[1],c1[0],
	    c2[3],c2[2],c2[1],c2[0],
	    desc2.componentType,desc2.componentSubType,
	    desc2.componentFlags);
    }
#endif

    /* Look up an image decompressor ('imdc') for this stream's fourcc. */
    memset(&desc,0,sizeof(desc));
    desc.componentType= (((unsigned char)'i')<<24)|
			(((unsigned char)'m')<<16)|
			(((unsigned char)'d')<<8)|
			(((unsigned char)'c'));
#if 0
    desc.componentSubType= (((unsigned char)'S'<<24))|
			(((unsigned char)'V')<<16)|
			(((unsigned char)'Q')<<8)|
			(((unsigned char)'3'));
#else
    desc.componentSubType = bswap_32(sh->format);
#endif
    desc.componentManufacturer=0;
    desc.componentFlags=0;
    desc.componentFlagsMask=0;

    mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"Count = %ld\n",CountComponents(&desc));
    prev=FindNextComponent(NULL,&desc);
    if(!prev){
        mp_msg(MSGT_DECVIDEO,MSGL_ERR,"Cannot find requested component\n");
        return 0;
    }
    mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"Found it! ID = %p\n",prev);

    ci=OpenComponent(prev);
    mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"ci=%p\n",ci);

    memset(&icap,0,sizeof(icap));
    cres=ImageCodecInitialize(ci,&icap);
    mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"ImageCodecInitialize->%#x size=%d (%d)\n",cres,icap.recordSize,icap.decompressRecordSize);

    memset(&cinfo,0,sizeof(cinfo));
    cres=ImageCodecGetCodecInfo(ci,&cinfo);
    mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"Flags: compr: 0x%X decomp: 0x%X format: 0x%X\n",
        cinfo.compressFlags, cinfo.decompressFlags, cinfo.formatFlags);
    /* typeName is a Pascal string: first byte is the length. */
    mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"Codec name: %.*s\n",((unsigned char*)&cinfo.typeName)[0],
        ((unsigned char*)&cinfo.typeName)+1);

    //make a yuy2 gworld
    OutBufferRect.top=0;
    OutBufferRect.left=0;
    OutBufferRect.right=sh->disp_w;
    OutBufferRect.bottom=sh->disp_h;

    //Fill the imagedescription for our SVQ3 frame
    //we can probably get this from Demuxer
#if 0
    framedescHandle=(ImageDescriptionHandle)NewHandleClear(sizeof(ImageDescription)+200);
    printf("framedescHandle=%p *p=%p\n",framedescHandle,*framedescHandle);
    {
	FILE* f=fopen("/root/.wine/fake_windows/IDesc","r");
	if(!f) printf("filenot found: IDesc\n");
	fread(*framedescHandle,sizeof(ImageDescription)+200,1,f);
	fclose(f);
    }
#else
    if(!sh->ImageDesc) sh->ImageDesc=(sh->bih+1); // hack for SVQ3-in-AVI
    mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"ImageDescription size: %d\n",((ImageDescription*)(sh->ImageDesc))->idSize);
    framedescHandle=(ImageDescriptionHandle)NewHandleClear(((ImageDescription*)(sh->ImageDesc))->idSize);
    memcpy(*framedescHandle,sh->ImageDesc,((ImageDescription*)(sh->ImageDesc))->idSize);
    dump_ImageDescription(*framedescHandle);
#endif

//Find codecscomponent for video decompression
//    result = FindCodec ('SVQ1',anyCodec,&compressor,&decompressor );
//    printf("FindCodec SVQ1 returned:%i compressor: 0x%X decompressor: 0x%X\n",result,compressor,decompressor);

    sh->context = (void *)kYUVSPixelFormat;
#if 1
    {
        /* Translate the negotiated MPlayer colorspace to a QT pixel format. */
        int imgfmt = sh->codec->outfmt[sh->outfmtidx];
        int qt_imgfmt;
    switch(imgfmt)
    {
	case IMGFMT_YUY2:
	    qt_imgfmt = kYUVSPixelFormat;
	    break;
	case IMGFMT_YVU9:
	    qt_imgfmt = 0x73797639; //kYVU9PixelFormat;
	    break;
	case IMGFMT_YV12:
	    qt_imgfmt = 0x79343230;
	    break;
	case IMGFMT_UYVY:
	    qt_imgfmt = kUYVY422PixelFormat;
	    break;
	case IMGFMT_YVYU:
	    qt_imgfmt = kYVYU422PixelFormat;
	    imgfmt = IMGFMT_YUY2;
	    break;
	case IMGFMT_RGB16:
	    qt_imgfmt = k16LE555PixelFormat;
	    break;
	case IMGFMT_BGR24:
	    qt_imgfmt = k24BGRPixelFormat;
	    break;
	case IMGFMT_BGR32:
	    qt_imgfmt = k32BGRAPixelFormat;
	    break;
	case IMGFMT_RGB32:
	    qt_imgfmt = k32RGBAPixelFormat;
	    break;
	default:
	    mp_msg(MSGT_DECVIDEO,MSGL_ERR,"Unknown requested csp\n");
	    return 0;
    }
    mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"imgfmt: %s qt_imgfmt: %.4s\n", vo_format_name(imgfmt), (char *)&qt_imgfmt);
    sh->context = (void *)qt_imgfmt;
    if(!mpcodecs_config_vo(sh,sh->disp_w,sh->disp_h,imgfmt)) return 0;
    }
#else
    if(!mpcodecs_config_vo(sh,sh->disp_w,sh->disp_h,IMGFMT_YUY2)) return 0;
#endif

    return 1;
}
/**
 * Initialize the VESA "linux video overlay" (mga_vid) output.
 *
 * NOTE: this driver branch is deliberately disabled - the function warns
 * and returns -1 immediately; everything after the first return is
 * intentionally unreachable legacy code kept for reference. It would
 * configure the mga_vid kernel driver via ioctl and mmap NUM_FRAMES
 * frame buffers.
 *
 * @return 0 on success, -1 on failure (always -1 while disabled).
 */
int vlvo_init(unsigned src_width,unsigned src_height,
              unsigned x_org,unsigned y_org,unsigned dst_width,
              unsigned dst_height,unsigned format,unsigned dest_bpp)
{
    size_t i,awidth;
    mp_msg(MSGT_VO,MSGL_WARN, MSGTR_LIBVO_VESA_ThisBranchIsNoLongerSupported);
    return -1;
    /* ---- unreachable from here on (branch disabled above) ---- */
    if( mp_msg_test(MSGT_VO,MSGL_DBG2) ) {
        mp_msg(MSGT_VO,MSGL_DBG2, "vesa_lvo: vlvo_init() was called\n");
    }
    image_width = src_width;
    image_height = src_height;
    mga_vid_config.version=MGA_VID_VERSION;
    src_format = mga_vid_config.format=format;
    /* Align the width up to the hardware's stride requirement. */
    awidth = (src_width + (WIDTH_ALIGN-1)) & ~(WIDTH_ALIGN-1);
    /* Per-format bpp and total frame size (planar YUV is 12bpp worth). */
    switch(format)
    {
        case IMGFMT_YV12:
        case IMGFMT_I420:
        case IMGFMT_IYUV:
            image_bpp=16;
            mga_vid_config.frame_size = awidth*src_height+(awidth*src_height)/2;
            break;
        case IMGFMT_YUY2:
        case IMGFMT_UYVY:
            image_bpp=16;
            mga_vid_config.frame_size = awidth*src_height*2;
            break;
        case IMGFMT_RGB15:
        case IMGFMT_BGR15:
        case IMGFMT_RGB16:
        case IMGFMT_BGR16:
            image_bpp=16;
            mga_vid_config.frame_size = awidth*src_height*2;
            break;
        case IMGFMT_RGB24:
        case IMGFMT_BGR24:
            image_bpp=24;
            mga_vid_config.frame_size = awidth*src_height*3;
            break;
        case IMGFMT_RGB32:
        case IMGFMT_BGR32:
            image_bpp=32;
            mga_vid_config.frame_size = awidth*src_height*4;
            break;
        default:
            mp_msg(MSGT_VO,MSGL_WARN, MSGTR_LIBVO_VESA_InvalidOutputFormat,vo_format_name(format),format);
            return -1;
    }
    mga_vid_config.colkey_on=0;
    mga_vid_config.src_width = src_width;
    mga_vid_config.src_height= src_height;
    mga_vid_config.dest_width = dst_width;
    mga_vid_config.dest_height= dst_height;
    mga_vid_config.x_org=x_org;
    mga_vid_config.y_org=y_org;
    mga_vid_config.num_frames=NUM_FRAMES;
    if (ioctl(lvo_handler,MGA_VID_CONFIG,&mga_vid_config))
    {
        perror("vesa_lvo: Error in mga_vid_config ioctl()");
        mp_msg(MSGT_VO,MSGL_WARN, MSGTR_LIBVO_VESA_IncompatibleDriverVersion);
        return -1;
    }
    ioctl(lvo_handler,MGA_VID_ON,0);

    /* Map all frame buffers in one contiguous mmap; frames[i] point into it. */
    frames[0] = (char*)mmap(0,mga_vid_config.frame_size*mga_vid_config.num_frames,PROT_WRITE,MAP_SHARED,lvo_handler,0);
    for(i=1; i<NUM_FRAMES; i++)
        frames[i] = frames[i-1] + mga_vid_config.frame_size;
    next_frame = 0;
    lvo_mem = frames[next_frame];

    /*clear the buffer*/
    memset(frames[0],0x80,mga_vid_config.frame_size*mga_vid_config.num_frames);
    return 0;
}
/**
 * One-time QuickTime decoder subsystem setup.
 *
 * Loads QuickTime.qts and qtmlClient.dll, initializes QTML, locates and
 * opens an image decompressor component ('imdc') for the stream fourcc,
 * builds the ImageDescription and configures the VO.
 * Returns 1 on success, 0 on failure.
 *
 * NOTE(review): `sh` and `icap` are used here but not declared in this
 * function (icap's local declaration is commented out below) - they are
 * presumably file-scope globals elsewhere in this translation unit;
 * confirm before touching this code.
 */
int MovDecoder_InitSubsystem()
{
    long result = 1;
    ComponentResult cres;
    ComponentDescription desc;
    Component prev=NULL;
    CodecInfo cinfo;	// for ImageCodecGetCodecInfo()
    //ImageSubCodecDecompressCapabilities icap; // for ImageCodecInitialize()

    //preload quicktime.qts to avoid the problems caused by the hardcoded path inside the dll
    qtime_qts = LoadLibraryA("QuickTime.qts");
    if (!qtime_qts) {
        //mp_msg(MSGT_DECVIDEO,MSGL_ERR,"unable to load QuickTime.qts\n" );
        return 0;
    }

    handler = LoadLibraryA("qtmlClient.dll");
    if(!handler) {
        //mp_msg(MSGT_DECVIDEO,MSGL_ERR,"unable to load qtmlClient.dll\n");
        return 0;
    }

    /* 6+16: QTML init flags (see the qtvideo variant of this code). */
    result=InitializeQTML(6+16);
    //mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"InitializeQTML returned %li\n",result);

    /* Look up an image decompressor ('imdc') for this stream's fourcc. */
    memset(&desc,0,sizeof(desc));
    desc.componentType= (((unsigned char)'i')<<24)|(((unsigned char)'m')<<16)|(((unsigned char)'d')<<8)|(((unsigned char)'c'));
    desc.componentSubType = bswap_32(sh->format);
    desc.componentManufacturer=0;
    desc.componentFlags=0;
    desc.componentFlagsMask=0;

//    mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"Count = %ld\n",CountComponents(&desc));
    prev=FindNextComponent(NULL,&desc);
    if(!prev) {
        //mp_msg(MSGT_DECVIDEO,MSGL_ERR,"Cannot find requested component\n");
        return(0);
    }
    //mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"Found it! ID = %p\n",prev);

    ci=OpenComponent(prev);
    //mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"ci=%p\n",ci);

    memset(&icap,0,sizeof(icap));
    cres=ImageCodecInitialize(ci,&icap);
    //mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"ImageCodecInitialize->%#x size=%d (%d)\n",cres,icap.recordSize,icap.decompressRecordSize);

    memset(&cinfo,0,sizeof(cinfo));
    cres=ImageCodecGetCodecInfo(ci,&cinfo);
    //mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"Flags: compr: 0x%X decomp: 0x%X format: 0x%X\n", cinfo.compressFlags, cinfo.decompressFlags, cinfo.formatFlags);
    //mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"Codec name: %.*s\n",((unsigned char*)&cinfo.typeName)[0], ((unsigned char*)&cinfo.typeName)+1);

    //make a yuy2 gworld
    OutBufferRect.top=0;
    OutBufferRect.left=0;
    OutBufferRect.right=sh->disp_w;
    OutBufferRect.bottom=sh->disp_h;

    //Fill the imagedescription for our SVQ3 frame
    //we can probably get this from Demuxer
    if(!sh->ImageDesc) sh->ImageDesc=(sh->bih+1); // hack for SVQ3-in-AVI
    mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"ImageDescription size: %d\n",((ImageDescription*)(sh->ImageDesc))->idSize);
    framedescHandle=(ImageDescriptionHandle)NewHandleClear(((ImageDescription*)(sh->ImageDesc))->idSize);
    memcpy(*framedescHandle,sh->ImageDesc,((ImageDescription*)(sh->ImageDesc))->idSize);
    dump_ImageDescription(*framedescHandle);

    //Find codecscomponent for video decompression
    //    result = FindCodec ('SVQ1',anyCodec,&compressor,&decompressor );
    //    printf("FindCodec SVQ1 returned:%i compressor: 0x%X decompressor: 0x%X\n",result,compressor,decompressor);

    sh->context = (void *)kYUVSPixelFormat;
#if 1
    {
        /* Translate the negotiated MPlayer colorspace to a QT pixel format. */
        int imgfmt = sh->codec->outfmt[sh->outfmtidx];
        int qt_imgfmt;
        switch(imgfmt)
        {
        case IMGFMT_YUY2:
            qt_imgfmt = kYUVSPixelFormat;
            break;
        case IMGFMT_YVU9:
            qt_imgfmt = 0x73797639; //kYVU9PixelFormat;
            break;
        case IMGFMT_YV12:
            qt_imgfmt = 0x79343230;
            break;
        case IMGFMT_UYVY:
            qt_imgfmt = kUYVY422PixelFormat;
            break;
        case IMGFMT_YVYU:
            qt_imgfmt = kYVYU422PixelFormat;
            imgfmt = IMGFMT_YUY2;
            break;
        case IMGFMT_RGB16:
            qt_imgfmt = k16LE555PixelFormat;
            break;
        case IMGFMT_BGR24:
            qt_imgfmt = k24BGRPixelFormat;
            break;
        case IMGFMT_BGR32:
            qt_imgfmt = k32BGRAPixelFormat;
            break;
        case IMGFMT_RGB32:
            qt_imgfmt = k32RGBAPixelFormat;
            break;
        default:
            mp_msg(MSGT_DECVIDEO,MSGL_ERR,"Unknown requested csp\n");
            return(0);
        }
        mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"imgfmt: %s qt_imgfmt: %.4s\n", vo_format_name(imgfmt), (char *)&qt_imgfmt);
        sh->context = (void *)qt_imgfmt;
        if(!mpcodecs_config_vo(sh,sh->disp_w,sh->disp_h,imgfmt)) return 0;
    }
    return 1;
}
/**
 * Negotiate an output colorspace between the video codec, the filter
 * chain and the VO, then configure the whole chain.
 *
 * The search runs in two passes over sh->codec->outfmt[]: the first pass
 * considers only preferred_outfmt, then the index wraps to 0 and every
 * entry is tried. Each candidate is verified with the codec
 * (VDCTRL_QUERY_FORMAT) and with the filter chain (vf->query_format);
 * a HW-supported format wins immediately, otherwise the first
 * SW-supported one is remembered. If nothing matches, conversion
 * filters ("scale", "palette", "lavc") are inserted into the chain and
 * the search restarts via `goto csp_again`.
 *
 * Afterwards: flip autodetection, aspect-ratio / -x/-y/-xy zoom
 * corrections, the actual vf_config_wrapper() call, and initial
 * equalizer settings (gamma/brightness/contrast/saturation/hue; the
 * sentinel 1000 means "not set by the user").
 *
 * @param sh                video stream header; disp_w/disp_h updated from w/h
 * @param w                 new coded width (0 = keep sh->disp_w)
 * @param h                 new coded height (0 = keep sh->disp_h)
 * @param preferred_outfmt  colorspace tried exclusively in the first pass
 * @return 1 on success, 0 on failure (sh->vf_initialized is set to -1).
 */
int mpcodecs_config_vo(sh_video_t *sh, int w, int h,
                       unsigned int preferred_outfmt)
{
    int i, j;
    int only_preferred = 1;    // first pass: accept only preferred_outfmt
    unsigned int out_fmt = 0;
    int screen_size_x = 0;     //SCREEN_SIZE_X;
    int screen_size_y = 0;     //SCREEN_SIZE_Y;
    vf_instance_t *vf = sh->vfilter, *sc = NULL;   // sc: "scale" filter we add
    int palette = 0;           // 0 = unseen, 1 = try palette, -1 = already tried
    int vocfg_flags = 0;

    if (w)
        sh->disp_w = w;
    if (h)
        sh->disp_h = h;
    if (!sh->disp_w || !sh->disp_h)
        return 0;

    mp_msg(MSGT_DECVIDEO, MSGL_V,
           "VDec: vo config request - %d x %d (preferred colorspace: %s)\n",
           w, h, vo_format_name(preferred_outfmt));

//  if(!vf) return 1; // temp hack

    if (get_video_quality_max(sh) <= 0 && divx_quality) {
        // user wants postprocess but no pp filter yet:
        sh->vfilter = vf = vf_open_filter(vf, "pp", NULL);
    }

    // check if libvo and codec has common outfmt (no conversion):
  csp_again:

    if (mp_msg_test(MSGT_DECVIDEO, MSGL_V)) {
        vf_instance_t *f = vf;
        mp_msg(MSGT_DECVIDEO, MSGL_V, "Trying filter chain:");
        for (f = vf; f; f = f->next)
            mp_msg(MSGT_DECVIDEO, MSGL_V, " %s", f->info->name);
        mp_msg(MSGT_DECVIDEO, MSGL_V, "\n");
    }

    j = -1;     // index of the best match found so far (-1 = none)
    for (i = 0; only_preferred || i < CODECS_MAX_OUTFMT; i++) {
        int flags;
        if (i == CODECS_MAX_OUTFMT) {
            // preferred-only pass exhausted: restart, trying everything
            i = 0;
            only_preferred = 0;
        }
        out_fmt = sh->codec->outfmt[i];
        if (only_preferred && out_fmt != preferred_outfmt)
            continue;
        if (out_fmt == (unsigned int) 0xFFFFFFFF)   // unused table slot
            continue;
        // check (query) if codec really support this outfmt...
        sh->outfmtidx = i;      // pass index to the control() function this way
        if (mpvdec->control(sh, VDCTRL_QUERY_FORMAT, &out_fmt) == CONTROL_FALSE) {
            mp_msg(MSGT_CPLAYER, MSGL_DBG2,
                   "vo_debug: codec query_format(%s) returned FALSE\n",
                   vo_format_name(out_fmt));
            continue;
        }
        flags = vf->query_format(vf, out_fmt);
        mp_msg(MSGT_CPLAYER, MSGL_DBG2,
               "vo_debug: query(%s) returned 0x%X (i=%d) \n",
               vo_format_name(out_fmt), flags, i);
        if ((flags & VFCAP_CSP_SUPPORTED_BY_HW)
            || (flags & VFCAP_CSP_SUPPORTED && j < 0)) {
            // HW support ends the search; SW support only fills an empty slot
            j = i;
            vo_flags = flags;
            if (flags & VFCAP_CSP_SUPPORTED_BY_HW)
                break;
        } else if (!palette
                   && !(flags & (VFCAP_CSP_SUPPORTED_BY_HW | VFCAP_CSP_SUPPORTED))
                   && (out_fmt == IMGFMT_RGB8 || out_fmt == IMGFMT_BGR8)) {
            // remember that an 8bpp paletted format was offered but rejected
            palette = 1;
        }
    }

    if (j < 0 && !IMGFMT_IS_HWACCEL(preferred_outfmt)) {
        // TODO: no match - we should use conversion...
        if (strcmp(vf->info->name, "scale") && palette != -1) {
            // first fallback: prepend a software "scale" converter
            mp_msg(MSGT_DECVIDEO, MSGL_INFO, MSGTR_CouldNotFindColorspace);
            sc = vf = vf_open_filter(vf, "scale", NULL);
            goto csp_again;
        } else if (palette == 1) {
            // second fallback: expand 8bpp palette formats
            mp_msg(MSGT_DECVIDEO, MSGL_V, "vd: Trying -vf palette...\n");
            palette = -1;
            vf = vf_open_filter(vf, "palette", NULL);
            goto csp_again;
        } else {
            // sws failed, if the last filter (vf_vo) support MPEGPES try to append vf_lavc
            vf_instance_t *vo, *vp = NULL, *ve, *vpp = NULL;
            // Remove the scale filter if we added it ourselves
            if (vf == sc) {
                ve = vf;
                vf = vf->next;
                vf_uninit_filter(ve);
            }
            // Find the last filter (vf_vo); vp = its predecessor, vpp = vp's
            for (vo = vf; vo->next; vo = vo->next) {
                vpp = vp;
                vp = vo;
            }
            if (vo->query_format(vo, IMGFMT_MPEGPES)
                && (!vp || (vp && strcmp(vp->info->name, "lavc")))) {
                // insert "lavc" (MPEG encoder) right before vf_vo and retry
                ve = vf_open_filter(vo, "lavc", NULL);
                if (vp)
                    vp->next = ve;
                else
                    vf = ve;
                goto csp_again;
            }
            // a previously inserted "lavc" did not help: unlink and free it
            if (vp && !strcmp(vp->info->name, "lavc")) {
                if (vpp)
                    vpp->next = vo;
                else
                    vf = vo;
                vf_uninit_filter(vp);
            }
        }
    }
    if (j < 0) {
        mp_msg(MSGT_CPLAYER, MSGL_WARN, MSGTR_VOincompCodec);
        sh->vf_initialized = -1;
        return 0;               // failed
    }
    out_fmt = sh->codec->outfmt[j];
    mp_msg(MSGT_CPLAYER, MSGL_V, "VDec: using %s as output csp (no %d)\n",
           vo_format_name(out_fmt), j);
    sh->outfmtidx = j;
    sh->vfilter = vf;

    // autodetect flipping
    if (flip == -1) {
        flip = 0;
        if (sh->codec->outflags[j] & CODECS_FLAG_FLIP)
            if (!(sh->codec->outflags[j] & CODECS_FLAG_NOFLIP))
                flip = 1;
    }
    if (vo_flags & VFCAP_FLIPPED)
        flip ^= 1;
    flip ^= sh->flipped_input;
    if (flip && !(vo_flags & VFCAP_FLIP)) {
        // we need to flip, but no flipping filter avail.
        vf_add_before_vo(&vf, "flip", NULL);
        sh->vfilter = vf;
    }

    // time to do aspect ratio corrections...
    if (movie_aspect > -1.0)
        sh->aspect = movie_aspect;      // cmdline overrides autodetect
    else if (sh->stream_aspect != 0.0)
        sh->aspect = sh->stream_aspect;
    else
        sh->aspect = sh->original_aspect;

    if (opt_screen_size_x || opt_screen_size_y) {
        // explicit -x/-y; values <= 8 are treated as scale factors
        screen_size_x = opt_screen_size_x;
        screen_size_y = opt_screen_size_y;
        if (!vidmode) {
            if (!screen_size_x)
                screen_size_x = SCREEN_SIZE_X;
            if (!screen_size_y)
                screen_size_y = SCREEN_SIZE_Y;
            if (screen_size_x <= 8)
                screen_size_x *= sh->disp_w;
            if (screen_size_y <= 8)
                screen_size_y *= sh->disp_h;
        }
    } else {
        // check source format aspect, calculate prescale ::atmos
        screen_size_x = sh->disp_w;
        screen_size_y = sh->disp_h;
        if (screen_size_xy >= 0.001) {
            if (screen_size_xy <= 8) {
                // -xy means x+y scale
                screen_size_x *= screen_size_xy;
                screen_size_y *= screen_size_xy;
            } else {
                // -xy means forced width while keeping correct aspect
                screen_size_x = screen_size_xy;
                screen_size_y = screen_size_xy * sh->disp_h / sh->disp_w;
            }
        }
        if (sh->aspect >= 0.01) {
            int w;
            mp_msg(MSGT_CPLAYER, MSGL_INFO, MSGTR_MovieAspectIsSet,
                   sh->aspect);
            mp_msg(MSGT_IDENTIFY, MSGL_INFO, "ID_VIDEO_ASPECT=%1.4f\n",
                   sh->aspect);
            w = (int) ((float) screen_size_y * sh->aspect);
            w += w % 2;         // round
            // we don't like horizontal downscale || user forced width:
            if (w < screen_size_x || screen_size_xy > 8) {
                screen_size_y =
                    (int) ((float) screen_size_x * (1.0 / sh->aspect));
                screen_size_y += screen_size_y % 2;     // round
            } else
                screen_size_x = w;      // keep new width
        } else {
            mp_msg(MSGT_CPLAYER, MSGL_INFO, MSGTR_MovieAspectUndefined);
        }
    }

    vocfg_flags = (fullscreen ? VOFLAG_FULLSCREEN : 0)
        | (vidmode ? VOFLAG_MODESWITCHING : 0)
        | (softzoom ? VOFLAG_SWSCALE : 0)
        | (flip ? VOFLAG_FLIPPING : 0);

    // Time to config libvo!
    mp_msg(MSGT_CPLAYER, MSGL_V,
           "VO Config (%dx%d->%dx%d,flags=%d,'%s',0x%X)\n", sh->disp_w,
           sh->disp_h, screen_size_x, screen_size_y, vocfg_flags, "MPlayer",
           out_fmt);

    vf->w = sh->disp_w;
    vf->h = sh->disp_h;

    if (vf_config_wrapper
        (vf, sh->disp_w, sh->disp_h, screen_size_x, screen_size_y,
         vocfg_flags, out_fmt) == 0) {
        //              "MPlayer",out_fmt)){
        mp_msg(MSGT_CPLAYER, MSGL_WARN, MSGTR_CannotInitVO);
        sh->vf_initialized = -1;
        return 0;
    }

    sh->vf_initialized = 1;

    // apply user-requested equalizer settings (1000 == "leave untouched")
    if (vo_gamma_gamma != 1000)
        set_video_colors(sh, "gamma", vo_gamma_gamma);
    if (vo_gamma_brightness != 1000)
        set_video_colors(sh, "brightness", vo_gamma_brightness);
    if (vo_gamma_contrast != 1000)
        set_video_colors(sh, "contrast", vo_gamma_contrast);
    if (vo_gamma_saturation != 1000)
        set_video_colors(sh, "saturation", vo_gamma_saturation);
    if (vo_gamma_hue != 1000)
        set_video_colors(sh, "hue", vo_gamma_hue);

    return 1;
}
/**
 * vf_scale reconfig: decide the output size and image format for
 * libswscale and (re)initialize the scaler context.
 *
 * cfg_w/cfg_h encodings, as applied by the code below:
 *   -1 = use the source size, 0 = use the display size,
 *   -2 = derive from the other dimension keeping the display aspect,
 *   -3 = derive from the other dimension keeping the source aspect,
 *   <= -8 = add 8, interpret as above, then round the result to a
 *           multiple of 16.
 *
 * @param vf   filter instance (private state in vf->priv)
 * @param in   input image parameters
 * @param out  receives the computed output parameters
 * @return 0 on success, -1 on error.
 */
static int reconfig(struct vf_instance *vf, struct mp_image_params *in,
                    struct mp_image_params *out)
{
    int width = in->w, height = in->h, d_width = in->d_w, d_height = in->d_h;
    unsigned int outfmt = in->imgfmt;
    unsigned int best = find_best_out(vf, outfmt);
    int round_w = 0, round_h = 0;

    if (!best) {
        MP_WARN(vf, "SwScale: no supported outfmt found :(\n");
        return -1;
    }

    // NOTE(review): result ignored — presumably called for its side
    // effects on the next filter; confirm against vf.h contract.
    vf->next->query_format(vf->next, best);

    vf->priv->w = vf->priv->cfg_w;
    vf->priv->h = vf->priv->cfg_h;

    // <= -8 means: decode the remaining value, then round to multiple of 16
    if (vf->priv->w <= -8) {
        vf->priv->w += 8;
        round_w = 1;
    }
    if (vf->priv->h <= -8) {
        vf->priv->h += 8;
        round_h = 1;
    }

    // reject meaningless combinations (both dimensions relative, etc.)
    if (vf->priv->w < -3 || vf->priv->h < -3 ||
        (vf->priv->w < -1 && vf->priv->h < -1)) {
        // TODO: establish a direct connection to the user's brain
        // and find out what the heck he thinks MPlayer should do
        // with this nonsense.
        MP_ERR(vf, "SwScale: EUSERBROKEN Check your parameters, they make no sense!\n");
        return -1;
    }

    if (vf->priv->w == -1)
        vf->priv->w = width;
    if (vf->priv->w == 0)
        vf->priv->w = d_width;

    if (vf->priv->h == -1)
        vf->priv->h = height;
    if (vf->priv->h == 0)
        vf->priv->h = d_height;

    // relative dimensions: computed from the (now absolute) other one
    if (vf->priv->w == -3)
        vf->priv->w = vf->priv->h * width / height;
    if (vf->priv->w == -2)
        vf->priv->w = vf->priv->h * d_width / d_height;

    if (vf->priv->h == -3)
        vf->priv->h = vf->priv->w * height / width;
    if (vf->priv->h == -2)
        vf->priv->h = vf->priv->w * d_height / d_width;

    // round to nearest multiple of 16 (requested via the <= -8 encoding)
    if (round_w)
        vf->priv->w = ((vf->priv->w + 8) / 16) * 16;
    if (round_h)
        vf->priv->h = ((vf->priv->h + 8) / 16) * 16;

    // check for upscaling, now that all parameters had been applied
    if (vf->priv->noup) {
        if ((vf->priv->w > width) + (vf->priv->h > height) >= vf->priv->noup) {
            vf->priv->w = width;
            vf->priv->h = height;
        }
    }

    MP_DBG(vf, "SwScale: scaling %dx%d %s to %dx%d %s \n",
           width, height, vo_format_name(outfmt), vf->priv->w, vf->priv->h,
           vo_format_name(best));

    // Compute new d_width and d_height, preserving aspect
    // while ensuring that both are >= output size in pixels.
    if (vf->priv->h * d_width > vf->priv->w * d_height) {
        d_width = vf->priv->h * d_width / d_height;
        d_height = vf->priv->h;
    } else {
        d_height = vf->priv->w * d_height / d_width;
        d_width = vf->priv->w;
    }

    *out = *in;
    out->w = vf->priv->w;
    out->h = vf->priv->h;
    out->d_w = d_width;
    out->d_h = d_height;
    out->imgfmt = best;

    // Second-guess what libswscale is going to output and what not.
    // It depends what libswscale supports for in/output, and what makes sense.
    struct mp_imgfmt_desc s_fmt = mp_imgfmt_get_desc(in->imgfmt);
    struct mp_imgfmt_desc d_fmt = mp_imgfmt_get_desc(out->imgfmt);
    // keep colorspace settings if the data stays in yuv
    if (!(s_fmt.flags & MP_IMGFLAG_YUV) || !(d_fmt.flags & MP_IMGFLAG_YUV)) {
        out->colorspace = MP_CSP_AUTO;
        out->colorlevels = MP_CSP_LEVELS_AUTO;
    }
    mp_image_params_guess_csp(out);

    // configure the sws context: user cmdline options + our own flags
    mp_sws_set_from_cmdline(vf->priv->sws, vf->chain->opts->vo.sws_opts);
    vf->priv->sws->flags |= vf->priv->v_chr_drop << SWS_SRC_V_CHR_DROP_SHIFT;
    vf->priv->sws->flags |= vf->priv->accurate_rnd * SWS_ACCURATE_RND;
    vf->priv->sws->src = *in;
    vf->priv->sws->dst = *out;

    if (mp_sws_reinit(vf->priv->sws) < 0) {
        // error...
        MP_WARN(vf, "Couldn't init libswscale for this setup\n");
        return -1;
    }
    return 0;
}
uint32_t vidix_query_fourcc(uint32_t format) { if( mp_msg_test(MSGT_VO,MSGL_DBG2) ) { mp_msg(MSGT_VO,MSGL_DBG2, "vosub_vidix: query_format was called: %x (%s)\n",format,vo_format_name(format)); } vidix_fourcc.fourcc = format; vdlQueryFourcc(vidix_handler,&vidix_fourcc); if (vidix_fourcc.depth == VID_DEPTH_NONE) return 0; return VFCAP_CSP_SUPPORTED|VFCAP_CSP_SUPPORTED_BY_HW|VFCAP_HWSCALE_UP|VFCAP_HWSCALE_DOWN|VFCAP_OSD|VFCAP_ACCEPT_STRIDE; }
/**
 * Advance video playback by at most one frame: pull a filtered image,
 * reconfigure the VO when the image parameters changed, derive the frame
 * duration from successive pts values, and queue the frame to the VO.
 *
 * Updates mpctx->video_status as playback progresses; on a fatal error
 * (error label) the whole video chain is torn down.
 *
 * @param mpctx  global player context
 * @param endpts stop position, passed through to video_output_image()
 */
void write_video(struct MPContext *mpctx, double endpts)
{
    struct MPOpts *opts = mpctx->opts;
    struct vo *vo = mpctx->video_out;

    if (!mpctx->d_video)
        return;

    // Actual playback starts when both audio and video are ready.
    if (mpctx->video_status == STATUS_READY)
        return;

    if (mpctx->paused && mpctx->video_status >= STATUS_READY)
        return;

    int r = video_output_image(mpctx, endpts);
    MP_TRACE(mpctx, "video_output_image: %d\n", r);

    if (r < 0)
        goto error;

    if (r == VD_WAIT)   // Demuxer will wake us up for more packets to decode.
        return;

    if (r == VD_EOF) {
        // keep draining until the VO has displayed its queued frames
        mpctx->video_status =
            vo_still_displaying(vo) ? STATUS_DRAINING : STATUS_EOF;
        mpctx->delay = 0;
        mpctx->last_av_difference = 0;
        MP_DBG(mpctx, "video EOF (status=%d)\n", mpctx->video_status);
        return;
    }

    if (mpctx->video_status > STATUS_PLAYING)
        mpctx->video_status = STATUS_PLAYING;

    if (r != VD_NEW_FRAME) {
        mpctx->sleeptime = 0;   // Decode more in next iteration.
        return;
    }

    // Filter output is different from VO input?
    struct mp_image_params p = mpctx->next_frames[0]->params;
    if (!vo->params || !mp_image_params_equal(&p, vo->params)) {
        // Changing config deletes the current frame; wait until it's finished.
        if (vo_still_displaying(vo))
            return;

        const struct vo_driver *info = mpctx->video_out->driver;
        char extra[20] = {0};
        // show the display size only when it differs from the coded size
        if (p.w != p.d_w || p.h != p.d_h)
            snprintf(extra, sizeof(extra), " => %dx%d", p.d_w, p.d_h);
        MP_INFO(mpctx, "VO: [%s] %dx%d%s %s\n",
                info->name, p.w, p.h, extra, vo_format_name(p.imgfmt));
        MP_VERBOSE(mpctx, "VO: Description: %s\n", info->description);

        int vo_r = vo_reconfig(vo, &p, 0);
        if (vo_r < 0) {
            mpctx->error_playing = MPV_ERROR_VO_INIT_FAILED;
            goto error;
        }
        init_vo(mpctx);
    }

    mpctx->time_frame -= get_relative_time(mpctx);
    update_avsync_before_frame(mpctx);

    // NOTE(review): clamping at -1 allows a target pts up to 1 second in
    // the past; verify this is intended rather than a clamp at 0.
    double time_frame = MPMAX(mpctx->time_frame, -1);
    int64_t pts = mp_time_us() + (int64_t)(time_frame * 1e6);

    // wait until VO wakes us up to get more frames
    if (!vo_is_ready_for_frame(vo, pts)) {
        if (video_feed_async_filter(mpctx) < 0)
            goto error;
        return;
    }

    assert(mpctx->num_next_frames >= 1);
    struct vo_frame dummy = {
        .pts = pts,
        .duration = -1,
        .num_frames = mpctx->num_next_frames,
    };
    for (int n = 0; n < dummy.num_frames; n++)
        dummy.frames[n] = mpctx->next_frames[n];
    struct vo_frame *frame = vo_frame_ref(&dummy);

    // estimate the frame duration from the pts delta to the next frame
    double diff = -1;
    double vpts0 = mpctx->next_frames[0]->pts;
    double vpts1 = MP_NOPTS_VALUE;
    if (mpctx->num_next_frames >= 2)
        vpts1 = mpctx->next_frames[1]->pts;
    if (vpts0 != MP_NOPTS_VALUE && vpts1 != MP_NOPTS_VALUE)
        diff = vpts1 - vpts0;
    if (diff < 0 && mpctx->d_video->fps > 0)
        diff = 1.0 / mpctx->d_video->fps;   // fallback to demuxer-reported fps
    if (opts->untimed || vo->driver->untimed)
        diff = -1;  // disable frame dropping and aspects of frame timing
    if (diff >= 0) {
        // expected A/V sync correction is ignored
        diff /= opts->playback_speed;
        if (mpctx->time_frame < 0)
            diff += mpctx->time_frame;
        frame->duration = MPCLAMP(diff, 0, 10) * 1e6;   // seconds -> microseconds
    }

    mpctx->video_pts = mpctx->next_frames[0]->pts;
    mpctx->last_vo_pts = mpctx->video_pts;
    mpctx->playback_pts = mpctx->video_pts;

    update_avsync_after_frame(mpctx);

    mpctx->osd_force_update = true;
    update_osd_msg(mpctx);
    update_subtitles(mpctx);

    vo_queue_frame(vo, frame);

    shift_frames(mpctx);

    // The frames were shifted down; "initialize" the new first entry.
    if (mpctx->num_next_frames >= 1)
        handle_new_frame(mpctx);

    mpctx->shown_vframes++;
    if (mpctx->video_status < STATUS_PLAYING) {
        mpctx->video_status = STATUS_READY;
        // After a seek, make sure to wait until the first frame is visible.
        vo_wait_frame(vo);
        MP_VERBOSE(mpctx, "first video frame after restart shown\n");
    }
    screenshot_flip(mpctx);

    mp_notify(mpctx, MPV_EVENT_TICK, NULL);

    if (!mpctx->sync_audio_to_video)
        mpctx->video_status = STATUS_EOF;

    if (mpctx->video_status != STATUS_EOF) {
        // honor frame stepping and --frames limits
        if (mpctx->step_frames > 0) {
            mpctx->step_frames--;
            if (!mpctx->step_frames && !opts->pause)
                pause_player(mpctx);
        }
        if (mpctx->max_frames == 0 && !mpctx->stop_play)
            mpctx->stop_play = AT_END_OF_FILE;
        if (mpctx->max_frames > 0)
            mpctx->max_frames--;
    }

    mpctx->sleeptime = 0;
    return;

error:
    MP_FATAL(mpctx, "Could not initialize video chain.\n");
    uninit_video_chain(mpctx);
    error_on_track(mpctx, mpctx->current_track[STREAM_VIDEO][0]);
    handle_force_window(mpctx, true);
    mpctx->sleeptime = 0;
}
int vidix_init(unsigned src_width,unsigned src_height, unsigned x_org,unsigned y_org,unsigned dst_width, unsigned dst_height,unsigned format,unsigned dest_bpp, unsigned vid_w,unsigned vid_h) { void *tmp, *tmpa; size_t i; int err; uint32_t sstride,apitch; if( mp_msg_test(MSGT_VO,MSGL_DBG2) ) mp_msg(MSGT_VO,MSGL_DBG2, "vosub_vidix: vidix_init() was called\n" "src_w=%u src_h=%u dest_x_y_w_h = %u %u %u %u\n" "format=%s dest_bpp=%u vid_w=%u vid_h=%u\n" ,src_width,src_height,x_org,y_org,dst_width,dst_height ,vo_format_name(format),dest_bpp,vid_w,vid_h); if(vidix_query_fourcc(format) == 0) { mp_msg(MSGT_VO,MSGL_ERR, MSGTR_LIBVO_SUB_VIDIX_UnsupportedFourccForThisVidixDriver, format,vo_format_name(format)); return -1; } if(((vidix_cap.maxwidth != -1) && (vid_w > vidix_cap.maxwidth)) || ((vidix_cap.minwidth != -1) && (vid_w < vidix_cap.minwidth)) || ((vidix_cap.maxheight != -1) && (vid_h > vidix_cap.maxheight)) || ((vidix_cap.minwidth != -1 ) && (vid_h < vidix_cap.minheight))) { mp_msg(MSGT_VO,MSGL_ERR, MSGTR_LIBVO_SUB_VIDIX_VideoServerHasUnsupportedResolution, vid_w, vid_h, vidix_cap.minwidth, vidix_cap.minheight, vidix_cap.maxwidth, vidix_cap.maxheight); return -1; } err = 0; switch(dest_bpp) { case 1: err = ((vidix_fourcc.depth & VID_DEPTH_1BPP) != VID_DEPTH_1BPP); break; case 2: err = ((vidix_fourcc.depth & VID_DEPTH_2BPP) != VID_DEPTH_2BPP); break; case 4: err = ((vidix_fourcc.depth & VID_DEPTH_4BPP) != VID_DEPTH_4BPP); break; case 8: err = ((vidix_fourcc.depth & VID_DEPTH_8BPP) != VID_DEPTH_8BPP); break; case 12:err = ((vidix_fourcc.depth & VID_DEPTH_12BPP) != VID_DEPTH_12BPP); break; case 15:err = ((vidix_fourcc.depth & VID_DEPTH_15BPP) != VID_DEPTH_15BPP); break; case 16:err = ((vidix_fourcc.depth & VID_DEPTH_16BPP) != VID_DEPTH_16BPP); break; case 24:err = ((vidix_fourcc.depth & VID_DEPTH_24BPP) != VID_DEPTH_24BPP); break; case 32:err = ((vidix_fourcc.depth & VID_DEPTH_32BPP) != VID_DEPTH_32BPP); break; default: err=1; break; } if(err) { mp_msg(MSGT_VO,MSGL_ERR, 
MSGTR_LIBVO_SUB_VIDIX_VideoServerHasUnsupportedColorDepth ,vidix_fourcc.depth); return -1; } if((dst_width > src_width || dst_height > src_height) && (vidix_cap.flags & FLAG_UPSCALER) != FLAG_UPSCALER) { mp_msg(MSGT_VO,MSGL_ERR, MSGTR_LIBVO_SUB_VIDIX_DriverCantUpscaleImage, src_width, src_height, dst_width, dst_height); return -1; } if((dst_width > src_width || dst_height > src_height) && (vidix_cap.flags & FLAG_DOWNSCALER) != FLAG_DOWNSCALER) { mp_msg(MSGT_VO,MSGL_ERR, MSGTR_LIBVO_SUB_VIDIX_DriverCantDownscaleImage, src_width, src_height, dst_width, dst_height); return -1; } image_width = src_width; image_height = src_height; src_format = format; if(forced_fourcc) format = forced_fourcc; memset(&vidix_play,0,sizeof(vidix_playback_t)); vidix_play.fourcc = format; vidix_play.capability = vidix_cap.flags; /* every ;) */ vidix_play.blend_factor = 0; /* for now */ /* display the full picture. Nick: we could implement here zooming to a specified area -- alex */ vidix_play.src.x = vidix_play.src.y = 0; vidix_play.src.w = src_width; vidix_play.src.h = src_height; vidix_play.dest.x = x_org; vidix_play.dest.y = y_org; vidix_play.dest.w = dst_width; vidix_play.dest.h = dst_height; // vidix_play.num_frames=vo_doublebuffering?NUM_FRAMES-1:1; /* we aren't mad...3 buffers are more than enough */ vidix_play.num_frames=vo_doublebuffering?3:1; vidix_play.src.pitch.y = vidix_play.src.pitch.u = vidix_play.src.pitch.v = 0; if((err=vdlConfigPlayback(vidix_handler,&vidix_play))!=0) { mp_msg(MSGT_VO,MSGL_ERR, MSGTR_LIBVO_SUB_VIDIX_CantConfigurePlayback,strerror(err)); return -1; } if ( mp_msg_test(MSGT_VO,MSGL_V) ) { mp_msg(MSGT_VO,MSGL_V, "vosub_vidix: using %d buffer(s)\n", vidix_play.num_frames); } vidix_mem = vidix_play.dga_addr; tmp = calloc(image_width, image_height); tmpa = malloc(image_width * image_height); memset(tmpa, 1, image_width * image_height); /* clear every frame with correct address and frame_size */ /* HACK: use draw_alpha to clear Y component */ for (i = 0; i < 
vidix_play.num_frames; i++) { next_frame = i; memset(vidix_mem + vidix_play.offsets[i], 0x80, vidix_play.frame_size); draw_alpha(0, 0, image_width, image_height, tmp, tmpa, image_width); } free(tmp); free(tmpa); /* show one of the "clear" frames */ vidix_flip_page(); switch(format) { case IMGFMT_YV12: case IMGFMT_I420: case IMGFMT_IYUV: case IMGFMT_YVU9: case IMGFMT_IF09: case IMGFMT_Y800: case IMGFMT_Y8: apitch = vidix_play.dest.pitch.y-1; dstrides.y = (image_width + apitch) & ~apitch; apitch = vidix_play.dest.pitch.v-1; dstrides.v = (image_width + apitch) & ~apitch; apitch = vidix_play.dest.pitch.u-1; dstrides.u = (image_width + apitch) & ~apitch; image_Bpp=1; break; case IMGFMT_RGB32: case IMGFMT_BGR32: apitch = vidix_play.dest.pitch.y-1; dstrides.y = (image_width*4 + apitch) & ~apitch; dstrides.u = dstrides.v = 0; image_Bpp=4; break; case IMGFMT_RGB24: case IMGFMT_BGR24: apitch = vidix_play.dest.pitch.y-1; dstrides.y = (image_width*3 + apitch) & ~apitch; dstrides.u = dstrides.v = 0; image_Bpp=3; break; default: apitch = vidix_play.dest.pitch.y-1; dstrides.y = (image_width*2 + apitch) & ~apitch; dstrides.u = dstrides.v = 0; image_Bpp=2; break; } /* tune some info here */ sstride = src_width*image_Bpp; if(!forced_fourcc) { is_422_planes_eq = sstride == dstrides.y; if(src_format == IMGFMT_YV12 || src_format == IMGFMT_I420 || src_format == IMGFMT_IYUV) vo_server->draw_slice = vidix_draw_slice_420; else if (src_format == IMGFMT_YVU9 || src_format == IMGFMT_IF09) vo_server->draw_slice = vidix_draw_slice_410; else vo_server->draw_slice = vidix_draw_slice_packed; } return 0; }