// Print a status line to the terminal.
// In slave mode each line is terminated with '\n' so consumers can parse it;
// otherwise the line is rewritten in place with '\r', either using the
// terminal's erase-to-EOL escape (if available) or by space-padding up to the
// terminal width.
void write_status_line(struct MPContext *mpctx, const char *line)
{
    struct MPOpts *opts = mpctx->opts;
    if (opts->slave_mode) {
        MP_MSG(mpctx, MSGL_STATUS, "%s\n", line);
    } else if (erase_to_end_of_line) {
        MP_MSG(mpctx, MSGL_STATUS, "%s%s\r", line, erase_to_end_of_line);
    } else {
        int pos = strlen(line);
        int width = get_term_width() - pos;
        // If the line is wider than the terminal, width goes negative; a
        // negative "%*s" field width means left-justify and would emit stray
        // padding past the terminal edge. Clamp to 0 (no padding).
        if (width < 0)
            width = 0;
        MP_MSG(mpctx, MSGL_STATUS, "%s%*s\r", line, width, "");
    }
}
void stream_dump(struct MPContext *mpctx) { struct MPOpts *opts = mpctx->opts; char *filename = opts->stream_dump; stream_t *stream = mpctx->stream; assert(stream && filename); int64_t size = 0; stream_control(stream, STREAM_CTRL_GET_SIZE, &size); stream_set_capture_file(stream, filename); while (mpctx->stop_play == KEEP_PLAYING && !stream->eof) { if (!opts->quiet && ((stream->pos / (1024 * 1024)) % 2) == 1) { uint64_t pos = stream->pos; MP_MSG(mpctx, MSGL_STATUS, "Dumping %lld/%lld...", (long long int)pos, (long long int)size); } stream_fill_buffer(stream); for (;;) { mp_cmd_t *cmd = mp_input_get_cmd(mpctx->input, 0, false); if (!cmd) break; run_command(mpctx, cmd); talloc_free(cmd); } } }
void stream_dump(struct MPContext *mpctx) { struct MPOpts *opts = mpctx->opts; char *filename = opts->stream_dump; stream_t *stream = mpctx->stream; assert(stream && filename); int64_t size = 0; stream_control(stream, STREAM_CTRL_GET_SIZE, &size); stream_set_capture_file(stream, filename); while (mpctx->stop_play == KEEP_PLAYING && !stream->eof) { if (!opts->quiet && ((stream->pos / (1024 * 1024)) % 2) == 1) { uint64_t pos = stream->pos; MP_MSG(mpctx, MSGL_STATUS, "Dumping %lld/%lld...", (long long int)pos, (long long int)size); } stream_fill_buffer(stream); mp_process_input(mpctx); } }
void update_vo_playback_state(struct MPContext *mpctx) { if (mpctx->video_out && mpctx->video_out->config_ok) { struct voctrl_playback_state oldstate = mpctx->vo_playback_state; struct voctrl_playback_state newstate = { .taskbar_progress = mpctx->opts->vo->taskbar_progress, .playing = mpctx->playing, .paused = mpctx->paused, .percent_pos = get_percent_pos(mpctx), }; if (oldstate.taskbar_progress != newstate.taskbar_progress || oldstate.playing != newstate.playing || oldstate.paused != newstate.paused || oldstate.percent_pos != newstate.percent_pos) { // Don't update progress bar if it was and still is hidden if ((oldstate.playing && oldstate.taskbar_progress) || (newstate.playing && newstate.taskbar_progress)) { vo_control_async(mpctx->video_out, VOCTRL_UPDATE_PLAYBACK_STATE, &newstate); } mpctx->vo_playback_state = newstate; } } else { mpctx->vo_playback_state = (struct voctrl_playback_state){ 0 }; } } void update_window_title(struct MPContext *mpctx, bool force) { if (!mpctx->video_out && !mpctx->ao) { talloc_free(mpctx->last_window_title); mpctx->last_window_title = NULL; return; } char *title = mp_property_expand_string(mpctx, mpctx->opts->wintitle); if (!mpctx->last_window_title || force || strcmp(title, mpctx->last_window_title) != 0) { talloc_free(mpctx->last_window_title); mpctx->last_window_title = talloc_steal(mpctx, title); if (mpctx->video_out) vo_control(mpctx->video_out, VOCTRL_UPDATE_WINDOW_TITLE, title); if (mpctx->ao) { ao_control(mpctx->ao, AOCONTROL_UPDATE_STREAM_TITLE, title); } } else { talloc_free(title); } } void error_on_track(struct MPContext *mpctx, struct track *track) { if (!track || !track->selected) return; mp_deselect_track(mpctx, track); if (track->type == STREAM_AUDIO) MP_INFO(mpctx, "Audio: no audio\n"); if (track->type == STREAM_VIDEO) MP_INFO(mpctx, "Video: no video\n"); if (mpctx->opts->stop_playback_on_init_failure || !(mpctx->vo_chain || mpctx->ao_chain)) { if (!mpctx->stop_play) mpctx->stop_play = PT_ERROR; if 
(mpctx->error_playing >= 0) mpctx->error_playing = MPV_ERROR_NOTHING_TO_PLAY; } mp_wakeup_core(mpctx); } int stream_dump(struct MPContext *mpctx, const char *source_filename) { struct MPOpts *opts = mpctx->opts; stream_t *stream = stream_open(source_filename, mpctx->global); if (!stream) return -1; int64_t size = stream_get_size(stream); FILE *dest = fopen(opts->stream_dump, "wb"); if (!dest) { MP_ERR(mpctx, "Error opening dump file: %s\n", mp_strerror(errno)); return -1; } bool ok = true; while (mpctx->stop_play == KEEP_PLAYING && ok) { if (!opts->quiet && ((stream->pos / (1024 * 1024)) % 2) == 1) { uint64_t pos = stream->pos; MP_MSG(mpctx, MSGL_STATUS, "Dumping %lld/%lld...", (long long int)pos, (long long int)size); } bstr data = stream_peek(stream, STREAM_MAX_BUFFER_SIZE); if (data.len == 0) { ok &= stream->eof; break; } ok &= fwrite(data.start, data.len, 1, dest) == 1; stream_skip(stream, data.len); mp_wakeup_core(mpctx); // don't actually sleep mp_idle(mpctx); // but process input } ok &= fclose(dest) == 0; free_stream(stream); return ok ? 0 : -1; } void merge_playlist_files(struct playlist *pl) { if (!pl->first) return; char *edl = talloc_strdup(NULL, "edl://"); for (struct playlist_entry *e = pl->first; e; e = e->next) { if (e != pl->first) edl = talloc_strdup_append_buffer(edl, ";"); // Escape if needed if (e->filename[strcspn(e->filename, "=%,;\n")] || bstr_strip(bstr0(e->filename)).len != strlen(e->filename)) { // %length% edl = talloc_asprintf_append_buffer(edl, "%%%zd%%", strlen(e->filename)); } edl = talloc_strdup_append_buffer(edl, e->filename); } playlist_clear(pl); playlist_add_file(pl, edl); talloc_free(edl); }
struct ra_layout ra_renderpass_input_layout(struct ra_renderpass_input *input) { size_t el_size = ra_vartype_size(input->type); if (!el_size) return (struct ra_layout){0}; // host data is always tightly packed return (struct ra_layout) { .align = 1, .stride = el_size * input->dim_v, .size = el_size * input->dim_v * input->dim_m, }; } static struct ra_renderpass_input *dup_inputs(void *ta_parent, const struct ra_renderpass_input *inputs, int num_inputs) { struct ra_renderpass_input *res = talloc_memdup(ta_parent, (void *)inputs, num_inputs * sizeof(inputs[0])); for (int n = 0; n < num_inputs; n++) res[n].name = talloc_strdup(res, res[n].name); return res; } // Return a newly allocated deep-copy of params. struct ra_renderpass_params *ra_renderpass_params_copy(void *ta_parent, const struct ra_renderpass_params *params) { struct ra_renderpass_params *res = talloc_ptrtype(ta_parent, res); *res = *params; res->inputs = dup_inputs(res, res->inputs, res->num_inputs); res->vertex_attribs = dup_inputs(res, res->vertex_attribs, res->num_vertex_attribs); res->cached_program = bstrdup(res, res->cached_program); res->vertex_shader = talloc_strdup(res, res->vertex_shader); res->frag_shader = talloc_strdup(res, res->frag_shader); res->compute_shader = talloc_strdup(res, res->compute_shader); return res; }; struct glsl_fmt { enum ra_ctype ctype; int num_components; int component_depth[4]; const char *glsl_format; }; // List taken from the GLSL specification, sans snorm and sint formats static const struct glsl_fmt ra_glsl_fmts[] = { {RA_CTYPE_FLOAT, 1, {16}, "r16f"}, {RA_CTYPE_FLOAT, 1, {32}, "r32f"}, {RA_CTYPE_FLOAT, 2, {16, 16}, "rg16f"}, {RA_CTYPE_FLOAT, 2, {32, 32}, "rg32f"}, {RA_CTYPE_FLOAT, 4, {16, 16, 16, 16}, "rgba16f"}, {RA_CTYPE_FLOAT, 4, {32, 32, 32, 32}, "rgba32f"}, {RA_CTYPE_FLOAT, 3, {11, 11, 10}, "r11f_g11f_b10f"}, {RA_CTYPE_UNORM, 1, {8}, "r8"}, {RA_CTYPE_UNORM, 1, {16}, "r16"}, {RA_CTYPE_UNORM, 2, {8, 8}, "rg8"}, {RA_CTYPE_UNORM, 2, {16, 16}, "rg16"}, 
{RA_CTYPE_UNORM, 4, {8, 8, 8, 8}, "rgba8"}, {RA_CTYPE_UNORM, 4, {16, 16, 16, 16}, "rgba16"}, {RA_CTYPE_UNORM, 4, {10, 10, 10, 2}, "rgb10_a2"}, {RA_CTYPE_UINT, 1, {8}, "r8ui"}, {RA_CTYPE_UINT, 1, {16}, "r16ui"}, {RA_CTYPE_UINT, 1, {32}, "r32ui"}, {RA_CTYPE_UINT, 2, {8, 8}, "rg8ui"}, {RA_CTYPE_UINT, 2, {16, 16}, "rg16ui"}, {RA_CTYPE_UINT, 2, {32, 32}, "rg32ui"}, {RA_CTYPE_UINT, 4, {8, 8, 8, 8}, "rgba8ui"}, {RA_CTYPE_UINT, 4, {16, 16, 16, 16}, "rgba16ui"}, {RA_CTYPE_UINT, 4, {32, 32, 32, 32}, "rgba32ui"}, {RA_CTYPE_UINT, 4, {10, 10, 10, 2}, "rgb10_a2ui"}, }; const char *ra_fmt_glsl_format(const struct ra_format *fmt) { for (int n = 0; n < MP_ARRAY_SIZE(ra_glsl_fmts); n++) { const struct glsl_fmt *gfmt = &ra_glsl_fmts[n]; if (fmt->ctype != gfmt->ctype) continue; if (fmt->num_components != gfmt->num_components) continue; for (int i = 0; i < fmt->num_components; i++) { if (fmt->component_depth[i] != gfmt->component_depth[i]) goto next_fmt; } return gfmt->glsl_format; next_fmt: ; // equivalent to `continue` } return NULL; } // Return whether this is a tightly packed format with no external padding and // with the same bit size/depth in all components, and the shader returns // components in the same order as in memory. static bool ra_format_is_regular(const struct ra_format *fmt) { if (!fmt->pixel_size || !fmt->num_components || !fmt->ordered) return false; for (int n = 1; n < fmt->num_components; n++) { if (fmt->component_size[n] != fmt->component_size[0] || fmt->component_depth[n] != fmt->component_depth[0]) return false; } if (fmt->component_size[0] * fmt->num_components != fmt->pixel_size * 8) return false; return true; } // Return a regular filterable format using RA_CTYPE_UNORM. 
const struct ra_format *ra_find_unorm_format(struct ra *ra, int bytes_per_component, int n_components) { for (int n = 0; n < ra->num_formats; n++) { const struct ra_format *fmt = ra->formats[n]; if (fmt->ctype == RA_CTYPE_UNORM && fmt->num_components == n_components && fmt->pixel_size == bytes_per_component * n_components && fmt->component_depth[0] == bytes_per_component * 8 && fmt->linear_filter && ra_format_is_regular(fmt)) return fmt; } return NULL; } // Return a regular format using RA_CTYPE_UINT. const struct ra_format *ra_find_uint_format(struct ra *ra, int bytes_per_component, int n_components) { for (int n = 0; n < ra->num_formats; n++) { const struct ra_format *fmt = ra->formats[n]; if (fmt->ctype == RA_CTYPE_UINT && fmt->num_components == n_components && fmt->pixel_size == bytes_per_component * n_components && fmt->component_depth[0] == bytes_per_component * 8 && ra_format_is_regular(fmt)) return fmt; } return NULL; } // Find a float format of any precision that matches the C type of the same // size for upload. // May drop bits from the mantissa (such as selecting float16 even if // bytes_per_component == 32); prefers possibly faster formats first. static const struct ra_format *ra_find_float_format(struct ra *ra, int bytes_per_component, int n_components) { // Assumes ra_format are ordered by performance. // The >=16 check is to avoid catching fringe formats. for (int n = 0; n < ra->num_formats; n++) { const struct ra_format *fmt = ra->formats[n]; if (fmt->ctype == RA_CTYPE_FLOAT && fmt->num_components == n_components && fmt->pixel_size == bytes_per_component * n_components && fmt->component_depth[0] >= 16 && fmt->linear_filter && ra_format_is_regular(fmt)) return fmt; } return NULL; } // Return a filterable regular format that uses at least float16 internally, and // uses a normal C float for transfer on the CPU side. (This is just so we don't // need 32->16 bit conversion on CPU, which would be messy.) 
const struct ra_format *ra_find_float16_format(struct ra *ra, int n_components) { return ra_find_float_format(ra, sizeof(float), n_components); } const struct ra_format *ra_find_named_format(struct ra *ra, const char *name) { for (int n = 0; n < ra->num_formats; n++) { const struct ra_format *fmt = ra->formats[n]; if (strcmp(fmt->name, name) == 0) return fmt; } return NULL; } // Like ra_find_unorm_format(), but if no fixed point format is available, // return an unsigned integer format. static const struct ra_format *find_plane_format(struct ra *ra, int bytes, int n_channels, enum mp_component_type ctype) { switch (ctype) { case MP_COMPONENT_TYPE_UINT: { const struct ra_format *f = ra_find_unorm_format(ra, bytes, n_channels); if (f) return f; return ra_find_uint_format(ra, bytes, n_channels); } case MP_COMPONENT_TYPE_FLOAT: return ra_find_float_format(ra, bytes, n_channels); default: return NULL; } } // Put a mapping of imgfmt to texture formats into *out. Basically it selects // the correct texture formats needed to represent an imgfmt in a shader, with // textures using the same memory organization as on the CPU. // Each plane is represented by a texture, and each texture has a RGBA // component order. out->components describes the meaning of them. // May return integer formats for >8 bit formats, if the driver has no // normalized 16 bit formats. // Returns false (and *out is not touched) if no format found. 
bool ra_get_imgfmt_desc(struct ra *ra, int imgfmt, struct ra_imgfmt_desc *out) { struct ra_imgfmt_desc res = {0}; struct mp_regular_imgfmt regfmt; if (mp_get_regular_imgfmt(®fmt, imgfmt)) { enum ra_ctype ctype = RA_CTYPE_UNKNOWN; res.num_planes = regfmt.num_planes; res.component_bits = regfmt.component_size * 8; res.component_pad = regfmt.component_pad; for (int n = 0; n < regfmt.num_planes; n++) { struct mp_regular_imgfmt_plane *plane = ®fmt.planes[n]; res.planes[n] = find_plane_format(ra, regfmt.component_size, plane->num_components, regfmt.component_type); if (!res.planes[n]) return false; for (int i = 0; i < plane->num_components; i++) res.components[n][i] = plane->components[i]; // Dropping LSBs when shifting will lead to dropped MSBs. if (res.component_bits > res.planes[n]->component_depth[0] && res.component_pad < 0) return false; // Renderer restriction, but actually an unwanted corner case. if (ctype != RA_CTYPE_UNKNOWN && ctype != res.planes[n]->ctype) return false; ctype = res.planes[n]->ctype; } res.chroma_w = regfmt.chroma_w; res.chroma_h = regfmt.chroma_h; goto supported; } for (int n = 0; n < ra->num_formats; n++) { if (imgfmt && ra->formats[n]->special_imgfmt == imgfmt) { res = *ra->formats[n]->special_imgfmt_desc; goto supported; } } // Unsupported format return false; supported: *out = res; return true; } void ra_dump_tex_formats(struct ra *ra, int msgl) { if (!mp_msg_test(ra->log, msgl)) return; MP_MSG(ra, msgl, "Texture formats:\n"); MP_MSG(ra, msgl, " NAME COMP*TYPE SIZE DEPTH PER COMP.\n"); for (int n = 0; n < ra->num_formats; n++) { const struct ra_format *fmt = ra->formats[n]; const char *ctype = "unknown"; switch (fmt->ctype) { case RA_CTYPE_UNORM: ctype = "unorm"; break; case RA_CTYPE_UINT: ctype = "uint "; break; case RA_CTYPE_FLOAT: ctype = "float"; break; } char cl[40] = ""; for (int i = 0; i < fmt->num_components; i++) { mp_snprintf_cat(cl, sizeof(cl), "%s%d", i ? 
" " : "", fmt->component_size[i]); if (fmt->component_size[i] != fmt->component_depth[i]) mp_snprintf_cat(cl, sizeof(cl), "/%d", fmt->component_depth[i]); } MP_MSG(ra, msgl, " %-10s %d*%s %3dB %s %s %s {%s}\n", fmt->name, fmt->num_components, ctype, fmt->pixel_size, fmt->luminance_alpha ? "LA" : " ", fmt->linear_filter ? "LF" : " ", fmt->renderable ? "CR" : " ", cl); } MP_MSG(ra, msgl, " LA = LUMINANCE_ALPHA hack format\n"); MP_MSG(ra, msgl, " LF = linear filterable\n"); MP_MSG(ra, msgl, " CR = can be used for render targets\n"); } void ra_dump_imgfmt_desc(struct ra *ra, const struct ra_imgfmt_desc *desc, int msgl) { char pl[80] = ""; char pf[80] = ""; for (int n = 0; n < desc->num_planes; n++) { if (n > 0) { mp_snprintf_cat(pl, sizeof(pl), "/"); mp_snprintf_cat(pf, sizeof(pf), "/"); } char t[5] = {0}; for (int i = 0; i < 4; i++) t[i] = "_rgba"[desc->components[n][i]]; for (int i = 3; i > 0 && t[i] == '_'; i--) t[i] = '\0'; mp_snprintf_cat(pl, sizeof(pl), "%s", t); mp_snprintf_cat(pf, sizeof(pf), "%s", desc->planes[n]->name); } MP_MSG(ra, msgl, "%d planes %dx%d %d/%d [%s] (%s)\n", desc->num_planes, desc->chroma_w, desc->chroma_h, desc->component_bits, desc->component_pad, pf, pl); } void ra_dump_img_formats(struct ra *ra, int msgl) { if (!mp_msg_test(ra->log, msgl)) return; MP_MSG(ra, msgl, "Image formats:\n"); for (int imgfmt = IMGFMT_START; imgfmt < IMGFMT_END; imgfmt++) { const char *name = mp_imgfmt_to_name(imgfmt); if (strcmp(name, "unknown") == 0) continue; MP_MSG(ra, msgl, " %s", name); struct ra_imgfmt_desc desc; if (ra_get_imgfmt_desc(ra, imgfmt, &desc)) { MP_MSG(ra, msgl, " => "); ra_dump_imgfmt_desc(ra, &desc, msgl); } else { MP_MSG(ra, msgl, "\n"); } } }
static bool wayland_vk_init(struct ra_ctx *ctx) { struct priv *p = ctx->priv = talloc_zero(ctx, struct priv); struct mpvk_ctx *vk = &p->vk; int msgl = ctx->opts.probing ? MSGL_V : MSGL_ERR; if (!mpvk_instance_init(vk, ctx->log, VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME, ctx->opts.debug)) goto error; if (!vo_wayland_init(ctx->vo)) goto error; VkWaylandSurfaceCreateInfoKHR wlinfo = { .sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR, .display = ctx->vo->wl->display, .surface = ctx->vo->wl->surface, }; VkResult res = vkCreateWaylandSurfaceKHR(vk->inst, &wlinfo, MPVK_ALLOCATOR, &vk->surf); if (res != VK_SUCCESS) { MP_MSG(ctx, msgl, "Failed creating Wayland surface: %s\n", vk_err(res)); goto error; } /* Because in Wayland clients render whenever they receive a callback from * the compositor, and the fact that the compositor usually stops sending * callbacks once the surface is no longer visible, using FIFO here would * mean the entire player would block on acquiring swapchain images. Hence, * use MAILBOX to guarantee that there'll always be a swapchain image and * the player won't block waiting on those */ if (!ra_vk_ctx_init(ctx, vk, VK_PRESENT_MODE_MAILBOX_KHR)) goto error; return true; error: wayland_vk_uninit(ctx); return false; } static void resize(struct ra_ctx *ctx) { struct vo_wayland_state *wl = ctx->vo->wl; MP_VERBOSE(wl, "Handling resize on the vk side\n"); const int32_t width = wl->scaling*mp_rect_w(wl->geometry); const int32_t height = wl->scaling*mp_rect_h(wl->geometry); wl_surface_set_buffer_scale(wl->surface, wl->scaling); wl->vo->dwidth = width; wl->vo->dheight = height; } static bool wayland_vk_reconfig(struct ra_ctx *ctx) { if (!vo_wayland_reconfig(ctx->vo)) return false; return true; } static int wayland_vk_control(struct ra_ctx *ctx, int *events, int request, void *arg) { int ret = vo_wayland_control(ctx->vo, events, request, arg); if (*events & VO_EVENT_RESIZE) { resize(ctx); if (ra_vk_ctx_resize(ctx->swapchain, ctx->vo->dwidth, 
ctx->vo->dheight)) return VO_ERROR; } return ret; }