/*
 * Server info callback
 *
 * Caches the server's default sample spec in the plugin data, then
 * signals the pulse mainloop that the request completed.
 */
static void pulse_server_info(pa_context *c, const pa_server_info *i,
		void *userdata)
{
	UNUSED_PARAMETER(c);
	PULSE_DATA(userdata);

	const pa_sample_spec *spec = &i->sample_spec;

	data->format          = spec->format;
	data->samples_per_sec = spec->rate;
	data->channels        = spec->channels;

	blog(LOG_DEBUG, "pulse-input: Default format: %s, %u Hz, %u channels",
		pa_sample_format_to_string(spec->format),
		spec->rate, spec->channels);

	pulse_signal(0);
}
/* Ask GL to validate the device's program pipeline; logs and returns
 * false when the pipeline is reported invalid or a GL call fails. */
static inline bool check_shader_pipeline_validity(device_t device)
{
	int status = false;

	glValidateProgramPipeline(device->pipeline);
	if (!gl_success("glValidateProgramPipeline"))
		return false;

	glGetProgramPipelineiv(device->pipeline, GL_VALIDATE_STATUS, &status);
	if (!gl_success("glGetProgramPipelineiv"))
		return false;

	if (status)
		return true;

	blog(LOG_ERROR, "Shader pipeline appears to be invalid");
	return false;
}
/*
 * Create a new pulse audio stream and connect to it
 *
 * Return a negative value on error
 */
static int pulse_connect_stream(struct pulse_data *data)
{
	/* Build a sample spec from the values previously read from the
	 * server (see pulse_server_info). */
	pa_sample_spec spec;
	spec.format = data->format;
	spec.rate = data->samples_per_sec;
	spec.channels = get_audio_channels(data->speakers);

	if (!pa_sample_spec_valid(&spec)) {
		blog(LOG_ERROR, "pulse-input: Sample spec is not valid");
		return -1;
	}

	data->bytes_per_frame = pa_frame_size(&spec);
	blog(LOG_DEBUG, "pulse-input: %u bytes per frame",
		(unsigned int) data->bytes_per_frame);

	/* Only the fragment size is constrained; every other buffer
	 * attribute is left at the server default ((uint32_t) -1). */
	pa_buffer_attr attr;
	attr.fragsize = get_buffer_size(data, 250);
	attr.maxlength = (uint32_t) -1;
	attr.minreq = (uint32_t) -1;
	attr.prebuf = (uint32_t) -1;
	attr.tlength = (uint32_t) -1;

	data->stream = pa_stream_new_with_proplist(data->context,
		obs_source_getname(data->source), &spec, NULL, data->props);
	if (!data->stream) {
		blog(LOG_ERROR, "pulse-input: Unable to create stream");
		return -1;
	}

	pa_stream_flags_t flags = PA_STREAM_INTERPOLATE_TIMING
		| PA_STREAM_AUTO_TIMING_UPDATE
		| PA_STREAM_ADJUST_LATENCY;

	if (pa_stream_connect_record(data->stream, NULL, &attr, flags) < 0) {
		blog(LOG_ERROR, "pulse-input: Unable to connect to stream");
		return -1;
	}

	/* Pump the mainloop until the stream becomes ready or enters a
	 * failed/terminated state. */
	for (;;) {
		pulse_iterate(data);
		pa_stream_state_t state = pa_stream_get_state(data->stream);
		if (state == PA_STREAM_READY) {
			blog(LOG_DEBUG, "pulse-input: Stream ready");
			break;
		}
		if (!PA_STREAM_IS_GOOD(state)) {
			blog(LOG_ERROR, "pulse-input: Stream connect failed");
			return -1;
		}
	}

	return 0;
}
/* Tear down an output: stops it if active, detaches it from its service
 * and encoders, then frees all owned resources.  Safe to call with NULL. */
void obs_output_destroy(obs_output_t *output)
{
	if (output) {
		obs_context_data_remove(&output->context);

		blog(LOG_INFO, "output '%s' destroyed", output->context.name);

		/* Stop the output before any resources are released. */
		if (output->valid && active(output))
			obs_output_actual_stop(output, true, 0);

		/* Wait for any in-flight stop/capture-end to finish. */
		os_event_wait(output->stopping_event);
		if (data_capture_ending(output))
			pthread_join(output->end_data_capture_thread, NULL);

		/* Detach from the service so it no longer points at us. */
		if (output->service)
			output->service->output = NULL;

		if (output->context.data)
			output->info.destroy(output->context.data);

		free_packets(output);

		/* Unhook this output from all of its encoders. */
		if (output->video_encoder) {
			obs_encoder_remove_output(output->video_encoder,
					output);
		}
		for (size_t i = 0; i < MAX_AUDIO_MIXES; i++) {
			if (output->audio_encoders[i]) {
				obs_encoder_remove_output(
						output->audio_encoders[i],
						output);
			}
		}

		/* Free synchronization primitives and remaining storage. */
		os_event_destroy(output->stopping_event);
		pthread_mutex_destroy(&output->interleaved_mutex);
		pthread_mutex_destroy(&output->delay_mutex);
		os_event_destroy(output->reconnect_stop_event);
		obs_context_data_free(&output->context);
		circlebuf_free(&output->delay_data);
		if (output->owns_info_id)
			bfree((void*)output->info.id);
		bfree(output);
	}
}
/* * binnie_open_out * * Opens the file named FILENAME for writing, with the mode (BAM/SAM) * depending on the file extension. * * Returns: a pointer to the opened samFile, or 0 on error. */ samFile *binnie_open_out(const char *filename) { samFile *fp; int filename_len; DLOG("binnie_open_out: filename=[%s]", filename); fp = 0; if(!filename) { error(0, 0, "binnie_open_out: null filename"); return fp; } /* * check whether filename is bam or sam */ filename_len = strlen(filename); if ( !strcasecmp(".bam", filename + filename_len - 4) ) { fp = sam_open(filename, "wb", 0); if (!fp) { error(0, errno, "binnie_open_out: error opening [%s] as bam", filename); } } else if ( !strcasecmp(".sam", filename + filename_len - 4) ) { fp = sam_open(filename, "w", 0); if (fp == NULL) { error(0, errno, "binnie_open_out: error opening [%s] as sam", filename); } } else { error(0, 0, "binnie_open_out: filename [%s] does not end in .bam or .sam", filename); return fp; } blog(3, "binnie_open_out: opened fp->fn=[%s]", fp->fn); DLOG("binnie_open_out: returning fp=[%u] for filename=[%s]", fp, filename); return fp; }
/* Atomically-ish write STR to PATH: write to "PATH.TEMP_EXT" first, then
 * (optionally) keep the previous file as "PATH.BACKUP_EXT" and rename the
 * temp file into place.  Returns false if the temp extension is missing
 * or the temp-file write fails.
 *
 * Fix: the backup branch used to call dstr_free(&backup_path) itself even
 * though the cleanup label frees it too; the duplicate call is removed. */
bool os_quick_write_utf8_file_safe(const char *path, const char *str,
		size_t len, bool marker, const char *temp_ext,
		const char *backup_ext)
{
	struct dstr backup_path = {0};
	struct dstr temp_path = {0};
	bool success = false;

	if (!temp_ext || !*temp_ext) {
		blog(LOG_ERROR, "os_quick_write_utf8_file_safe: invalid "
		                "temporary extension specified");
		return false;
	}

	/* Build "<path>.<temp_ext>" (adding the dot if needed). */
	dstr_copy(&temp_path, path);
	if (*temp_ext != '.')
		dstr_cat(&temp_path, ".");
	dstr_cat(&temp_path, temp_ext);

	if (!os_quick_write_utf8_file(temp_path.array, str, len, marker))
		goto cleanup;

	if (backup_ext && *backup_ext) {
		/* Preserve the old file as "<path>.<backup_ext>". */
		dstr_copy(&backup_path, path);
		if (*backup_ext != '.')
			dstr_cat(&backup_path, ".");
		dstr_cat(&backup_path, backup_ext);

		os_unlink(backup_path.array);
		os_rename(path, backup_path.array);
	} else {
		os_unlink(path);
	}

	os_rename(temp_path.array, path);
	success = true;

cleanup:
	dstr_free(&backup_path);
	dstr_free(&temp_path);
	return success;
}
static inline void init_textures(struct dc_capture *capture) { if (capture->compatibility) capture->texture = gs_texture_create( capture->width, capture->height, GS_BGRA, 1, NULL, GS_DYNAMIC); else capture->texture = gs_texture_create_gdi( capture->width, capture->height); if (!capture->texture) { blog(LOG_WARNING, "[dc_capture_init] Failed to " "create textures"); return; } capture->valid = true; }
/* Spawn the browser manager thread and record whether it is running. */
void BrowserManager::Impl::Startup()
{
	int rc = pthread_create(&managerThread, nullptr,
			browserManagerEntry, this);

	threadAlive = (rc == 0);
	if (!threadAlive)
		blog(LOG_ERROR, "BrowserManager: failed to create browser "
		                "manager thread.");
}
/* Make SWAP (or the device's default swap chain when NULL) the current
 * GLX drawable/context pair. */
void device_load_swapchain(device_t device, swapchain_t swap)
{
	if (!swap)
		swap = &device->plat->swap;

	/* Already current; nothing to do. */
	if (device->cur_swap == swap)
		return;

	device->cur_swap = swap;

	if (!glXMakeCurrent(swap->wi->display, swap->wi->glxid,
			device->plat->context))
		blog(LOG_ERROR, "Failed to make context current.");
}
/* Report the pixel dimensions of a 2D or cubemap texture.
 * Returns false (and logs) for any other texture type. */
static bool get_tex_dimensions(texture_t tex, uint32_t *width,
		uint32_t *height)
{
	switch (tex->type) {
	case GS_TEXTURE_2D: {
		struct gs_texture_2d *t2d = (struct gs_texture_2d*)tex;
		*width  = t2d->width;
		*height = t2d->height;
		return true;
	}
	case GS_TEXTURE_CUBE: {
		/* Cubemap faces are square, so one size covers both axes. */
		struct gs_texture_cube *tcube =
			(struct gs_texture_cube*)tex;
		*width  = tcube->size;
		*height = tcube->size;
		return true;
	}
	default:
		blog(LOG_ERROR, "Texture must be 2D or cubemap");
		return false;
	}
}
/* Map the staging surface's pack buffer for CPU reads.
 *
 * On success *data points at the mapped pixels and *linesize is the row
 * pitch in bytes.
 *
 * Fix: if glMapBuffer failed, the old code jumped straight to the error
 * label and left GL_PIXEL_PACK_BUFFER bound; it is now unbound on that
 * path as well. */
bool stagesurface_map(stagesurf_t stagesurf, uint8_t **data,
		uint32_t *linesize)
{
	if (!gl_bind_buffer(GL_PIXEL_PACK_BUFFER, stagesurf->pack_buffer))
		goto fail;

	*data = glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
	if (!gl_success("glMapBuffer"))
		goto fail_unbind;

	gl_bind_buffer(GL_PIXEL_PACK_BUFFER, 0);

	*linesize = stagesurf->bytes_per_pixel * stagesurf->width;
	return true;

fail_unbind:
	gl_bind_buffer(GL_PIXEL_PACK_BUFFER, 0);
fail:
	blog(LOG_ERROR, "stagesurf_map (GL) failed");
	return false;
}
void *load_module_subfunc(void *module, const char *module_name, const char *name, const char *func, bool required) { struct dstr func_name; void *func_addr = NULL; dstr_init_copy(&func_name, name); dstr_cat(&func_name, "_"); dstr_cat(&func_name, func); func_addr = os_dlsym(module, func_name.array); if (required && !func_addr) blog(LOG_ERROR, "Could not load function '%s' from module '%s'", func_name.array, module_name); dstr_free(&func_name); return func_addr; }
/* Register an encoder type described by INFO.  Duplicate ids are rejected
 * with a warning; the CHECK_REQUIRED_VAL macros reject registrations that
 * are missing mandatory callbacks. */
void obs_register_encoder_s(const struct obs_encoder_info *info, size_t size)
{
	if (find_encoder(info->id)) {
		blog(LOG_WARNING, "Encoder id '%s' already exists! "
				"Duplicate library?", info->id);
		return;
	}

	CHECK_REQUIRED_VAL(info, get_name, obs_register_encoder);
	CHECK_REQUIRED_VAL(info, create, obs_register_encoder);
	CHECK_REQUIRED_VAL(info, destroy, obs_register_encoder);
	CHECK_REQUIRED_VAL(info, encode, obs_register_encoder);

	/* Audio encoders must additionally report their frame size. */
	if (info->type == OBS_ENCODER_AUDIO)
		CHECK_REQUIRED_VAL(info, get_frame_size, obs_register_encoder);

	REGISTER_OBS_DEF(size, obs_encoder_info, obs->encoder_types, info);
}
static inline uint64_t smooth_ts(struct audio_line *line, uint64_t timestamp) { if (!line->next_ts_min) return timestamp; bool ts_under = (timestamp < line->next_ts_min); uint64_t diff = ts_under ? (line->next_ts_min - timestamp) : (timestamp - line->next_ts_min); #ifdef DEBUG_AUDIO if (diff >= TS_SMOOTHING_THRESHOLD) blog(LOG_DEBUG, "above TS smoothing threshold by %"PRIu64, diff); #endif return (diff < TS_SMOOTHING_THRESHOLD) ? line->next_ts_min : timestamp; }
/* Log the CPU brand string (macOS: machdep.cpu.brand_string sysctl).
 *
 * Fix: the malloc result was passed to sysctlbyname unchecked; a failed
 * allocation now simply skips the log line. */
static void log_processor_name(void)
{
	char *name = NULL;
	size_t size;
	int ret;

	/* First call with a NULL buffer just queries the needed size. */
	ret = sysctlbyname("machdep.cpu.brand_string", NULL, &size, NULL, 0);
	if (ret != 0)
		return;

	name = malloc(size);
	if (!name)
		return;

	ret = sysctlbyname("machdep.cpu.brand_string", name, &size, NULL, 0);
	if (ret == 0)
		blog(LOG_INFO, "CPU Name: %s", name);

	free(name);
}
/* Allocate and initialize a depth/stencil buffer of the given size and
 * format.  Returns NULL (after logging) when GL initialization fails. */
gs_zstencil_t device_zstencil_create(gs_device_t device, uint32_t width,
		uint32_t height, enum gs_zstencil_format format)
{
	struct gs_zstencil_buffer *zs = bzalloc(sizeof(*zs));

	zs->format     = convert_zstencil_format(format);
	zs->attachment = get_attachment(format);
	zs->device     = device;

	if (gl_init_zsbuffer(zs, width, height))
		return zs;

	blog(LOG_ERROR, "device_zstencil_create (GL) failed");
	gs_zstencil_destroy(zs);
	return NULL;
}
int obs_reset_video(struct obs_video_info *ovi) { if (!obs) return OBS_VIDEO_FAIL; /* don't allow changing of video settings if active. */ if (obs->video.video && video_output_active(obs->video.video)) return OBS_VIDEO_CURRENTLY_ACTIVE; if (!size_valid(ovi->output_width, ovi->output_height) || !size_valid(ovi->base_width, ovi->base_height)) return OBS_VIDEO_INVALID_PARAM; struct obs_core_video *video = &obs->video; stop_video(); obs_free_video(); if (!ovi) { obs_free_graphics(); return OBS_VIDEO_SUCCESS; } /* align to multiple-of-two and SSE alignment sizes */ ovi->output_width &= 0xFFFFFFFC; ovi->output_height &= 0xFFFFFFFE; if (!video->graphics) { int errorcode = obs_init_graphics(ovi); if (errorcode != OBS_VIDEO_SUCCESS) { obs_free_graphics(); return errorcode; } } blog(LOG_INFO, "video settings reset:\n" "\tbase resolution: %dx%d\n" "\toutput resolution: %dx%d\n" "\tfps: %d/%d", ovi->base_width, ovi->base_height, ovi->output_width, ovi->output_height, ovi->fps_num, ovi->fps_den); return obs_init_video(ovi); }
/* Menu handler: import a profile directory chosen by the user into the
 * local obs-studio profiles folder.  Existing profiles with the same
 * directory name are not overwritten. */
void OBSBasic::on_actionImportProfile_triggered()
{
	char path[512];

	QString home = QDir::homePath();

	int ret = GetConfigPath(path, 512, "obs-studio/basic/profiles/");
	if (ret <= 0) {
		blog(LOG_WARNING, "Failed to get profile config path");
		return;
	}

	/* Ask the user for the directory of the profile to import. */
	QString dir = QFileDialog::getExistingDirectory(this,
			QTStr("Basic.MainMenu.Profile.Import"), home,
			QFileDialog::ShowDirsOnly |
			QFileDialog::DontResolveSymlinks);

	if (!dir.isEmpty() && !dir.isNull()) {
		QString inputPath = QString::fromUtf8(path);
		QFileInfo finfo(dir);
		/* The last path component becomes the new profile name. */
		QString directory = finfo.fileName();
		QString profileDir = inputPath + directory;
		QDir folder(profileDir);

		if (!folder.exists()) {
			folder.mkpath(profileDir);

			/* Copy the known per-profile configuration files. */
			QFile::copy(dir + "/basic.ini",
					profileDir + "/basic.ini");
			QFile::copy(dir + "/service.json",
					profileDir + "/service.json");
			QFile::copy(dir + "/streamEncoder.json",
					profileDir + "/streamEncoder.json");
			QFile::copy(dir + "/recordEncoder.json",
					profileDir + "/recordEncoder.json");
			RefreshProfiles();
		} else {
			QMessageBox::information(this,
					QTStr("Basic.MainMenu.Profile.Import"),
					QTStr("Basic.MainMenu.Profile.Exists"));
		}
	}
}
/* Hook up packet/frame delivery for an output that is starting.
 *
 * Encoded outputs receive packets from their encoders — interleaved when
 * both audio and video are present, and optionally routed through the
 * delay buffer first.  Raw outputs connect straight to the video/audio
 * subsystems. */
static void hook_data_capture(struct obs_output *output, bool encoded,
		bool has_video, bool has_audio)
{
	encoded_callback_t encoded_callback;

	if (encoded) {
		/* Clear stale interleaved packets from any previous run. */
		pthread_mutex_lock(&output->interleaved_mutex);
		reset_packet_data(output);
		pthread_mutex_unlock(&output->interleaved_mutex);

		encoded_callback = (has_video && has_audio) ?
			interleave_packets : default_encoded_callback;

		if (output->delay_sec) {
			/* Insert process_delay in front of the real callback
			 * so packets are held for delay_sec first. */
			output->active_delay_ns =
				(uint64_t)output->delay_sec * 1000000000ULL;
			output->delay_cur_flags = output->delay_flags;
			output->delay_callback = encoded_callback;
			encoded_callback = process_delay;
			os_atomic_set_bool(&output->delay_active, true);

			blog(LOG_INFO, "Output '%s': %"PRIu32" second delay "
					"active, preserve on disconnect is %s",
					output->context.name,
					output->delay_sec,
					preserve_active(output) ? "on" : "off");
		}

		if (has_audio)
			start_audio_encoders(output, encoded_callback);
		if (has_video)
			obs_encoder_start(output->video_encoder,
					encoded_callback, output);
	} else {
		if (has_video)
			video_output_connect(output->video,
					get_video_conversion(output),
					default_raw_video_callback, output);
		if (has_audio)
			audio_output_connect(output->audio, output->mixer_idx,
					get_audio_conversion(output),
					default_raw_audio_callback, output);
	}
}
/* Issue the GL draw call for the currently bound buffers and shaders.
 * Draws indexed when an index buffer is bound, non-indexed otherwise. */
void device_draw(device_t device, enum gs_draw_mode draw_mode,
		uint32_t start_vert, uint32_t num_verts)
{
	struct gs_index_buffer *ib = device->cur_index_buffer;
	GLenum topology = convert_gs_topology(draw_mode);
	effect_t effect = gs_geteffect();

	if (!can_render(device))
		goto fail;

	/* Push current effect parameters and textures to the shaders. */
	if (effect)
		effect_updateparams(effect);

	shader_update_textures(device->cur_pixel_shader);
	update_viewproj_matrix(device);

#ifdef _DEBUG
	/* Pipeline validation only happens in debug builds. */
	if (!check_shader_pipeline_validity(device))
		goto fail;
#endif

	if (ib) {
		/* Indexed draw; num_verts == 0 means "all indices". */
		if (num_verts == 0)
			num_verts = (uint32_t)device->cur_index_buffer->num;
		glDrawElements(topology, num_verts, ib->gl_type,
				(const GLvoid*)(start_vert * ib->width));
		if (!gl_success("glDrawElements"))
			goto fail;
	} else {
		/* Non-indexed draw; num_verts == 0 means "all vertices". */
		if (num_verts == 0)
			num_verts = (uint32_t)device->cur_vertex_buffer->num;
		glDrawArrays(topology, start_vert, num_verts);
		if (!gl_success("glDrawArrays"))
			goto fail;
	}

	return;

fail:
	blog(LOG_ERROR, "device_draw (GL) failed");
}
/* Create a vertex buffer wrapping DATA.  GS_DYNAMIC in FLAGS marks the
 * buffer as dynamic.  Returns NULL (after logging) on failure.
 *
 * Fix: use bzalloc as the rest of this code does (see
 * device_zstencil_create) instead of bmalloc followed by memset. */
vertbuffer_t device_create_vertexbuffer(device_t device,
		struct vb_data *data, uint32_t flags)
{
	struct gs_vertex_buffer *vb =
		bzalloc(sizeof(struct gs_vertex_buffer));

	vb->device  = device;
	vb->data    = data;
	vb->num     = data->num;
	vb->dynamic = flags & GS_DYNAMIC;

	if (!create_buffers(vb)) {
		blog(LOG_ERROR, "device_create_vertexbuffer (GL) failed");
		vertexbuffer_destroy(vb);
		return NULL;
	}

	return vb;
}
/* Read FILE as UTF-8 and compile it into an effect.  Returns NULL when
 * graphics are not initialized, FILE is NULL, or the file can't be read. */
effect_t gs_create_effect_from_file(const char *file, char **error_string)
{
	if (!thread_graphics || !file)
		return NULL;

	char *contents = os_quick_read_utf8_file(file);
	if (!contents) {
		blog(LOG_ERROR, "Could not load effect file '%s'", file);
		return NULL;
	}

	effect_t effect = gs_create_effect(contents, file, error_string);
	bfree(contents);
	return effect;
}
/* Parse DECL_STRING and register PROC (with its DATA pointer) on HANDLER.
 * Invalid declarations are logged and ignored.  NULL handler is a no-op. */
void proc_handler_add(proc_handler_t *handler, const char *decl_string,
		proc_handler_proc_t proc, void *data)
{
	if (!handler)
		return;

	struct proc_info pi = {0};

	if (!parse_decl_string(&pi.func, decl_string)) {
		blog(LOG_ERROR, "Function declaration invalid: %s",
				decl_string);
		return;
	}

	pi.callback = proc;
	pi.data     = data;
	da_push_back(handler->procs, &pi);
}
/* Run the swscale conversion for one frame.  Returns false when the
 * scaler is NULL or sws_scale reports failure. */
bool video_scaler_scale(video_scaler_t *scaler, uint8_t *output[],
		const uint32_t out_linesize[], const uint8_t *const input[],
		const uint32_t in_linesize[])
{
	int rows;

	if (!scaler)
		return false;

	rows = sws_scale(scaler->swscale, input, (const int *)in_linesize,
			0, scaler->src_height, output,
			(const int *)out_linesize);

	if (rows > 0)
		return true;

	blog(LOG_ERROR, "video_scaler_scale: sws_scale failed: %d", rows);
	return false;
}
/* Set the sampler state for texture unit UNIT.  Without a pixel shader
 * the sampler is cleared instead. */
void device_load_samplerstate(device_t device, samplerstate_t ss, int unit)
{
	/* need a pixel shader to properly bind samplers */
	if (!device->cur_pixel_shader)
		ss = NULL;

	if (device->cur_samplers[unit] == ss)
		return;

	device->cur_samplers[unit] = ss;

	if (ss && !load_sampler_on_textures(device, ss, unit))
		blog(LOG_ERROR, "device_load_samplerstate (GL) failed");
}
/* Bind TEX to texture unit UNIT and update the matching shader parameter.
 * A NULL TEX (or the absence of a pixel shader) unbinds the unit. */
void device_load_texture(device_t device, texture_t tex, int unit)
{
	struct shader_param *param;
	struct gs_sampler_state *sampler;
	struct gs_texture *cur_tex = device->cur_textures[unit];

	/* need a pixel shader to properly bind textures */
	if (!device->cur_pixel_shader)
		tex = NULL;

	if (cur_tex == tex)
		return;

	if (!gl_active_texture(GL_TEXTURE0 + unit))
		goto fail;

	/* the target for the previous text may not be the same as the
	 * next texture, so unbind the previous texture first to be safe */
	if (cur_tex && (!tex || cur_tex->gl_target != tex->gl_target))
		gl_bind_texture(cur_tex->gl_target, 0);

	device->cur_textures[unit] = tex;

	param = get_texture_param(device, unit);
	if (!param)
		return;

	param->texture = tex;

	if (!tex)
		return;

	sampler = device->cur_samplers[param->sampler_id];

	if (!gl_bind_texture(tex->gl_target, tex->texture))
		goto fail;
	/* Apply the sampler state associated with this parameter, if any. */
	if (sampler && !load_texture_sampler(tex, sampler))
		goto fail;

	return;

fail:
	blog(LOG_ERROR, "device_load_texture (GL) failed");
}
/* Apply user settings to the x264 encoder.
 *
 * Preset/profile/tune are only applied before the encoder context exists
 * (!obsx264->context); later calls refresh the remaining parameters only.
 * Returns false when resetting the base x264 parameters fails. */
static bool update_settings(struct obs_x264 *obsx264, obs_data_t *settings)
{
	char *preset = bstrdup(obs_data_get_string(settings, "preset"));
	char *profile = bstrdup(obs_data_get_string(settings, "profile"));
	char *tune = bstrdup(obs_data_get_string(settings, "tune"));
	const char *opts = obs_data_get_string(settings, "x264opts");

	char **paramlist;
	bool success = true;

	/* Space-separated overrides from the "x264opts" setting. */
	paramlist = strlist_split(opts, ' ', false);

	blog(LOG_INFO, "---------------------------------");

	if (!obsx264->context) {
		/* x264opts entries may override preset/profile/tune. */
		override_base_params(obsx264, paramlist,
				&preset, &profile, &tune);

		if (preset && *preset)
			info("preset: %s", preset);
		if (profile && *profile)
			info("profile: %s", profile);
		if (tune && *tune)
			info("tune: %s", tune);

		success = reset_x264_params(obsx264, preset, tune);
	}

	if (success) {
		update_params(obsx264, settings, paramlist);
		if (opts && *opts)
			info("custom settings: %s", opts);

		if (!obsx264->context)
			apply_x264_profile(obsx264, profile);
	}

	/* Don't repeat headers in the bitstream. */
	obsx264->params.b_repeat_headers = false;

	strlist_free(paramlist);
	bfree(preset);
	bfree(profile);
	bfree(tune);
	return success;
}
/* Copy a region of SRC into DST at (dst_x, dst_y).  A src_w/src_h of 0
 * selects the remainder of the source from (src_x, src_y).  Both textures
 * must be 2D and share the same format. */
void device_copy_texture_region(gs_device_t *device, gs_texture_t *dst,
		uint32_t dst_x, uint32_t dst_y, gs_texture_t *src,
		uint32_t src_x, uint32_t src_y, uint32_t src_w,
		uint32_t src_h)
{
	struct gs_texture_2d *src2d = (struct gs_texture_2d*)src;
	struct gs_texture_2d *dst2d = (struct gs_texture_2d*)dst;

	if (!src) {
		blog(LOG_ERROR, "Source texture is NULL");
		goto fail;
	}
	if (!dst) {
		blog(LOG_ERROR, "Destination texture is NULL");
		goto fail;
	}
	if (dst->type != GS_TEXTURE_2D || src->type != GS_TEXTURE_2D) {
		blog(LOG_ERROR, "Source and destination textures must be 2D "
				"textures");
		goto fail;
	}
	if (dst->format != src->format) {
		blog(LOG_ERROR, "Source and destination formats do not match");
		goto fail;
	}

	/* Zero width/height means "everything right/below of the origin". */
	uint32_t nw = (uint32_t)src_w ?
		(uint32_t)src_w : (src2d->width - src_x);
	uint32_t nh = (uint32_t)src_h ?
		(uint32_t)src_h : (src2d->height - src_y);

	if (dst2d->width - dst_x < nw || dst2d->height - dst_y < nh) {
		blog(LOG_ERROR, "Destination texture region is not big "
				"enough to hold the source region");
		goto fail;
	}

	if (!gl_copy_texture(device, dst->texture, dst->gl_target, dst_x,
			dst_y, src->texture, src->gl_target, src_x, src_y,
			nw, nh, src->format))
		goto fail;

	return;

fail:
	blog(LOG_ERROR, "device_copy_texture (GL) failed");
}
/* Apparently for mac, PBOs won't do an asynchronous transfer unless you use
 * FBOs along with glReadPixels, which is really dumb. */
void device_stage_texture(device_t device, stagesurf_t dst, texture_t src)
{
	struct gs_texture_2d *tex2d = (struct gs_texture_2d*)src;
	struct fbo_info *fbo;
	GLint last_fbo;
	bool success = false;

	if (!can_stage(dst, tex2d))
		goto failed;

	/* Read back into the staging surface's PBO. */
	if (!gl_bind_buffer(GL_PIXEL_PACK_BUFFER, dst->pack_buffer))
		goto failed;

	fbo = get_fbo(device, dst->width, dst->height, dst->format);

	/* Remember the current read framebuffer so it can be restored. */
	if (!gl_get_integer_v(GL_READ_FRAMEBUFFER_BINDING, &last_fbo))
		goto failed_unbind_buffer;

	if (!gl_bind_framebuffer(GL_READ_FRAMEBUFFER, fbo->fbo))
		goto failed_unbind_buffer;

	/* Attach the source texture and read it into the bound PBO
	 * (the final NULL/0 pointer makes glReadPixels target the PBO). */
	glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + 0,
			src->gl_target, src->texture, 0);
	if (!gl_success("glFrameBufferTexture2D"))
		goto failed_unbind_all;

	glReadPixels(0, 0, dst->width, dst->height, dst->gl_format,
			dst->gl_type, 0);
	if (!gl_success("glReadPixels"))
		goto failed_unbind_all;

	success = true;

failed_unbind_all:
	gl_bind_framebuffer(GL_READ_FRAMEBUFFER, last_fbo);

failed_unbind_buffer:
	gl_bind_buffer(GL_PIXEL_PACK_BUFFER, 0);

failed:
	if (!success)
		blog(LOG_ERROR, "device_stage_texture (GL) failed");
}
/*
 * List resolutions for device and format
 *
 * Fix: frmsize was only partially initialized and the probe ioctl's
 * return value was ignored, so frmsize.type was read uninitialized
 * (undefined behavior) whenever VIDIOC_ENUM_FRAMESIZES failed.  The
 * struct is now zero-initialized, which also routes a failed probe into
 * the default (hardcoded-list) branch.
 */
static void v4l2_resolution_list(int dev, uint_fast32_t pixelformat,
		obs_property_t *prop)
{
	struct v4l2_frmsizeenum frmsize = {0};
	frmsize.pixel_format = pixelformat;
	frmsize.index = 0;

	struct dstr buffer;
	dstr_init(&buffer);

	obs_property_list_clear(prop);
	obs_property_list_add_int(prop, obs_module_text("LeaveUnchanged"),
			-1);

	/* Probe the first entry to learn how the driver enumerates sizes. */
	v4l2_ioctl(dev, VIDIOC_ENUM_FRAMESIZES, &frmsize);

	switch (frmsize.type) {
	case V4L2_FRMSIZE_TYPE_DISCRETE:
		/* Driver reports a discrete list; enumerate all entries. */
		while (v4l2_ioctl(dev, VIDIOC_ENUM_FRAMESIZES,
				&frmsize) == 0) {
			dstr_printf(&buffer, "%dx%d",
					frmsize.discrete.width,
					frmsize.discrete.height);
			obs_property_list_add_int(prop, buffer.array,
					v4l2_pack_tuple(
						frmsize.discrete.width,
						frmsize.discrete.height));
			frmsize.index++;
		}
		break;
	default:
		/* Stepwise/continuous ranges fall back to a fixed list. */
		blog(LOG_INFO, "Stepwise and Continuous framesizes "
				"are currently hardcoded");
		for (const int *packed = v4l2_framesizes; *packed; ++packed) {
			int width;
			int height;
			v4l2_unpack_tuple(&width, &height, *packed);
			dstr_printf(&buffer, "%dx%d", width, height);
			obs_property_list_add_int(prop, buffer.array,
					*packed);
		}
		break;
	}

	dstr_free(&buffer);
}