/*
 * Ensures that cached frames are displayed on time. If multiple frames
 * were cached between renders, then releases the unnecessary frames and uses
 * the frame with the closest timing to ensure sync.
 *
 * Returns the frame that should be displayed (adds a ref to the source
 * before returning it), or NULL when no frame is cached.
 */
struct source_frame *obs_source_getframe(obs_source_t source)
{
	uint64_t last_frame_time = source->last_frame_timestamp;
	struct source_frame *frame = NULL;
	struct source_frame *next_frame;
	uint64_t sys_time, frame_time;

	pthread_mutex_lock(&source->video_mutex);

	/* Nothing cached: leave timing state untouched. */
	if (!source->video_frames.num)
		goto unlock;

	next_frame = source->video_frames.array[0];
	sys_time   = os_gettime_ns();
	frame_time = next_frame->timestamp;

	if (!source->last_frame_timestamp) {
		/* First frame ever: show it immediately and start the
		 * frame clock from its timestamp. */
		frame = next_frame;
		da_erase(source->video_frames, 0);

		source->last_frame_timestamp = frame_time;
	} else {
		uint64_t sys_offset, frame_offset;

		/* Advance the frame clock by the wall-clock time that
		 * elapsed since the previous render. */
		sys_offset   = sys_time   - source->last_sys_timestamp;
		frame_offset = frame_time - last_frame_time;

		source->last_frame_timestamp += sys_offset;

		/* Destroy every frame that is already overdue, keeping
		 * only the newest frame whose due time has passed. */
		while (frame_offset <= sys_offset) {
			if (frame)
				source_frame_destroy(frame);

			frame = next_frame;
			da_erase(source->video_frames, 0);

			if (!source->video_frames.num)
				break;

			next_frame   = source->video_frames.array[0];
			frame_time   = next_frame->timestamp;
			frame_offset = frame_time - last_frame_time;
		}
	}

	source->last_sys_timestamp = sys_time;

unlock:
	pthread_mutex_unlock(&source->video_mutex);

	if (frame != NULL)
		obs_source_addref(source);

	return frame;
}
/*
 * Pulls the next frame (or frame pair) out of the async cache for
 * deinterlacing, storing them as cur/prev async frames, and maintains
 * the offset used to map frame timestamps onto video output time.
 */
static inline void deinterlace_get_closest_frames(obs_source_t *s,
		uint64_t sys_time)
{
	const struct video_output_info *info;
	uint64_t half_interval;

	if (!s->async_frames.num)
		return;

	info = video_output_get_info(obs->video.video);

	/* Half of one output frame interval in nanoseconds:
	 * (fps_den / fps_num) seconds * 1e9 / 2. */
	half_interval = (uint64_t)info->fps_den * 500000000ULL /
		(uint64_t)info->fps_num;

	if (first_frame(s) || ready_deinterlace_frames(s, sys_time)) {
		uint64_t offset;

		s->prev_async_frame = NULL;
		s->cur_async_frame = s->async_frames.array[0];

		da_erase(s->async_frames, 0);

		if (s->cur_async_frame->prev_frame) {
			/* The popped frame is flagged as a "previous"
			 * frame, so the following cached frame becomes the
			 * current one.
			 * NOTE(review): assumes ready_deinterlace_frames()
			 * guarantees a second queued frame whenever
			 * prev_frame is set -- confirm, otherwise array[0]
			 * reads an empty darray here. */
			s->prev_async_frame = s->cur_async_frame;
			s->cur_async_frame = s->async_frames.array[0];

			da_erase(s->async_frames, 0);

			s->deinterlace_half_duration = (uint32_t)
				((s->cur_async_frame->timestamp -
				  s->prev_async_frame->timestamp) / 2);
		} else {
			s->deinterlace_half_duration = (uint32_t)
				((s->cur_async_frame->timestamp -
				  s->deinterlace_frame_ts) / 2);
		}

		if (!s->last_frame_ts)
			s->last_frame_ts = s->cur_async_frame->timestamp;

		s->deinterlace_frame_ts = s->cur_async_frame->timestamp;

		/* Re-anchor the timestamp->output-time offset only when it
		 * drifts by more than half a frame interval, so small
		 * jitter doesn't constantly reset it. */
		offset = obs->video.video_time - s->deinterlace_frame_ts;

		if (!s->deinterlace_offset) {
			s->deinterlace_offset = offset;
		} else {
			uint64_t offset_diff = uint64_diff(
					s->deinterlace_offset, offset);
			if (offset_diff > half_interval)
				s->deinterlace_offset = offset;
		}
	}
}
/*
 * Frees and removes the hotkey with the given id, emitting the
 * "hotkey_unregister" signal while the hotkey data is still valid.
 * Returns false when the id was never issued or cannot be found.
 *
 * NOTE(review): the return value (num >= idx) appears to tell the
 * caller whether entries at/after idx shifted so pointers need fixing
 * up -- presumably consumed by fixup_pointers(); confirm.
 */
static inline bool unregister_hotkey(obs_hotkey_id id)
{
	if (id >= obs->hotkeys.next_id)
		return false;

	size_t idx;
	if (!find_id(id, &idx))
		return false;

	obs_hotkey_t *hotkey = &obs->hotkeys.hotkeys.array[idx];

	/* Notify listeners before any of the hotkey data is freed. */
	hotkey_signal("hotkey_unregister", hotkey);
	release_registerer(hotkey);

	bfree(hotkey->name);
	bfree(hotkey->description);

	if (hotkey->registerer_type == OBS_HOTKEY_REGISTERER_SOURCE)
		obs_weak_source_release(hotkey->registerer);

	da_erase(obs->hotkeys.hotkeys, idx);
	remove_bindings(id);

	return obs->hotkeys.hotkeys.num >= idx;
}
/*
 * Removes a packet callback from the encoder. When the last callback is
 * removed, disconnects the encoder and, if destroy_on_stop is set,
 * destroys it.
 *
 * NOTE(review): the destroy path unlocks init_mutex, which implies the
 * caller must hold init_mutex on entry -- confirm at call sites.
 *
 * Returns true if the encoder was destroyed (caller must not touch it
 * afterwards), false otherwise.
 */
static inline bool obs_encoder_stop_internal(obs_encoder_t *encoder,
		void (*new_packet)(void *param, struct encoder_packet *packet),
		void *param)
{
	bool last = false;
	size_t idx;

	pthread_mutex_lock(&encoder->callbacks_mutex);

	idx = get_callback_idx(encoder, new_packet, param);
	if (idx != DARRAY_INVALID) {
		da_erase(encoder->callbacks, idx);
		/* Was that the final registered callback? */
		last = (encoder->callbacks.num == 0);
	}

	pthread_mutex_unlock(&encoder->callbacks_mutex);

	if (last) {
		remove_connection(encoder);
		encoder->initialized = false;

		if (encoder->destroy_on_stop) {
			pthread_mutex_unlock(&encoder->init_mutex);
			obs_encoder_actually_destroy(encoder);
			return true;
		}
	}

	return false;
}
/*
 * Removes a packet callback from the encoder; when the final callback
 * is removed the encoder connection is torn down and the encoder is
 * destroyed if it was flagged for destroy-on-stop. NULL encoder is a
 * no-op.
 */
void obs_encoder_stop(obs_encoder_t *encoder,
		void (*new_packet)(void *param, struct encoder_packet *packet),
		void *param)
{
	bool was_last = false;

	if (!encoder)
		return;

	pthread_mutex_lock(&encoder->callbacks_mutex);

	size_t cb_idx = get_callback_idx(encoder, new_packet, param);
	if (cb_idx != DARRAY_INVALID) {
		da_erase(encoder->callbacks, cb_idx);
		was_last = !encoder->callbacks.num;
	}

	pthread_mutex_unlock(&encoder->callbacks_mutex);

	if (!was_last)
		return;

	remove_connection(encoder);
	if (encoder->destroy_on_stop)
		obs_encoder_actually_destroy(encoder);
}
/*
 * Detaches 'filter' from 'source'. The preceding filter in the chain
 * (if any) is re-pointed at the removed filter's target so the chain
 * stays intact. No-op when either argument is NULL or the filter is not
 * attached to the source.
 */
void obs_source_filter_remove(obs_source_t source, obs_source_t filter)
{
	size_t idx;

	if (!source || !filter)
		return;

	pthread_mutex_lock(&source->filter_mutex);

	idx = da_find(source->filters, &filter, 0);
	if (idx == DARRAY_INVALID) {
		/* BUG FIX: previously returned while still holding
		 * filter_mutex, deadlocking the next caller that tried to
		 * take the lock. */
		pthread_mutex_unlock(&source->filter_mutex);
		return;
	}

	/* Bridge the chain across the removed filter. */
	if (idx > 0) {
		obs_source_t prev = source->filters.array[idx-1];
		prev->filter_target = filter->filter_target;
	}

	da_erase(source->filters, idx);
	pthread_mutex_unlock(&source->filter_mutex);

	filter->filter_parent = NULL;
	filter->filter_target = NULL;
}
/*
 * Marks a source as removed: takes it out of the user source list (if
 * present) and emits the "source_remove" signal exactly once. Safe to
 * call with NULL or with a source that was already removed.
 */
void obs_source_remove(obs_source_t source)
{
	struct obs_core_data *data = &obs->data;
	size_t id;
	bool exists;

	pthread_mutex_lock(&data->sources_mutex);

	if (!source || source->removed) {
		pthread_mutex_unlock(&data->sources_mutex);
		return;
	}

	source->removed = true;

	/* Temporary ref keeps the source alive through the signal below. */
	obs_source_addref(source);

	id = da_find(data->user_sources, &source, 0);
	exists = (id != DARRAY_INVALID);
	if (exists) {
		da_erase(data->user_sources, id);
		/* Drop the user-source list's reference. */
		obs_source_release(source);
	}

	pthread_mutex_unlock(&data->sources_mutex);

	/* Signal outside the lock so handlers can't deadlock on it. */
	if (exists)
		obs_source_dosignal(source, "source_remove", "remove");

	obs_source_release(source);
}
/*
 * Applies queued visibility toggles ("audio actions") for a scene item
 * to the current audio tick. Optionally fills *p_buf with a per-sample
 * 0.0/1.0 visibility envelope covering AUDIO_OUTPUT_FRAMES samples so
 * the mixer can mute/unmute the item at sub-tick precision.
 *
 * item        - scene item whose queued actions are consumed
 * p_buf       - optional; *p_buf is lazily allocated and filled with
 *               the visibility mask (caller owns the buffer)
 * ts          - timestamp of the start of this audio tick (ns)
 * sample_rate - used to convert action timestamps to sample indices
 */
static void apply_scene_item_audio_actions(struct obs_scene_item *item,
		float **p_buf, uint64_t ts, size_t sample_rate)
{
	bool cur_visible = item->visible;
	uint64_t frame_num = 0;
	size_t deref_count = 0;
	float *buf = NULL;

	if (p_buf) {
		/* NOTE(review): malloc result is unchecked; the 'buf'
		 * guards below make an allocation failure degrade to "no
		 * envelope written" rather than crash -- confirm that is
		 * the intended behavior. */
		if (!*p_buf)
			*p_buf = malloc(AUDIO_OUTPUT_FRAMES * sizeof(float));
		buf = *p_buf;
	}

	pthread_mutex_lock(&item->actions_mutex);

	for (size_t i = 0; i < item->audio_actions.num; i++) {
		struct item_action action = item->audio_actions.array[i];
		uint64_t timestamp = action.timestamp;
		uint64_t new_frame_num;

		/* Clamp actions from before this tick to its start. */
		if (timestamp < ts)
			timestamp = ts;

		new_frame_num = (timestamp - ts) * (uint64_t)sample_rate /
			1000000000ULL;

		/* Actions past the end of this tick stay queued. */
		if (ts && new_frame_num >= AUDIO_OUTPUT_FRAMES)
			break;

		/* Consume the action; i-- compensates for the index shift
		 * caused by da_erase. */
		da_erase(item->audio_actions, i--);
		item->visible = action.visible;

		/* Each hide queues one deactivation (done after unlock). */
		if (!item->visible)
			deref_count++;

		/* Fill the envelope up to this action's sample position
		 * with the visibility that was in effect before it. */
		if (buf && new_frame_num > frame_num) {
			for (; frame_num < new_frame_num; frame_num++)
				buf[frame_num] = cur_visible ? 1.0f : 0.0f;
		}

		cur_visible = item->visible;
	}

	if (buf) {
		/* Fill the remainder of the tick with the final state. */
		for (; frame_num < AUDIO_OUTPUT_FRAMES; frame_num++)
			buf[frame_num] = cur_visible ? 1.0f : 0.0f;
	}

	pthread_mutex_unlock(&item->actions_mutex);

	/* Deactivate outside the lock; removing the active child may
	 * re-enter other locking paths. */
	while (deref_count--) {
		if (os_atomic_dec_long(&item->active_refs) == 0) {
			obs_source_remove_active_child(item->parent->source,
					item->source);
		}
	}
}
/*
 * Removes (and frees) the #define whose name matches 'ref', if such a
 * definition exists in the preprocessor state; otherwise does nothing.
 */
static inline void cf_preprocess_remove_def_strref(struct cf_preprocessor *pp,
		const struct strref *ref)
{
	size_t def_idx = cf_preprocess_get_def_idx(pp, ref);

	if (def_idx == INVALID_INDEX)
		return;

	cf_def_free(pp->defines.array + def_idx);
	da_erase(pp->defines, def_idx);
}
/*
 * Pops and returns the frame at the front of the cache, but only when
 * new_frame_ready() says it is due for display; otherwise returns NULL.
 */
static inline struct source_frame *get_closest_frame(obs_source_t source,
		uint64_t sys_time)
{
	struct source_frame *due_frame = NULL;

	if (new_frame_ready(source, sys_time)) {
		due_frame = source->video_frames.array[0];
		da_erase(source->video_frames, 0);
	}

	return due_frame;
}
/*
 * Pops the current matrix off the graphics matrix stack. The bottom
 * matrix can never be popped; attempting to do so logs an error and
 * leaves the stack unchanged.
 */
void gs_matrix_pop(void)
{
	graphics_t gr = thread_graphics;
	size_t top = gr->cur_matrix;

	if (!top) {
		blog(LOG_ERROR, "Tried to pop last matrix on stack");
		return;
	}

	da_erase(gr->matrix_stack, top);
	gr->cur_matrix = top - 1;
}
/*
 * Deletes every key binding attached to the given hotkey id, releasing
 * any binding that is currently held down so no key stays logically
 * pressed. Indices shift after each erase, so the id is re-searched
 * each iteration.
 */
static inline void remove_bindings(obs_hotkey_id id)
{
	size_t pos;

	while (find_binding(id, &pos)) {
		obs_hotkey_binding_t *bind =
			&obs->hotkeys.bindings.array[pos];

		if (bind->pressed)
			release_pressed_binding(bind);

		da_erase(obs->hotkeys.bindings, pos);
	}
}
static inline void send_interleaved(struct obs_output *output) { struct encoder_packet out = output->interleaved_packets.array[0]; /* do not send an interleaved packet if there's no packet of the * opposing type of a higher timstamp in the interleave buffer. * this ensures that the timestamps are monotonic */ if (!has_higher_opposing_ts(output, &out)) return; da_erase(output->interleaved_packets, 0); output->info.encoded_packet(output->context.data, &out); obs_free_encoder_packet(&out); }
/*
 * Disconnects a callback/param pair from the given audio mix. Does
 * nothing when audio is NULL, the mix index is out of range, or no
 * matching input is registered.
 */
void audio_output_disconnect(audio_t *audio, size_t mix_idx,
		audio_output_callback_t callback, void *param)
{
	size_t input_idx;

	if (!audio || mix_idx >= MAX_AUDIO_MIXES)
		return;

	pthread_mutex_lock(&audio->input_mutex);

	input_idx = audio_get_input_idx(audio, mix_idx, callback, param);
	if (input_idx != DARRAY_INVALID) {
		struct audio_mix *mix = &audio->mixes[mix_idx];
		audio_input_free(&mix->inputs.array[input_idx]);
		da_erase(mix->inputs, input_idx);
	}

	pthread_mutex_unlock(&audio->input_mutex);
}
/*
 * Disconnects a previously connected audio callback/param pair from the
 * audio output, freeing its input entry. No-op when audio is NULL or no
 * matching input exists.
 */
void audio_output_disconnect(audio_t audio,
		void (*callback)(void *param, struct audio_data *data),
		void *param)
{
	size_t input_idx;

	if (!audio)
		return;

	pthread_mutex_lock(&audio->input_mutex);

	input_idx = audio_get_input_idx(audio, callback, param);
	if (input_idx != DARRAY_INVALID) {
		audio_input_free(&audio->inputs.array[input_idx]);
		da_erase(audio->inputs, input_idx);
	}

	pthread_mutex_unlock(&audio->input_mutex);
}
/*
 * Disconnects a previously connected raw-video callback/param pair from
 * the video output, freeing its input entry. No-op when video or
 * callback is NULL, or when no matching input exists.
 */
void video_output_disconnect(video_t video,
		void (*callback)(void *param, const struct video_data *frame),
		void *param)
{
	size_t input_idx;

	if (!video || !callback)
		return;

	pthread_mutex_lock(&video->input_mutex);

	input_idx = video_get_input_idx(video, callback, param);
	if (input_idx != DARRAY_INVALID) {
		video_input_free(&video->inputs.array[input_idx]);
		da_erase(video->inputs, input_idx);
	}

	pthread_mutex_unlock(&video->input_mutex);
}
/*
 * Removes the hotkey pair with the given id, unregistering both of its
 * underlying hotkeys first. Returns false when the id was never issued
 * or cannot be found.
 */
static inline bool unregister_hotkey_pair(obs_hotkey_pair_id id)
{
	size_t pair_idx;
	obs_hotkey_pair_t *pair;
	bool need_fixup;

	if (id >= obs->hotkeys.next_pair_id)
		return false;
	if (!find_pair_id(id, &pair_idx))
		return false;

	pair = &obs->hotkeys.hotkey_pairs.array[pair_idx];

	/* Both hotkeys must be unregistered regardless of the first
	 * result, so don't short-circuit the second call. */
	need_fixup = unregister_hotkey(pair->id[0]);
	if (unregister_hotkey(pair->id[1]))
		need_fixup = true;

	if (need_fixup)
		fixup_pointers();

	da_erase(obs->hotkeys.hotkey_pairs, pair_idx);
	return obs->hotkeys.hotkey_pairs.num >= pair_idx;
}
/*
 * Finds the frame with the closest timing to the current system time,
 * destroying any older frames that were skipped over. Advances the
 * source's frame clock and compensates for timestamp resets/jumps by
 * incrementing *audio_time_refs so audio can be re-synced.
 *
 * NOTE(review): assumes at least one cached frame and that the caller
 * holds the video mutex -- confirm at call sites.
 */
static inline struct source_frame *get_closest_frame(obs_source_t source,
		uint64_t sys_time, int *audio_time_refs)
{
	struct source_frame *next_frame = source->video_frames.array[0];
	struct source_frame *frame = NULL;
	uint64_t sys_offset = sys_time - source->last_sys_timestamp;
	uint64_t frame_time = next_frame->timestamp;
	uint64_t frame_offset = 0;

	/* account for timestamp invalidation */
	if (frame_out_of_bounds(source, frame_time)) {
		source->last_frame_ts = next_frame->timestamp;
		(*audio_time_refs)++;
	} else {
		frame_offset = frame_time - source->last_frame_ts;
		source->last_frame_ts += sys_offset;
	}

	/* Destroy every frame that is already overdue, keeping only the
	 * newest one whose due time has passed. */
	while (frame_offset <= sys_offset) {
		source_frame_destroy(frame);

		frame = next_frame;
		da_erase(source->video_frames, 0);

		if (!source->video_frames.num)
			break;

		next_frame = source->video_frames.array[0];

		/* more timestamp checking and compensating */
		if ((next_frame->timestamp - frame_time) > MAX_TIMESTAMP_JUMP) {
			source->last_frame_ts =
				next_frame->timestamp - frame_offset;
			(*audio_time_refs)++;
		}

		frame_time = next_frame->timestamp;
		frame_offset = frame_time - source->last_frame_ts;
	}

	return frame;
}
/*
 * Checks whether a cached frame is due for display at sys_time,
 * destroying frames that were skipped over (all but the last cached
 * frame). Advances the source's frame clock and bumps av_sync_ref on
 * timestamp resets/jumps so audio timing can be re-synced.
 *
 * Returns true when the frame at the front of source->video_frames
 * should be displayed.
 */
static bool new_frame_ready(obs_source_t source, uint64_t sys_time)
{
	struct source_frame *next_frame = source->video_frames.array[0];
	struct source_frame *frame = NULL;
	uint64_t sys_offset = sys_time - source->last_sys_timestamp;
	uint64_t frame_time = next_frame->timestamp;
	uint64_t frame_offset = 0;

	/* account for timestamp invalidation */
	if (frame_out_of_bounds(source, frame_time)) {
		source->last_frame_ts = next_frame->timestamp;
		os_atomic_inc_long(&source->av_sync_ref);
	} else {
		frame_offset = frame_time - source->last_frame_ts;
		/* BUG FIX: advance the frame clock by the elapsed system
		 * time (sys_offset), not by frame_offset; advancing by
		 * frame_offset ignored the real render interval and broke
		 * pacing. Matches obs_source_getframe and
		 * get_closest_frame. */
		source->last_frame_ts += sys_offset;
	}

	while (frame_offset <= sys_offset) {
		source_frame_destroy(frame);

		/* Never discard the last cached frame. */
		if (source->video_frames.num == 1)
			return true;

		frame = next_frame;
		da_erase(source->video_frames, 0);
		next_frame = source->video_frames.array[0];

		/* more timestamp checking and compensating */
		if ((next_frame->timestamp - frame_time) > MAX_TIMESTAMP_JUMP) {
			source->last_frame_ts =
				next_frame->timestamp - frame_offset;
			os_atomic_inc_long(&source->av_sync_ref);
		}

		frame_time = next_frame->timestamp;
		frame_offset = frame_time - source->last_frame_ts;
	}

	/* NOTE(review): a frame popped on the final loop iteration is
	 * neither destroyed nor returned here -- possible leak; confirm
	 * against source_frame ownership in the caller. */
	return frame != NULL;
}
/*
 * Ensures that cached frames are displayed on time. If multiple frames
 * were cached between renders, then releases the unnecessary frames and
 * uses the frame with the closest timing to ensure sync. Also ensures
 * that timing with audio is synchronized.
 *
 * Returns the frame to display (adds a ref to the source first), or
 * NULL when the source is NULL or nothing is cached.
 */
struct source_frame *obs_source_getframe(obs_source_t source)
{
	struct source_frame *frame = NULL;
	uint64_t sys_time;

	if (!source)
		return NULL;

	pthread_mutex_lock(&source->video_mutex);

	if (!source->video_frames.num)
		goto unlock;

	sys_time = os_gettime_ns();

	if (!source->last_frame_ts) {
		/* First frame: show it immediately and start the frame
		 * clock from its timestamp. */
		frame = source->video_frames.array[0];
		da_erase(source->video_frames, 0);

		source->last_frame_ts = frame->timestamp;
	} else {
		frame = get_closest_frame(source, sys_time);
	}

	/* reset timing to current system time */
	if (frame) {
		source->timing_adjust = sys_time - frame->timestamp;
		source->timing_set = true;
	}

	source->last_sys_timestamp = sys_time;

unlock:
	pthread_mutex_unlock(&source->video_mutex);

	if (frame)
		obs_source_addref(source);

	return frame;
}
/*
 * Decides whether new frames are ready for deinterlacing and prunes
 * frames that were skipped over, preserving the pairing needed for
 * field ordering (the prev_frame flags). Advances the source's frame
 * clock by the elapsed system time.
 *
 * Returns true when the caller should pull frame(s) from
 * source->async_frames.
 */
static bool ready_deinterlace_frames(obs_source_t *source, uint64_t sys_time)
{
	struct obs_source_frame *next_frame = source->async_frames.array[0];
	struct obs_source_frame *prev_frame = NULL;
	struct obs_source_frame *frame = NULL;
	uint64_t sys_offset = sys_time - source->last_sys_timestamp;
	uint64_t frame_time = next_frame->timestamp;
	uint64_t frame_offset = 0;
	size_t idx = 1;

	if ((source->flags & OBS_SOURCE_FLAG_UNBUFFERED) != 0) {
		/* Unbuffered mode: keep only the two newest frames (one
		 * current plus one previous-field frame) and skip all
		 * timing logic. */
		while (source->async_frames.num > 2) {
			da_erase(source->async_frames, 0);
			remove_async_frame(source, next_frame);
			next_frame = source->async_frames.array[0];
		}

		if (source->async_frames.num == 2)
			source->async_frames.array[0]->prev_frame = true;
		source->deinterlace_offset = 0;
		return true;
	}

	/* account for timestamp invalidation */
	if (frame_out_of_bounds(source, frame_time)) {
		source->last_frame_ts = next_frame->timestamp;
		source->deinterlace_offset = 0;
		return true;
	} else {
		frame_offset = frame_time - source->last_frame_ts;
		source->last_frame_ts += sys_offset;
	}

	while (source->last_frame_ts > next_frame->timestamp) {

		/* this tries to reduce the needless frame duplication, also
		 * helps smooth out async rendering to frame boundaries. In
		 * other words, tries to keep the framerate as smooth as
		 * possible */
		if ((source->last_frame_ts - next_frame->timestamp) < 2000000)
			break;

		if (prev_frame) {
			da_erase(source->async_frames, 0);
			remove_async_frame(source, prev_frame);
		}

		/* Stop pruning when only a current/previous pair (or less)
		 * would remain. */
		if (source->async_frames.num <= 2) {
			bool exit = true;

			if (prev_frame) {
				prev_frame->prev_frame = true;
			} else if (!frame && source->async_frames.num == 2) {
				exit = false;
			}

			if (exit) {
				source->deinterlace_offset = 0;
				return true;
			}
		}

		/* When a frame is already selected, peek one slot further
		 * ahead so the current/previous pair stays intact. */
		if (frame)
			idx = 2;
		else
			idx = 1;

		prev_frame = frame;
		frame = next_frame;
		next_frame = source->async_frames.array[idx];

		/* more timestamp checking and compensating */
		if ((next_frame->timestamp - frame_time) > MAX_TS_VAR) {
			source->last_frame_ts =
				next_frame->timestamp - frame_offset;
			source->deinterlace_offset = 0;
		}

		frame_time = next_frame->timestamp;
		frame_offset = frame_time - source->last_frame_ts;
	}

	if (prev_frame)
		prev_frame->prev_frame = true;

	return frame != NULL;
}