static void source_output_audio_line(obs_source_t source,
		const struct audio_data *data)
{
	struct audio_data in = *data;

	if (!source->timing_set) {
		reset_audio_timing(source, in.timestamp);

		/* detects 'directly' set timestamps as long as they're within
		 * a certain threshold */
		if ((source->timing_adjust + MAX_TS_VAR) < MAX_TS_VAR * 2)
			source->timing_adjust = 0;

	} else {
		uint64_t time_diff = data->timestamp -
			source->next_audio_timestamp_min;

		/* don't need signed because negative will trigger it
		 * regardless, which is what we want */
		if (time_diff > MAX_TIMESTAMP_JUMP) {
			blog(LOG_DEBUG, "Audio timestamp for source '%s' "
			                "jumped by '%lld', resetting audio "
			                "timing", source->name, time_diff);
			reset_audio_timing(source, in.timestamp);
		}
	}

	source->next_audio_timestamp_min = in.timestamp +
		conv_frames_to_time(source, in.frames);

	in.timestamp += source->timing_adjust;
	in.volume = source->volume;

	audio_line_output(source->audio_line, &in);
}
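/* A minimal, hypothetical sketch of the frame-to-time conversion the
 * function above relies on: timestamps here are in nanoseconds, so a
 * frame count becomes a duration by scaling with the sample rate.  The
 * explicit sample_rate parameter is an assumption for illustration; the
 * real conv_frames_to_time() looks the rate up from the source's audio
 * output info. */
#include <stdint.h>

static inline uint64_t frames_to_ns_sketch(uint64_t frames,
		uint32_t sample_rate)
{
	/* e.g. 1024 frames at 48000 Hz -> 21333333 ns (~21.3 ms) */
	return frames * 1000000000ULL / (uint64_t)sample_rate;
}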
static void source_output_audio_line(obs_source_t source,
		const struct audio_data *data)
{
	struct audio_data in = *data;
	uint64_t diff;

	if (!source->timing_set) {
		reset_audio_timing(source, in.timestamp);

		/* detects 'directly' set timestamps as long as they're within
		 * a certain threshold */
		if ((source->timing_adjust + MAX_TS_VAR) < MAX_TS_VAR * 2)
			source->timing_adjust = 0;

	} else {
		diff = in.timestamp - source->next_audio_ts_min;

		/* don't need signed because negative will trigger it
		 * regardless, which is what we want */
		if (diff > MAX_TIMESTAMP_JUMP)
			handle_ts_jump(source, in.timestamp, diff);
	}

	source->next_audio_ts_min = in.timestamp +
		conv_frames_to_time(source, in.frames);

	if (source->audio_reset_ref != 0)
		return;

	in.timestamp += source->timing_adjust;
	in.volume = source->volume;

	audio_line_output(source->audio_line, &in);
}
static bool audio_line_place_data(struct audio_line *line,
		const struct audio_data *data)
{
	int64_t pos;
	uint64_t timestamp = smooth_ts(line, data->timestamp);

	pos = ts_diff_bytes(line->audio, timestamp, line->base_timestamp);

	if (pos < 0)
		return false;

	line->next_ts_min = timestamp +
		conv_frames_to_time(line->audio, data->frames);

#ifdef DEBUG_AUDIO
	blog(LOG_DEBUG, "data->timestamp: %llu, line->base_timestamp: %llu, "
			"pos: %lu, bytes: %lu, buf size: %lu",
			timestamp, line->base_timestamp, pos,
			data->frames * line->audio->block_size,
			line->buffers[0].size);
#endif

	audio_line_place_data_pos(line, data, (size_t)pos);
	return true;
}
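/* Hedged sketch of the timestamp-to-byte-offset math that ts_diff_bytes()
 * performs above: the distance between the incoming timestamp and the
 * line's base timestamp is converted to frames, then to bytes using the
 * block size.  A negative result means the data would land before the
 * start of the buffer and is rejected, as in audio_line_place_data().
 * The sample_rate and block_size parameters are assumptions added so the
 * sketch stands alone. */
#include <stdint.h>
#include <stddef.h>

static int64_t ts_diff_bytes_sketch(uint64_t ts, uint64_t base_ts,
		uint32_t sample_rate, size_t block_size)
{
	int64_t diff_ns = (int64_t)ts - (int64_t)base_ts;
	int64_t frames  = diff_ns * (int64_t)sample_rate / 1000000000LL;

	return frames * (int64_t)block_size;
}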
static uint64_t mix_and_output(struct audio_output *audio,
		uint64_t audio_time, uint64_t prev_time)
{
	struct audio_line *line = audio->first_line;
	uint32_t frames = (uint32_t)ts_diff_frames(audio, audio_time,
			prev_time);
	size_t bytes = frames * audio->block_size;

#ifdef DEBUG_AUDIO
	blog(LOG_DEBUG, "audio_time: %llu, prev_time: %llu, bytes: %lu",
			audio_time, prev_time, bytes);
#endif

	/* return an adjusted audio_time according to the amount
	 * of data that was sampled to ensure seamless transmission */
	audio_time = prev_time + conv_frames_to_time(audio, frames);

	/* resize and clear mix buffers */
	for (size_t i = 0; i < audio->planes; i++) {
		da_resize(audio->mix_buffers[i], bytes);
		memset(audio->mix_buffers[i].array, 0, bytes);
	}

	/* mix audio lines */
	while (line) {
		struct audio_line *next = line->next;

		/* if line marked for removal, destroy and move to the next */
		if (!line->buffers[0].size) {
			if (!line->alive) {
				audio_output_removeline(audio, line);
				line = next;
				continue;
			}
		}

		pthread_mutex_lock(&line->mutex);

		if (line->buffers[0].size &&
		    line->base_timestamp < prev_time) {
			clear_excess_audio_data(line, prev_time);
			line->base_timestamp = prev_time;
		}

		if (mix_audio_line(audio, line, bytes, prev_time))
			line->base_timestamp = audio_time;

		pthread_mutex_unlock(&line->mutex);

		line = next;
	}

	/* output */
	do_audio_output(audio, prev_time, frames);

	return audio_time;
}
static void source_output_audio_line(obs_source_t source,
		const struct audio_data *data)
{
	struct audio_data in = *data;
	uint64_t diff;

	if (!source->timing_set) {
		reset_audio_timing(source, in.timestamp);

		/* detects 'directly' set timestamps as long as they're within
		 * a certain threshold */
		if ((source->timing_adjust + MAX_TS_VAR) < MAX_TS_VAR * 2)
			source->timing_adjust = 0;

	} else {
		bool ts_under = (in.timestamp < source->next_audio_ts_min);

		diff = ts_under ?
			(source->next_audio_ts_min - in.timestamp) :
			(in.timestamp - source->next_audio_ts_min);

		/* smooth audio if lower or within threshold */
		if (diff > MAX_TIMESTAMP_JUMP)
			handle_ts_jump(source, source->next_audio_ts_min,
					in.timestamp, diff);
		else if (ts_under || diff < TS_SMOOTHING_THRESHOLD)
			in.timestamp = source->next_audio_ts_min;
	}

	source->next_audio_ts_min = in.timestamp +
		conv_frames_to_time(in.frames);

	if (source->av_sync_ref != 0)
		return;

	in.timestamp += source->timing_adjust + source->sync_offset;
	in.volume = source->user_volume * source->present_volume *
		obs->audio.user_volume * obs->audio.present_volume;

	audio_line_output(source->audio_line, &in);
}
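/* Hedged sketch of the three-way timestamp decision above, pulled out as
 * a standalone function: a gap larger than the jump limit is treated as a
 * discontinuity, a timestamp that is lower than expected or within the
 * smoothing threshold is snapped to the expected value, and anything else
 * passes through unchanged.  The constant values are assumptions chosen
 * only for illustration, not the library's actual settings. */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_MAX_TS_JUMP         2000000000ULL /* assumed: 2 seconds */
#define SKETCH_SMOOTHING_THRESHOLD   70000000ULL /* assumed: 70 ms     */

enum ts_action { TS_ACCEPT, TS_SMOOTH, TS_JUMP };

static enum ts_action classify_ts(uint64_t ts, uint64_t expected)
{
	bool under = ts < expected;
	uint64_t diff = under ? (expected - ts) : (ts - expected);

	if (diff > SKETCH_MAX_TS_JUMP)
		return TS_JUMP;
	else if (under || diff < SKETCH_SMOOTHING_THRESHOLD)
		return TS_SMOOTH;

	return TS_ACCEPT;
}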
static uint64_t mix_and_output(struct audio_output *audio, uint64_t audio_time, uint64_t prev_time) { struct audio_line *line = audio->first_line; uint32_t frames = (uint32_t)ts_diff_frames(audio, audio_time, prev_time); size_t bytes = frames * audio->block_size; #ifdef DEBUG_AUDIO blog(LOG_DEBUG, "audio_time: %llu, prev_time: %llu, bytes: %lu", audio_time, prev_time, bytes); #endif /* return an adjusted audio_time according to the amount * of data that was sampled to ensure seamless transmission */ audio_time = prev_time + conv_frames_to_time(audio, frames); /* resize and clear mix buffers */ for (size_t mix_idx = 0; mix_idx < MAX_AUDIO_MIXES; mix_idx++) { struct audio_mix *mix = &audio->mixes[mix_idx]; for (size_t i = 0; i < audio->planes; i++) { da_resize(mix->mix_buffers[i], bytes); memset(mix->mix_buffers[i].array, 0, bytes); } } /* mix audio lines */ while (line) { struct audio_line *next = line->next; /* if line marked for removal, destroy and move to the next */ if (!line->buffers[0].size) { if (!line->alive) { audio_output_removeline(audio, line); line = next; continue; } } pthread_mutex_lock(&line->mutex); if (line->buffers[0].size && line->base_timestamp < prev_time) { clear_excess_audio_data(line, prev_time); line->base_timestamp = prev_time; } else if (line->audio_getting_cut_off) { line->audio_getting_cut_off = false; blog(LOG_WARNING, "Audio line '%s' audio data no " "longer getting cut off.", line->name); } if (mix_audio_line(audio, line, bytes, prev_time)) line->base_timestamp = audio_time; pthread_mutex_unlock(&line->mutex); line = next; } /* clamps audio data to -1.0..1.0 */ clamp_audio_output(audio, bytes); /* output */ for (size_t i = 0; i < MAX_AUDIO_MIXES; i++) do_audio_output(audio, i, prev_time, frames); return audio_time; }
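/* Minimal sketch of the clamping step invoked above: float samples are
 * clipped into the -1.0..1.0 range before output.  The real
 * clamp_audio_output() presumably walks every mix and plane; this
 * standalone version handles a single buffer and is only illustrative. */
#include <stddef.h>

static void clamp_float_buffer_sketch(float *samples, size_t count)
{
	for (size_t i = 0; i < count; i++) {
		if (samples[i] > 1.0f)
			samples[i] = 1.0f;
		else if (samples[i] < -1.0f)
			samples[i] = -1.0f;
	}
}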