Code example #1
File: audio-io.c Project: SpaderQueen/Gifscreen1
/* TODO: optimize mixing */
static void mix_float(struct audio_output *audio, struct audio_line *line,
		size_t size, size_t time_offset, size_t plane)
{
	float *mixes[MAX_AUDIO_MIXES];
	float vals[MIX_BUFFER_SIZE];

	for (size_t mix_idx = 0; mix_idx < MAX_AUDIO_MIXES; mix_idx++) {
		uint8_t *bytes = audio->mixes[mix_idx].mix_buffers[plane].array;
		mixes[mix_idx] = (float*)&bytes[time_offset];
	}

	while (size) {
		size_t pop_count = min_size(size, sizeof(vals));
		size -= pop_count;

		circlebuf_pop_front(&line->buffers[plane], vals, pop_count);
		pop_count /= sizeof(float);

		for (size_t mix_idx = 0; mix_idx < MAX_AUDIO_MIXES; mix_idx++) {
			/* only include this audio line in this mix if it's set
			 * via the line's 'mixers' variable */
			if ((line->mixers & (1 << mix_idx)) == 0)
				continue;

			for (size_t i = 0; i < pop_count; i++) {
				*(mixes[mix_idx]++) += vals[i];
			}
		}
	}
}
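The examples on this page all revolve around libobs's byte-oriented ring buffer, struct circlebuf. For orientation, here is a minimal, self-contained sketch of the push/peek/pop pattern they share; the include path and the sample struct are illustrative assumptions, and only the circlebuf calls themselves mirror the examples:

#include <stdint.h>
#include <util/circlebuf.h> /* assumed libobs include path */

struct sample {
	uint64_t ts;
	size_t   size;
};

static void circlebuf_pattern_sketch(void)
{
	struct circlebuf buf = {0}; /* empty buffer, matching the '= {0}' init used below */
	struct sample in = {100, 4096}, out;

	circlebuf_push_back(&buf, &in, sizeof(in));    /* append a copy of 'in' */
	circlebuf_push_back(&buf, &in, sizeof(in));

	circlebuf_peek_front(&buf, &out, sizeof(out)); /* read the oldest element, keep it queued */
	circlebuf_pop_front(&buf, &out, sizeof(out));  /* copy the oldest element out and remove it */
	circlebuf_pop_front(&buf, NULL, sizeof(in));   /* a NULL destination simply discards bytes */

	circlebuf_free(&buf);
}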
Code example #2
File: rtmp-stream.c Project: Eegee/obs-studio
static void droptest_cap_data_rate(struct rtmp_stream *stream, size_t size)
{
	uint64_t ts = os_gettime_ns();
	struct droptest_info info;

	info.ts = ts;
	info.size = size;

	circlebuf_push_back(&stream->droptest_info, &info, sizeof(info));
	stream->droptest_size += size;

	if (stream->droptest_info.size) {
		circlebuf_peek_front(&stream->droptest_info,
				&info, sizeof(info));

		if (stream->droptest_size > DROPTEST_MAX_BYTES) {
			uint64_t elapsed = ts - info.ts;

			if (elapsed < 1000000000ULL) {
				elapsed = 1000000000ULL - elapsed;
				os_sleepto_ns(ts + elapsed);
			}

			while (stream->droptest_size > DROPTEST_MAX_BYTES) {
				circlebuf_pop_front(&stream->droptest_info,
						&info, sizeof(info));
				stream->droptest_size -= info.size;
			}
		}
	}
}
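In droptest_cap_data_rate() above, the throttle works over a one-second window: once droptest_size exceeds DROPTEST_MAX_BYTES, the stream sleeps out whatever remains of the second that began at the oldest queued record, then trims records until the byte count is back under the cap. With illustrative numbers (not from the source): if the cap is first exceeded 400,000,000 ns after the oldest record's timestamp, elapsed is recomputed as 1,000,000,000 - 400,000,000 = 600,000,000 ns, and os_sleepto_ns() parks the thread for roughly 0.6 s before the trimming loop runs.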
Code example #3
File: audio-io.c Project: ArnoldSchiller/obs-studio
/* TODO: optimize mixing */
static void mix_float(uint8_t *mix_in, struct circlebuf *buf, size_t size)
{
	float *mix = (float*)mix_in;
	float vals[MIX_BUFFER_SIZE];
	register float mix_val;

	while (size) {
		size_t pop_count = min_size(size, sizeof(vals));
		size -= pop_count;

		circlebuf_pop_front(buf, vals, pop_count);
		pop_count /= sizeof(float);

		/* This sequence provides hints for MSVC to use packed SSE 
		 * instructions addps, minps, maxps, etc. */
		for (size_t i = 0; i < pop_count; i++) {
			mix_val =  *mix + vals[i];

			/* clamp confuses the optimisation */
			mix_val = (mix_val >  1.0f) ?  1.0f : mix_val; 
			mix_val = (mix_val < -1.0f) ? -1.0f : mix_val;

			*(mix++) = mix_val;
		}
	}
}
Code example #4
File: audio-io.c Project: SpaderQueen/Gifscreen1
/* this only really happens with the very initial data insertion.  can be
 * ignored safely. */
static inline void clear_excess_audio_data(struct audio_line *line,
		uint64_t prev_time)
{
	size_t size = (size_t)ts_diff_bytes(line->audio, prev_time,
			line->base_timestamp);

	/*blog(LOG_DEBUG, "Excess audio data for audio line '%s', somehow "
	                "audio data went back in time by %"PRIu32" bytes.  "
	                "prev_time: %"PRIu64", line->base_timestamp: %"PRIu64,
	                line->name, (uint32_t)size,
	                prev_time, line->base_timestamp);*/

	if (!line->audio_getting_cut_off) {
		blog(LOG_WARNING, "Audio line '%s' audio data currently "
		                  "getting cut off.  This could be due to a "
		                  "negative sync offset that's larger than "
		                  "the current audio buffering time.",
		                  line->name);
		line->audio_getting_cut_off = true;
	}

	for (size_t i = 0; i < line->audio->planes; i++) {
		size_t clear_size = (size < line->buffers[i].size) ?
			size : line->buffers[i].size;

		circlebuf_pop_front(&line->buffers[i], NULL, clear_size);
	}
}
Code example #5
File: obs-audio.c Project: ab22/obs-studio
static bool discard_if_stopped(obs_source_t *source, size_t channels)
{
	size_t last_size;
	size_t size;

	last_size = source->last_audio_input_buf_size;
	size = source->audio_input_buf[0].size;

	if (!size)
		return false;

	/* if perpetually pending data, it means the audio has stopped,
	 * so clear the audio data */
	if (last_size == size) {
		for (size_t ch = 0; ch < channels; ch++)
			circlebuf_pop_front(&source->audio_input_buf[ch], NULL,
					source->audio_input_buf[ch].size);

		source->audio_ts = 0;
		source->last_audio_input_buf_size = 0;
#if DEBUG_AUDIO == 1
		blog(LOG_DEBUG, "source audio data appears to have "
				"stopped, clearing");
#endif
		return true;
	} else {
		source->last_audio_input_buf_size = size;
		return false;
	}
}
Code example #6
File: obs-video.c Project: Bl00drav3n/obs-studio
static inline void output_frame(uint64_t *cur_time, uint64_t interval)
{
	struct obs_core_video *video = &obs->video;
	int cur_texture  = video->cur_texture;
	int prev_texture = cur_texture == 0 ? NUM_TEXTURES-1 : cur_texture-1;
	struct video_data frame;
	bool frame_ready;

	memset(&frame, 0, sizeof(struct video_data));

	gs_enter_context(video->graphics);
	render_video(video, cur_texture, prev_texture);
	frame_ready = download_frame(video, prev_texture, &frame);
	gs_flush();
	gs_leave_context();

	if (frame_ready) {
		struct obs_vframe_info vframe_info;
		circlebuf_pop_front(&video->vframe_info_buffer, &vframe_info,
				sizeof(vframe_info));

		frame.timestamp = vframe_info.timestamp;
		output_video_data(video, &frame, vframe_info.count);
	}

	if (++video->cur_texture == NUM_TEXTURES)
		video->cur_texture = 0;

	video_sleep(video, cur_time, interval);
}
Code example #7
File: rtmp-stream.c Project: Reelapse/obs-studio
static inline void free_packets(struct rtmp_stream *stream)
{
	while (stream->packets.size) {
		struct encoder_packet packet;
		circlebuf_pop_front(&stream->packets, &packet, sizeof(packet));
		obs_free_encoder_packet(&packet);
	}
}
Code example #8
File: obs-video.c Project: Dead133/obs-studio
static inline bool queue_frame(struct obs_core_video *video, bool raw_active,
		struct obs_vframe_info *vframe_info, int prev_texture)
{
	bool duplicate = !video->gpu_encoder_avail_queue.size ||
		(video->gpu_encoder_queue.size && vframe_info->count > 1);

	if (duplicate) {
		struct obs_tex_frame *tf = circlebuf_data(
				&video->gpu_encoder_queue,
				video->gpu_encoder_queue.size - sizeof(*tf));

		/* texture-based encoding is stopping */
		if (!tf) {
			return false;
		}

		tf->count++;
		os_sem_post(video->gpu_encode_semaphore);
		goto finish;
	}

	struct obs_tex_frame tf;
	circlebuf_pop_front(&video->gpu_encoder_avail_queue, &tf, sizeof(tf));

	if (tf.released) {
		gs_texture_acquire_sync(tf.tex, tf.lock_key, GS_WAIT_INFINITE);
		tf.released = false;
	}

	/* the vframe_info->count > 1 case causing a copy can only happen if by
	 * some chance the very first frame has to be duplicated for whatever
	 * reason.  otherwise, it goes to the 'duplicate' case above, which
	 * will ensure better performance. */
	if (raw_active || vframe_info->count > 1) {
		gs_copy_texture(tf.tex, video->convert_textures[prev_texture]);
	} else {
		gs_texture_t *tex = video->convert_textures[prev_texture];
		gs_texture_t *tex_uv = video->convert_uv_textures[prev_texture];

		video->convert_textures[prev_texture] = tf.tex;
		video->convert_uv_textures[prev_texture] = tf.tex_uv;

		tf.tex = tex;
		tf.tex_uv = tex_uv;
		tf.handle = gs_texture_get_shared_handle(tex);
	}

	tf.count = 1;
	tf.timestamp = vframe_info->timestamp;
	tf.released = true;
	gs_texture_release_sync(tf.tex, ++tf.lock_key);
	circlebuf_push_back(&video->gpu_encoder_queue, &tf, sizeof(tf));

	os_sem_post(video->gpu_encode_semaphore);

finish:
	return --vframe_info->count;
}
Code example #9
File: decode.c Project: benklett/obs-studio
void mp_decode_clear_packets(struct mp_decode *d)
{
	if (d->packet_pending) {
		av_packet_unref(&d->orig_pkt);
		d->packet_pending = false;
	}

	while (d->packets.size) {
		AVPacket pkt;
		circlebuf_pop_front(&d->packets, &pkt, sizeof(pkt));
		av_packet_unref(&pkt);
	}
}
Code example #10
File: obs-audio.c Project: ab22/obs-studio
static void ignore_audio(obs_source_t *source, size_t channels,
		size_t sample_rate)
{
	size_t num_floats = source->audio_input_buf[0].size / sizeof(float);

	if (num_floats) {
		for (size_t ch = 0; ch < channels; ch++)
			circlebuf_pop_front(&source->audio_input_buf[ch], NULL,
					source->audio_input_buf[ch].size);

		source->last_audio_input_buf_size = 0;
		source->audio_ts += (uint64_t)num_floats * 1000000000ULL /
			(uint64_t)sample_rate;
	}
}
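The timestamp bump at the end of ignore_audio() converts the discarded per-channel sample count into nanoseconds. As a worked example with an assumed 48000 Hz sample rate: dropping 1024 floats per channel advances audio_ts by 1024 * 1000000000 / 48000 ≈ 21,333,333 ns (about 21.3 ms), keeping the source's timeline consistent with the audio that was thrown away.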
Code example #11
static inline bool get_next_packet(struct rtmp_stream *stream,
		struct encoder_packet *packet)
{
	bool new_packet = false;

	pthread_mutex_lock(&stream->packets_mutex);
	if (stream->packets.size) {
		circlebuf_pop_front(&stream->packets, packet,
				sizeof(struct encoder_packet));
		new_packet = true;
	}
	pthread_mutex_unlock(&stream->packets_mutex);

	return new_packet;
}
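get_next_packet() is the consumer half of a mutex-guarded packet queue. Below is a sketch of what the matching producer side might look like, assuming the same stream->packets circlebuf and packets_mutex; the function name is hypothetical and not taken from the source:

static void queue_packet_sketch(struct rtmp_stream *stream,
		struct encoder_packet *packet)
{
	/* push a shallow copy of the fixed-size packet struct; the consumer
	 * in get_next_packet() pops exactly sizeof(struct encoder_packet)
	 * bytes back out */
	pthread_mutex_lock(&stream->packets_mutex);
	circlebuf_push_back(&stream->packets, packet,
			sizeof(struct encoder_packet));
	pthread_mutex_unlock(&stream->packets_mutex);
}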
Code example #12
File: rtmp-stream.c Project: Eegee/obs-studio
static void drop_frames(struct rtmp_stream *stream, const char *name,
		int highest_priority, int64_t *p_min_dts_usec)
{
	struct circlebuf new_buf            = {0};
	uint64_t         last_drop_dts_usec = 0;
	int              num_frames_dropped = 0;

#ifdef _DEBUG
	int start_packets = (int)num_buffered_packets(stream);
#else
	UNUSED_PARAMETER(name);
#endif

	circlebuf_reserve(&new_buf, sizeof(struct encoder_packet) * 8);

	while (stream->packets.size) {
		struct encoder_packet packet;
		circlebuf_pop_front(&stream->packets, &packet, sizeof(packet));

		last_drop_dts_usec = packet.dts_usec;

		/* do not drop audio data or video keyframes */
		if (packet.type          == OBS_ENCODER_AUDIO ||
		    packet.drop_priority >= highest_priority) {
			circlebuf_push_back(&new_buf, &packet, sizeof(packet));

		} else {
			num_frames_dropped++;
			obs_free_encoder_packet(&packet);
		}
	}

	circlebuf_free(&stream->packets);
	stream->packets = new_buf;

	if (stream->min_priority < highest_priority)
		stream->min_priority = highest_priority;

	*p_min_dts_usec = last_drop_dts_usec;

	stream->dropped_frames += num_frames_dropped;
#ifdef _DEBUG
	debug("Dropped %s, prev packet count: %d, new packet count: %d",
			name,
			start_packets,
			(int)num_buffered_packets(stream));
#endif
}
Code example #13
static inline void free_packets(struct rtmp_stream *stream)
{
	size_t num_packets;

	pthread_mutex_lock(&stream->packets_mutex);

	num_packets = num_buffered_packets(stream);
	if (num_packets)
		info("Freeing %d remaining packets", (int)num_packets);

	while (stream->packets.size) {
		struct encoder_packet packet;
		circlebuf_pop_front(&stream->packets, &packet, sizeof(packet));
		obs_free_encoder_packet(&packet);
	}
	pthread_mutex_unlock(&stream->packets_mutex);
}
Code example #14
File: obs-video.c Project: Dead133/obs-studio
static inline void output_frame(bool raw_active, const bool gpu_active)
{
	struct obs_core_video *video = &obs->video;
	int cur_texture  = video->cur_texture;
	int prev_texture = cur_texture == 0 ? NUM_TEXTURES-1 : cur_texture-1;
	struct video_data frame;
	bool active = raw_active || gpu_active;
	bool frame_ready;

	memset(&frame, 0, sizeof(struct video_data));

	profile_start(output_frame_gs_context_name);
	gs_enter_context(video->graphics);

	profile_start(output_frame_render_video_name);
	render_video(video, raw_active, gpu_active, cur_texture, prev_texture);
	profile_end(output_frame_render_video_name);

	if (raw_active) {
		profile_start(output_frame_download_frame_name);
		frame_ready = download_frame(video, prev_texture, &frame);
		profile_end(output_frame_download_frame_name);
	}

	profile_start(output_frame_gs_flush_name);
	gs_flush();
	profile_end(output_frame_gs_flush_name);

	gs_leave_context();
	profile_end(output_frame_gs_context_name);

	if (raw_active && frame_ready) {
		struct obs_vframe_info vframe_info;
		circlebuf_pop_front(&video->vframe_info_buffer, &vframe_info,
				sizeof(vframe_info));

		frame.timestamp = vframe_info.timestamp;
		profile_start(output_frame_output_video_data_name);
		output_video_data(video, &frame, vframe_info.count);
		profile_end(output_frame_output_video_data_name);
	}

	if (++video->cur_texture == NUM_TEXTURES)
		video->cur_texture = 0;
}
Code example #15
File: audio-io.c Project: ArnoldSchiller/obs-studio
/* this only really happens with the very initial data insertion.  can be
 * ignored safely. */
static inline void clear_excess_audio_data(struct audio_line *line,
		uint64_t prev_time)
{
	size_t size = ts_diff_bytes(line->audio, prev_time,
			line->base_timestamp);

	/*blog(LOG_DEBUG, "Excess audio data for audio line '%s', somehow "
	                "audio data went back in time by %"PRIu32" bytes.  "
	                "prev_time: %"PRIu64", line->base_timestamp: %"PRIu64,
	                line->name, (uint32_t)size,
	                prev_time, line->base_timestamp);*/

	for (size_t i = 0; i < line->audio->planes; i++) {
		size_t clear_size = (size < line->buffers[i].size) ?
			size : line->buffers[i].size;

		circlebuf_pop_front(&line->buffers[i], NULL, clear_size);
	}
}
Code example #16
File: obs-video.c Project: Dead133/obs-studio
static void output_gpu_encoders(struct obs_core_video *video, bool raw_active,
		int prev_texture)
{
	profile_start(output_gpu_encoders_name);

	if (!video->textures_converted[prev_texture])
		goto end;
	if (!video->vframe_info_buffer_gpu.size)
		goto end;

	struct obs_vframe_info vframe_info;
	circlebuf_pop_front(&video->vframe_info_buffer_gpu, &vframe_info,
			sizeof(vframe_info));

	pthread_mutex_lock(&video->gpu_encoder_mutex);
	encode_gpu(video, raw_active, &vframe_info, prev_texture);
	pthread_mutex_unlock(&video->gpu_encoder_mutex);

end:
	profile_end(output_gpu_encoders_name);
}
Code example #17
static void drop_frames(struct rtmp_stream *stream)
{
	struct circlebuf new_buf            = {0};
	int              drop_priority      = 0;
	uint64_t         last_drop_dts_usec = 0;
	int              num_frames_dropped = 0;

	debug("Previous packet count: %d", (int)num_buffered_packets(stream));

	circlebuf_reserve(&new_buf, sizeof(struct encoder_packet) * 8);

	while (stream->packets.size) {
		struct encoder_packet packet;
		circlebuf_pop_front(&stream->packets, &packet, sizeof(packet));

		last_drop_dts_usec = packet.dts_usec;

		/* do not drop audio data or video keyframes */
		if (packet.type          == OBS_ENCODER_AUDIO ||
		    packet.drop_priority == OBS_NAL_PRIORITY_HIGHEST) {
			circlebuf_push_back(&new_buf, &packet, sizeof(packet));

		} else {
			if (drop_priority < packet.drop_priority)
				drop_priority = packet.drop_priority;

			num_frames_dropped++;
			obs_free_encoder_packet(&packet);
		}
	}

	circlebuf_free(&stream->packets);
	stream->packets           = new_buf;
	stream->min_priority      = drop_priority;
	stream->min_drop_dts_usec = last_drop_dts_usec;

	stream->dropped_frames += num_frames_dropped;
	debug("New packet count: %d", (int)num_buffered_packets(stream));
}
Code example #18
File: obs-encoder.c Project: Bluelich/obs-studio-
static void send_audio_data(struct obs_encoder *encoder)
{
	struct encoder_frame  enc_frame;

	memset(&enc_frame, 0, sizeof(struct encoder_frame));

	for (size_t i = 0; i < encoder->planes; i++) {
		circlebuf_pop_front(&encoder->audio_input_buffer[i],
				encoder->audio_output_buffer[i],
				encoder->framesize_bytes);

		enc_frame.data[i]     = encoder->audio_output_buffer[i];
		enc_frame.linesize[i] = (uint32_t)encoder->framesize_bytes;
	}

	enc_frame.frames = (uint32_t)encoder->framesize;
	enc_frame.pts    = encoder->cur_pts;

	do_encode(encoder, &enc_frame);

	encoder->cur_pts += encoder->framesize;
}
Code example #19
File: rtmp-stream.c Project: Reelapse/obs-studio
static void drop_frames(struct rtmp_stream *stream)
{
	struct circlebuf new_buf            = {0};
	int              drop_priority      = 0;
	uint64_t         last_drop_dts_usec = 0;

	blog(LOG_DEBUG, "Previous packet count: %d",
			(int)num_buffered_packets(stream));

	circlebuf_reserve(&new_buf, sizeof(struct encoder_packet) * 8);

	while (stream->packets.size) {
		struct encoder_packet packet;
		circlebuf_pop_front(&stream->packets, &packet, sizeof(packet));

		last_drop_dts_usec = packet.dts_usec;

		if (packet.type == OBS_ENCODER_AUDIO) {
			circlebuf_push_back(&new_buf, &packet, sizeof(packet));

		} else {
			if (drop_priority < packet.drop_priority)
				drop_priority = packet.drop_priority;

			obs_free_encoder_packet(&packet);
		}
	}

	circlebuf_free(&stream->packets);
	stream->packets           = new_buf;
	stream->min_priority      = drop_priority;
	stream->min_drop_dts_usec = last_drop_dts_usec;

	blog(LOG_DEBUG, "New packet count: %d",
			(int)num_buffered_packets(stream));
}
Code example #20
File: decode.c Project: benklett/obs-studio
bool mp_decode_next(struct mp_decode *d)
{
	bool eof = d->m->eof;
	int got_frame;
	int ret;

	d->frame_ready = false;

	if (!eof && !d->packets.size)
		return true;

	while (!d->frame_ready) {
		if (!d->packet_pending) {
			if (!d->packets.size) {
				if (eof) {
					d->pkt.data = NULL;
					d->pkt.size = 0;
				} else {
					return true;
				}
			} else {
				circlebuf_pop_front(&d->packets, &d->orig_pkt,
						sizeof(d->orig_pkt));
				d->pkt = d->orig_pkt;
				d->packet_pending = true;
			}
		}

		ret = decode_packet(d, &got_frame);

		if (!got_frame && ret == 0) {
			d->eof = true;
			return true;
		}
		if (ret < 0) {
#ifdef DETAILED_DEBUG_INFO
			blog(LOG_DEBUG, "MP: decode failed: %s",
					av_err2str(ret));
#endif

			if (d->packet_pending) {
				av_packet_unref(&d->orig_pkt);
				av_init_packet(&d->orig_pkt);
				av_init_packet(&d->pkt);
				d->packet_pending = false;
			}
			return true;
		}

		d->frame_ready = !!got_frame;

		if (d->packet_pending) {
			if (d->pkt.size) {
				d->pkt.data += ret;
				d->pkt.size -= ret;
			}

			if (d->pkt.size <= 0) {
				av_packet_unref(&d->orig_pkt);
				av_init_packet(&d->orig_pkt);
				av_init_packet(&d->pkt);
				d->packet_pending = false;
			}
		}
	}

	if (d->frame_ready) {
		int64_t last_pts = d->frame_pts;

		if (d->frame->best_effort_timestamp == AV_NOPTS_VALUE)
			d->frame_pts = d->next_pts;
		else
			d->frame_pts = av_rescale_q(
					d->frame->best_effort_timestamp,
					d->stream->time_base,
					(AVRational){1, 1000000000});

		int64_t duration = d->frame->pkt_duration;
		if (!duration)
			duration = get_estimated_duration(d, last_pts);
		else
			duration = av_rescale_q(duration,
					d->stream->time_base,
					(AVRational){1, 1000000000});

		d->last_duration = duration;
		d->next_pts = d->frame_pts + duration;
	}

	return true;
}
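The av_rescale_q() calls above convert stream ticks into nanoseconds via the (AVRational){1, 1000000000} target base. As an illustrative case with an assumed time_base of 1/90000 (common for MPEG transport streams): a best_effort_timestamp of 90000 ticks rescales to 90000 * (1/90000) s = 1 s = 1,000,000,000 ns.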
Code example #21
File: decode.c Project: mrasmus/obs-studio
bool mp_decode_next(struct mp_decode *d)
{
	bool eof = d->m->eof;
	int got_frame;
	int ret;

	d->frame_ready = false;

	if (!eof && !d->packets.size)
		return true;

	while (!d->frame_ready) {
		if (!d->packet_pending) {
			if (!d->packets.size) {
				if (eof) {
					d->pkt.data = NULL;
					d->pkt.size = 0;
				} else {
					return true;
				}
			} else {
				circlebuf_pop_front(&d->packets, &d->orig_pkt,
						sizeof(d->orig_pkt));
				d->pkt = d->orig_pkt;
				d->packet_pending = true;
			}
		}

		if (d->audio) {
			ret = avcodec_decode_audio4(d->decoder,
					d->frame, &got_frame, &d->pkt);
		} else {
			if (d->m->is_network && !d->got_first_keyframe) {
				if (d->pkt.flags & AV_PKT_FLAG_KEY) {
					d->got_first_keyframe = true;
				} else {
					av_packet_unref(&d->orig_pkt);
					av_init_packet(&d->orig_pkt);
					av_init_packet(&d->pkt);
					d->packet_pending = false;
					return true;
				}
			}

			ret = avcodec_decode_video2(d->decoder,
					d->frame, &got_frame, &d->pkt);
		}
		if (!got_frame && ret == 0) {
			d->eof = true;
			return true;
		}
		if (ret < 0) {
			blog(LOG_DEBUG, "MP: decode failed: %s",
					av_err2str(ret));

			if (d->packet_pending) {
				av_packet_unref(&d->orig_pkt);
				av_init_packet(&d->orig_pkt);
				av_init_packet(&d->pkt);
				d->packet_pending = false;
			}
			return true;
		}

		d->frame_ready = !!got_frame;

		if (d->packet_pending) {
			if (d->pkt.size) {
				d->pkt.data += ret;
				d->pkt.size -= ret;
			}

			if (d->pkt.size == 0) {
				av_packet_unref(&d->orig_pkt);
				av_init_packet(&d->orig_pkt);
				av_init_packet(&d->pkt);
				d->packet_pending = false;
			}
		}
	}

	if (d->frame_ready) {
		int64_t last_pts = d->frame_pts;

		d->frame_pts = av_rescale_q(d->frame->best_effort_timestamp,
				d->stream->time_base,
				(AVRational){1, 1000000000});

		int64_t duration = d->frame->pkt_duration;
		if (!duration)
			duration = get_estimated_duration(d, last_pts);
		else
			duration = av_rescale_q(duration,
					d->stream->time_base,
					(AVRational){1, 1000000000});
		d->last_duration = duration;
		d->next_pts = d->frame_pts + duration;
	}

	return true;
}
Code example #22
File: obs-audio.c Project: ab22/obs-studio
bool audio_callback(void *param,
		uint64_t start_ts_in, uint64_t end_ts_in, uint64_t *out_ts,
		uint32_t mixers, struct audio_output_data *mixes)
{
	struct obs_core_data *data = &obs->data;
	struct obs_core_audio *audio = &obs->audio;
	struct obs_source *source;
	size_t sample_rate = audio_output_get_sample_rate(audio->audio);
	size_t channels = audio_output_get_channels(audio->audio);
	struct ts_info ts = {start_ts_in, end_ts_in};
	size_t audio_size;
	uint64_t min_ts;

	da_resize(audio->render_order, 0);
	da_resize(audio->root_nodes, 0);

	circlebuf_push_back(&audio->buffered_timestamps, &ts, sizeof(ts));
	circlebuf_peek_front(&audio->buffered_timestamps, &ts, sizeof(ts));
	min_ts = ts.start;

	audio_size = AUDIO_OUTPUT_FRAMES * sizeof(float);

#if DEBUG_AUDIO == 1
	blog(LOG_DEBUG, "ts %llu-%llu", ts.start, ts.end);
#endif

	/* ------------------------------------------------ */
	/* build audio render order
	 * NOTE: these are source channels, not audio channels */
	for (uint32_t i = 0; i < MAX_CHANNELS; i++) {
		obs_source_t *source = obs_get_output_source(i);
		if (source) {
			obs_source_enum_active_tree(source, push_audio_tree,
					audio);
			push_audio_tree(NULL, source, audio);
			da_push_back(audio->root_nodes, &source);
			obs_source_release(source);
		}
	}

	pthread_mutex_lock(&data->audio_sources_mutex);

	source = data->first_audio_source;
	while (source) {
		push_audio_tree(NULL, source, audio);
		source = (struct obs_source*)source->next_audio_source;
	}

	pthread_mutex_unlock(&data->audio_sources_mutex);

	/* ------------------------------------------------ */
	/* render audio data */
	for (size_t i = 0; i < audio->render_order.num; i++) {
		obs_source_t *source = audio->render_order.array[i];
		obs_source_audio_render(source, mixers, channels, sample_rate,
				audio_size);
	}

	/* ------------------------------------------------ */
	/* get minimum audio timestamp */
	pthread_mutex_lock(&data->audio_sources_mutex);
	calc_min_ts(data, sample_rate, &min_ts);
	pthread_mutex_unlock(&data->audio_sources_mutex);

	/* ------------------------------------------------ */
	/* if a source has gone backward in time, buffer */
	if (min_ts < ts.start)
		add_audio_buffering(audio, sample_rate, &ts, min_ts);

	/* ------------------------------------------------ */
	/* mix audio */
	if (!audio->buffering_wait_ticks) {
		for (size_t i = 0; i < audio->root_nodes.num; i++) {
			obs_source_t *source = audio->root_nodes.array[i];

			if (source->audio_pending)
				continue;

			pthread_mutex_lock(&source->audio_buf_mutex);

			if (source->audio_output_buf[0][0] && source->audio_ts)
				mix_audio(mixes, source, channels, sample_rate,
						&ts);

			pthread_mutex_unlock(&source->audio_buf_mutex);
		}
	}

	/* ------------------------------------------------ */
	/* discard audio */
	pthread_mutex_lock(&data->audio_sources_mutex);

	source = data->first_audio_source;
	while (source) {
		pthread_mutex_lock(&source->audio_buf_mutex);
		discard_audio(audio, source, channels, sample_rate, &ts);
		pthread_mutex_unlock(&source->audio_buf_mutex);

		source = (struct obs_source*)source->next_audio_source;
	}

	pthread_mutex_unlock(&data->audio_sources_mutex);

	/* ------------------------------------------------ */
	/* release audio sources */
	release_audio_sources(audio);

	circlebuf_pop_front(&audio->buffered_timestamps, NULL, sizeof(ts));

	*out_ts = ts.start;

	if (audio->buffering_wait_ticks) {
		audio->buffering_wait_ticks--;
		return false;
	}

	UNUSED_PARAMETER(param);
	return true;
}
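A detail of audio_callback() that is easy to miss: the circlebuf_peek_front() right after the push returns the oldest queued {start, end} window, not the window that was just pushed, and the circlebuf_pop_front() at the end retires exactly one window per call. *out_ts therefore reports the start of the oldest buffered window, which only equals start_ts_in when no earlier windows remain queued.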
Code example #23
File: wasapi-output.c Project: LiminWang/obs-studio
static bool process_audio_delay(struct audio_monitor *monitor,
		float **data, uint32_t *frames, uint64_t ts, uint32_t pad)
{
	obs_source_t *s = monitor->source;
	uint64_t last_frame_ts = s->last_frame_ts;
	uint64_t cur_time = os_gettime_ns();
	uint64_t front_ts;
	uint64_t cur_ts;
	int64_t diff;
	uint32_t blocksize = monitor->channels * sizeof(float);

	/* cut off audio if long-since leftover audio in delay buffer */
	if (cur_time - monitor->last_recv_time > 1000000000)
		circlebuf_free(&monitor->delay_buffer);
	monitor->last_recv_time = cur_time;

	ts += monitor->source->sync_offset;

	circlebuf_push_back(&monitor->delay_buffer, &ts, sizeof(ts));
	circlebuf_push_back(&monitor->delay_buffer, frames, sizeof(*frames));
	circlebuf_push_back(&monitor->delay_buffer, *data,
			*frames * blocksize);

	if (!monitor->prev_video_ts) {
		monitor->prev_video_ts = last_frame_ts;

	} else if (monitor->prev_video_ts == last_frame_ts) {
		monitor->time_since_prev += (uint64_t)*frames *
			1000000000ULL / (uint64_t)monitor->sample_rate;
	} else {
		monitor->time_since_prev = 0;
	}

	while (monitor->delay_buffer.size != 0) {
		size_t size;
		bool bad_diff;

		circlebuf_peek_front(&monitor->delay_buffer, &cur_ts,
				sizeof(ts));
		front_ts = cur_ts -
			((uint64_t)pad * 1000000000ULL /
			 (uint64_t)monitor->sample_rate);
		diff = (int64_t)front_ts - (int64_t)last_frame_ts;
		bad_diff = !last_frame_ts ||
		           llabs(diff) > 5000000000 ||
		           monitor->time_since_prev > 100000000ULL;

		/* delay audio if rushing */
		if (!bad_diff && diff > 75000000) {
#ifdef DEBUG_AUDIO
			blog(LOG_INFO, "audio rushing, cutting audio, "
					"diff: %lld, delay buffer size: %lu, "
					"v: %llu: a: %llu",
					diff, (int)monitor->delay_buffer.size,
					last_frame_ts, front_ts);
#endif
			return false;
		}

		circlebuf_pop_front(&monitor->delay_buffer, NULL, sizeof(ts));
		circlebuf_pop_front(&monitor->delay_buffer, frames,
				sizeof(*frames));

		size = *frames * blocksize;
		da_resize(monitor->buf, size);
		circlebuf_pop_front(&monitor->delay_buffer,
				monitor->buf.array, size);

		/* cut audio if dragging */
		if (!bad_diff && diff < -75000000 && monitor->delay_buffer.size > 0) {
#ifdef DEBUG_AUDIO
			blog(LOG_INFO, "audio dragging, cutting audio, "
					"diff: %lld, delay buffer size: %lu, "
					"v: %llu: a: %llu",
					diff, (int)monitor->delay_buffer.size,
					last_frame_ts, front_ts);
#endif
			continue;
		}

		*data = monitor->buf.array;
		return true;
	}

	return false;
}
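Each entry in monitor->delay_buffer above is a variable-length record, which is why the pops in process_audio_delay() always happen in the same order. The layout implied by the push/pop sequence, written out only as a comment (the source never defines such a struct):

/* one delay_buffer record, as implied by the pushes above:
 *   uint64_t ts;                       timestamp adjusted by sync_offset
 *   uint32_t frames;                   frame count for this chunk
 *   float    data[frames * channels];  interleaved samples, frames * blocksize bytes
 */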
Code example #24
File: obs-audio.c Project: ab22/obs-studio
static inline void discard_audio(struct obs_core_audio *audio,
		obs_source_t *source, size_t channels, size_t sample_rate,
		struct ts_info *ts)
{
	size_t total_floats = AUDIO_OUTPUT_FRAMES;
	size_t size;

#if DEBUG_AUDIO == 1
	bool is_audio_source = source->info.output_flags & OBS_SOURCE_AUDIO;
#endif

	if (source->info.audio_render) {
		source->audio_ts = 0;
		return;
	}

	if (ts->end <= source->audio_ts) {
#if DEBUG_AUDIO == 1
		blog(LOG_DEBUG, "can't discard, source "
				"timestamp (%"PRIu64") >= "
				"end timestamp (%"PRIu64")",
				source->audio_ts, ts->end);
#endif
		return;
	}

	if (source->audio_ts < (ts->start - 1)) {
		if (discard_if_stopped(source, channels))
			return;
#if DEBUG_AUDIO == 1
		if (is_audio_source) {
			blog(LOG_DEBUG, "can't discard, source "
					"timestamp (%"PRIu64") < "
					"start timestamp (%"PRIu64")",
					source->audio_ts, ts->start);
		}
#endif
		if (audio->total_buffering_ticks == MAX_BUFFERING_TICKS)
			ignore_audio(source, channels, sample_rate);
		return;
	}

	if (source->audio_ts != ts->start &&
	    source->audio_ts != (ts->start - 1)) {
		size_t start_point = convert_time_to_frames(sample_rate,
				source->audio_ts - ts->start);
		if (start_point == AUDIO_OUTPUT_FRAMES) {
#if DEBUG_AUDIO == 1
			if (is_audio_source)
				blog(LOG_DEBUG, "can't dicard, start point is "
						"at audio frame count");
#endif
			return;
		}

		total_floats -= start_point;
	}

	size = total_floats * sizeof(float);

	if (source->audio_input_buf[0].size < size) {
		if (discard_if_stopped(source, channels))
			return;

#if DEBUG_AUDIO == 1
		if (is_audio_source)
			blog(LOG_DEBUG, "can't discard, data still pending");
#endif
		return;
	}

	for (size_t ch = 0; ch < channels; ch++)
		circlebuf_pop_front(&source->audio_input_buf[ch], NULL, size);

	source->last_audio_input_buf_size = 0;

#if DEBUG_AUDIO == 1
	if (is_audio_source)
		blog(LOG_DEBUG, "audio discarded, new ts: %"PRIu64,
				ts->end);
#endif

	source->audio_ts = ts->end;
}