Example #1
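/* Set an effect parameter's raw value: resize the value buffer when the
 * size changes, and only copy the data and mark the parameter as changed
 * when the contents actually differ. */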
static inline void effect_setval_inline(eparam_t param,
		const void *data, size_t size)
{
	bool size_changed;

	if (!param) {
		blog(LOG_ERROR, "effect_setval_inline: invalid param");
		return;
	}

	if (!data) {
		blog(LOG_ERROR, "effect_setval_inline: invalid data");
		return;
	}

	size_changed = param->cur_val.num != size;

	if (size_changed)
		da_resize(param->cur_val, size);

	if (size_changed || memcmp(param->cur_val.array, data, size) != 0) {
		memcpy(param->cur_val.array, data, size);
		param->changed = true;
	}
}
Example #2
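/* Mix every pending audio line into the mix buffers for the interval
 * starting at prev_time, send the result to the output, and return the
 * adjusted audio_time for the next iteration. */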
static uint64_t mix_and_output(struct audio_output *audio, uint64_t audio_time,
		uint64_t prev_time)
{
	struct audio_line *line = audio->first_line;
	uint32_t frames = (uint32_t)ts_diff_frames(audio, audio_time,
	                                           prev_time);
	size_t bytes = frames * audio->block_size;

#ifdef DEBUG_AUDIO
	blog(LOG_DEBUG, "audio_time: %llu, prev_time: %llu, bytes: %lu",
			audio_time, prev_time, bytes);
#endif

	/* return an adjusted audio_time according to the amount
	 * of data that was sampled to ensure seamless transmission */
	audio_time = prev_time + conv_frames_to_time(audio, frames);

	/* resize and clear mix buffers */
	for (size_t i = 0; i < audio->planes; i++) {
		da_resize(audio->mix_buffers[i], bytes);
		memset(audio->mix_buffers[i].array, 0, bytes);
	}

	/* mix audio lines */
	while (line) {
		struct audio_line *next = line->next;

		/* if the line is marked for removal and has no buffered data,
		 * destroy it and move to the next */
		if (!line->buffers[0].size) {
			if (!line->alive) {
				audio_output_removeline(audio, line);
				line = next;
				continue;
			}
		}

		pthread_mutex_lock(&line->mutex);

		if (line->buffers[0].size && line->base_timestamp < prev_time) {
			clear_excess_audio_data(line, prev_time);
			line->base_timestamp = prev_time;
		}

		if (mix_audio_line(audio, line, bytes, prev_time))
			line->base_timestamp = audio_time;

		pthread_mutex_unlock(&line->mutex);

		line = next;
	}

	/* output */
	do_audio_output(audio, prev_time, frames);

	return audio_time;
}
Example #3
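/* Compile all parsed parameters and techniques into the effect; only
 * technique compilation failures affect the return value. */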
static bool ep_compile(struct effect_parser *ep)
{
	bool success = true;
	size_t i;

	assert(ep->effect);

	da_resize(ep->effect->params, ep->params.num);
	da_resize(ep->effect->techniques, ep->techniques.num);

	for (i = 0; i < ep->params.num; i++)
		ep_compile_param(ep, i);
	for (i = 0; i < ep->techniques.num; i++) {
		if (!ep_compile_technique(ep, i))
			success = false;
	}

	return success;
}
Example #4
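/* Convert an Intel Media SDK bitstream into an OBS encoder packet,
 * rescaling the 90 kHz timestamps into the encoder timebase and deriving
 * the DTS manually when the SDK does not provide one. */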
static void parse_packet(struct obs_qsv *obsqsv, struct encoder_packet *packet,
		mfxBitstream *pBS, uint32_t fps_num, bool *received_packet)
{
	if (pBS == NULL || pBS->DataLength == 0) {
		*received_packet = false;
		return;
	}

	da_resize(obsqsv->packet_data, 0);
	da_push_back_array(obsqsv->packet_data, &pBS->Data[pBS->DataOffset],
			pBS->DataLength);

	packet->data = obsqsv->packet_data.array;
	packet->size = obsqsv->packet_data.num;
	packet->type = OBS_ENCODER_VIDEO;
	packet->pts = pBS->TimeStamp * fps_num / 90000;
	packet->keyframe = (pBS->FrameType &
			(MFX_FRAMETYPE_I | MFX_FRAMETYPE_REF));

	//bool iFrame = pBS->FrameType & MFX_FRAMETYPE_I;
	//bool bFrame = pBS->FrameType & MFX_FRAMETYPE_B;
	bool pFrame = pBS->FrameType & MFX_FRAMETYPE_P;
	//int iType = iFrame ? 0 : (bFrame ? 1 : (pFrame ? 2 : -1));
	//int64_t interval = obsqsv->params.nbFrames + 1;

	// In case MSDK doesn't support automatic DecodeTimeStamp, do manual
	// calculation
	if (g_pts2dtsShift >= 0) {
		if (g_bFirst) {
			packet->dts = packet->pts - 3 * obsqsv->params.nFpsDen;
		} else if (pFrame) {
			packet->dts = packet->pts - 10 * obsqsv->params.nFpsDen;
			g_prevDts = packet->dts;
		} else {
			packet->dts = g_prevDts + obsqsv->params.nFpsDen;
			g_prevDts = packet->dts;
		}
	} else {
		packet->dts = pBS->DecodeTimeStamp * fps_num / 90000;
	}

#if 0
	info("parse packet:\n"
		"\tFrameType: %d\n"
		"\tpts:       %d\n"
		"\tdts:       %d",
		iType, packet->pts, packet->dts);
#endif

	*received_packet = true;
	pBS->DataLength = 0;

	g_bFirst = false;
}
Example #5
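/* Encode one frame of buffered audio with libavcodec's AAC encoder and,
 * if a packet was produced, copy it into the encoder's reusable packet
 * buffer and fill out the OBS encoder packet. */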
static bool do_aac_encode(struct aac_encoder *enc,
		struct encoder_packet *packet, bool *received_packet)
{
	AVRational time_base = {1, enc->context->sample_rate};
	AVPacket   avpacket  = {0};
	int        got_packet;
	int        ret;

	enc->aframe->nb_samples = enc->frame_size;
	enc->aframe->pts = av_rescale_q(enc->total_samples,
			(AVRational){1, enc->context->sample_rate},
			enc->context->time_base);

	ret = avcodec_fill_audio_frame(enc->aframe, enc->context->channels,
			enc->context->sample_fmt, enc->samples[0],
			enc->frame_size_bytes * enc->context->channels, 1);
	if (ret < 0) {
		aac_warn("do_aac_encode", "avcodec_fill_audio_frame failed: %s",
				av_err2str(ret));
		return false;
	}

	enc->total_samples += enc->frame_size;

	ret = avcodec_encode_audio2(enc->context, &avpacket, enc->aframe,
			&got_packet);
	if (ret < 0) {
		aac_warn("do_aac_encode", "avcodec_encode_audio2 failed: %s",
				av_err2str(ret));
		return false;
	}

	*received_packet = !!got_packet;
	if (!got_packet)
		return true;

	da_resize(enc->packet_buffer, 0);
	da_push_back_array(enc->packet_buffer, avpacket.data, avpacket.size);

	packet->pts  = rescale_ts(avpacket.pts, enc->context, time_base);
	packet->dts  = rescale_ts(avpacket.dts, enc->context, time_base);
	packet->data = enc->packet_buffer.array;
	packet->size = avpacket.size;
	packet->type = OBS_ENCODER_AUDIO;
	packet->timebase_num = 1;
	packet->timebase_den = (int32_t)enc->context->sample_rate;
	av_free_packet(&avpacket);
	return true;
}
Example #6
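/* Deserialize the cached font list; on any read failure the partially
 * loaded list is freed and false is returned. */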
static bool load_cached_font_list(struct serializer *s)
{
	bool success = true;
	int count;

	success = read_var(s, count);
	if (!success) return false;

	da_init(font_list);
	da_resize(font_list, count);

#define do_read(var) \
	success = read_var(s, var); \
	if (!success) break

	for (int i = 0; i < count; i++) {
		struct font_path_info *info = &font_list.array[i];

		success = read_str(s, &info->face_and_style);
		if (!success) break;

		do_read(info->full_len);
		do_read(info->face_len);
		do_read(info->is_bitmap);
		do_read(info->num_sizes);

		info->sizes = bmalloc(sizeof(int) * info->num_sizes);
		success = read_data(s, info->sizes,
				sizeof(int) * info->num_sizes);
		if (!success) break;

		do_read(info->bold);

		success = read_str(s, &info->path);
		if (!success) break;

		do_read(info->italic);
		do_read(info->index);
	}

#undef do_read

	if (!success) {
		free_os_font_list();
		return false;
	}

	return true;
}
Example #7
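/* Convert an Intel Media SDK bitstream into an OBS encoder packet, and
 * additionally rewrite the nal_ref_idc priority bits of each slice NAL
 * according to the MSDK frame type. */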
static void parse_packet(struct obs_qsv *obsqsv, struct encoder_packet *packet,
		mfxBitstream *pBS, uint32_t fps_num, bool *received_packet)
{
	uint8_t *start, *end;
	int type;

	if (pBS == NULL || pBS->DataLength == 0) {
		*received_packet = false;
		return;
	}

	da_resize(obsqsv->packet_data, 0);
	da_push_back_array(obsqsv->packet_data, &pBS->Data[pBS->DataOffset],
			pBS->DataLength);

	packet->data = obsqsv->packet_data.array;
	packet->size = obsqsv->packet_data.num;
	packet->type = OBS_ENCODER_VIDEO;
	packet->pts = pBS->TimeStamp * fps_num / 90000;
	packet->keyframe = (pBS->FrameType &
			(MFX_FRAMETYPE_I | MFX_FRAMETYPE_REF));

	/* ------------------------------------ */

	start = obsqsv->packet_data.array;
	end = start + obsqsv->packet_data.num;

	start = (uint8_t*)obs_avc_find_startcode(start, end);
	while (true) {
		while (start < end && !*(start++));

		if (start == end)
			break;

		type = start[0] & 0x1F;
		if (type == OBS_NAL_SLICE_IDR || type == OBS_NAL_SLICE) {
			uint8_t prev_type = (start[0] >> 5) & 0x3;
			start[0] &= ~(3 << 5);

			if (pBS->FrameType & MFX_FRAMETYPE_I)
				start[0] |= OBS_NAL_PRIORITY_HIGHEST << 5;
			else if (pBS->FrameType & MFX_FRAMETYPE_P)
				start[0] |= OBS_NAL_PRIORITY_HIGH << 5;
			else
				start[0] |= prev_type << 5;
		}

		start = (uint8_t*)obs_avc_find_startcode(start, end);
	}

	*received_packet = true;
	pBS->DataLength = 0;
}
Example #8
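/* Perform a blocking HTTP request into info->file_data and null-terminate
 * the buffer; returns false on transport errors or an HTTP status of 400
 * or above. */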
static bool do_http_request(struct update_info *info, const char *url,
	long *response_code)
{
	CURLcode code;
	uint8_t null_terminator = 0;

	da_resize(info->file_data, 0);
	curl_easy_setopt(info->curl, CURLOPT_URL, url);
	curl_easy_setopt(info->curl, CURLOPT_HTTPHEADER, info->header);
	curl_easy_setopt(info->curl, CURLOPT_ERRORBUFFER, info->error);
	curl_easy_setopt(info->curl, CURLOPT_WRITEFUNCTION, http_write);
	curl_easy_setopt(info->curl, CURLOPT_WRITEDATA, info);
	curl_easy_setopt(info->curl, CURLOPT_FAILONERROR, true);

	if (!info->remote_url) {
		// We only care about headers from the main package file
		curl_easy_setopt(info->curl, CURLOPT_HEADERFUNCTION, http_header);
		curl_easy_setopt(info->curl, CURLOPT_HEADERDATA, info);
	}

	// A lot of servers don't yet support ALPN
	curl_easy_setopt(info->curl, CURLOPT_SSL_ENABLE_ALPN, 0);

	code = curl_easy_perform(info->curl);
	if (code != CURLE_OK) {
		warn("Remote update of URL \"%s\" failed: %s", url,
				info->error);
		return false;
	}

	if (curl_easy_getinfo(info->curl, CURLINFO_RESPONSE_CODE,
		response_code) != CURLE_OK)
		return false;

	if (*response_code >= 400) {
		warn("Remote update of URL \"%s\" failed: HTTP/%ld", url,
			response_code);
		return false;
	}

	da_push_back(info->file_data, &null_terminator);

	return true;
}
Example #9
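/* Force a scene item's visibility: drop any queued audio actions and add
 * or remove the item's source as an active child of its parent to match
 * the new state. */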
static void set_visibility(struct obs_scene_item *item, bool vis)
{
	pthread_mutex_lock(&item->actions_mutex);

	da_resize(item->audio_actions, 0);

	if (os_atomic_load_long(&item->active_refs) > 0) {
		if (!vis)
			obs_source_remove_active_child(item->parent->source,
					item->source);
	} else if (vis) {
		obs_source_add_active_child(item->parent->source, item->source);
	}

	os_atomic_set_long(&item->active_refs, vis ? 1 : 0);
	item->visible = vis;
	item->user_visible = vis;

	pthread_mutex_unlock(&item->actions_mutex);
}
Example #10
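/* Concatenate the encoded x264 NALs into a single reusable buffer and
 * fill out the OBS encoder packet from pic_out. */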
static void parse_packet(struct obs_x264 *obsx264,
		struct encoder_packet *packet, x264_nal_t *nals,
		int nal_count, x264_picture_t *pic_out)
{
	if (!nal_count) return;

	da_resize(obsx264->packet_data, 0);

	for (int i = 0; i < nal_count; i++) {
		x264_nal_t *nal = nals+i;
		da_push_back_array(obsx264->packet_data, nal->p_payload,
				nal->i_payload);
	}

	packet->data          = obsx264->packet_data.array;
	packet->size          = obsx264->packet_data.num;
	packet->type          = OBS_ENCODER_VIDEO;
	packet->pts           = pic_out->i_pts;
	packet->dts           = pic_out->i_dts;
	packet->keyframe      = pic_out->b_keyframe != 0;
}
Example #11
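/* Compile one parsed technique into the effect, copying its name and
 * compiling each of its passes; any pass failure marks the technique as
 * failed. */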
static inline bool ep_compile_technique(struct effect_parser *ep, size_t idx)
{
	struct gs_effect_technique *tech;
	struct ep_technique *tech_in;
	bool success = true;
	size_t i;

	tech = ep->effect->techniques.array+idx;
	tech_in = ep->techniques.array+idx;

	tech->name = bstrdup(tech_in->name);
	tech->section = EFFECT_TECHNIQUE;
	tech->effect = ep->effect;

	da_resize(tech->passes, tech_in->passes.num);

	for (i = 0; i < tech->passes.num; i++) {
		if (!ep_compile_pass(ep, tech, tech_in, i))
			success = false;
	}

	return success;
}
Example #12
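/* Buffer monitored audio and release it only once its timestamp lines up
 * with the last video frame, delaying data that runs ahead of the video
 * and dropping data that lags behind it. */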
static bool process_audio_delay(struct audio_monitor *monitor,
		float **data, uint32_t *frames, uint64_t ts, uint32_t pad)
{
	obs_source_t *s = monitor->source;
	uint64_t last_frame_ts = s->last_frame_ts;
	uint64_t cur_time = os_gettime_ns();
	uint64_t front_ts;
	uint64_t cur_ts;
	int64_t diff;
	uint32_t blocksize = monitor->channels * sizeof(float);

	/* discard leftover delayed audio if no audio has arrived for over a
	 * second */
	if (cur_time - monitor->last_recv_time > 1000000000)
		circlebuf_free(&monitor->delay_buffer);
	monitor->last_recv_time = cur_time;

	ts += monitor->source->sync_offset;

	circlebuf_push_back(&monitor->delay_buffer, &ts, sizeof(ts));
	circlebuf_push_back(&monitor->delay_buffer, frames, sizeof(*frames));
	circlebuf_push_back(&monitor->delay_buffer, *data,
			*frames * blocksize);

	if (!monitor->prev_video_ts) {
		monitor->prev_video_ts = last_frame_ts;

	} else if (monitor->prev_video_ts == last_frame_ts) {
		monitor->time_since_prev += (uint64_t)*frames *
			1000000000ULL / (uint64_t)monitor->sample_rate;
	} else {
		monitor->time_since_prev = 0;
	}

	while (monitor->delay_buffer.size != 0) {
		size_t size;
		bool bad_diff;

		circlebuf_peek_front(&monitor->delay_buffer, &cur_ts,
				sizeof(ts));
		front_ts = cur_ts -
			((uint64_t)pad * 1000000000ULL /
			 (uint64_t)monitor->sample_rate);
		diff = (int64_t)front_ts - (int64_t)last_frame_ts;
		bad_diff = !last_frame_ts ||
		           llabs(diff) > 5000000000 ||
		           monitor->time_since_prev > 100000000ULL;

		/* delay audio if rushing */
		if (!bad_diff && diff > 75000000) {
#ifdef DEBUG_AUDIO
			blog(LOG_INFO, "audio rushing, cutting audio, "
					"diff: %lld, delay buffer size: %lu, "
					"v: %llu: a: %llu",
					diff, (unsigned long)monitor->delay_buffer.size,
					last_frame_ts, front_ts);
#endif
			return false;
		}

		circlebuf_pop_front(&monitor->delay_buffer, NULL, sizeof(ts));
		circlebuf_pop_front(&monitor->delay_buffer, frames,
				sizeof(*frames));

		size = *frames * blocksize;
		da_resize(monitor->buf, size);
		circlebuf_pop_front(&monitor->delay_buffer,
				monitor->buf.array, size);

		/* cut audio if dragging */
		if (!bad_diff && diff < -75000000 && monitor->delay_buffer.size > 0) {
#ifdef DEBUG_AUDIO
			blog(LOG_INFO, "audio dragging, cutting audio, "
					"diff: %lld, delay buffer size: %lu, "
					"v: %llu: a: %llu",
					diff, (unsigned long)monitor->delay_buffer.size,
					last_frame_ts, front_ts);
#endif
			continue;
		}

		*data = monitor->buf.array;
		return true;
	}

	return false;
}
Example #13
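/* Like mix_and_output above, but mix each audio line into every mix's
 * buffers, clamp the result, and output all mixes for the interval
 * starting at prev_time. */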
static uint64_t mix_and_output(struct audio_output *audio, uint64_t audio_time,
		uint64_t prev_time)
{
	struct audio_line *line = audio->first_line;
	uint32_t frames = (uint32_t)ts_diff_frames(audio, audio_time,
	                                           prev_time);
	size_t bytes = frames * audio->block_size;

#ifdef DEBUG_AUDIO
	blog(LOG_DEBUG, "audio_time: %llu, prev_time: %llu, bytes: %lu",
			audio_time, prev_time, bytes);
#endif

	/* return an adjusted audio_time according to the amount
	 * of data that was sampled to ensure seamless transmission */
	audio_time = prev_time + conv_frames_to_time(audio, frames);

	/* resize and clear mix buffers */
	for (size_t mix_idx = 0; mix_idx < MAX_AUDIO_MIXES; mix_idx++) {
		struct audio_mix *mix = &audio->mixes[mix_idx];

		for (size_t i = 0; i < audio->planes; i++) {
			da_resize(mix->mix_buffers[i], bytes);
			memset(mix->mix_buffers[i].array, 0, bytes);
		}
	}

	/* mix audio lines */
	while (line) {
		struct audio_line *next = line->next;

		/* if the line is marked for removal and has no buffered data,
		 * destroy it and move to the next */
		if (!line->buffers[0].size) {
			if (!line->alive) {
				audio_output_removeline(audio, line);
				line = next;
				continue;
			}
		}

		pthread_mutex_lock(&line->mutex);

		if (line->buffers[0].size && line->base_timestamp < prev_time) {
			clear_excess_audio_data(line, prev_time);
			line->base_timestamp = prev_time;

		} else if (line->audio_getting_cut_off) {
			line->audio_getting_cut_off = false;
			blog(LOG_WARNING, "Audio line '%s' audio data no "
			                  "longer getting cut off.",
			                  line->name);
		}

		if (mix_audio_line(audio, line, bytes, prev_time))
			line->base_timestamp = audio_time;

		pthread_mutex_unlock(&line->mutex);

		line = next;
	}

	/* clamp audio data to -1.0..1.0 */
	clamp_audio_output(audio, bytes);

	/* output */
	for (size_t i = 0; i < MAX_AUDIO_MIXES; i++)
		do_audio_output(audio, i, prev_time, frames);

	return audio_time;
}
Example #14
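/* Main audio tick: build the render order from the active output channels
 * and audio sources, render and mix each root node's audio for the tick's
 * time span, then discard consumed data and add buffering when a source's
 * timestamp has gone backward in time. */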
bool audio_callback(void *param,
		uint64_t start_ts_in, uint64_t end_ts_in, uint64_t *out_ts,
		uint32_t mixers, struct audio_output_data *mixes)
{
	struct obs_core_data *data = &obs->data;
	struct obs_core_audio *audio = &obs->audio;
	struct obs_source *source;
	size_t sample_rate = audio_output_get_sample_rate(audio->audio);
	size_t channels = audio_output_get_channels(audio->audio);
	struct ts_info ts = {start_ts_in, end_ts_in};
	size_t audio_size;
	uint64_t min_ts;

	da_resize(audio->render_order, 0);
	da_resize(audio->root_nodes, 0);

	circlebuf_push_back(&audio->buffered_timestamps, &ts, sizeof(ts));
	circlebuf_peek_front(&audio->buffered_timestamps, &ts, sizeof(ts));
	min_ts = ts.start;

	audio_size = AUDIO_OUTPUT_FRAMES * sizeof(float);

#if DEBUG_AUDIO == 1
	blog(LOG_DEBUG, "ts %llu-%llu", ts.start, ts.end);
#endif

	/* ------------------------------------------------ */
	/* build audio render order
	 * NOTE: these are source channels, not audio channels */
	for (uint32_t i = 0; i < MAX_CHANNELS; i++) {
		obs_source_t *source = obs_get_output_source(i);
		if (source) {
			obs_source_enum_active_tree(source, push_audio_tree,
					audio);
			push_audio_tree(NULL, source, audio);
			da_push_back(audio->root_nodes, &source);
			obs_source_release(source);
		}
	}

	pthread_mutex_lock(&data->audio_sources_mutex);

	source = data->first_audio_source;
	while (source) {
		push_audio_tree(NULL, source, audio);
		source = (struct obs_source*)source->next_audio_source;
	}

	pthread_mutex_unlock(&data->audio_sources_mutex);

	/* ------------------------------------------------ */
	/* render audio data */
	for (size_t i = 0; i < audio->render_order.num; i++) {
		obs_source_t *source = audio->render_order.array[i];
		obs_source_audio_render(source, mixers, channels, sample_rate,
				audio_size);
	}

	/* ------------------------------------------------ */
	/* get minimum audio timestamp */
	pthread_mutex_lock(&data->audio_sources_mutex);
	calc_min_ts(data, sample_rate, &min_ts);
	pthread_mutex_unlock(&data->audio_sources_mutex);

	/* ------------------------------------------------ */
	/* if a source has gone backward in time, buffer */
	if (min_ts < ts.start)
		add_audio_buffering(audio, sample_rate, &ts, min_ts);

	/* ------------------------------------------------ */
	/* mix audio */
	if (!audio->buffering_wait_ticks) {
		for (size_t i = 0; i < audio->root_nodes.num; i++) {
			obs_source_t *source = audio->root_nodes.array[i];

			if (source->audio_pending)
				continue;

			pthread_mutex_lock(&source->audio_buf_mutex);

			if (source->audio_output_buf[0][0] && source->audio_ts)
				mix_audio(mixes, source, channels, sample_rate,
						&ts);

			pthread_mutex_unlock(&source->audio_buf_mutex);
		}
	}

	/* ------------------------------------------------ */
	/* discard audio */
	pthread_mutex_lock(&data->audio_sources_mutex);

	source = data->first_audio_source;
	while (source) {
		pthread_mutex_lock(&source->audio_buf_mutex);
		discard_audio(audio, source, channels, sample_rate, &ts);
		pthread_mutex_unlock(&source->audio_buf_mutex);

		source = (struct obs_source*)source->next_audio_source;
	}

	pthread_mutex_unlock(&data->audio_sources_mutex);

	/* ------------------------------------------------ */
	/* release audio sources */
	release_audio_sources(audio);

	circlebuf_pop_front(&audio->buffered_timestamps, NULL, sizeof(ts));

	*out_ts = ts.start;

	if (audio->buffering_wait_ticks) {
		audio->buffering_wait_ticks--;
		return false;
	}

	UNUSED_PARAMETER(param);
	return true;
}