Code example #1
static void noise_gate_update(void *data, obs_data_t *s)
{
	struct noise_gate_data *ng = data;
	float open_threshold_db;
	float close_threshold_db;
	float sample_rate;
	int attack_time_ms;
	int hold_time_ms;
	int release_time_ms;

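	/* Read user-facing settings: thresholds in dB, timings in ms */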
	open_threshold_db = (float)obs_data_get_double(s, S_OPEN_THRESHOLD);
	close_threshold_db = (float)obs_data_get_double(s, S_CLOSE_THRESHOLD);
	attack_time_ms = (int)obs_data_get_int(s, S_ATTACK_TIME);
	hold_time_ms = (int)obs_data_get_int(s, S_HOLD_TIME);
	release_time_ms = (int)obs_data_get_int(s, S_RELEASE_TIME);
	sample_rate = (float)audio_output_get_sample_rate(obs_get_audio());

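	/* Convert to linear gain multipliers and per-sample rates */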
	ng->sample_rate_i = 1.0f / sample_rate;
	ng->channels = audio_output_get_channels(obs_get_audio());
	ng->open_threshold = db_to_mul(open_threshold_db);
	ng->close_threshold = db_to_mul(close_threshold_db);
	ng->attack_rate = 1.0f / (ms_to_secf(attack_time_ms) * sample_rate);
	ng->release_rate = 1.0f / (ms_to_secf(release_time_ms) * sample_rate);

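	/* Decay rate sized so crossing the full open/close threshold gap
	 * takes 1/75 of a second */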
	const float threshold_diff = ng->open_threshold - ng->close_threshold;
	const float min_decay_period = (1.0f / 75.0f) * sample_rate;

	ng->decay_rate = threshold_diff / min_decay_period;
	ng->hold_time = ms_to_secf(hold_time_ms);
	ng->is_open = false;
	ng->attenuation = 0.0f;
	ng->level = 0.0f;
	ng->held_time = 0.0f;
}
Code example #2
File: expander-filter.c  Project: Dead133/obs-studio
static void expander_update(void *data, obs_data_t *s)
{
	struct expander_data *cd = data;
	const char *presets = obs_data_get_string(s, S_PRESETS);
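	/* If the preset switched between expander and gate, clear the
	 * settings and reload that preset's defaults */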
	if (strcmp(presets, "expander") == 0 && cd->is_gate) {
		obs_data_clear(s);
		obs_data_set_string(s, S_PRESETS, "expander");
		expander_defaults(s);
		cd->is_gate = false;
	}
	if (strcmp(presets, "gate") == 0 && !cd->is_gate) {
		obs_data_clear(s);
		obs_data_set_string(s, S_PRESETS, "gate");
		expander_defaults(s);
		cd->is_gate = true;
	}

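	/* Sample rate and channel count come from the global audio output;
	 * timings and gains come from the filter settings */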
	const uint32_t sample_rate =
			audio_output_get_sample_rate(obs_get_audio());
	const size_t num_channels =
			audio_output_get_channels(obs_get_audio());
	const float attack_time_ms =
			(float)obs_data_get_int(s, S_ATTACK_TIME);
	const float release_time_ms =
			(float)obs_data_get_int(s, S_RELEASE_TIME);
	const float output_gain_db =
			(float)obs_data_get_double(s, S_OUTPUT_GAIN);

	cd->ratio = (float)obs_data_get_double(s, S_RATIO);

	cd->threshold = (float)obs_data_get_double(s, S_THRESHOLD);
	cd->attack_gain = gain_coefficient(sample_rate,
			attack_time_ms / MS_IN_S_F);
	cd->release_gain = gain_coefficient(sample_rate,
			release_time_ms / MS_IN_S_F);
	cd->output_gain = db_to_mul(output_gain_db);
	cd->num_channels = num_channels;
	cd->sample_rate = sample_rate;
	cd->slope = 1.0f - cd->ratio;

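	/* Select the level detector requested in the settings */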
	const char *detect_mode = obs_data_get_string(s, S_DETECTOR);
	if (strcmp(detect_mode, "RMS") == 0)
		cd->detector = RMS_DETECT;
	if (strcmp(detect_mode, "peak") == 0)
		cd->detector = PEAK_DETECT;
	if (strcmp(detect_mode, "none") == 0)
		cd->detector = NO_DETECT;

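	/* Allocate working buffers for DEFAULT_AUDIO_BUF_MS of audio if
	 * they have not been sized yet */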
	size_t sample_len = sample_rate * DEFAULT_AUDIO_BUF_MS / MS_IN_S;
	if (cd->envelope_buf_len == 0)
		resize_env_buffer(cd, sample_len);
	if (cd->runaverage_len == 0)
		resize_runaverage_buffer(cd, sample_len);
	if (cd->maxspl_len == 0)
		resize_maxspl_buffer(cd, sample_len);
	if (cd->env_in_len == 0)
		resize_env_in_buffer(cd, sample_len);
}
Code example #3
File: obs-output.c  Project: ahmed1911/obs-studio
obs_output_t *obs_output_create(const char *id, const char *name,
		obs_data_t *settings, obs_data_t *hotkey_data)
{
	const struct obs_output_info *info = find_output(id);
	struct obs_output *output;
	int ret;

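	/* Allocate the output object and initialize its mutexes */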
	output = bzalloc(sizeof(struct obs_output));
	pthread_mutex_init_value(&output->interleaved_mutex);
	pthread_mutex_init_value(&output->delay_mutex);

	if (pthread_mutex_init(&output->interleaved_mutex, NULL) != 0)
		goto fail;
	if (pthread_mutex_init(&output->delay_mutex, NULL) != 0)
		goto fail;
	if (!init_output_handlers(output, name, settings, hotkey_data))
		goto fail;

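	/* If the output type was not found, keep a copy of the ID so the
	 * output can still be created as a placeholder */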
	if (!info) {
		blog(LOG_ERROR, "Output ID '%s' not found", id);

		output->info.id      = bstrdup(id);
		output->owns_info_id = true;
	} else {
		output->info = *info;
	}
	output->video    = obs_get_video();
	output->audio    = obs_get_audio();
	if (output->info.get_defaults)
		output->info.get_defaults(output->context.settings);

	ret = os_event_init(&output->reconnect_stop_event,
			OS_EVENT_TYPE_MANUAL);
	if (ret < 0)
		goto fail;

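	/* Create the implementation-specific data via the registered
	 * create callback */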
	if (info)
		output->context.data = info->create(output->context.settings,
				output);
	if (!output->context.data)
		blog(LOG_ERROR, "Failed to create output '%s'!", name);

	output->reconnect_retry_sec = 2;
	output->reconnect_retry_max = 20;
	output->valid               = true;

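	/* Set up the weak reference control block and register the output
	 * in the global output list */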
	output->control = bzalloc(sizeof(obs_weak_output_t));
	output->control->output = output;

	obs_context_data_insert(&output->context,
			&obs->data.outputs_mutex,
			&obs->data.first_output);

	blog(LOG_INFO, "output '%s' (%s) created", name, id);
	return output;

fail:
	obs_output_destroy(output);
	return NULL;
}
Code example #4
static void add_audio_encoder_params(struct dstr *cmd, obs_encoder_t *aencoder)
{
	obs_data_t *settings = obs_encoder_get_settings(aencoder);
	int bitrate = (int)obs_data_get_int(settings, "bitrate");
	audio_t *audio = obs_get_audio();
	struct dstr name = {0};

	obs_data_release(settings);

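	/* Escape double quotes in the encoder name for the command line */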
	dstr_copy(&name, obs_encoder_get_name(aencoder));
	dstr_replace(&name, "\"", "\"\"");

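	/* Append the encoder name, bitrate, sample rate, and channel count
	 * to the command string */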
	dstr_catf(cmd, "\"%s\" %d %d %d ",
			name.array,
			bitrate,
			(int)obs_encoder_get_sample_rate(aencoder),
			(int)audio_output_get_channels(audio));

	dstr_free(&name);
}
Code example #5
mssapi_captions::mssapi_captions(
		captions_cb callback,
		const std::string &lang) try
	: captions_handler(callback, AUDIO_FORMAT_16BIT, 16000)
{
	HRESULT hr;

	std::wstring wlang;
	wlang.resize(lang.size());

	for (size_t i = 0; i < lang.size(); i++)
		wlang[i] = (wchar_t)lang[i];

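	// Map the locale name to an LCID and build the language attribute
	// string used to look up a matching recognizer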
	LCID lang_id = LocaleNameToLCID(wlang.c_str(), 0);

	wchar_t lang_str[32];
	_snwprintf(lang_str, 31, L"language=%x", (int)lang_id);

	stop = CreateEvent(nullptr, false, false, nullptr);
	if (!stop.Valid())
		throw "Failed to create event";

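	// Find the best SAPI recognizer token for the requested language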
	hr = SpFindBestToken(SPCAT_RECOGNIZERS, lang_str, nullptr, &token);
	if (FAILED(hr))
		throw HRError("SpFindBestToken failed", hr);

	hr = CoCreateInstance(CLSID_SpInprocRecognizer, nullptr, CLSCTX_ALL,
			__uuidof(ISpRecognizer), (void**)&recognizer);
	if (FAILED(hr))
		throw HRError("CoCreateInstance for recognizer failed", hr);

	hr = recognizer->SetRecognizer(token);
	if (FAILED(hr))
		throw HRError("SetRecognizer failed", hr);

	hr = recognizer->SetRecoState(SPRST_INACTIVE);
	if (FAILED(hr))
		throw HRError("SetRecoState(SPRST_INACTIVE) failed", hr);

	hr = recognizer->CreateRecoContext(&context);
	if (FAILED(hr))
		throw HRError("CreateRecoContext failed", hr);

	ULONGLONG interest = SPFEI(SPEI_RECOGNITION) |
	                     SPFEI(SPEI_END_SR_STREAM);
	hr = context->SetInterest(interest, interest);
	if (FAILED(hr))
		throw HRError("SetInterest failed", hr);

	hr = context->SetNotifyWin32Event();
	if (FAILED(hr))
		throw HRError("SetNotifyWin32Event", hr);

	notify = context->GetNotifyEventHandle();
	if (notify == INVALID_HANDLE_VALUE)
		throw HRError("GetNotifyEventHandle failed", E_NOINTERFACE);

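	// The caption audio stream runs at the same sample rate as the
	// OBS audio output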
	size_t sample_rate = audio_output_get_sample_rate(obs_get_audio());
	audio = new CaptionStream((DWORD)sample_rate, this);
	audio->Release();

	hr = recognizer->SetInput(audio, false);
	if (FAILED(hr))
		throw HRError("SetInput failed", hr);

	hr = context->CreateGrammar(1, &grammar);
	if (FAILED(hr))
		throw HRError("CreateGrammar failed", hr);

	hr = grammar->LoadDictation(nullptr, SPLO_STATIC);
	if (FAILED(hr))
		throw HRError("LoadDictation failed", hr);

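	// Start the background thread for this captions handler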
	try {
		t = std::thread([this] () {main_thread();});
	} catch (...) {
		throw "Failed to create thread";
	}

} catch (const char *err) {
	blog(LOG_WARNING, "%s: %s", __FUNCTION__, err);
	throw CAPTIONS_ERROR_GENERIC_FAIL;

} catch (HRError err) {
	blog(LOG_WARNING, "%s: %s (%lX)", __FUNCTION__, err.str, err.hr);
	throw CAPTIONS_ERROR_GENERIC_FAIL;
}