Example #1
        //// Listener settings ////
        void setupListener()
        {
            auto listener_node = *m_dev->emplace(m_dev->children().cend(), "listener");

            auto listener_pos_node = *listener_node->emplace(listener_node->children().cend(), "pos");

            add_position(listener_pos_node,
                         make_parameter(
                             [&] () { return m_scene.listener().Position(); },
                             [&] (const auto& elt) { m_scene.listener().Position(elt); }));

            auto listener_orient_node = *listener_node->emplace(listener_node->children().cend(), "orientation");
            auto listener_orient_addr = listener_orient_node->createAddress(OSSIA::Value::Type::TUPLE); // [ at_x at_y at_z up_x up_y up_z ]

            auto tupl = new OSSIA::Tuple;

            const auto& orient = m_scene.listener().Orientation();
            tupl->value.push_back(new OSSIA::Float(orient.At()[0]));
            tupl->value.push_back(new OSSIA::Float(orient.At()[1]));
            tupl->value.push_back(new OSSIA::Float(orient.At()[2]));
            tupl->value.push_back(new OSSIA::Float(orient.Up()[0]));
            tupl->value.push_back(new OSSIA::Float(orient.Up()[1]));
            tupl->value.push_back(new OSSIA::Float(orient.Up()[2]));
            listener_orient_addr->pushValue(tupl);

            listener_orient_addr->addCallback([&] (const OSSIA::Value* val) {
                auto tpl = dynamic_cast<const OSSIA::Tuple*>(val);
                if(!tpl || tpl->value.size() != 6)
                    return;

                auto at_x = dynamic_cast<const OSSIA::Float*>(tpl->value[0]);
                auto at_y = dynamic_cast<const OSSIA::Float*>(tpl->value[1]);
                auto at_z = dynamic_cast<const OSSIA::Float*>(tpl->value[2]);
                auto up_x = dynamic_cast<const OSSIA::Float*>(tpl->value[3]);
                auto up_y = dynamic_cast<const OSSIA::Float*>(tpl->value[4]);
                auto up_z = dynamic_cast<const OSSIA::Float*>(tpl->value[5]);
                if(!at_x || !at_y || !at_z || !up_x || !up_y || !up_z)
                    return;

                m_scene.listener().Orientation(at_x->value, at_y->value, at_z->value, up_x->value, up_y->value, up_z->value);
            });

            auto listener_orient_at_node = *listener_orient_node->emplace(listener_orient_node->children().cend(), "at");
            add_position(listener_orient_at_node,
                         make_parameter(
                             [&] () { return m_scene.listener().OrientationAt(); },
                             [&] (const auto& elt) { m_scene.listener().Orientation(elt, m_scene.listener().OrientationUp()); }
            ));
            auto listener_orient_up_node = *listener_orient_node->emplace(listener_orient_node->children().cend(), "up");
            add_position(listener_orient_up_node,
                         make_parameter(
                             [&] () { return m_scene.listener().OrientationUp(); },
                             [&] (const auto& elt) { m_scene.listener().Orientation(m_scene.listener().OrientationAt(), elt); }
            ));
        }
Example #2
/* Load prosodic information */
void setProsFile( char *rel, char *filename )
{
	int error;

	strcpy( slot_Speak_stat, "PROCESSING" );
	if( prop_Speak_stat == AutoOutput )  inqSpeakStat();

	if( strcmp(rel,"=")==0 )  {
		strcpy( slot_Pros_file, filename );
		if( prop_ProsFile == AutoOutput )  inqProsFile();

		refresh();
		/* Read the various parameters into prosBuf */
		error = read_pros_file( filename );
		if( ! error )  {

			text_analysis( input_text );	/* Text analysis */
			if( prop_Text_text == AutoOutput )  inqTextText();
			if( prop_Speak_text == AutoOutput )  inqSpeakText();

/*		parameter_generation();		*/

			init_parameter(); /* Prepare for parameter generation */
			make_duration(); /* Generate state durations from the plain text */
			modify_duration(); /* Modify durations (tag processing) */

			/* If the phoneme durations have been modified,
			   recompute the state durations */
			update_duration();

			/* Here, the phoneme durations are set using the data in prosBuf. */

			make_cumul_time(); /* Compute cumulative phoneme durations */
			modify_voice(); /* Switch speakers and change the alpha parameter (tag processing) */

			if( prop_Text_pho == AutoOutput )  inqTextPho();
			if( prop_Speak_pho == AutoOutput )  inqSpeakPho();
			if( prop_Text_dur == AutoOutput )  inqTextDur();
			if( prop_Speak_dur == AutoOutput )  inqSpeakDur();

			make_parameter(); /* Run parameter generation */

			modify_f0(); /* Modify F0 (tag processing) */
			modify_power(); /* Modify power (tag processing) */

/*		parameter_generation();	 ends here	*/

			/* Update F0 and c0 for the generated parameters */
			error = set_f0_and_power( filename );
			if( ! error )  {
				do_synthesis();		/* Generate the synthesized waveform */
			}
		}
	} else {
		unknown_com();
	}
	strcpy( slot_Speak_stat, "READY" );
	if( prop_Speak_stat == AutoOutput )  inqSpeakStat();
}
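
For reference, a direct call with the only relation the function accepts ("=") would look like the following; the prosody file name is purely hypothetical:

	/* Hypothetical usage: load prosodic parameters from "utt01.pros" and resynthesize. */
	setProsFile( "=", "utt01.pros" );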
Example #3
        void on_sourceAdded(const OSSIA::Value* val)
        {
            auto str_val = dynamic_cast<const OSSIA::String*>(val);
            if(!str_val)
                return;

            // Create the sound
            auto sound_obj = new SoundObj{str_val->value};
            sound_obj->setParent(&m_scene);
            m_scene.sounds().insert(sound_obj);
            auto& sound = sound_obj->sound;

            // Create the callbacks and OSC device commands
            auto src_node = *m_sourcesListNode->emplace(m_sourcesListNode->children().cend(), str_val->value);

            // Position
            auto src_pos_node = *src_node->emplace(src_node->children().cend(), "pos");
            add_position(src_pos_node,
                         make_parameter(
                             [&] () { return sound.source().Position(); },
                             [&] (const auto& elt) { sound.source().Position(elt); }
            ));

            // Enablement
            add_child(src_node, "enabled", OSSIA::Value::Type::BOOL,
                      [&,sound_obj] (const OSSIA::Value* val) {
                auto enablement_val = dynamic_cast<const OSSIA::Bool*>(val);
                if(!enablement_val)
                    return;
                sound_obj->enablementChanged(enablement_val->value);
            });

            // Audio file
            add_child(src_node, "file", OSSIA::Value::Type::STRING,
                      [&,sound_obj] (const OSSIA::Value* val) {
                auto filename_val = dynamic_cast<const OSSIA::String*>(val);
                if(!filename_val)
                    return;

                sound_obj->fileChanged(QString::fromStdString(filename_val->value));
            });
        }
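
The add_child helper used above is not shown in these examples. A minimal sketch of what it could look like, assuming it only wraps the OSSIA calls already visible here (emplace, createAddress, addCallback), is:

        // Hypothetical sketch of the add_child helper used in Examples #1 and #3:
        // create a named child node, give it an address of the requested value type,
        // and register the value-change callback on that address.
        template<typename Callback>
        auto add_child(const std::shared_ptr<OSSIA::Node>& parent,
                       const std::string& name,
                       OSSIA::Value::Type type,
                       Callback&& cb)
        {
            auto node = *parent->emplace(parent->children().cend(), name);
            auto addr = node->createAddress(type);
            addr->addCallback(std::forward<Callback>(cb));
            return node;
        }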
Example #4
void parameter_generation()
{
/* Initial setup for speech synthesis */
	init_parameter();

/* Determine phoneme durations */
	make_duration();
/* Modify phoneme durations */
	modify_duration();
	make_cumul_time();
	modify_voice();

	if( prop_Text_pho == AutoOutput )  inqTextPho();
	if( prop_Speak_pho == AutoOutput )  inqSpeakPho();
	if( prop_Text_dur == AutoOutput )  inqTextDur();
	if( prop_Speak_dur == AutoOutput )  inqSpeakDur();

/* Parameter generation: F0 and MLSA filter coefficients */
	make_parameter();

/* Modify F0 and gain b(0) */
	modify_f0();
	modify_power();
}
Example #5
void Self::setScreenName(const std::string& screenName) {
    setParameter(make_parameter(parameter::screen_name), screenName);
}