/*
	Play the point process as a "hum": synthesize a pulse train at 44100 Hz,
	filter it through a fixed bank of six formants, and play the part
	between tmin and tmax.
	Fix (consistency): the sibling PointProcess_playPart wraps playback in
	try/catch and rethrows with context; this function previously let
	MelderError propagate without context. Same wrapping added here.
*/
void PointProcess_hum (PointProcess me, double tmin, double tmax) {
	// Formant centre frequencies (Hz) and bandwidths (Hz); index 0 unused (1-based tables).
	static double formant [1 + 6] = { 0, 600, 1400, 2400, 3400, 4500, 5500 };
	static double bandwidth [1 + 6] = { 0, 50, 100, 200, 300, 400, 500 };
	try {
		autoSound sound = PointProcess_to_Sound_pulseTrain (me, 44100, 0.7, 0.05, 30);
		Sound_filterWithFormants (sound.peek(), tmin, tmax, 6, formant, bandwidth);
		Sound_playPart (sound.peek(), tmin, tmax, NULL, NULL);
	} catch (MelderError) {
		Melder_throw (me, U": hum not played.");
	}
}
/*
	Resynthesize the Manipulation's pulses analysis as a bare pulse train
	at 44.1 kHz. Throws when no pulses analysis is present.
*/
static autoSound synthesize_pulses (Manipulation me) {
	try {
		if (! my pulses)
			Melder_throw (U"Missing pulses analysis.");
		autoSound train = PointProcess_to_Sound_pulseTrain (my pulses.get(), 44100.0, 0.7, 0.05, 30);
		return train;
	} catch (MelderError) {
		Melder_throw (me, U": pulses synthesis not performed.");
	}
}
/*
	Synthesize the point process as a 44100-Hz pulse train and play the
	part between tmin and tmax. Rethrows any synthesis/playback error
	with the object as context.
	Fix: the throw message was a narrow string literal (": not played.");
	every sibling in this file uses a char32 U"..." literal — made consistent.
*/
void PointProcess_playPart (PointProcess me, double tmin, double tmax) {
	try {
		autoSound sound = PointProcess_to_Sound_pulseTrain (me, 44100, 0.7, 0.05, 30);
		Sound_playPart (sound.peek(), tmin, tmax, NULL, NULL);
	} catch (MelderError) {
		Melder_throw (me, U": not played.");
	}
}
/*
	Resynthesize the Manipulation's pitch tier as a pulse train:
	first convert the PitchTier to a PointProcess, then render that
	as a 44.1-kHz pulse train. Throws when no pitch tier is present.
*/
static autoSound synthesize_pitch (Manipulation me) {
	try {
		if (! my pitch)
			Melder_throw (U"Missing pitch tier.");
		autoPointProcess points = PitchTier_to_PointProcess (my pitch.get());
		autoSound result = PointProcess_to_Sound_pulseTrain (points.get(), 44100.0, 0.7, 0.05, 30);
		return result;
	} catch (MelderError) {
		Melder_throw (me, U": pitch manipulation not synthesized.");
	}
}
/*
	Convert the point process to a "hum" Sound: a 44100-Hz pulse train
	filtered through a fixed bank of six formants over the whole time
	domain (my xmin .. my xmax). Ownership of the returned Sound passes
	to the caller (transfer()).
	Fix: the throw message was a narrow string literal; every sibling in
	this file uses a char32 U"..." literal — made consistent.
*/
Sound PointProcess_to_Sound_hum (PointProcess me) {
	// Formant centre frequencies (Hz) and bandwidths (Hz); index 0 unused (1-based tables).
	static double formant [1 + 6] = { 0, 600, 1400, 2400, 3400, 4500, 5500 };
	static double bandwidth [1 + 6] = { 0, 50, 100, 200, 300, 400, 500 };
	try {
		autoSound sound = PointProcess_to_Sound_pulseTrain (me, 44100, 0.7, 0.05, 30);
		Sound_filterWithFormants (sound.peek(), my xmin, my xmax, 6, formant, bandwidth);
		return sound.transfer();
	} catch (MelderError) {
		Melder_throw (me, U": not converted to Sound (hum).");
	}
}
/*
	Render a PitchTier as a pulse-train Sound at the given sampling frequency,
	optionally filtered through a fixed six-formant bank to produce a "hum".
	The tier is first converted to a PointProcess, which is then synthesized
	with the given adaptation factor/time and interpolation depth.
*/
autoSound PitchTier_to_Sound_pulseTrain (PitchTier me, double samplingFrequency,
	double adaptFactor, double adaptTime, long interpolationDepth, bool hum)
{
	// Formant centre frequencies (Hz) and bandwidths (Hz); index 0 unused (1-based tables).
	static double formant [1 + 6] = { 0.0, 600.0, 1400.0, 2400.0, 3400.0, 4500.0, 5500.0 };
	static double bandwidth [1 + 6] = { 0.0, 50.0, 100.0, 200.0, 300.0, 400.0, 500.0 };
	try {
		autoPointProcess pulses = PitchTier_to_PointProcess (me);
		autoSound result = PointProcess_to_Sound_pulseTrain (pulses.peek(),
			samplingFrequency, adaptFactor, adaptTime, interpolationDepth);
		if (hum) {
			// 0.0, 0.0 as the time range — presumably "whole sound"; confirm against Sound_filterWithFormants.
			Sound_filterWithFormants (result.peek(), 0.0, 0.0, 6, formant, bandwidth);
		}
		return result;
	} catch (MelderError) {
		Melder_throw (me, U": not converted to Sound (pulse train).");
	}
}
/*
	Resynthesize the Manipulation's pulses through an LPC filter:
	pulse train -> voiceless fill -> LPC filtering -> de-emphasis -> scaling.
	Computes and caches the LPC analysis (my lpc) on first use, from the
	original sound resampled to 10 kHz. Throws when neither an LPC analysis
	nor an original sound is available, or when pulses are missing.
	NOTE(review): reformatted from a collapsed single line; in that form the
	"// to be exact" comment would have swallowed the rest of the statement.
*/
static autoSound synthesize_pulses_lpc (Manipulation me) {
	try {
		if (! my lpc) {
			// Lazily derive the LPC analysis from the original sound.
			if (! my sound)
				Melder_throw (U"Missing original sound.");
			autoSound sound10k = Sound_resample (my sound.get(), 10000.0, 50);
			my lpc = Sound_to_LPC_burg (sound10k.get(), 20, 0.025, 0.01, 50.0);
		}
		if (! my pulses)
			Melder_throw (U"Missing pulses analysis.");
		// Synthesize the pulse train at the LPC's sampling rate.
		autoSound train = PointProcess_to_Sound_pulseTrain (my pulses.get(),
			1.0 / my lpc -> samplingPeriod, 0.7, 0.05, 30);
		train -> dx = my lpc -> samplingPeriod;   // to be exact
		Sound_PointProcess_fillVoiceless (train.get(), my pulses.get());
		autoSound result = LPC_and_Sound_filter (my lpc.get(), train.get(), true);
		// Undo the 50-Hz pre-emphasis and normalize the peak to 0.99.
		NUMdeemphasize_f (result -> z [1], result -> nx, result -> dx, 50.0);
		Vector_scale (result.get(), 0.99);
		return result;
	} catch (MelderError) {
		Melder_throw (me, U": LPC synthesis not performed.");
	}
}