Sound EEG_to_Sound_modulated (EEG me, double baseFrequency, double channelBandwidth, const wchar_t *channelRanges) {
	try {
		long numberOfChannels;
		autoNUMvector <long> channelNumbers (NUMstring_getElementsOfRanges (channelRanges, my d_numberOfChannels, & numberOfChannels, NULL, L"channel", true), 1);
		double maxFreq = baseFrequency + my d_numberOfChannels * channelBandwidth;
		double samplingFrequency = 2 * maxFreq;
		samplingFrequency = samplingFrequency < 44100 ? 44100 : samplingFrequency;
		autoSound thee = Sound_createSimple (1, my xmax - my xmin, samplingFrequency);
		for (long i = 1; i <= numberOfChannels; i++) {
			long ichannel = channelNumbers[i];
			double fbase = baseFrequency;   // + (ichannel - 1) * channelBandwidth;
			autoSound si = Sound_extractChannel (my d_sound, ichannel);
			autoSpectrum spi = Sound_to_Spectrum (si.peek(), 1);
			Spectrum_passHannBand (spi.peek(), 0.5, channelBandwidth - 0.5, 0.5);
			autoSpectrum spi_shifted = Spectrum_shiftFrequencies (spi.peek(), fbase, samplingFrequency / 2, 30);
			autoSound resampled = Spectrum_to_Sound (spi_shifted.peek());
			long nx = resampled -> nx < thy nx ? resampled -> nx : thy nx;
			for (long j = 1; j <= nx; j++) {
				thy z[1][j] += resampled -> z[1][j];
			}
		}
		Vector_scale (thee.peek(), 0.99);
		return thee.transfer();
	} catch (MelderError) {
		Melder_throw (me, ": no playable sound created.");
	}
}
Sound Cepstrum_to_Sound (Cepstrum me) {
	try {
		autoSpectrum sx = Cepstrum_to_Spectrum (me);
		autoSound thee = Spectrum_to_Sound (sx.peek());
		return thee.transfer();
	} catch (MelderError) {
		Melder_throw (me, ": no Sound calculated.");
	}
}
autoSound Sound_filter_stopHannBand (Sound me, double fmin, double fmax, double smooth) {
	try {
		autoSound thee = Data_copy (me);
		if (my ny == 1) {
			autoSpectrum spec = Sound_to_Spectrum (me, true);
			Spectrum_stopHannBand (spec.peek(), fmin, fmax, smooth);
			autoSound him = Spectrum_to_Sound (spec.peek());
			NUMvector_copyElements (his z [1], thy z [1], 1, thy nx);
		} else {
			for (long ichan = 1; ichan <= my ny; ichan ++) {
				autoSound channel = Sound_extractChannel (me, ichan);
				autoSpectrum spec = Sound_to_Spectrum (channel.peek(), true);
				Spectrum_stopHannBand (spec.peek(), fmin, fmax, smooth);
				autoSound him = Spectrum_to_Sound (spec.peek());
				NUMvector_copyElements (his z [1], thy z [ichan], 1, thy nx);
			}
		}
		return thee;
	} catch (MelderError) {
		Melder_throw (me, U": not filtered (stop Hann band).");
	}
}
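/*
 * Hypothetical usage sketch, not part of the original sources: filter a single- or multi-channel
 * Sound through the spectral domain with the band-stop filter defined above. The helper name
 * demo_stopBand_sketch and the parameter values (500-1000 Hz band, 100 Hz smoothing skirt) are
 * illustrative assumptions; "sound" stands for any existing Sound object.
 */
static autoSound demo_stopBand_sketch (Sound sound) {
	// Remove the 500-1000 Hz band; everything outside the band is copied back unchanged.
	return Sound_filter_stopHannBand (sound, 500.0, 1000.0, 100.0);
}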
autoSound Sound_filter_formula (Sound me, const char32 *formula, Interpreter interpreter) {
	try {
		autoSound thee = Data_copy (me);
		if (my ny == 1) {
			autoSpectrum spec = Sound_to_Spectrum (me, true);
			Matrix_formula ((Matrix) spec.peek(), formula, interpreter, nullptr);
			autoSound him = Spectrum_to_Sound (spec.peek());
			NUMvector_copyElements (his z [1], thy z [1], 1, thy nx);
		} else {
			for (long ichan = 1; ichan <= my ny; ichan ++) {
				autoSound channel = Sound_extractChannel (me, ichan);
				autoSpectrum spec = Sound_to_Spectrum (channel.peek(), true);
				Matrix_formula ((Matrix) spec.peek(), formula, interpreter, nullptr);
				autoSound him = Spectrum_to_Sound (spec.peek());
				NUMvector_copyElements (his z [1], thy z [ichan], 1, thy nx);
			}
		}
		return thee;
	} catch (MelderError) {
		Melder_throw (me, U": not filtered (with formula).");
	}
}
Sound EEG_to_Sound_frequencyShifted (EEG me, long channel, double frequencyShift, double samplingFrequency, double maxAmp) {
	try {
		autoSound si = Sound_extractChannel (my d_sound, channel);
		autoSpectrum spi = Sound_to_Spectrum (si.peek(), 1);
		autoSpectrum spi_shifted = Spectrum_shiftFrequencies (spi.peek(), frequencyShift, samplingFrequency / 2, 30);
		autoSound thee = Spectrum_to_Sound (spi_shifted.peek());
		if (maxAmp > 0) {
			Vector_scale (thee.peek(), maxAmp);
		}
		return thee.transfer();
	} catch (MelderError) {
		Melder_throw (me, ": channel not converted to sound.");
	}
}
autoPitch Pitch_smooth (Pitch me, double bandWidth) {
	try {
		autoPitch interp = Pitch_interpolate (me);
		autoMatrix matrix1 = Pitch_to_Matrix (interp.peek());
		autoSound sound1 = Sound_create (1, 2 * matrix1->xmin - matrix1->xmax, 2 * matrix1->xmax - matrix1->xmin,
			3 * matrix1->nx, matrix1->dx, matrix1->x1 - 2 * matrix1->nx * matrix1->dx);
		long firstVoiced = 0, lastVoiced = 0;
		for (long i = 1; i <= matrix1 -> nx; i ++) {
			double f = matrix1 -> z [1] [i];
			if (f != 0.0) {
				if (! firstVoiced) firstVoiced = i;
				lastVoiced = i;
				sound1 -> z [1] [i + matrix1 -> nx] = f;
			}
		}

		/* Extrapolate. */
		double fextrap = matrix1 -> z [1] [firstVoiced];
		firstVoiced += matrix1 -> nx;
		for (long i = 1; i < firstVoiced; i ++)
			sound1 -> z [1] [i] = fextrap;
		fextrap = matrix1 -> z [1] [lastVoiced];
		lastVoiced += matrix1 -> nx;
		for (long i = lastVoiced + 1; i <= sound1 -> nx; i ++)
			sound1 -> z [1] [i] = fextrap;

		/* Smooth. */
		autoSpectrum spectrum = Sound_to_Spectrum (sound1.peek(), true);
		for (long i = 1; i <= spectrum -> nx; i ++) {
			double f = (i - 1) * spectrum -> dx, fT = f / bandWidth, factor = exp (- fT * fT);
			spectrum -> z [1] [i] *= factor;
			spectrum -> z [2] [i] *= factor;
		}
		autoSound sound2 = Spectrum_to_Sound (spectrum.peek());

		autoMatrix matrix2 = Matrix_create (my xmin, my xmax, my nx, my dx, my x1, 1, 1, 1, 1, 1);
		for (long i = 1; i <= my nx; i ++) {
			double originalF0 = my frame [i]. candidate [1]. frequency;
			matrix2 -> z [1] [i] = originalF0 > 0.0 && originalF0 < my ceiling ?
				sound2 -> z [1] [i + matrix2 -> nx] : 0.0;
		}
		autoPitch thee = Matrix_to_Pitch (matrix2.peek());
		thy ceiling = my ceiling;
		return thee;
	} catch (MelderError) {
		Melder_throw (me, U": not smoothed.");
	}
}
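/*
 * Hypothetical usage sketch, not part of the original sources: low-pass filter a pitch contour
 * with the Gaussian smoother above. The helper name and the 10 Hz bandwidth are illustrative
 * assumptions; "pitch" stands for any existing Pitch object.
 */
static autoPitch demo_smoothPitch_sketch (Pitch pitch) {
	// Voiceless stretches are interpolated and extrapolated before smoothing, then set back to unvoiced.
	return Pitch_smooth (pitch, 10.0);
}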
Spectrum Spectrum_cepstralSmoothing (Spectrum me, double bandWidth) {
	try {
		/*
		 * dB-spectrum is log (power).
		 */
		autoSpectrum dBspectrum = Data_copy (me);
		double *re = dBspectrum -> z [1], *im = dBspectrum -> z [2];
		for (long i = 1; i <= dBspectrum -> nx; i ++) {
			re [i] = log (re [i] * re [i] + im [i] * im [i] + 1e-300);
			im [i] = 0.0;
		}

		/*
		 * Cepstrum is Fourier transform of dB-spectrum.
		 */
		autoSound cepstrum = Spectrum_to_Sound (dBspectrum.peek());

		/*
		 * Multiply cepstrum by a Gaussian.
		 */
		double factor = - bandWidth * bandWidth;
		for (long i = 1; i <= cepstrum -> nx; i ++) {
			double t = (i - 1) * cepstrum -> dx;
			cepstrum -> z [1] [i] *= exp (factor * t * t) * ( i == 1 ? 1 : 2 );
		}

		/*
		 * Smoothed power spectrum is original power spectrum convolved with a Gaussian.
		 */
		autoSpectrum thee = Sound_to_Spectrum (cepstrum.peek(), TRUE);

		/*
		 * Convert power spectrum back into a "complex" spectrum without phase information.
		 */
		re = thy z [1], im = thy z [2];
		for (long i = 1; i <= thy nx; i ++) {
			re [i] = exp (0.5 * re [i]);   // i.e., sqrt (exp (re [i]))
			im [i] = 0.0;
		}
		return thee.transfer();
	} catch (MelderError) {
		Melder_throw (me, ": cepstral smoothing not computed.");
	}
}
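/*
 * Hypothetical usage sketch, not part of the original sources: compute a smoothed spectral
 * envelope of a Sound by going through the cepstral domain, mirroring the old-style calling
 * conventions used elsewhere in this file. The helper name and the 500 Hz bandwidth are
 * illustrative assumptions; "sound" stands for any existing Sound object.
 */
static Spectrum demo_cepstralEnvelope_sketch (Sound sound) {
	autoSpectrum spec = Sound_to_Spectrum (sound, 1);   // FFT of the whole sound
	// Gaussian smoothing of the log-power spectrum, implemented as a lifter in the cepstral domain.
	autoSpectrum envelope = Spectrum_cepstralSmoothing (spec.peek(), 500.0);
	return envelope.transfer();
}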
autoCepstrum Spectrum_to_Cepstrum (Spectrum me) {
	try {
		autoSpectrum dBspectrum = Data_copy (me);
		double *re = dBspectrum -> z[1], *im = dBspectrum -> z[2];
		for (long i = 1; i <= dBspectrum -> nx; i++) {
			re[i] = log (re[i] * re[i] + im[i] * im[i] + 1e-300);
			im[i] = 0.0;
		}
		autoSound cepstrum = Spectrum_to_Sound (dBspectrum.peek());
		autoCepstrum thee = Cepstrum_create (0.5 / my dx, my nx);
		for (long i = 1; i <= thy nx; i++) {
			double val = cepstrum -> z[1][i];
			thy z[1][i] = val;
		}
		return thee;
	} catch (MelderError) {
		Melder_throw (me, U": not converted to Cepstrum.");
	}
}
static autoSound ComplexSpectrogram_to_Sound2 (ComplexSpectrogram me, double stretchFactor) {
	try {
		/*
		 * If the original number of samples is odd, the imaginary part of the last spectral value
		 * is zero, so its phase is either zero or pi.
		 */
		double pi = atan2 (0.0, - 0.5);
		double samplingFrequency = 2.0 * my ymax;
		double lastFrequency = my y1 + (my ny - 1) * my dy;
		int originalNumberOfSamplesProbablyOdd = (my phase [my ny][1] != 0.0 && my phase[my ny][1] != pi) ||
			my ymax - lastFrequency > 0.25 * my dx;
		if (my y1 != 0.0) {
			Melder_throw (U"A Fourier-transformable Spectrum must have a first frequency of 0 Hz, not ", my y1, U" Hz.");
		}
		long numberOfSamples = 2 * my ny - (originalNumberOfSamplesProbablyOdd ? 1 : 2);
		double synthesisWindowDuration = numberOfSamples / samplingFrequency;
		autoSpectrum spectrum = Spectrum_create (my ymax, my ny);
		autoSound synthesisWindow = Sound_createSimple (1, synthesisWindowDuration, samplingFrequency);
		long stepSizeSamples = my dx * samplingFrequency * stretchFactor;
		double newDuration = (my xmax - my xmin) * stretchFactor + 0.05;
		autoSound thee = Sound_createSimple (1, newDuration, samplingFrequency);   // TODO
		long istart = 1, iend = istart + stepSizeSamples - 1;
		for (long iframe = 1; iframe <= my nx; iframe++) {
			spectrum -> z[1][1] = sqrt (my z[1][iframe]);
			for (long ifreq = 2; ifreq <= my ny; ifreq++) {
				double f = my y1 + (ifreq - 1) * my dy;
				double a = sqrt (my z[ifreq][iframe]);
				double phi = my phase[ifreq][iframe];
				double extraPhase = 2.0 * pi * (stretchFactor - 1.0) * my dx * f;
				phi += extraPhase;
				spectrum -> z[1][ifreq] = a * cos (phi);
				spectrum -> z[2][ifreq] = a * sin (phi);
			}
			autoSound synthesis = Spectrum_to_Sound (spectrum.get());
			for (long j = istart; j <= iend; j++) {
				thy z[1][j] = synthesis -> z[1][j - istart + 1];
			}
			istart = iend + 1;
			iend = istart + stepSizeSamples - 1;
		}
		return thee;
	} catch (MelderError) {
		Melder_throw (me, U": no Sound created.");
	}
}
Cepstrum Spectrum_to_Cepstrum (Spectrum me) {
	try {
		autoMatrix unwrap = Spectrum_unwrap (me);
		autoSpectrum sx = Data_copy (me);

		// Copy magnitude-squared and unwrapped phase.
		for (long i = 1; i <= my nx; i ++) {
			double xa = unwrap -> z[1][i];
			sx -> z[1][i] = xa > 0 ? 0.5 * log (xa) : -300;
			sx -> z[2][i] = unwrap -> z[2][i];
		}

		// Compute complex cepstrum x.
		autoSound x = Spectrum_to_Sound (sx.peek());
		autoCepstrum thee = Cepstrum_create (0, x -> xmax - x -> xmin, x -> nx);
		NUMvector_copyElements (x -> z[1], thy z[1], 1, x -> nx);
		return thee.transfer();
	} catch (MelderError) {
		Melder_throw (me, ": no Cepstrum created.");
	}
}
autoSpectrum Spectrum_lpcSmoothing (Spectrum me, int numberOfPeaks, double preemphasisFrequency) {
	try {
		double gain, a [100];
		long numberOfCoefficients = 2 * numberOfPeaks;

		autoSound sound = Spectrum_to_Sound (me);
		NUMpreemphasize_f (sound -> z [1], sound -> nx, sound -> dx, preemphasisFrequency);
		NUMburg (sound -> z [1], sound -> nx, a, numberOfCoefficients, & gain);
		for (long i = 1; i <= numberOfCoefficients; i ++)
			a [i] = - a [i];
		autoSpectrum thee = Data_copy (me);

		long nfft = 2 * (thy nx - 1);
		long ndata = numberOfCoefficients < nfft ? numberOfCoefficients : nfft - 1;
		double scale = 10 * (gain > 0 ? sqrt (gain) : 1) / numberOfCoefficients;
		autoNUMvector <double> data (1, nfft);
		data [1] = 1;
		for (long i = 1; i <= ndata; i ++)
			data [i + 1] = a [i];
		NUMrealft (data.peek(), nfft, 1);
		double *re = thy z [1];
		double *im = thy z [2];
		re [1] = scale / data [1];
		im [1] = 0.0;
		long halfnfft = nfft / 2;
		for (long i = 2; i <= halfnfft; i ++) {
			double real = data [i + i - 1], imag = data [i + i];
			re [i] = scale / sqrt (real * real + imag * imag) / (1 + thy dx * (i - 1) / preemphasisFrequency);
			im [i] = 0;
		}
		re [halfnfft + 1] = scale / data [2] / (1 + thy dx * halfnfft / preemphasisFrequency);
		im [halfnfft + 1] = 0.0;
		return thee;
	} catch (MelderError) {
		Melder_throw (me, U": not smoothed.");
	}
}
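/*
 * Hypothetical usage sketch, not part of the original sources: fit an all-pole (Burg/LPC) envelope
 * to an existing Spectrum with the smoother above. The helper name and the parameter values
 * (5 peaks, pre-emphasis from 50 Hz) are illustrative assumptions; "spec" stands for any existing
 * Spectrum object.
 */
static autoSpectrum demo_lpcEnvelope_sketch (Spectrum spec) {
	// Each requested peak costs two LPC coefficients; the result is a magnitude-only Spectrum.
	return Spectrum_lpcSmoothing (spec, 5, 50.0);
}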
Sound Sound_deepenBandModulation (Sound me, double enhancement_dB, double flow, double fhigh, double slowModulation, double fastModulation, double bandSmoothing) {
	try {
		autoSound thee = Data_copy (me);
		double maximumFactor = pow (10, enhancement_dB / 20), alpha = sqrt (log (2.0));
		double alphaslow = alpha / slowModulation, alphafast = alpha / fastModulation;

		for (long channel = 1; channel <= my ny; channel ++) {
			autoSound channelSound = Sound_extractChannel (me, channel);
			autoSpectrum orgspec = Sound_to_Spectrum (channelSound.peek(), true);

			/*
			 * Keep the part of the sound that is outside the filter bank.
			 */
			autoSpectrum spec = Data_copy (orgspec.peek());
			Spectrum_stopHannBand (spec.peek(), flow, fhigh, bandSmoothing);
			autoSound filtered = Spectrum_to_Sound (spec.peek());
			long n = thy nx;
			double *amp = thy z [channel];
			for (long i = 1; i <= n; i ++)
				amp [i] = filtered -> z [1] [i];

			autoMelderProgress progress (U"Deepen band modulation...");
			double fmin = flow;
			while (fmin < fhigh) {
				/*
				 * Take a one-bark frequency band.
				 */
				double fmid_bark = NUMhertzToBark (fmin) + 0.5, ceiling;
				double fmax = NUMbarkToHertz (NUMhertzToBark (fmin) + 1);
				if (fmax > fhigh) fmax = fhigh;
				Melder_progress (fmin / fhigh, U"Band: ", Melder_fixed (fmin, 0), U" ... ", Melder_fixed (fmax, 0), U" Hz");
				NUMmatrix_copyElements (orgspec -> z, spec -> z, 1, 2, 1, spec -> nx);
				Spectrum_passHannBand (spec.peek(), fmin, fmax, bandSmoothing);
				autoSound band = Spectrum_to_Sound (spec.peek());

				/*
				 * Compute a relative intensity contour.
				 */
				autoSound intensity = Data_copy (band.peek());
				n = intensity -> nx;
				amp = intensity -> z [1];
				for (long i = 1; i <= n; i ++)
					amp [i] = 10 * log10 (amp [i] * amp [i] + 1e-6);
				autoSpectrum intensityFilter = Sound_to_Spectrum (intensity.peek(), true);
				n = intensityFilter -> nx;
				for (long i = 1; i <= n; i ++) {
					double frequency = intensityFilter -> x1 + (i - 1) * intensityFilter -> dx;
					double slow = alphaslow * frequency, fast = alphafast * frequency;
					double factor = exp (- fast * fast) - exp (- slow * slow);
					intensityFilter -> z [1] [i] *= factor;
					intensityFilter -> z [2] [i] *= factor;
				}
				intensity.reset (Spectrum_to_Sound (intensityFilter.peek()));
				n = intensity -> nx;
				amp = intensity -> z [1];
				for (long i = 1; i <= n; i ++)
					amp [i] = pow (10, amp [i] / 2);

				/*
				 * Clip to maximum enhancement.
				 */
				ceiling = 1 + (maximumFactor - 1.0) * (0.5 - 0.5 * cos (NUMpi * fmid_bark / 13));
				for (long i = 1; i <= n; i ++)
					amp [i] = 1 / (1 / amp [i] + 1 / ceiling);

				n = thy nx;
				amp = thy z [channel];
				for (long i = 1; i <= n; i ++)
					amp [i] += band -> z [1] [i] * intensity -> z [1] [i];

				fmin = fmax;
			}
		}
		Vector_scale (thee.peek(), 0.99);
		/* Truncate. */
		thy xmin = my xmin;
		thy xmax = my xmax;
		thy nx = my nx;
		thy x1 = my x1;
		return thee.transfer();
	} catch (MelderError) {
		Melder_throw (me, U": band modulation not deepened.");
	}
}
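/*
 * Hypothetical usage sketch, not part of the original sources: enhance the intensity modulations
 * of a recording with the band-modulation deepener above. The helper name and all parameter
 * values are illustrative assumptions; "sound" stands for any existing Sound object.
 */
static Sound demo_deepenBandModulation_sketch (Sound sound) {
	// Boost 3-30 Hz intensity modulations by up to 20 dB in the 300-8000 Hz region,
	// using 100 Hz band smoothing at the band edges.
	return Sound_deepenBandModulation (sound, 20.0, 300.0, 8000.0, 3.0, 30.0, 100.0);
}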
static Sound Spectrum_to_Sound_part (Spectrum me, double fmin, double fmax) {
	autoSpectrum band = Spectrum_band (me, fmin, fmax);
	autoSound sound = Spectrum_to_Sound (band.peek());
	return sound.transfer();
}
autoSound ComplexSpectrogram_to_Sound (ComplexSpectrogram me, double stretchFactor) {
	try {
		/*
		 * If the original number of samples is odd, the imaginary part of the last spectral value
		 * is zero, so its phase is either zero or +/- pi.
		 */
		double pi = atan2 (0.0, - 0.5);
		double samplingFrequency = 2.0 * my ymax;
		double lastFrequency = my y1 + (my ny - 1) * my dy, lastPhase = my phase[my ny][1];
		int originalNumberOfSamplesProbablyOdd = (lastPhase != 0.0 && lastPhase != pi && lastPhase != -pi) ||
			my ymax - lastFrequency > 0.25 * my dx;
		if (my y1 != 0.0) {
			Melder_throw (U"A Fourier-transformable ComplexSpectrogram must have a first frequency of 0 Hz, not ", my y1, U" Hz.");
		}
		long nsamp_window = 2 * my ny - (originalNumberOfSamplesProbablyOdd ? 1 : 2);
		long halfnsamp_window = nsamp_window / 2;
		double synthesisWindowDuration = nsamp_window / samplingFrequency;
		autoSpectrum spectrum = Spectrum_create (my ymax, my ny);
		autoSound synthesisWindow = Sound_createSimple (1, synthesisWindowDuration, samplingFrequency);
		double newDuration = (my xmax - my xmin) * stretchFactor;
		autoSound thee = Sound_createSimple (1, newDuration, samplingFrequency);   // TODO
		double thyStartTime;
		for (long iframe = 1; iframe <= my nx; iframe++) {
			// "Original" sound:
			double tmid = Sampled_indexToX (me, iframe);
			long leftSample = Sampled_xToLowIndex (thee.get(), tmid);
			long rightSample = leftSample + 1;
			long startSample = rightSample - halfnsamp_window;
			double startTime = Sampled_indexToX (thee.get(), startSample);
			if (iframe == 1) {
				thyStartTime = Sampled_indexToX (thee.get(), startSample);
			}
			//long endSample = leftSample + halfnsamp_window;
			// New Sound with stretch:
			long thyStartSample = Sampled_xToLowIndex (thee.get(), thyStartTime);
			double thyEndTime = thyStartTime + my dx * stretchFactor;
			long thyEndSample = Sampled_xToLowIndex (thee.get(), thyEndTime);
			long stretchedStepSizeSamples = thyEndSample - thyStartSample + 1;
			//double extraTime = (thyStartSample - startSample + 1) * thy dx;
			double extraTime = (thyStartTime - startTime);
			spectrum -> z[1][1] = sqrt (my z[1][iframe]);
			for (long ifreq = 2; ifreq <= my ny; ifreq++) {
				double f = my y1 + (ifreq - 1) * my dy;
				double a = sqrt (my z[ifreq][iframe]);
				double phi = my phase[ifreq][iframe], intPart;
				double extraPhase = 2.0 * pi * modf (extraTime * f, &intPart);   // fractional part
				phi += extraPhase;
				spectrum -> z[1][ifreq] = a * cos (phi);
				spectrum -> z[2][ifreq] = a * sin (phi);
			}
			autoSound synthesis = Spectrum_to_Sound (spectrum.get());
			// Where should the sound be placed?
			long thyEndSampleP = (long) floor (fmin (thyStartSample + synthesis -> nx - 1, thyStartSample + stretchedStepSizeSamples - 1));   // guard against extreme stretches
			if (iframe == my nx) {
				thyEndSampleP = (long) floor (fmin (thy nx, thyStartSample + synthesis -> nx - 1));   // ppgb: why rounded down?
			}
			for (long j = thyStartSample; j <= thyEndSampleP; j++) {
				thy z[1][j] = synthesis -> z[1][j - thyStartSample + 1];
			}
			thyStartTime += my dx * stretchFactor;
		}
		return thee;
	} catch (MelderError) {
		Melder_throw (me, U": no Sound created.");
	}
}
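/*
 * Hypothetical usage sketch, not part of the original sources: resynthesize a time-stretched
 * version of a recording from its ComplexSpectrogram with the function above. The helper name
 * and the stretch factor of 1.5 are illustrative assumptions; "cs" stands for any existing
 * ComplexSpectrogram object.
 */
static autoSound demo_timeStretch_sketch (ComplexSpectrogram cs) {
	// Frame magnitudes are kept; phases are advanced per frame so the result lasts 1.5 times as long.
	return ComplexSpectrogram_to_Sound (cs, 1.5);
}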
autoMatrix Spectrum_unwrap (Spectrum me) {
	try {
		struct tribolet_struct tbs;
		int remove_linear_part = 1;

		long nfft = 2;
		while (nfft < my nx - 1) {
			nfft *= 2;
		}
		nfft *= 2;
		if (nfft / 2 != my nx - 1) {
			Melder_throw (U"Dimension of Spectrum is not (power of 2 - 1).");
		}
		autoSound x = Spectrum_to_Sound (me);
		autoSound nx = Data_copy (x.get());
		for (long i = 1; i <= x -> nx; i++) {
			nx -> z[1][i] *= (i - 1);
		}
		autoSpectrum snx = Sound_to_Spectrum (nx.get(), 1);
		autoMatrix thee = Matrix_create (my xmin, my xmax, my nx, my dx, my x1, 1, 2, 2, 1, 1);

		// Common variables.
		tbs.thlinc = THLINC;
		tbs.thlcon = THLCON;
		tbs.x = x -> z[1];
		tbs.nx = x -> nx;
		tbs.l = (long) floor (pow (2, EXP2) + 0.1);
		tbs.ddf = NUM2pi / ( (tbs.l) * nfft);
		tbs.reverse_sign = my z[1][1] < 0;
		tbs.count = 0;

		// Reuse snx : put phase derivative (d/df) in imaginary part.
		tbs.dvtmn2 = 0;
		for (long i = 1; i <= my nx; i ++) {
			double xr = my z[1][i], xi = my z[2][i];
			double nxr = snx -> z[1][i], nxi = snx -> z[2][i];
			double xmsq = xr * xr + xi * xi;
			double pdvt = PHADVT (xr, xi, nxr, nxi, xmsq);
			thy z[1][i] = xmsq;
			snx -> z[2][i] = pdvt;
			tbs.dvtmn2 += pdvt;
		}
		tbs.dvtmn2 = (2 * tbs.dvtmn2 - snx -> z[2][1] - snx -> z[2][my nx]) / (my nx - 1);

		autoMelderProgress progress (U"Phase unwrapping");
		double pphase = 0, phase = 0;
		double ppdvt = snx -> z[2][1];
		thy z[2][1] = PPVPHA (my z[1][1], my z[2][1], tbs.reverse_sign);
		for (long i = 2; i <= my nx; i ++) {
			double pfreq = NUM2pi * (i - 1) / nfft;
			double pdvt = snx -> z[2][i];
			double ppv = PPVPHA (my z[1][i], my z[2][i], tbs.reverse_sign);
			phase = phase_unwrap (&tbs, pfreq, ppv, pdvt, &pphase, &ppdvt);
			ppdvt = pdvt;
			thy z[2][i] = pphase = phase;
			Melder_progress ( (double) i / my nx, i, U" unwrapped phases from ", my nx, U".");
		}

		long iphase = (long) floor (phase / NUMpi + 0.1);   // ppgb: better than truncation toward zero
		if (remove_linear_part) {
			phase /= my nx - 1;
			for (long i = 2; i <= my nx; i ++) {
				thy z[2][i] -= phase * (i - 1);
			}
		}
		Melder_information (U"Number of spectral values: ", tbs.count);
		Melder_information (U" iphase = ", iphase);
		return thee;
	} catch (MelderError) {
		Melder_throw (me, U": not unwrapped.");
	}
}