void onAudio(AudioIOData& io){
	while(io()){
		float smp = src();

		// A new spectral frame is available whenever this returns true
		if(stft(smp)){

			// Edges of the pass band, in Hz
			const float bandLo = 400;
			const float bandHi = 1800;

			for(unsigned k=0; k<stft.numBins(); ++k){
				// Frequency, in Hz, of this bin
				const float binHz = k*stft.binFreq();

				// Zero every bin whose frequency lies outside the band
				const bool outOfBand = binHz < bandLo || binHz > bandHi;
				if(outOfBand){
					stft.bin(k) = 0;
				}
			}
		}

		// Next resynthesized (band-passed) sample
		smp = stft();
		io.out(0) = smp;
		io.out(1) = smp;
	}
}
void onAudio(AudioIOData& io) {
	while(io()) {
		// Source signal: a saw wave plus a small amount of noise
		float samp = saw()*0.3 + noise()*0.04;

		if(stft(samp)) {
			const unsigned nbins = stft.numBins();
			for(unsigned k=0; k<nbins; ++k) {
				// The first bin element is the magnitude
				float& mag = stft.bin(k)[0];

				// Zero bins whose magnitude falls below the threshold.
				// The assumption here is that noisy bins have a
				// relatively small magnitude.
				if(mag < 0.0004) mag = 0;

				// Flipping the comparison keeps only the noise:
				//if(mag > 0.0001) mag = 0;
			}
		}

		// Get next resynthesized sample
		// (Comment this out to hear the original signal with noise.)
		samp = stft();
		io.out(0) = io.out(1) = samp;
	}
}
void audioCB(AudioIOData& io){
	while(io()){
		float s = src();	// Input next sample for analysis

		// When this returns true, then we have a new spectral frame
		if(stft(s)){
			// Fraction of bins to keep; pow3 warps the unipolar triangle
			// wave so the knee spends more time near the low end.
			float frac = scl::pow3( edge.triU() );
			int N = stft.numBins();

			// The knee index is loop-invariant, so compute it once
			// instead of once per bin (it was recomputed inside the
			// loop in the original).
			int indKnee = frac * N;

			// Brick-wall low-pass: keep bins below the knee, zero the rest
			for(int k=0; k<N; ++k){
				stft.bin(k) *= k < indKnee ? 1:0;
			}
		}

		// Get next resynthesized sample
		s = stft() * 0.2;
		io.out(0) = s;
		io.out(1) = s;
	}
}
void audioCB(AudioIOData& io){
	using namespace gam::rnd;
	while(io()){

		// Occasionally advance the random seed
		if(tmr()){ if(prob()) rng++; }

		// Modulated mix between the two pulse-wave pairs
		float mx = mix.hann();
		float s = (oscA.up() - oscB.up())*mx + (osc1.up() - osc2.up())*(1-mx);

		// Add frequency-shifted copies of the signal to get barberpole combs
		fshift1.freq(modfs1.tri()*2);
		fshift2.freq(modfs2.tri()*2);
		// Equivalent to the original ((f1+f2)/2 + s)/1, minus the no-op /1
		s += (fshift1(s) + fshift2(s))*0.5;

		if(stft(s)){
			// Spectral thinning: randomly mute/boost bins, repeatable per seed
			rnd::push(rng);
			float prb = rnd::uni(0.3, 0.1);
			for(unsigned k=0; k<stft.numBins(); ++k){
				//float frac = double(k)/stft.numBins();
				float m = pick(pick(2,1, 0.1),0, prb);
				// Fix: use the per-bin accessor bin(k); bins(k) is not the
				// element accessor (cf. the cleaned-up sibling callback).
				stft.bin(k) *= m;
			}
			rnd::pop();
			stft.zeroEnds();
		}

		s = stft()*0.5;

		// "Spatialize" with two chains of modulated echoes
		float s0 = chrA3(chrA2(chrA1(s)));
		float s1 = chrB3(chrB2(chrB1(s)));
		io.out(0) = s0;
		io.out(1) = s1;
	}
}
void onAudio(AudioIOData& io){
	while(io()){

		// Occasionally bump the seed to vary the thinning pattern
		if(tmr()){
			if(rnd::prob()) seed++;
		}

		// Crossfade between the two pulse-wave pairs
		float xfade = mix.hann();
		float pairA = oscA.up() - oscB.up();
		float pairB = osc1.up() - osc2.up();
		float sig = pairA*xfade + pairB*(1-xfade);

		// Frequency-shifted copies produce barberpole comb effects
		fshift1.freq(modfs1.tri()*2);
		fshift2.freq(modfs2.tri()*2);
		sig += (fshift1(sig) + fshift2(sig))*0.5;

		// Apply spectral thinning on each new analysis frame
		if(stft(sig)){
			rnd::push(seed);
			float prb = rnd::uni(0.3, 0.1);
			for(unsigned k=0; k<stft.numBins(); ++k){
				//float frac = double(k)/stft.numBins();
				float gain = rnd::pick(rnd::pick(2,1, 0.1),0, prb);
				stft.bin(k) *= gain;
			}
			rnd::pop();
			stft.zeroEnds();
		}

		sig = stft()*0.5;

		// "Spatialize" with modulated echo chains, one per channel
		io.out(0) = chrA3(chrA2(chrA1(sig)));
		io.out(1) = chrB3(chrB2(chrB1(sig)));
	}
}
void onAudio(AudioIOData& io){
	// The oscillator frequency is constant, so set it once per audio
	// block instead of once per sample (it was inside the loop).
	src.freq(220);

	while(io()){
		float s = src();	// Input next sample for analysis

		// When this returns true, then we have a new spectral frame
		if(stft(s)){
			// Loop through all the bins
			for(unsigned k=0; k<stft.numBins(); ++k){
				// Here we simply scale the complex sample
				stft.bin(k) *= 0.2;
			}
		}

		// Get next resynthesized sample
		s = stft();
		io.out(0) = s;
		io.out(1) = s;
	}
}
void onAudio(AudioIOData& io){
	while(io()){
		// Our signal is a saw wave plus a little noise
		float s = saw()*0.3 + noise()*0.05;

		// Input next sample for analysis.
		// When this returns true, then we have a new spectral frame.
		if(stft(s)){
			// Use a signed count: with the original unsigned loop bound,
			// numBins()-1 would wrap to a huge value if numBins()==0.
			int N = stft.numBins();

			// Loop through the interior bins
			for(int k=1; k<N-1; ++k){
				// Get neighborhood of magnitudes
				float m0 = stft.bin(k-1)[0];
				float m1 = stft.bin(k  )[0];
				float m2 = stft.bin(k+1)[0];

				// Is the current bin a peak?
				if(m1 > m0 && m1 > m2){
					// Zero the peak (for demonstration purposes)
					stft.bin(k-1)[0] = 0;
					stft.bin(k  )[0] = 0;
					stft.bin(k+1)[0] = 0;
					k++; // Skip the next bin---it cannot be a peak
				}
			}
		}

		// Get next resynthesized sample
		s = stft();
		io.out(0) = io.out(1) = s;
	}
}
void onAudio(AudioIOData& io){
	// Pitch-shift ratio; >1 shifts up, <1 shifts down
	float pshift = 1.7831;
	//pshift = 1./pshift;

	while(io()){
		float s = play();	// next sample from the sample player
		play.loop();		// keep the player looping

		// When this returns true, then we have a new spectral frame
		if(stft(s)){
			// Indices of the auxiliary spectral buffers used below
			enum{ PREV_MAG=0, TEMP_MAG, TEMP_FRQ };

			// Compute spectral flux (L^1 norm on positive changes):
			// sum of magnitude increases versus the previous frame.
			float flux = 0;
			for(unsigned k=0; k<stft.numBins(); ++k){
				float mcurr = stft.bin(k)[0];
				float mprev = stft.aux(PREV_MAG)[k];
				if(mcurr > mprev){
					flux += mcurr - mprev;
				}
			}
			//printf("%g\n", flux);
			//gam::printPlot(flux); printf("\n");

			// Store magnitudes for next frame
			stft.copyBinsToAux(0, PREV_MAG);

			// Given an onset, we would like the phases of the output frame
			// to match the input frame in order to preserve transients.
			// NOTE(review): the 0.2 flux threshold appears empirical — tune
			// per source material.
			if(flux > 0.2){ stft.resetPhases(); }

			// Initialize buffers to store pitch-shifted spectrum:
			// zero magnitudes, frequencies reset to each bin's center.
			for(unsigned k=0; k<stft.numBins(); ++k){
				stft.aux(TEMP_MAG)[k] = 0.;
				stft.aux(TEMP_FRQ)[k] = k*stft.binFreq();
			}

			// Perform the pitch shift:
			// Here we contract or expand the bins. For overlapping bins,
			// we simply add the magnitudes and replace the frequency.
			// Reference:
			// http://oldsite.dspdimension.com/dspdimension.com/src/smbPitchShift.cpp
			if(pshift > 0){
				// Highest source bin whose destination stays in range
				unsigned kmax = stft.numBins() / pshift;
				if(kmax >= stft.numBins()) kmax = stft.numBins()-1;
				for(unsigned k=1; k<kmax; ++k){
					// Destination bin index (truncated toward zero)
					unsigned j = k*pshift;
					stft.aux(TEMP_MAG)[j] += stft.bin(k)[0];
					stft.aux(TEMP_FRQ)[j] = stft.bin(k)[1]*pshift;
				}
			}

			// Copy pitch-shifted spectrum over to bins
			stft.copyAuxToBins(TEMP_MAG, 0);
			stft.copyAuxToBins(TEMP_FRQ, 1);
		}

		// Get next resynthesized sample
		s = stft();
		io.out(0) = io.out(1) = s;
	}
}