void render(BelaContext *context, void *userData)
{
	for(unsigned int frame = 0; frame < context->audioFrames; frame++) {
		// Once per analog frame, refresh the LFO rate from analog input 0,
		// mapping the 0-1 reading onto 0.25-20 Hz.
		if((frame % gAudioFramesPerAnalogFrame) == 0) {
			gFrequency = map(analogRead(context, frame, 0), 0.0, 1.0, 0.25, 20.0);
		}

		// Sinewave LFO with amplitude in [-0.5, 0.5].
		float lfo = sinf(gPhase) * 0.5;

		// Advance the LFO phase and wrap it into [0, 2*pi].
		gPhase += 2.0 * M_PI * gFrequency * gInverseSampleRate;
		if(gPhase > 2.0 * M_PI)
			gPhase -= 2.0 * M_PI;

		for(unsigned int channel = 0; channel < context->audioOutChannels; channel++) {
			// Halve the input amplitude, then ring-modulate it with the LFO.
			float halved = audioRead(context, frame, channel) * 0.5;
			audioWrite(context, frame, channel, halved * lfo);
		}
	}
}
void render(BelaContext *context, void *userData)
{
	// Process the audio channels: accumulate each input for averaging and
	// play a per-channel sine tone on the corresponding output.
	for(unsigned int n = 0; n < context->audioFrames; ++n) {
		for(unsigned int ch = 0; ch < context->audioOutChannels; ++ch) {
			float in = audioRead(context, n, ch);
			inAcc[ch] += in;
			++inAccCount[ch];
			float out = 0.4f * sinf(gPhase[ch]);
			gPhase[ch] += 2.f * (float)M_PI * gFreq[ch] / context->audioSampleRate;
			// Keep the phase in [-pi, pi]. Cast M_PI to float for
			// consistency with the analog section below (the original
			// compared against double M_PI here).
			if(gPhase[ch] > (float)M_PI)
				gPhase[ch] -= 2.f * (float)M_PI;
			audioWrite(context, n, ch, out);
		}
	}
	// Process the analog channels: same accumulate-and-oscillate scheme,
	// with the analog state stored after the audio channels' state.
	for(unsigned int n = 0; n < context->analogFrames; ++n) {
		for(unsigned int ch = 0; ch < context->analogInChannels; ++ch) {
			int idx = ch + context->audioOutChannels;
			float in = analogRead(context, n, ch);
			inAcc[idx] += in;
			++inAccCount[idx];
			// when using the capelet, the analog output is AC-coupled,
			// so we center it around 0, exactly the same as for the audio out
			float out = 0.4f * sinf(gPhase[idx]);
			// use the analogSampleRate instead
			gPhase[idx] += 2.f * (float)M_PI * gFreq[idx] / context->analogSampleRate;
			if(gPhase[idx] > (float)M_PI)
				gPhase[idx] -= 2.f * (float)M_PI;
			analogWriteOnce(context, n, ch, out);
		}
	}
	// Every half second worth of audio frames, print the running average of
	// the first 10 accumulated channels.
	static int count = 0;
	for(unsigned int n = 0; n < context->audioFrames; ++n) {
		count += 1;
		if(count % (int)(context->audioSampleRate * 0.5f) == 0) {
			rt_printf("Average input:\n");
			// Fix: the print loop index is renamed from `n` to `k` so it no
			// longer shadows the outer frame counter.
			for(unsigned int k = 0; k < 10; ++k) {
				rt_printf("[%d]:\t%.3f\t", k, inAcc[k]/inAccCount[k]);
				if(k % 2 == 1)
					rt_printf("\n");
			}
		}
	}
}
void render(BelaContext *context, void *userData)
{
	static float lfoPhase = 0;
	// range of variation around D. D has to be between [0 9999]
	float amplitude = lfoAmplitude * 4700;
	lfoPhase += lfoRate * 2 * M_PI * context->audioFrames / context->audioSampleRate;
	// Fix: wrap the LFO phase. Without this it grows without bound and
	// sinf() progressively loses precision as the float gets large.
	if(lfoPhase > 2 * M_PI)
		lfoPhase -= 2 * M_PI;
	D = amplitude + amplitude * sinf(lfoPhase);
	for(unsigned int n = 0; n < context->audioFrames; n++) {
		// Sum both input channels to mono.
		float input = audioRead(context, n, 0) + audioRead(context, n, 1);
		// Write into the delay line, feeding back the delayed signal.
		delay[writePointer++] = input + delay[readPointer] * feedback;
		// Mix dry input with the delayed signal and halve to avoid clipping.
		float output = (input + 0.9 * delay[readPointer++]) * 0.5;
		audioWrite(context, n, 0, output);
		audioWrite(context, n, 1, output);
		// Wrap the circular-buffer pointers.
		if(writePointer >= delayLength)
			writePointer -= delayLength;
		if(readPointer >= delayLength)
			readPointer -= delayLength;
	}
}
void render(BelaContext *context, void *userData)
{
	// Drain any pending OSC messages before touching the audio buffers.
	while (oscServer.messageWaiting())
		oscMessageCallback(oscServer.popMessage());

	for (uint32_t frame = 0; frame < context->audioFrames; frame++) {
		currentFrame = context->audioFramesElapsed + frame;
		const bool beat = checkBeat(currentFrame, context->audioSampleRate);

		// On a beat boundary, apply any scheduled recording start/stop
		// for each loop layer, then advance the beat counter.
		if (beat) {
			for (uint16_t idx = 0; idx < NUM_LAYERS; idx++) {
				LoopLayer& layer = layers[idx];
				if (layer.recordingStartScheduled())
					layer.startRecording(currentFrame);
				else if (layer.recordingStopScheduled())
					layer.stopRecording(currentFrame);
			}
			beatCount++;
		}

		const float inputSignal = audioRead(context, frame, gInputChannel);

		// Feed the input to every layer and sum playback from all layers
		// that are not recording — a recording layer would only echo back
		// the input signal, which we already have.
		float layerSignal = 0;
		for (uint16_t idx = 0; idx < NUM_LAYERS; idx++) {
			LoopLayer& layer = layers[idx];
			layer.input(currentFrame, inputSignal);
			if (!layer.isRecording())
				layerSignal += layer.read(currentFrame);
		}

		// Combine the recorded layers with the live input, scaled by the
		// current layer's gain.
		float outputSignal = layerSignal;
		outputSignal += (inputSignal * layers[gCurrentLayer].getMul());

		for (uint32_t ch = 0; ch < context->audioOutChannels; ch++) {
			audioWrite(context, frame, ch, outputSignal);
			// Overwrite with a full-scale click on beat frames.
			if (beat)
				audioWrite(context, frame, ch, 1);
		}
	}
}
void goertzelSample() { int sample = audioRead() - AUDIO_MIDDLE; offset++; if (offset == SAMPLES_PER_BIT) { offset = 0; } int i; for (i = 0; i < GOERTZEL_COUNT; i++) { if (enabled[i]) { d0High[i] = GOERTZEL_A_HIGH * d1High[i] - d2High[i] + sample; d2High[i] = d1High[i]; d1High[i] = d0High[i]; d0Low[i] = GOERTZEL_A_LOW * d1Low[i] - d2Low[i] + sample; d2Low[i] = d1Low[i]; d1Low[i] = d0Low[i]; if (i * GOERTZEL_DISTANCE == offset) { goertzelBlock(i); } } } }
void render(BelaContext *context, void *userData)
{
	static float lfoPhase = 0;
	// range of variation around D. D has to be between [0 9999]
	float amplitude = lfoAmplitude * 4700;
	lfoPhase += lfoRate * 2.f * (float)M_PI * context->audioFrames/context->audioSampleRate;
	// Fix: wrap the LFO phase. Without this it grows without bound and
	// sinf() progressively loses precision as the float gets large.
	if(lfoPhase > 2.f * (float)M_PI)
		lfoPhase -= 2.f * (float)M_PI;
	D = amplitude + amplitude * sinf(lfoPhase);
	Bela_scheduleAuxiliaryTask(updatePll);
	for(unsigned int n = 0; n < context->audioFrames; n++) {
		// Average all input channels down to mono.
		float input = 0.0;
		for(unsigned int ch = 0; ch < context->audioInChannels; ch++) {
			input += audioRead(context, n, ch);
		}
		input = input/(float)context->audioInChannels;
		// Write into the delay line, feeding back the delayed signal.
		delay[writePointer++] = input + delay[readPointer]*feedback;
		// Mix dry input and wet delayed signal.
		float output = dry * input + wet * delay[readPointer++];
		for(unsigned int ch = 0; ch < context->audioOutChannels; ch++)
			audioWrite(context, n, ch, output);
		// Wrap the circular-buffer pointers.
		if(writePointer >= delayLength)
			writePointer -= delayLength;
		if(readPointer >= delayLength)
			readPointer -= delayLength;
	}
}
/* Read from the audio device (if needed) and draw the current frame:
 * spectrogram history texture, current spectrum or waveform, optional
 * overtone/grid/frequency guides, and the border. Reads sound.* and
 * interaction.* globals; consumes sound.bufferReady. */
void updateDisplay(void)
{
    int i;

    float bgcolor[3] = DISPLAY_BACKGROUND_COLOR;
    glClearColor(bgcolor[0], bgcolor[1], bgcolor[2], 1);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    if (interaction.update)
    {
        /* Try again *now* if it failed. */
        while (audioRead() < 0);
    }

    if (sound.bufferReady)
    {
        /* The buffer is marked as "full". We can now read it. After the
         * texture has been updated, the buffer gets marked as "not
         * ready". */

        /* First, copy the current buffer to the secondary buffer. We
         * will show that second buffer if the first buffer is not yet
         * ready. (Size is frames * 2 — presumably 16-bit mono samples;
         * TODO confirm against the capture format.) */
        memmove(sound.bufferLast, sound.buffer, sound.bufferSizeFrames * 2);

        /* Calculate spectrum. Casting "sound.bufferSizeFrames" works as
         * long as it's less than 2GB. I assume this to be true because
         * nobody will read 2GB at once from his sound card (at least
         * not today :-). Samples are normalized to roughly [-1, 1] by
         * dividing by 256 * 256 = 65536. */
        for (i = 0; i < (int)sound.bufferSizeFrames; i++)
        {
            short int val = getFrame(sound.buffer, i);
            fftw.in[i] = 2 * (double)val / (256 * 256);
        }
        fftw_execute(fftw.plan);

        /* Draw history into a texture. First, move old texture one line
         * up (3 bytes per RGB pixel). */
        memmove(fftw.textureData + (3 * fftw.textureWidth), fftw.textureData,
                (fftw.textureHeight - 1) * fftw.textureWidth * 3);

        int ha = 0, ta = 0;
        double histramp[][4] = DISPLAY_SPEC_HISTORY_RAMP;
        for (i = 0; i < fftw.outlen; i++)
        {
            /* Bin magnitude, scaled and clamped to [0, 1]. */
            double val = sqrt(fftw.out[i][0] * fftw.out[i][0]
                              + fftw.out[i][1] * fftw.out[i][1]) / FFTW_SCALE;
            val = val > 1.0 ? 1.0 : val;

            /* Save current line for current spectrum. */
            fftw.currentLine[ha++] = val;

            /* Find first index where "val" is outside that color
             * interval. */
            int colat = 1;
            while (colat < DISPLAY_SPEC_HISTORY_RAMP_NUM
                   && val > histramp[colat][0])
                colat++;

            colat--;

            /* Scale "val" into this interval. */
            double span = histramp[colat + 1][0] - histramp[colat][0];
            val -= histramp[colat][0];
            val /= span;

            /* Interpolate those two colors linearly. */
            double colnow[3];
            colnow[0] = histramp[colat][1] * (1 - val)
                        + val * histramp[colat + 1][1];
            colnow[1] = histramp[colat][2] * (1 - val)
                        + val * histramp[colat + 1][2];
            colnow[2] = histramp[colat][3] * (1 - val)
                        + val * histramp[colat + 1][3];

            /* Write this line into new first line of the texture. */
            fftw.textureData[ta++] = (unsigned char)(colnow[0] * 255);
            fftw.textureData[ta++] = (unsigned char)(colnow[1] * 255);
            fftw.textureData[ta++] = (unsigned char)(colnow[2] * 255);
        }
    }

    /* Enable texturing for the quad/history. */
    glEnable(GL_TEXTURE_2D);
    glBindTexture(GL_TEXTURE_2D, fftw.textureHandle);

    if (sound.bufferReady)
    {
        /* Update texture. */
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0,
                        fftw.textureWidth, fftw.textureHeight,
                        GL_RGB, GL_UNSIGNED_BYTE, fftw.textureData);
        checkError(__LINE__);

        /* Reset buffer state. The buffer is no longer ready and we
         * can't update the texture from it until audioRead() re-marked
         * it as ready. */
        sound.bufferReady = 0;
    }

    /* Apply zoom and panning. */
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    if (!interaction.forceOverview)
    {
        glScaled(interaction.scaleX, 1, 1);
        glTranslated(interaction.offsetX, 0, 0);
    }

    /* Draw a textured quad. */
    glColor3f(1, 1, 1);
    glBegin(GL_QUADS);
    /* The texture must be moved half the width of a bin to the left to
     * match the line spectrogram. (Yes, these "0.5"s cancel out. Let
     * the compiler do this. It's easier to understand this way.) */
    double halfBin = (0.5 * fftw.binWidth) / (0.5 * SOUND_RATE);
    glTexCoord2d(0 + halfBin, 0);  glVertex2f(-1, -0.5);
    glTexCoord2d(1 + halfBin, 0);  glVertex2f( 1, -0.5);
    glTexCoord2d(1 + halfBin, 1);  glVertex2f( 1, 1);
    glTexCoord2d(0 + halfBin, 1);  glVertex2f(-1, 1);
    glEnd();
    glDisable(GL_TEXTURE_2D);

    /* Show current spectrum. */
    if (!interaction.showWaveform)
    {
        float curcol[3] = DISPLAY_SPEC_CURRENT_COLOR;
        glColor3fv(curcol);
        glBegin(GL_LINE_STRIP);
        for (i = 0; i < fftw.outlen; i++)
        {
            /* relX will be in [-1, 1], relY will be in [0, 1]. */
            double relX = 2 * ((double)i / fftw.outlen) - 1;
            double relY = fftw.currentLine[i];

            /* Move relY so it'll be shown at the bottom of the screen. */
            relY *= 0.5;
            relY -= 1;
            glVertex2f(relX, relY);
        }
        glEnd();
    }
    else
    {
        /* Show the raw waveform instead, in its own untransformed
         * coordinate system. */
        glPushMatrix();
        glLoadIdentity();
        float curcol[3] = DISPLAY_WAVEFORM_COLOR;
        glColor3fv(curcol);
        glBegin(GL_LINE_STRIP);
        for (i = 0; i < (int)sound.bufferSizeFrames; i++)
        {
            /* relX will be in [-1, 1], relY will be in [-s, s] where s
             * is WAVEFORM_SCALE. */
            short int val = getFrame(sound.bufferLast, i);
            double relX = 2 * ((double)i / sound.bufferSizeFrames) - 1;
            double relY = 2 * WAVEFORM_SCALE * (double)val / (256 * 256);

            /* Clamp relY ... WAVEFORM_SCALE may be too high. */
            relY = relY > 1 ? 1 : relY;
            relY = relY < -1 ? -1 : relY;

            /* Move relY so it'll be shown at the bottom of the screen. */
            relY *= 0.25;
            relY -= 0.75;
            glVertex2f(relX, relY);
        }
        glEnd();
        glPopMatrix();
    }

    /* Guide lines start higher when the waveform occupies the bottom. */
    float lineYStart = -1;
    if (interaction.showWaveform)
        lineYStart = -0.5;

    /* Current line and overtones? */
    if (interaction.showOvertones)
    {
        glBegin(GL_LINES);

        /* Crosshair. */
        float colcross[3] = DISPLAY_LINECOLOR_CROSS;
        glColor3fv(colcross);
        glVertex2f(interaction.lastMouseDownEW[0], lineYStart);
        glVertex2f(interaction.lastMouseDownEW[0], 1);

        glColor3fv(colcross);
        glVertex2f(-1, interaction.lastMouseDownEW[1]);
        glVertex2f( 1, interaction.lastMouseDownEW[1]);

        /* Indicate overtones at all multiples of the current frequency
         * (... this draws unneccssary lines when zoomed in). Don't draw
         * these lines if they're less than 5 pixels apart. */
        float colover[3] = DISPLAY_LINECOLOR_OVERTONES;
        glColor3fv(colover);
        double nowscale = interaction.forceOverview ? 1 : interaction.scaleX;
        double xInitial = interaction.lastMouseDownEW[0] + 1;
        if (xInitial * interaction.width * nowscale > 5)
        {
            double x = xInitial * 2;
            while (x - 1 < 1)
            {
                glVertex2f(x - 1, lineYStart);
                glVertex2f(x - 1, 1);
                x += xInitial;
            }
        }

        /* Undertones until two lines are less than 2 pixels apart. */
        double x = xInitial;
        while ((0.5 * x * interaction.width * nowscale)
               - (0.25 * x * interaction.width * nowscale) > 2)
        {
            x /= 2;
            glVertex2f(x - 1, lineYStart);
            glVertex2f(x - 1, 1);
        }

        glEnd();
    }
    else if (interaction.showMainGrid)
    {
        glBegin(GL_LINES);

        /* Show "main grid" otherwise. */
        float colgrid1[3] = DISPLAY_LINECOLOR_GRID_1;
        glColor3fv(colgrid1);
        glVertex2f(0, lineYStart);
        glVertex2f(0, 1);

        float colgrid2[3] = DISPLAY_LINECOLOR_GRID_2;
        glColor3fv(colgrid2);
        glVertex2f(0.5, lineYStart);
        glVertex2f(0.5, 1);
        glVertex2f(-0.5, lineYStart);
        glVertex2f(-0.5, 1);

        glEnd();
    }

    if (interaction.showFrequency)
    {
        /* Scale from [-1, 1] to [0, fftw.outlen). */
        double t = (interaction.lastMouseDownEW[0] + 1) / 2.0;
        int bin = (int)round(t * fftw.outlen);
        bin = (bin < 0 ? 0 : bin);
        bin = (bin >= fftw.outlen ? fftw.outlen - 1 : bin);

        /* Where exactly is this bin displayed? We want to snap our
         * guide line to that position. */
        double snapX = ((double)bin / fftw.outlen) * 2 - 1;

        /* SOUND_RATE and SOUND_SAMPLES_PER_TURN determine the "size" of
         * each "bin" (see calculation of binWidth). Each bin has a size
         * of some hertz. The i'th bin corresponds to a frequency of i *
         * <that size> Hz. Note that the resolution is pretty low on
         * most setups, so it doesn't make any sense to display decimal
         * places. */
        int freq = (int)(fftw.binWidth * bin);

        /* Draw frequency -- left or right of the guide line. */
        float coltext[3] = DISPLAY_TEXTCOLOR;
        glColor3fv(coltext);
        double nowscale = interaction.forceOverview ? 1 : interaction.scaleX;
        double nowoffX = interaction.forceOverview ? 0 : interaction.offsetX;
        double screenX = (interaction.lastMouseDownEW[0] + nowoffX) * nowscale;

        /* Flipping the label could be done at exactly 50% of the
         * screen. But we only flip it if it's some pixels away from the
         * center. */
        if (screenX < -0.25)
        {
            interaction.frequencyLabelLeft = 1;
        }
        else if (screenX > 0.25)
        {
            interaction.frequencyLabelLeft = 0;
        }

        char freqstr[256] = "";
        if (interaction.frequencyLabelLeft)
        {
            glRasterPos2d(snapX, interaction.lastMouseDownEW[1]);
            snprintf(freqstr, 256, " <- approx. %d Hz", freq);
        }
        else
        {
            snprintf(freqstr, 256, "approx. %d Hz -> ", freq);
            glRasterPos2d(snapX - 10 * (double)strlen(freqstr)
                          / interaction.width / nowscale,
                          interaction.lastMouseDownEW[1]);
        }

        /* NOTE: this size_t i deliberately shadows the int i above. */
        size_t i;
        for (i = 0; i < strlen(freqstr); i++)
            glutBitmapCharacter(GLUT_BITMAP_HELVETICA_10, freqstr[i]);

        /* Show guideline for this frequency. */
        float colcross[3] = DISPLAY_LINECOLOR_CROSS;
        glColor3fv(colcross);
        glBegin(GL_LINES);
        glVertex2f(snapX, lineYStart);
        glVertex2f(snapX, 1);
        glEnd();
    }

    /* Separator between current spectrum and history; border. */
    glBegin(GL_LINES);
    float colborder[3] = DISPLAY_LINECOLOR_BORDER;
    glColor3fv(colborder);
    glVertex2f(-1, -0.5);
    glVertex2f( 1, -0.5);

    glVertex2f(-1, lineYStart);
    glVertex2f(-1, 1);
    glVertex2f( 1, lineYStart);
    glVertex2f( 1, 1);
    glEnd();

    glutSwapBuffers();
}
/* Factory self-test render callback: runs a state machine
 * (gAudioTestState) that first tests the left then the right audio
 * input/output loopback with a 3 kHz tone, then exercises the analog
 * ins/outs with alternating high/low patterns, and finally plays status
 * tones. Relies on many g* globals defined elsewhere in this file. */
void render(BelaContext *context, void *userData)
{
	// float* aoc = (float*)&context->analogOutChannels;
	// *aoc = 0; // simulate Bela Mini. Should also change the condition in setup() accordingly
	static float phase = 0.0;
	static int sampleCounter = 0;
	static int invertChannel = 0;
	float frequency = 0;

	// State-machine transitions that happen once per render call.
	if(gAudioTestState == kStateTestingNone){
		gAudioTestState = kStateTestingAudioLeft;
		rt_printf("Testing audio left\n");
	}
	if(gAudioTestState == kStateTestingAudioDone) {
		gAudioTestState = kStateTestingAnalog;
		rt_printf("Testing analog\n");
	}

	// Play a sine wave on the audio output
	for(unsigned int n = 0; n < context->audioFrames; n++) {
		// Peak detection on the audio inputs, with offset to catch
		// DC errors: the +/-0.1 shift before/after the decay makes the
		// envelope decay toward 0 even with a DC-offset input.
		for(int ch = 0; ch < context->audioInChannels; ch++) {
			float value = audioRead(context, n, ch);
			if(value > gPositivePeakLevels[ch])
				gPositivePeakLevels[ch] = value;
			gPositivePeakLevels[ch] += 0.1f;
			gPositivePeakLevels[ch] *= gPeakLevelDecayRate;
			gPositivePeakLevels[ch] -= 0.1f;
			if(value < gNegativePeakLevels[ch])
				gNegativePeakLevels[ch] = value;
			gNegativePeakLevels[ch] -= 0.1f;
			gNegativePeakLevels[ch] *= gPeakLevelDecayRate;
			gNegativePeakLevels[ch] += 0.1f;
		}

		// Which channel should carry the test tone in the current state.
		// NOTE(review): these stay uninitialized when the state is neither
		// AudioLeft nor AudioRight, but they are only read inside the
		// matching if-branch below, so this is safe.
		int enabledChannel;
		int disabledChannel;
		const char* enabledChannelLabel;
		const char* disabledChannelLabel;
		if(gAudioTestState == kStateTestingAudioLeft) {
			enabledChannel = 0;
			disabledChannel = 1;
			enabledChannelLabel = "Left";
			disabledChannelLabel = "Right";
		} else if (gAudioTestState == kStateTestingAudioRight) {
			enabledChannel = 1;
			disabledChannel = 0;
			enabledChannelLabel = "Right";
			disabledChannelLabel = "Left";
		}

		if(gAudioTestState == kStateTestingAudioLeft || gAudioTestState == kStateTestingAudioRight) {
			// 3 kHz test tone on the enabled channel, silence on the other.
			audioWrite(context, n, enabledChannel, 0.2f * sinf(phase));
			audioWrite(context, n, disabledChannel, 0);
			frequency = 3000.0;
			phase += 2.0f * (float)M_PI * frequency / context->audioSampleRate;
			// Wrap the phase into [-pi, pi).
			if(phase >= M_PI)
				phase -= 2.0f * (float)M_PI;

			gAudioTestStateSampleCount++;
			if(gAudioTestStateSampleCount >= gAudioTestStateSampleThreshold) {
				// Check if we have the expected input: signal on the enabledChannel but not
				// on the disabledChannel. Also check that there is not too much DC offset on the
				// inactive channel
				if((gPositivePeakLevels[enabledChannel] - gNegativePeakLevels[enabledChannel]) >= gPeakLevelHighThreshold
					&& (gPositivePeakLevels[disabledChannel] - gNegativePeakLevels[disabledChannel]) <= gPeakLevelLowThreshold
					&& fabsf(gPositivePeakLevels[disabledChannel]) < gDCOffsetThreshold
					&& fabsf(gNegativePeakLevels[disabledChannel]) < gDCOffsetThreshold) {
					// Successful test: increment counter
					gAudioTestSuccessCounter++;
					if(gAudioTestSuccessCounter >= gAudioTestSuccessCounterThreshold) {
						rt_printf("Audio %s test successful\n", enabledChannelLabel);
						// Advance the state machine: left -> right -> done.
						if(gAudioTestState == kStateTestingAudioLeft) {
							gAudioTestState = kStateTestingAudioRight;
							rt_printf("Testing audio Right\n");
						} else if(gAudioTestState == kStateTestingAudioRight) {
							gAudioTestState = kStateTestingAudioDone;
						}
						gAudioTestStateSampleCount = 0;
						gAudioTestSuccessCounter = 0;
					}
				} else {
					// Rate-limit the diagnostics to once every 22050 frames.
					if(!((context->audioFramesElapsed + n) % 22050)) {
						// Debugging print messages
						if((gPositivePeakLevels[enabledChannel] - gNegativePeakLevels[enabledChannel]) < gPeakLevelHighThreshold)
							rt_printf("%s Audio In FAIL: insufficient signal: %f\n", enabledChannelLabel,
								gPositivePeakLevels[enabledChannel] - gNegativePeakLevels[enabledChannel]);
						else if(gPositivePeakLevels[disabledChannel] - gNegativePeakLevels[disabledChannel] > gPeakLevelLowThreshold)
							rt_printf("%s Audio In FAIL: signal present when it should not be: %f\n", disabledChannelLabel,
								gPositivePeakLevels[disabledChannel] - gNegativePeakLevels[disabledChannel]);
						else if(fabsf(gPositivePeakLevels[disabledChannel]) >= gDCOffsetThreshold || fabsf(gNegativePeakLevels[disabledChannel]) >= gDCOffsetThreshold)
							rt_printf("%s Audio In FAIL: DC offset: (%f, %f)\n", disabledChannelLabel,
								gPositivePeakLevels[disabledChannel], gNegativePeakLevels[disabledChannel]);
					}
					// Failed check: decrement (clamped at 0) so only sustained
					// success passes the test.
					gAudioTestSuccessCounter--;
					if(gAudioTestSuccessCounter <= 0)
						gAudioTestSuccessCounter = 0;
				}
			}
		}

		if( gAudioTestState == kStateTestingAnalogDone || // Bela Mini: the audio outs are used also for testing analogs, so we only play the tone at the end of all tests
			(gAudioTestState >= kStateTestingAudioDone && context->analogOutChannels) // Bela: we play as soon as testing audio ends, while live-testing the analogs.
			) {
			// Audio input testing finished. Play tones depending on status of
			// analog testing
			audioWrite(context, n, 0, gEnvelopeValueL * sinf(phase));
			audioWrite(context, n, 1, gEnvelopeValueR * sinf(phase));

			// If one second has gone by with no error, play one sound, else
			// play another
			if(context->audioFramesElapsed + n - gLastErrorFrame > context->audioSampleRate) {
				// No recent errors: decaying 880 Hz pings alternating L/R.
				gEnvelopeValueL *= gEnvelopeDecayRate;
				gEnvelopeValueR *= gEnvelopeDecayRate;
				gEnvelopeSampleCount++;
				if(gEnvelopeSampleCount > 22050) {
					if(gEnvelopeLastChannel == 0)
						gEnvelopeValueR = 0.5;
					else
						gEnvelopeValueL = 0.5;
					gEnvelopeLastChannel = !gEnvelopeLastChannel;
					gEnvelopeSampleCount = 0;
				}
				frequency = 880.0;
				// Blink the LEDs with the envelopes.
				if(led1) { led1->write(gEnvelopeValueL > 0.2); }
				if(led2) { led2->write(gEnvelopeValueR > 0.2); }
			} else {
				// Recent error: sustained 220 Hz tone on both channels.
				gEnvelopeValueL = gEnvelopeValueR = 0.5;
				gEnvelopeLastChannel = 0;
				frequency = 220.0;
			}
			phase += 2.0f * (float)M_PI * frequency / context->audioSampleRate;
			if(phase >= M_PI)
				phase -= 2.0f * (float)M_PI;
		}
	}

	// On Bela Mini (no analog outs) the analog test drives the audio outs
	// at audio rate instead of the analog outs at analog rate.
	unsigned int outChannels = context->analogOutChannels ? context->analogOutChannels : context->audioOutChannels;
	unsigned int outFrames = context->analogOutChannels ? context->analogFrames : context->audioFrames;
	if(gAudioTestState == kStateTestingAnalog) {
		for(unsigned int n = 0; n < outFrames; n++) {
			// Change outputs every 512 samples: one "inverted" channel is
			// driven opposite to all the others, rotating over time.
			for(int k = 0; k < outChannels; k++) {
				float outValue;
				if((k % outChannels) == (invertChannel % outChannels))
					outValue = sampleCounter < 512 ? ANALOG_OUT_HIGH : ANALOG_OUT_LOW;
				else
					outValue = sampleCounter < 512 ? ANALOG_OUT_LOW : ANALOG_OUT_HIGH;
				if(context->analogOutChannels == 0)
					audioWrite(context, n, k%2, outValue); // Bela Mini, using audio outs instead
				else
					analogWriteOnce(context, n, k, outValue); // Bela
			}
		}
		for(unsigned int n = 0; n < context->analogFrames; n++) {
			// Read after 256 samples: input should be low (high for inverted)
			// Read after 768 samples: input should be high (low for inverted)
			if(sampleCounter == 256 || sampleCounter == 768) {
				for(int k = 0; k < context->analogInChannels; k++) {
					float inValue = analogRead(context, n, k);
					bool inverted = ((k % outChannels) == (invertChannel % outChannels));
					if( ( inverted && ( (sampleCounter == 256 && inValue < ANALOG_IN_HIGH) || (sampleCounter == 768 && inValue > ANALOG_IN_LOW) ) )
						|| ( !inverted && ( (sampleCounter == 256 && inValue > ANALOG_IN_LOW) || (sampleCounter == 768 && inValue < ANALOG_IN_HIGH) ) ) ) {
						rt_printf("Analog FAIL [output %d, input %d] -- output %s input %f %s\n",
							k % outChannels, k,
							(sampleCounter == 256 && inverted) || (sampleCounter == 768 && !inverted) ? "HIGH" : "LOW",
							inValue,
							inverted ? "(inverted channel)" : "");
						gLastErrorFrame = context->audioFramesElapsed + n;
						gAnalogTestSuccessCounter = 0;
					} else {
						++gAnalogTestSuccessCounter;
					}
				}
			}
			// After a full 1024-sample cycle, rotate the inverted channel.
			if(++sampleCounter >= 1024) {
				sampleCounter = 0;
				invertChannel++;
				if(invertChannel >= 8)
					invertChannel = 0;
			}
			if(gAnalogTestSuccessCounter >= 500) {
				// Only announce success once.
				static bool notified = false;
				if(!notified) {
					rt_printf("Analog test successful\n");
					gAudioTestState = kStateTestingAnalogDone;
				}
				notified = true;
			}
		}
	}
}