Example #1
void render(BelaContext *context, void *userData)
{
    // listen for OSC
    while (oscServer.messageWaiting()) {
        oscMessageCallback(oscServer.popMessage());
    }

    // audio loop
    for (uint32_t n = 0; n < context->audioFrames; n++) {
        currentFrame = context->audioFramesElapsed + n;
        bool beat = checkBeat(currentFrame, context->audioSampleRate);

        if (beat) {
            for (uint16_t l = 0; l < NUM_LAYERS; l++) {
                LoopLayer& layer = layers[l];

                if (layer.recordingStartScheduled()) {
                    layer.startRecording(currentFrame);
                }
                else if (layer.recordingStopScheduled()) {
                    layer.stopRecording(currentFrame);
                }
            }
            beatCount++;
        }

        const float inputSignal = audioRead(context, n, gInputChannel);

        float layerSignal = 0;
        // record into layers
        for (uint16_t l = 0; l < NUM_LAYERS; l++) {
            LoopLayer& layer = layers[l];
            // send input signal
            layer.input(currentFrame, inputSignal);
            // sum all layers, except those that are recording, as they will
            // be giving us the input signal, which we already have
            if (!layer.isRecording()) {
                layerSignal += layer.read(currentFrame);
            }
        }

        // combine input pass through and recorded layers
        float outputSignal = layerSignal;
        outputSignal += (inputSignal * layers[gCurrentLayer].getMul());

        // output
        for (uint32_t ch = 0; ch < context->audioOutChannels; ch++) {
            audioWrite(context, n, ch, outputSignal);

            if(beat) {
                // overwrite the sample with a full-scale click on the beat
                audioWrite(context, n, ch, 1);
            }
        }
    }
}
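This render() depends on helpers that are not shown here (oscServer, LoopLayer, checkBeat). As a rough sketch of the beat logic only, checkBeat could look like the following; the fixed tempo and the name gBpm are assumptions, not taken from the project above.

#include <cstdint>

// Hypothetical sketch of the checkBeat() helper used above, assuming a
// fixed tempo. gBpm is an assumed name; the real project may differ.
static float gBpm = 120.0f;

bool checkBeat(uint64_t currentFrame, float sampleRate)
{
    // number of audio frames between beats at the current tempo
    const uint64_t framesPerBeat = (uint64_t)(sampleRate * 60.0f / gBpm);
    return (currentFrame % framesPerBeat) == 0;
}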
Example #2
File: render.cpp Project: acarabott/Bela
void render(BelaContext *context, void *userData)
{
	// Loop over the audio frames
	for(unsigned int n = 0; n < context->audioFrames; n++) {

		if(!(n % gAudioFramesPerAnalogFrame)) {
			// On even audio samples:
			// Read analog channel 0 and map the range from 0-1 to 0.25-20
			// use this to set the value of gFrequency
			gFrequency = map(analogRead(context, n, 0), 0.0, 1.0, 0.25, 20.0);
		}

		// Generate a sinewave with frequency set by gFrequency
		// and amplitude from -0.5 to 0.5
		float lfo = sinf(gPhase) * 0.5;
		// Keep track and wrap the phase of the sinewave
		gPhase += 2.0 * M_PI * gFrequency * gInverseSampleRate;
		if(gPhase > 2.0 * M_PI)
			gPhase -= 2.0 * M_PI;

		for(unsigned int channel = 0; channel < context->audioOutChannels; channel++) {
			// Read the audio input and halve the amplitude
			float input = audioRead(context, n, channel) * 0.5;
			// Write the input multiplied by the sinewave to the audio output (amplitude modulation)
			audioWrite(context, n, channel, (input*lfo));
		}
	}
}
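The globals read here (gPhase, gFrequency, gInverseSampleRate, gAudioFramesPerAnalogFrame) are defined elsewhere in the file. A minimal setup() consistent with how this render() uses them, following the usual Bela pattern:

float gFrequency = 1.0;
float gPhase = 0.0;
float gInverseSampleRate;
unsigned int gAudioFramesPerAnalogFrame;

bool setup(BelaContext *context, void *userData)
{
	// this example reads analog input 0, so the analog channels must be enabled
	if(context->analogFrames == 0)
		return false;
	// ratio of audio to analog sample rates (typically 2 on Bela)
	gAudioFramesPerAnalogFrame = context->audioFrames / context->analogFrames;
	gInverseSampleRate = 1.0 / context->audioSampleRate;
	return true;
}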
Example #3
void render(BelaContext *context, void *userData)
{
	for(unsigned int n = 0; n < context->audioFrames; n++) {
		float sample = 0;
		float out = 0;

		// If triggered...
		if(gReadPtr != -1)
			sample += gSampleData.samples[gReadPtr++];	// ...read each sample...

		if(gReadPtr >= gSampleData.sampleLen)
			gReadPtr = -1;

		// Direct-form I biquad: y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]
		out = lb0*sample+lb1*gLastX[0]+lb2*gLastX[1]-la1*gLastY[0]-la2*gLastY[1];

		gLastX[1] = gLastX[0];
		gLastX[0] = sample;	// the x history must store the input, not the output
		gLastY[1] = gLastY[0];
		gLastY[0] = out;

		for(unsigned int channel = 0; channel < context->audioOutChannels; ++channel)
			// ...and copy it to all the output channels
			audioWrite(context, n, channel, out);

	}

	// Request that the lower-priority tasks run at next opportunity
	Bela_scheduleAuxiliaryTask(gChangeCoeffTask);
	Bela_scheduleAuxiliaryTask(gInputTask);
}
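The coefficients lb0..la2 are computed elsewhere (note the gChangeCoeffTask auxiliary task). For reference, one standard derivation is the low-pass form of the RBJ "Audio EQ Cookbook"; a sketch, with cutoffHz and q as assumed parameters and only the output names matching the example:

#include <cmath>

// Sketch: RBJ "Audio EQ Cookbook" low-pass coefficients, normalized by a0.
// cutoffHz and q are assumptions; only the output names match the example.
void computeLowPassCoeffs(float cutoffHz, float q, float sampleRate,
                          float& lb0, float& lb1, float& lb2,
                          float& la1, float& la2)
{
	const float w0 = 2.0f * (float)M_PI * cutoffHz / sampleRate;
	const float alpha = sinf(w0) / (2.0f * q);
	const float cosw0 = cosf(w0);
	const float a0 = 1.0f + alpha;

	lb0 = ((1.0f - cosw0) / 2.0f) / a0;
	lb1 = (1.0f - cosw0) / a0;
	lb2 = ((1.0f - cosw0) / 2.0f) / a0;
	la1 = (-2.0f * cosw0) / a0;
	la2 = (1.0f - alpha) / a0;
}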
Example #4
void render(BelaContext *context, void *userData)
{
	// process the audio channels
	for(unsigned int n = 0; n < context->audioFrames; ++n)
	{
		for(unsigned int ch = 0; ch < context->audioOutChannels; ++ch)
		{
			float in = audioRead(context, n, ch);
			inAcc[ch] += in;
			++inAccCount[ch];

			float out = 0.4f * sinf(gPhase[ch]);
			gPhase[ch] += 2.f * (float)M_PI * gFreq[ch] / context->audioSampleRate;
			if(gPhase[ch] > M_PI)
				gPhase[ch] -= 2.f * (float)M_PI;
			audioWrite(context, n, ch, out);
		}
	}

	// process the analog channels
	for(unsigned int n = 0; n < context->analogFrames; ++n)
	{
		for(unsigned int ch = 0; ch < context->analogInChannels; ++ch)
		{
			// analog channels are stored after the audio channels in the accumulators
			int idx = ch + context->audioOutChannels;
			float in = analogRead(context, n, ch);
			inAcc[idx] += in;
			++inAccCount[idx];

			// when using the capelet, the analog output is AC-coupled,
			// so we center it around 0, exactly the same as for the audio out
			float out = 0.4f * sinf(gPhase[idx]);
			// use the analogSampleRate instead
			gPhase[idx] += 2.f * (float)M_PI * gFreq[idx] / context->analogSampleRate;
			if(gPhase[idx] > (float)M_PI)
				gPhase[idx] -= 2.f * (float)M_PI;
			analogWriteOnce(context, n, ch, out);
		}
	}

	// print the averages roughly every half second
	static int count = 0;
	for(unsigned int n = 0; n < context->audioFrames; ++n)
	{
		count += 1;
		if(count % (int)(context->audioSampleRate * 0.5f) == 0)
		{
			rt_printf("Average input:\n");
			for(unsigned int i = 0; i < 10; ++i) // avoid shadowing the frame index n
			{
				rt_printf("[%d]:\t%.3f\t", i, inAcc[i]/inAccCount[i]);
				if(i % 2 == 1)
					rt_printf("\n");
			}
		}
	}

}
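The accumulators and oscillator state used above are declared elsewhere. Given the indexing (audio channels first, then analog channels offset by audioOutChannels, ten slots printed), a consistent set of declarations would be the following; the sizes and test frequencies are assumptions:

// Assumed declarations: 2 audio + 8 analog channels = 10 slots.
float inAcc[10] = {0};
unsigned int inAccCount[10] = {0};
float gPhase[10] = {0};
float gFreq[10] = {220, 330, 440, 550, 660, 770, 880, 990, 1100, 1210}; // arbitrary test tones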
Example #5
File: render.cpp Project: acarabott/Bela
void render(BelaContext *context, void *userData)
{
	float currentSample;
	float out = 0;

	for(unsigned int n = 0; n < context->audioFrames; n++) {

		if(!(n % gAudioFramesPerAnalogFrame)) {
			// On even audio samples:
			// Read analog input 0, piezo disk
			gPiezoInput = analogRead(context, n, 0);
		}
		
		// Re-centre around 0
		// DC Offset Filter    y[n] = x[n] - x[n-1] + R * y[n-1]
		readingDCOffset = gPiezoInput - prevPiezoReading + (R * prevReadingDCOffset);
		prevPiezoReading = gPiezoInput;
		prevReadingDCOffset = readingDCOffset;
		currentSample = readingDCOffset;

		// Full wave rectify
		if(currentSample < 0.0)
			currentSample *= -1.0;

		// Onset Detection
		if(currentSample >= peakValue) { // Record the highest incoming sample
			peakValue = currentSample;
			triggered = 0;
		}
		else if(peakValue >= rolloffRate) // But have the peak value decay over time
			peakValue -= rolloffRate;     // so we can catch the next peak later

		if(currentSample < peakValue - amountBelowPeak && peakValue >= thresholdToTrigger && !triggered) {
			rt_printf("%f\n", peakValue);
			triggered = 1; // Indicate that we've triggered; wait for the next peak before triggering again
			gReadPtr = 0;  // Start sample playback
		}

		for(unsigned int channel = 0; channel < context->audioOutChannels; channel++) {

			// If triggered, read this channel's current sample...
			if(gReadPtr != -1)
				out = gSampleData[channel%NUM_CHANNELS].samples[gReadPtr];

			audioWrite(context, n, channel, out);
		}

		// ...and advance playback once per frame; incrementing inside the
		// channel loop would skip samples and double the playback speed.
		// Assumes all channel buffers share the same length.
		if(gReadPtr != -1 && ++gReadPtr >= gSampleData[0].sampleLen)
			gReadPtr = -1;

	}
	
	// log the piezo input, peakValue from onset detection and audio output on the scope
	scope.log(gPiezoInput, peakValue, out);
}
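The DC-offset stage above is a standard one-pole DC-blocking filter, y[n] = x[n] - x[n-1] + R*y[n-1]. Pulled out as a self-contained helper (a sketch; the example defines R elsewhere, 0.995 here is an assumed value):

// Minimal DC-blocking (high-pass) filter: y[n] = x[n] - x[n-1] + R * y[n-1].
// R just below 1 sets the cutoff; 0.995 is an assumed value.
class DCBlocker {
public:
	float process(float x) {
		float y = x - prevX + R * prevY;
		prevX = x;
		prevY = y;
		return y;
	}
private:
	float R = 0.995f;
	float prevX = 0;
	float prevY = 0;
};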
Example #6
File: render.cpp Project: dr-offig/Bela
void render(BelaContext *context, void *userData)
{
	static float lfoPhase=0;
	float amplitude = lfoAmplitude * 4700; // range of variation for D, which must stay within [0, 9999]
	lfoPhase += lfoRate*2*M_PI*context->audioFrames/context->audioSampleRate;
	// keep the phase bounded so floating-point precision does not degrade over time
	if(lfoPhase > 2*M_PI)
		lfoPhase -= 2*M_PI;
	D = amplitude + amplitude*sinf(lfoPhase);

	for(unsigned int n = 0; n < context->audioFrames; n++) {
		float input = audioRead(context, n, 0) + audioRead(context, n, 1);
		delay[writePointer++] = input + delay[readPointer]*feedback;
		float output = (input + 0.9*delay[readPointer++]) * 0.5;
		audioWrite(context, n, 0, output);
		audioWrite(context, n, 1, output);
		if(writePointer>=delayLength)
			writePointer-=delayLength;
		if(readPointer>=delayLength)
			readPointer-=delayLength;
	}
}
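D (the modulated delay in samples) is computed every block but not consumed in this excerpt; presumably the full project derives the read position from it. The usual technique is a fractional read with linear interpolation; a sketch under that assumption:

// Sketch: fractional delay read with linear interpolation, assuming the
// modulated delay D (in samples) is taken relative to writePointer.
float readDelayed(const float* delayLine, int delayLength,
                  int writePointer, float D)
{
	float readPos = (float)writePointer - D;
	while(readPos < 0)
		readPos += delayLength;
	int i0 = (int)readPos;
	int i1 = (i0 + 1) % delayLength;
	float frac = readPos - (float)i0;
	return delayLine[i0] * (1.0f - frac) + delayLine[i1] * frac;
}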
Example #7
File: truetalk.c Project: wdebeaum/cabot
static void
doAudioWrite(char *buf, int len)
{
    /* Is this the first audio we've generated this utterance? */
    if (!startedSpeaking) {
	sendStartedSpeakingMsg();
	startedSpeaking = 1;
    }
    /* Send to audio device */
    audioWrite(buf, len);
}
Example #8
void printout(void)
{
	int j;

	/* 32 subbands * 18 samples (one granule), doubled for stereo */
	if (nch == 2)
		j = 32 * 18 * 2;
	else
		j = 32 * 18;

	if (AUDIO_BUFFER_SIZE == 0)
		audioWrite((char*)sample_buffer, j * sizeof(short));
	else
		audioBufferWrite((char*)sample_buffer, j * sizeof(short));
}
Example #9
void render(BelaContext *context, void *userData)
{
	static float lfoPhase = 0;
	float amplitude = lfoAmplitude * 4700; // range of variation for D, which must stay within [0, 9999]
	lfoPhase += lfoRate * 2.f * (float)M_PI * context->audioFrames/context->audioSampleRate;
	// keep the phase bounded so floating-point precision does not degrade over time
	if(lfoPhase > 2.f * (float)M_PI)
		lfoPhase -= 2.f * (float)M_PI;
	D = amplitude + amplitude * sinf(lfoPhase);
	Bela_scheduleAuxiliaryTask(updatePll);

	for(unsigned int n = 0; n < context->audioFrames; n++) {
		float input = 0.0;
		for(unsigned int ch = 0; ch < context->audioInChannels; ch++) {
			input += audioRead(context, n, ch);
		}
		input = input/(float)context->audioInChannels;
		delay[writePointer++] = input + delay[readPointer]*feedback;
		float output = dry * input + wet * delay[readPointer++];
		for(unsigned int ch = 0; ch < context->audioOutChannels; ch++)
			audioWrite(context, n, ch, output);
		if(writePointer>=delayLength)
			writePointer-=delayLength;
		if(readPointer>=delayLength)
			readPointer-=delayLength;
	}
}
Example #10
int
audioBufferOpen(int frequency, int stereo, int volume)
{
	struct ringBuffer audioBuffer;
	
	int inFd,outFd,ctlFd,cnt,pid;
	int inputFinished=FALSE;
	int percentFull;
	fd_set inFdSet,outFdSet;
	fd_set *outFdPtr; 
	struct timeval timeout;
	int filedes[2];
	int controldes[2];
	
	
	if (pipe(filedes) || pipe(controldes)) 
	{
		perror("pipe");
		exit(-1);
	}
	if ((pid=fork())!=0) 
	{  
		/* if we are the parent */
		control_fd=controldes[1];
		close(filedes[0]);
		buffer_fd=filedes[1];
		close(controldes[0]);
		return(pid);	        /* return the pid */
	}
	
	
	/* we are the child */
	close(filedes[1]);
	inFd=filedes[0];
	close(controldes[1]);
	ctlFd=controldes[0];
	audioOpen(frequency,stereo,volume);
	outFd=getAudioFd();
	initBuffer(&audioBuffer);
	
	while(1) 
	{
		/* note: this zero timeout is never used; the select() below passes
		   NULL as the timeout and therefore blocks */
		timeout.tv_sec=0;
		timeout.tv_usec=0;
		FD_ZERO(&inFdSet);
		FD_ZERO(&outFdSet);
		FD_SET(ctlFd,&inFdSet);
		FD_SET(outFd,&outFdSet);
		
		if (bufferSize(&audioBuffer)<AUSIZ) 
		{					/* is the buffer too empty */
			outFdPtr = NULL;		/* yes, don't try to write */
			if (inputFinished)		/* no more input, buffer exhausted -> exit */
				break;
		} else
			outFdPtr=&outFdSet;		/* no, select on write */
		
		/* check we have at least AUSIZ bytes left (don't want <1k bits) */
		if ((bufferFree(&audioBuffer)>=AUSIZ) && !inputFinished)
			FD_SET(inFd,&inFdSet);

/* The following selects() are basically all that is left of the system
   dependent code outside the audioIO_*&c files. These selects really
   need to be moved into the audioIO_*.c files and replaced with a
   function like audioIOReady(inFd, &checkIn, &checkAudio, wait) where
   it checks the status of the input or audio output if checkIn or
   checkAudio are set and returns with checkIn or checkAudio set to TRUE
   or FALSE depending on whether or not data is available. If wait is
   FALSE the function should return immediately, if wait is TRUE the
   process should BLOCK until the required condition is met. NB: The
   process MUST relinquish the CPU during this check or it will gobble
   up all the available CPU which sort of defeats the purpose of the
   buffer.

   This is tricky for people who don't have file descriptors (and
   select) to do the job. In that case a buffer implemented using
   threads should work. The way things are set up now a threaded version
   shouldn't be too hard to implement. When I get some time... */

		/* check if we can read or write */
		if (select(MAX3(inFd,outFd,ctlFd)+1,&inFdSet,outFdPtr,NULL,NULL) > -1) 
		{
			if (outFdPtr && FD_ISSET(outFd,outFdPtr)) 
			{							/* need to write */
				int bytesToEnd = AUDIO_BUFFER_SIZE - audioBuffer.outPos;

				percentFull=100*bufferSize(&audioBuffer)/AUDIO_BUFFER_SIZE;
				if (AUSIZ>bytesToEnd) 
				{
					cnt = audioWrite(audioBuffer.bufferPtr + audioBuffer.outPos, bytesToEnd);
					cnt += audioWrite(audioBuffer.bufferPtr, AUSIZ - bytesToEnd);
					audioBuffer.outPos = AUSIZ - bytesToEnd;
				} 
				else 
				{
					cnt = audioWrite(audioBuffer.bufferPtr + audioBuffer.outPos, AUSIZ);
					audioBuffer.outPos += AUSIZ;
				}

			}
			if (FD_ISSET(inFd,&inFdSet)) 
			{								 /* need to read */
				cnt = read(inFd, audioBuffer.bufferPtr + audioBuffer.inPos, MIN(AUSIZ, AUDIO_BUFFER_SIZE - audioBuffer.inPos));
				if (cnt >= 0) 
				{
					audioBuffer.inPos = (audioBuffer.inPos + cnt) % AUDIO_BUFFER_SIZE;

					if (cnt==0)
						inputFinished=TRUE;
				} 
				else 
					_exit(-1);
			}
			if (FD_ISSET(ctlFd,&inFdSet)) 
			{
				int dummy;

				cnt = read(ctlFd, &dummy, sizeof dummy);
				if (cnt >= 0) 
				{
					audioBuffer.inPos = audioBuffer.outPos = 0;
					audioFlush();
				} 
				else 
					_exit(-1);
			}
		} 
		else 
			_exit(-1);
	}
	close(inFd);
	audioClose();
	exit(0);
	return 0; /* just to get rid of warnings */
}
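The long comment above specifies the portability fix it wants: an audioIOReady() that hides the select() calls inside the audioIO_*.c files. Written to that specification (an untested sketch, not part of the original source):

/* Sketch of the audioIOReady() proposed in the comment above: checks the
   input and/or audio-output descriptors, blocking when wait is TRUE, and
   returns the results through checkIn/checkAudio. Not in the original. */
static int audioIOReady(int inFd, int *checkIn, int *checkAudio, int wait)
{
	fd_set inSet, outSet;
	struct timeval zero = {0, 0};
	int outFd = getAudioFd();
	int maxFd = inFd > outFd ? inFd : outFd;

	FD_ZERO(&inSet);
	FD_ZERO(&outSet);
	if (*checkIn)
		FD_SET(inFd, &inSet);
	if (*checkAudio)
		FD_SET(outFd, &outSet);

	if (select(maxFd + 1, &inSet, &outSet, NULL, wait ? NULL : &zero) < 0)
		return -1;

	*checkIn = *checkIn && FD_ISSET(inFd, &inSet);
	*checkAudio = *checkAudio && FD_ISSET(outFd, &outSet);
	return 0;
}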
Example #11
void render(BelaContext *context, void *userData)
{
	
	// float* aoc = (float*)&context->analogOutChannels;
	// *aoc = 0; // simulate Bela Mini. Should also change the condition in setup() accordingly
	static float phase = 0.0;
	static int sampleCounter = 0;
	static int invertChannel = 0;
	float frequency = 0;

	if(gAudioTestState == kStateTestingNone){
		gAudioTestState = kStateTestingAudioLeft;
		rt_printf("Testing audio left\n");
	}

	if(gAudioTestState == kStateTestingAudioDone)
	{
		gAudioTestState = kStateTestingAnalog;
		rt_printf("Testing analog\n");
	}

	// Play a sine wave on the audio output
	for(unsigned int n = 0; n < context->audioFrames; n++) {
		
		// Peak detection on the audio inputs, with offset to catch
		// DC errors
		for(unsigned int ch = 0; ch < context->audioInChannels; ch++) {
			float value = audioRead(context, n, ch);
			if(value > gPositivePeakLevels[ch])
				gPositivePeakLevels[ch] = value;
			gPositivePeakLevels[ch] += 0.1f;
			gPositivePeakLevels[ch] *= gPeakLevelDecayRate;
			gPositivePeakLevels[ch] -= 0.1f;
			if(value < gNegativePeakLevels[ch])
				gNegativePeakLevels[ch] = value;
			gNegativePeakLevels[ch] -= 0.1f;
			gNegativePeakLevels[ch] *= gPeakLevelDecayRate;
			gNegativePeakLevels[ch] += 0.1f;
		}
		
		// default to the left-channel assignment; overwritten below when testing right
		int enabledChannel = 0;
		int disabledChannel = 1;
		const char* enabledChannelLabel = "Left";
		const char* disabledChannelLabel = "Right";
		if(gAudioTestState == kStateTestingAudioLeft) {
			enabledChannel = 0;
			disabledChannel = 1;
			enabledChannelLabel = "Left";
			disabledChannelLabel = "Right";
		} else if (gAudioTestState == kStateTestingAudioRight) {
			enabledChannel = 1;
			disabledChannel = 0;
			enabledChannelLabel = "Right";
			disabledChannelLabel = "Left";
		}
		if(gAudioTestState == kStateTestingAudioLeft || gAudioTestState  == kStateTestingAudioRight)
		{
			audioWrite(context, n, enabledChannel, 0.2f * sinf(phase));
			audioWrite(context, n, disabledChannel, 0);
			
			frequency = 3000.0;
			phase += 2.0f * (float)M_PI * frequency / context->audioSampleRate;
			if(phase >= M_PI)
				phase -= 2.0f * (float)M_PI;
			
			gAudioTestStateSampleCount++;
			if(gAudioTestStateSampleCount >= gAudioTestStateSampleThreshold) {
				// Check if we have the expected input: signal on the enabledChannel but not
				// on the disabledChannel. Also check that there is not too much DC offset on the
				// inactive channel
				if((gPositivePeakLevels[enabledChannel] - gNegativePeakLevels[enabledChannel]) >= gPeakLevelHighThreshold 
					&& (gPositivePeakLevels[disabledChannel] -  gNegativePeakLevels[disabledChannel]) <= gPeakLevelLowThreshold &&
					fabsf(gPositivePeakLevels[disabledChannel]) < gDCOffsetThreshold &&
					fabsf(gNegativePeakLevels[disabledChannel]) < gDCOffsetThreshold) {
					// Successful test: increment counter
					gAudioTestSuccessCounter++;
					if(gAudioTestSuccessCounter >= gAudioTestSuccessCounterThreshold) {
						rt_printf("Audio %s test successful\n", enabledChannelLabel);
						if(gAudioTestState == kStateTestingAudioLeft)
						{
							gAudioTestState = kStateTestingAudioRight;
							rt_printf("Testing audio Right\n");
						} else if(gAudioTestState == kStateTestingAudioRight)
						{
							gAudioTestState = kStateTestingAudioDone;
						}

						gAudioTestStateSampleCount = 0;
						gAudioTestSuccessCounter = 0;
					}

				}
				else {
					if(!((context->audioFramesElapsed + n) % 22050)) {
						// Debugging print messages
						if((gPositivePeakLevels[enabledChannel] - gNegativePeakLevels[enabledChannel]) < gPeakLevelHighThreshold)
							rt_printf("%s Audio In FAIL: insufficient signal: %f\n", enabledChannelLabel,
										gPositivePeakLevels[enabledChannel] - gNegativePeakLevels[enabledChannel]);
						else if(gPositivePeakLevels[disabledChannel] -  gNegativePeakLevels[disabledChannel] > gPeakLevelLowThreshold)
							rt_printf("%s Audio In FAIL: signal present when it should not be: %f\n", disabledChannelLabel,
										gPositivePeakLevels[disabledChannel] -  gNegativePeakLevels[disabledChannel]);
						else if(fabsf(gPositivePeakLevels[disabledChannel]) >= gDCOffsetThreshold ||
								fabsf(gNegativePeakLevels[disabledChannel]) >= gDCOffsetThreshold)
							rt_printf("%s Audio In FAIL: DC offset: (%f, %f)\n", disabledChannelLabel,
										gPositivePeakLevels[disabledChannel], gNegativePeakLevels[disabledChannel]);
					}
					gAudioTestSuccessCounter--;
					if(gAudioTestSuccessCounter <= 0)
						gAudioTestSuccessCounter = 0;
				}
			}
		}
		if(
			gAudioTestState == kStateTestingAnalogDone || // Bela Mini: the audio outs are used also for testing analogs, so we only play the tone at the end of all tests
			(gAudioTestState >= kStateTestingAudioDone && context->analogOutChannels) // Bela: we play as soon as testing audio ends, while live-testing the analogs.
		)
		{
			// Audio input testing finished. Play tones depending on status of
			// analog testing
			audioWrite(context, n, 0, gEnvelopeValueL * sinf(phase));
			audioWrite(context, n, 1, gEnvelopeValueR * sinf(phase));

			// If one second has gone by with no error, play one sound, else
			// play another
			if(context->audioFramesElapsed + n - gLastErrorFrame > context->audioSampleRate)
			{
				gEnvelopeValueL *= gEnvelopeDecayRate;
				gEnvelopeValueR *= gEnvelopeDecayRate;
				gEnvelopeSampleCount++;
				if(gEnvelopeSampleCount > 22050) {
					if(gEnvelopeLastChannel == 0)
						gEnvelopeValueR = 0.5;
					else
						gEnvelopeValueL = 0.5;
					gEnvelopeLastChannel = !gEnvelopeLastChannel;
					gEnvelopeSampleCount = 0;
				}
				frequency = 880.0;
				if(led1)
				{
					led1->write(gEnvelopeValueL > 0.2);
				}
				if(led2)
				{
					led2->write(gEnvelopeValueR > 0.2);
				}
			} else {
				gEnvelopeValueL = gEnvelopeValueR = 0.5;
				gEnvelopeLastChannel = 0;
				frequency = 220.0;
			}

			phase += 2.0f * (float)M_PI * frequency / context->audioSampleRate;
			if(phase >= M_PI)
				phase -= 2.0f * (float)M_PI;
		}
	}

	unsigned int outChannels = context->analogOutChannels ? context->analogOutChannels : context->audioOutChannels;
	unsigned int outFrames = context->analogOutChannels ? context->analogFrames : context->audioFrames;
	if(gAudioTestState == kStateTestingAnalog)
	{
		for(unsigned int n = 0; n < outFrames; n++) {
			// Change outputs every 512 samples
			for(unsigned int k = 0; k < outChannels; k++) {
				float outValue;
				if((k % outChannels) == (invertChannel % outChannels))
					outValue = sampleCounter < 512 ? ANALOG_OUT_HIGH : ANALOG_OUT_LOW;
				else
					outValue = sampleCounter < 512 ? ANALOG_OUT_LOW : ANALOG_OUT_HIGH;
				if(context->analogOutChannels == 0)
					audioWrite(context, n, k%2, outValue); // Bela Mini, using audio outs instead
				else
					analogWriteOnce(context, n, k, outValue); // Bela
			}
		}

		for(unsigned int n = 0; n < context->analogFrames; n++) {
			// Read after 256 samples: input should be low (high for inverted)
			// Read after 768 samples: input should be high (low for inverted)
			if(sampleCounter == 256 || sampleCounter == 768) {
				for(unsigned int k = 0; k < context->analogInChannels; k++) {
					float inValue = analogRead(context, n, k);
					bool inverted = ((k % outChannels) == (invertChannel % outChannels));
					if(
						(
							inverted &&
							(
								(sampleCounter == 256 && inValue < ANALOG_IN_HIGH) ||
								(sampleCounter == 768 && inValue > ANALOG_IN_LOW)
							)
						) || (
							!inverted &&
							(
								(sampleCounter == 256 && inValue > ANALOG_IN_LOW) ||
								(sampleCounter == 768 && inValue < ANALOG_IN_HIGH)
							)
						)
					)
					{
						rt_printf("Analog FAIL [output %d, input %d] -- output %s input %f %s\n", 
							k % outChannels, 
							k,
							(sampleCounter == 256 && inverted) || (sampleCounter == 768 && !inverted) ? "HIGH" : "LOW",
							inValue,
							inverted ? "(inverted channel)" : "");
						gLastErrorFrame = context->audioFramesElapsed + n;
						gAnalogTestSuccessCounter = 0;
					} else {
						++gAnalogTestSuccessCounter;
					}
				}
			}

			if(++sampleCounter >= 1024) {
				sampleCounter = 0;
				invertChannel++;
				if(invertChannel >= 8)
					invertChannel = 0;
			}
			if(gAnalogTestSuccessCounter >= 500) {
				static bool notified = false;
				if(!notified)
				{
					rt_printf("Analog test successful\n");
					gAudioTestState = kStateTestingAnalogDone;

				}
				notified = true;
			}
		}
	}
}
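The input check at the top of the frame loop uses a decaying peak follower with a +/-0.1 offset: shifting before the decay and back afterwards makes the exponential decay converge to -0.1 (or +0.1 for the negative tracker) rather than 0, so a silent input's tracked peak falls through zero instead of sticking at a small DC level. Extracted as a standalone helper (a sketch; the names and the decay rate are assumptions):

// Sketch of the decaying positive-peak follower used above. Shifting by
// +0.1 before the decay and back afterwards makes the decay converge to
// -0.1 instead of 0, so small DC offsets do not keep the peak pinned.
float trackPositivePeak(float input, float peak, float decayRate = 0.999f)
{
	if(input > peak)
		peak = input;
	peak += 0.1f;
	peak *= decayRate;
	peak -= 0.1f;
	return peak;
}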