// Draws the envelope's control points that fall inside the visible part of
// rect r.
//   h   - leftmost visible time in seconds (shifted into envelope-local time
//         below by subtracting mOffset)
//   pps - pixels per second (horizontal zoom)
//   dB  - when true, point values are mapped through toDB() before being
//         scaled to pixel offsets
void Envelope::Draw(wxDC & dc, wxRect & r, double h, double pps, bool dB)
{
   h -= mOffset;                          // convert to envelope-local time
   double tright = h + (r.width / pps);   // rightmost visible time

   dc.SetPen(AColor::envelopePen);
   dc.SetBrush(*wxWHITE_BRUSH);

   int ctr, height;
   if (mMirror) {
      // Mirrored display: points are drawn above and below a center line.
      height = r.height / 2;
      ctr = r.y + height;
   }
   else {
      height = r.height;
      ctr = r.y + height;
   }

   int len = mEnv.Count();
   for (int i = 0; i < len; i++) {
      // Only draw points inside the visible time range.
      if (mEnv[i]->t >= h && mEnv[i]->t <= tright) {
         if (i == mDragPoint) {
            // Highlight the point currently being dragged with the filled brush.
            dc.SetPen(AColor::envelopePen);
            dc.SetBrush(AColor::envelopeBrush);
         }

         double v = mEnv[i]->val;
         int x = int ((mEnv[i]->t - h) * pps);
         int y;
         if (dB)
            y = int (toDB(v) * height);
         else
            y = int (v * height);

         // 4x4 circle centered (approximately) on the point position.
         wxRect circle(r.x + x - 1, ctr - y, 4, 4);
         dc.DrawEllipse(circle);
         if (mMirror) {
            // Draw the mirrored twin below the center line.
            circle.y = ctr + y - 2;
            dc.DrawEllipse(circle);
         }

         if (i == mDragPoint) {
            // Restore the normal pen/brush after the highlighted point.
            dc.SetPen(AColor::envelopePen);
            dc.SetBrush(*wxWHITE_BRUSH);
         }
      }
   }
}
void Reports::insert(const Report &report) { auto experimentOpt = Experiments{session}.getActive(); if (!experimentOpt || experimentOpt.value().ID != report.experimentID) { throw InvalidData{"given experiment ID is not an ID of currently active experiment"}; } auto experiment = experimentOpt.value(); checkID(report.ID); checkEvents(experiment, report.history); checkSurveyAns(experiment.surveyBefore, report.surveyBefore); checkSurveyAns(experiment.surveyAfter, report.surveyAfter); retainExhibits(report.history); Impl::insert(session, toDB(report)); }
// Dedicated audio thread: wakes every 5ms, drains any newly captured 10ms
// desktop/mic/aux audio segments, mixes them, updates the volume meters, and
// hands each mixed segment to the encoder.  Runs until bRunning is cleared,
// then resets the meters and discards any still-pending audio frames.
void OBS::MainAudioLoop()
{
    const unsigned int audioSamplesPerSec = App->GetSampleRateHz();
    const unsigned int audioSampleSize = audioSamplesPerSec/100;   // frames per 10ms segment

    DWORD taskID = 0;
    // Request MMCSS "Pro Audio" scheduling priority for this thread.
    HANDLE hTask = AvSetMmThreadCharacteristics(TEXT("Pro Audio"), &taskID);

    bufferedAudioTimes.Clear();

    bPushToTalkOn = false;

    micMax = desktopMax = VOL_MIN;
    micPeak = desktopPeak = VOL_MIN;

    UINT audioFramesSinceMeterUpdate = 0;
    UINT audioFramesSinceMicMaxUpdate = 0;
    UINT audioFramesSinceDesktopMaxUpdate = 0;

    List<float> mixBuffer, levelsBuffer;
    mixBuffer.SetSize(audioSampleSize*2);      // *2: interleaved stereo floats
    levelsBuffer.SetSize(audioSampleSize*2);

    latestAudioTime = 0;

    //---------------------------------------------
    // the audio loop of doom

    while (true) {
        OSSleep(5); //screw it, just run it every 5ms

        if (!bRunning)
            break;

        //-----------------------------------------------

        float *desktopBuffer, *micBuffer;

        curDesktopVol = desktopVol * desktopBoost;

        // Push-to-talk mutes the mic unless the key is held.
        if (bUsingPushToTalk)
            curMicVol = bPushToTalkOn ? micVol : 0.0f;
        else
            curMicVol = micVol;

        curMicVol *= micBoost;

        // NOTE(review): bDesktopMuted is computed but never read in this
        // version of the loop -- confirm whether desktop muting was meant
        // to be applied here.
        bool bDesktopMuted = (curDesktopVol < EPSILON);
        bool bMicEnabled = (micAudio != NULL);

        // Drain every complete segment that arrived since the last pass.
        while (QueryNewAudio()) {
            QWORD timestamp = bufferedAudioTimes[0];
            bufferedAudioTimes.Remove(0);

            zero(mixBuffer.Array(), audioSampleSize*2*sizeof(float));
            zero(levelsBuffer.Array(), audioSampleSize*2*sizeof(float));

            //----------------------------------------------------------------------------
            // get latest sample for calculating the volume levels

            float *latestDesktopBuffer = NULL, *latestMicBuffer = NULL;

            desktopAudio->GetBuffer(&desktopBuffer, timestamp);
            desktopAudio->GetNewestFrame(&latestDesktopBuffer);

            // micBuffer is only assigned (and later read) when micAudio exists.
            if (micAudio != NULL) {
                micAudio->GetBuffer(&micBuffer, timestamp);
                micAudio->GetNewestFrame(&latestMicBuffer);
            }

            //----------------------------------------------------------------------------
            // mix desktop samples

            if (desktopBuffer)
                MixAudio(mixBuffer.Array(), desktopBuffer, audioSampleSize*2, false);

            if (latestDesktopBuffer)
                MixAudio(levelsBuffer.Array(), latestDesktopBuffer, audioSampleSize*2, false);

            //----------------------------------------------------------------------------
            // get latest aux volume level samples and mix

            OSEnterMutex(hAuxAudioMutex);

            for (UINT i=0; i<auxAudioSources.Num(); i++) {
                float *latestAuxBuffer;
                if(auxAudioSources[i]->GetNewestFrame(&latestAuxBuffer))
                    MixAudio(levelsBuffer.Array(), latestAuxBuffer, audioSampleSize*2, false);
            }

            //----------------------------------------------------------------------------
            // mix output aux sound samples with the desktop

            for (UINT i=0; i<auxAudioSources.Num(); i++) {
                float *auxBuffer;
                if(auxAudioSources[i]->GetBuffer(&auxBuffer, timestamp))
                    MixAudio(mixBuffer.Array(), auxBuffer, audioSampleSize*2, false);
            }

            OSLeaveMutex(hAuxAudioMutex);

            //----------------------------------------------------------------------------
            // multiply samples by volume and compute RMS and max of samples
            // Use 1.0f instead of curDesktopVol, since aux audio sources already have
            // their volume set, and shouldn't be boosted anyway.

            float desktopRMS = 0, micRMS = 0, desktopMx = 0, micMx = 0;
            if (latestDesktopBuffer)
                CalculateVolumeLevels(levelsBuffer.Array(), audioSampleSize*2, 1.0f, desktopRMS, desktopMx);

            if (bMicEnabled && latestMicBuffer)
                CalculateVolumeLevels(latestMicBuffer, audioSampleSize*2, curMicVol, micRMS, micMx);

            //----------------------------------------------------------------------------
            // convert RMS and Max of samples to dB

            desktopRMS = toDB(desktopRMS);
            micRMS = toDB(micRMS);
            desktopMx = toDB(desktopMx);
            micMx = toDB(micMx);

            //----------------------------------------------------------------------------
            // update max if sample max is greater or after 1 second
            // NOTE(review): peakMeterDelayFrames is actually 3 seconds of
            // frames, despite the "1 second" wording above -- confirm intent.

            float maxAlpha = 0.15f;
            UINT peakMeterDelayFrames = audioSamplesPerSec * 3;

            // Track the max with instant attack and exponential decay.
            if (micMx > micMax)
                micMax = micMx;
            else
                micMax = maxAlpha * micMx + (1.0f - maxAlpha) * micMax;

            if(desktopMx > desktopMax)
                desktopMax = desktopMx;
            else
                desktopMax = maxAlpha * desktopMx + (1.0f - maxAlpha) * desktopMax;

            //----------------------------------------------------------------------------
            // update delayed peak meter

            if (micMax > micPeak || audioFramesSinceMicMaxUpdate > peakMeterDelayFrames) {
                micPeak = micMax;
                audioFramesSinceMicMaxUpdate = 0;
            }
            else {
                audioFramesSinceMicMaxUpdate += audioSampleSize;
            }

            if (desktopMax > desktopPeak || audioFramesSinceDesktopMaxUpdate > peakMeterDelayFrames) {
                desktopPeak = desktopMax;
                audioFramesSinceDesktopMaxUpdate = 0;
            }
            else {
                audioFramesSinceDesktopMaxUpdate += audioSampleSize;
            }

            //----------------------------------------------------------------------------
            // low pass the level sampling

            float rmsAlpha = 0.15f;
            desktopMag = rmsAlpha * desktopRMS + desktopMag * (1.0f - rmsAlpha);
            micMag = rmsAlpha * micRMS + micMag * (1.0f - rmsAlpha);

            //----------------------------------------------------------------------------
            // update the meter about every 50ms (5 x 10ms segments)

            audioFramesSinceMeterUpdate += audioSampleSize;
            if (audioFramesSinceMeterUpdate >= (audioSampleSize*5)) {
                PostMessage(hwndMain, WM_COMMAND, MAKEWPARAM(ID_MICVOLUMEMETER, VOLN_METERED), 0);
                audioFramesSinceMeterUpdate = 0;
            }

            //----------------------------------------------------------------------------
            // mix mic and desktop sound
            // also, it's perfectly fine to just mix into the returned buffer

            if (bMicEnabled && micBuffer)
                MixAudio(mixBuffer.Array(), micBuffer, audioSampleSize*2, bForceMicMono);

            EncodeAudioSegment(mixBuffer.Array(), audioSampleSize, timestamp);
        }

        //-----------------------------------------------

        if (!bRecievedFirstAudioFrame && pendingAudioFrames.Num())
            bRecievedFirstAudioFrame = true;
    }

    // Shutdown: reset meters, notify the UI once more, and drop pending audio.
    desktopMag = desktopMax = desktopPeak = VOL_MIN;
    micMag = micMax = micPeak = VOL_MIN;

    PostMessage(hwndMain, WM_COMMAND, MAKEWPARAM(ID_MICVOLUMEMETER, VOLN_METERED), 0);

    for (UINT i=0; i<pendingAudioFrames.Num(); i++)
        pendingAudioFrames[i].audioData.Clear();

    // Release the MMCSS priority acquired at the top of the function.
    AvRevertMmThreadCharacteristics(hTask);
}
// Returns true if parent needs to be redrawn
//
// Handles all mouse interaction with the envelope:
//   ButtonDown - selects the nearest control point within 10px, or inserts a
//                new point at the click position;
//   Dragging   - moves the selected point (clamped between its neighbors);
//                dragging an interior point well outside the rect marks it
//                for deletion;
//   ButtonUp   - commits a pending deletion and ends the drag.
//   h   - leftmost visible time (seconds); pps - pixels per second;
//   dB  - when true the value axis is logarithmic (toDB/fromDB mapping).
bool Envelope::MouseEvent(wxMouseEvent & event, wxRect & r, double h, double pps, bool dB)
{
   //h -= mOffset;

   int ctr, height;
   bool upper;
   if (mMirror) {
      // Mirrored display: remember which half of the track the click hit.
      height = r.height / 2;
      ctr = r.y + height;
      upper = (event.m_y < ctr);
   }
   else {
      height = r.height;
      ctr = r.y + height;
      upper = true;
   }

   if (event.ButtonDown()) {
      mIsDeleting = false;
      double tleft = h - mOffset;               // visible range in envelope-local time
      double tright = tleft + (r.width / pps);
      int bestNum = -1;
      int bestDist = 10;                        // hit-test radius in pixels

      // Find the closest visible control point within bestDist pixels.
      int len = mEnv.Count();
      for (int i = 0; i < len; i++) {
         if (mEnv[i]->t >= tleft && mEnv[i]->t <= tright) {
            double v = mEnv[i]->val;
            int x = int ((mEnv[i]->t + mOffset - h) * pps) + r.x;
            int dy;
            if (dB)
               dy = int (toDB(v) * height);
            else
               dy = int (v * height);
            int y;
            if (upper)
               y = int (ctr - dy);
            else
               y = int (ctr + dy);

#ifndef SQR
#define SQR(X) ((X)*(X))
#endif
            // Rounded Euclidean pixel distance from the click to this point.
            int d = int (sqrt(SQR(x - event.m_x) + SQR(y - event.m_y)) + 0.5);
            if (d < bestDist) {
               bestNum = i;
               bestDist = d;
            }
         }
      }

      if (bestNum >= 0) {
         mDragPoint = bestNum;
      }
      else {
         // Create new point
         double when = h + (event.m_x - r.x) / pps - mOffset;
         int dy;
         if (upper)
            dy = ctr - event.m_y;
         else
            dy = event.m_y - ctr;

         double newVal;
         if (dB)
            newVal = fromDB(dy / double (height));
         else
            newVal = dy / double (height);

         // Envelope values are clamped to [0, 1].
         if (newVal < 0.0)
            newVal = 0.0;
         if (newVal > 1.0)
            newVal = 1.0;

         mDragPoint = Insert(when, newVal);
         mDirty = true;
      }

      // Remember the drag origin so Dragging() can compute deltas from it.
      mUpper = upper;
      mInitialWhen = mEnv[mDragPoint]->t;
      mInitialVal = mEnv[mDragPoint]->val;
      mInitialX = event.m_x;
      mInitialY = event.m_y;

      return true;
   }

   if (event.Dragging() && mDragPoint >= 0) {
      mDirty = true;

      wxRect larger = r;
      larger.Inflate(5, 5);

      // Dragging an interior point outside the (slightly enlarged) rect marks
      // it for deletion: it is snapped onto its left neighbor until ButtonUp.
      if (!mIsDeleting &&
          mDragPoint > 0 && mDragPoint < mEnv.Count() - 1 &&
          !larger.Inside(event.m_x, event.m_y)) {
         mEnv[mDragPoint]->t = mEnv[mDragPoint - 1]->t;
         mEnv[mDragPoint]->val = mEnv[mDragPoint - 1]->val;
         mIsDeleting = true;
         return true;
      }

      // Dragging back inside cancels the pending deletion.
      if (larger.Inside(event.m_x, event.m_y))
         mIsDeleting = false;

      if (mIsDeleting)
         return false;

      // Convert the mouse position back into an envelope value...
      int y;
      if (mUpper)
         y = ctr - event.m_y;
      else
         y = event.m_y - ctr;

      double newVal;
      if (dB)
         newVal = fromDB(y / double (height));
      else
         newVal = y / double (height);
      if (newVal < 0.0)
         newVal = 0.0;
      if (newVal > 1.0)
         newVal = 1.0;

      // ...and the horizontal delta into a new time, constrained between the
      // neighboring points; the endpoints stay pinned at 0 and mTrackLen.
      double newWhen = mInitialWhen + (event.m_x - mInitialX) / pps;
      if (mDragPoint > 0 && newWhen < mEnv[mDragPoint - 1]->t)
         newWhen = mEnv[mDragPoint - 1]->t;
      if (mDragPoint < mEnv.Count() - 1 && newWhen > mEnv[mDragPoint + 1]->t)
         newWhen = mEnv[mDragPoint + 1]->t;
      if (mDragPoint == 0)
         newWhen = 0;
      if (mDragPoint == mEnv.Count() - 1)
         newWhen = mTrackLen;

      mEnv[mDragPoint]->t = newWhen;
      mEnv[mDragPoint]->val = newVal;

      return true;
   }

   if (event.ButtonUp()) {
      // Commit a pending deletion, then end the drag.
      if (mIsDeleting) {
         delete mEnv[mDragPoint];
         mEnv.RemoveAt(mDragPoint);
      }
      mDragPoint = -1;
      return true;
   }

   return false;
}
// Validates and persists the given action, writing the newly assigned
// database ID back into action->ID.  Precondition: action is non-null.
void Actions::insert(Actions::Action *action) {
    assert(action);

    auto &newAction = *action;
    checkName(newAction.text);

    // Store the record and remember the ID the database assigned to it.
    const auto assignedID = Impl::insert(session, toDB(newAction));
    newAction.ID = assignedID;
}
// Dedicated audio thread (fixed 44.1kHz variant): wakes every 5ms, drains any
// newly captured 10ms desktop/mic/aux segments, pads timing gaps with silence
// so the encoder timeline stays continuous, mixes everything, updates the
// volume meters, and feeds the encoder.  Runs until bRunning is cleared, then
// resets the meters and discards pending audio frames.
void OBS::MainAudioLoop()
{
    DWORD taskID = 0;
    // Request MMCSS "Pro Audio" scheduling priority for this thread.
    HANDLE hTask = AvSetMmThreadCharacteristics(TEXT("Pro Audio"), &taskID);

    bPushToTalkOn = false;

    micMax = desktopMax = VOL_MIN;
    micPeak = desktopPeak = VOL_MIN;

    UINT audioFramesSinceMeterUpdate = 0;
    UINT audioFramesSinceMicMaxUpdate = 0;
    UINT audioFramesSinceDesktopMaxUpdate = 0;

    List<float> mixedLatestDesktopSamples;

    // 10ms of stereo silence at the hard-coded 44.1kHz rate
    // (441 frames * 2 channels), used to pad gaps between segments.
    List<float> blank10msSample;
    blank10msSample.SetSize(882);

    QWORD lastAudioTime = 0;

    while(TRUE) {
        OSSleep(5); //screw it, just run it every 5ms

        if(!bRunning)
            break;

        //-----------------------------------------------

        float *desktopBuffer, *micBuffer;
        UINT desktopAudioFrames = 0, micAudioFrames = 0;
        UINT latestDesktopAudioFrames = 0, latestMicAudioFrames = 0;

        curDesktopVol = desktopVol * desktopBoost;

        // Push-to-talk mutes the mic unless the key is held.
        if(bUsingPushToTalk)
            curMicVol = bPushToTalkOn ? micVol : 0.0f;
        else
            curMicVol = micVol;

        curMicVol *= micBoost;

        bool bDesktopMuted = (curDesktopVol < EPSILON);
        bool bMicEnabled = (micAudio != NULL);

        QWORD timestamp;
        while(QueryNewAudio(timestamp)) {
            if (!lastAudioTime)
                lastAudioTime = App->GetSceneTimestamp();

            // If capture fell behind, emit 10ms blocks of silence until the
            // encoder timeline catches up to the new segment's timestamp.
            if (lastAudioTime < timestamp) {
                while ((lastAudioTime+=10) < timestamp)
                    EncodeAudioSegment(blank10msSample.Array(), 441, lastAudioTime);
            }

            //----------------------------------------------------------------------------
            // get latest sample for calculating the volume levels

            float *latestDesktopBuffer = NULL, *latestMicBuffer = NULL;

            desktopAudio->GetBuffer(&desktopBuffer, &desktopAudioFrames, timestamp-10);
            desktopAudio->GetNewestFrame(&latestDesktopBuffer, &latestDesktopAudioFrames);

            // NOTE(review): totalFloats is fixed here, before the aux
            // GetBuffer loop below overwrites desktopAudioFrames -- confirm
            // aux sources always report the same frame count.
            UINT totalFloats = desktopAudioFrames*2;

            if(bDesktopMuted) {
                // Clearing the desktop audio buffer before mixing in the auxiliary audio sources.
                zero(desktopBuffer, sizeof(*desktopBuffer)*totalFloats);
            }

            if(micAudio != NULL) {
                micAudio->GetBuffer(&micBuffer, &micAudioFrames, timestamp-10);
                micAudio->GetNewestFrame(&latestMicBuffer, &latestMicAudioFrames);
            }

            //----------------------------------------------------------------------------
            // get latest aux volume level samples and mix

            OSEnterMutex(hAuxAudioMutex);

            // Copy the newest desktop frame, then mix the newest aux frames
            // into it for metering only.
            mixedLatestDesktopSamples.CopyArray(latestDesktopBuffer, latestDesktopAudioFrames*2);
            for(UINT i=0; i<auxAudioSources.Num(); i++) {
                float *latestAuxBuffer;
                if(auxAudioSources[i]->GetNewestFrame(&latestAuxBuffer, &latestDesktopAudioFrames))
                    MixAudio(mixedLatestDesktopSamples.Array(), latestAuxBuffer, latestDesktopAudioFrames*2, false);
            }

            //----------------------------------------------------------------------------
            // mix output aux sound samples with the desktop

            for(UINT i=0; i<auxAudioSources.Num(); i++) {
                float *auxBuffer;
                if(auxAudioSources[i]->GetBuffer(&auxBuffer, &desktopAudioFrames, timestamp-10))
                    MixAudio(desktopBuffer, auxBuffer, desktopAudioFrames*2, false);
            }

            OSLeaveMutex(hAuxAudioMutex);

            //----------------------------------------------------------------------------
            //UINT totalFloats = desktopAudioFrames*2;

            //----------------------------------------------------------------------------
            /*multiply samples by volume and compute RMS and max of samples*/
            float desktopRMS = 0, micRMS = 0, desktopMx = 0, micMx = 0;
            // Use 1.0f instead of curDesktopVol, since aux audio sources already have
            // their volume set, and shouldn't be boosted anyway.
            if(latestDesktopBuffer)
                CalculateVolumeLevels(mixedLatestDesktopSamples.Array(), latestDesktopAudioFrames*2, 1.0f, desktopRMS, desktopMx);

            if(bMicEnabled && latestMicBuffer)
                CalculateVolumeLevels(latestMicBuffer, latestMicAudioFrames*2, curMicVol, micRMS, micMx);

            /*convert RMS and Max of samples to dB*/
            desktopRMS = toDB(desktopRMS);
            micRMS = toDB(micRMS);
            desktopMx = toDB(desktopMx);
            micMx = toDB(micMx);

            /* update max if sample max is greater or after 1 second */
            // NOTE(review): peakMeterDelayFrames is 3 seconds at the
            // hard-coded 44.1kHz rate, despite the "1 second" wording above.
            float maxAlpha = 0.15f;
            UINT peakMeterDelayFrames = 44100 * 3;

            // Instant attack, exponential decay toward the current sample max.
            if(micMx > micMax) {
                micMax = micMx;
            } else {
                micMax = maxAlpha * micMx + (1.0f - maxAlpha) * micMax;
            }

            if(desktopMx > desktopMax) {
                desktopMax = desktopMx;
            } else {
                desktopMax = maxAlpha * desktopMx + (1.0f - maxAlpha) * desktopMax;
            }

            /*update delayed peak meter*/
            if(micMax > micPeak || audioFramesSinceMicMaxUpdate > peakMeterDelayFrames) {
                micPeak = micMax;
                audioFramesSinceMicMaxUpdate = 0;
            } else {
                audioFramesSinceMicMaxUpdate += desktopAudioFrames;
            }

            if(desktopMax > desktopPeak || audioFramesSinceDesktopMaxUpdate > peakMeterDelayFrames) {
                desktopPeak = desktopMax;
                audioFramesSinceDesktopMaxUpdate = 0;
            } else {
                audioFramesSinceDesktopMaxUpdate += desktopAudioFrames;
            }

            /*low pass the level sampling*/
            float rmsAlpha = 0.15f;
            desktopMag = rmsAlpha * desktopRMS + desktopMag * (1.0f - rmsAlpha);
            micMag = rmsAlpha * micRMS + micMag * (1.0f - rmsAlpha);

            /*update the meter about every 50ms*/
            // 2205 frames = 50ms at 44.1kHz.
            audioFramesSinceMeterUpdate += desktopAudioFrames;
            if(audioFramesSinceMeterUpdate >= 2205) {
                PostMessage(hwndMain, WM_COMMAND, MAKEWPARAM(ID_MICVOLUMEMETER, VOLN_METERED), 0);
                audioFramesSinceMeterUpdate = 0;
            }

            //----------------------------------------------------------------------------
            // mix mic and desktop sound, using SSE2 if available
            // also, it's perfectly fine to just mix into the returned buffer

            // NOTE(review): unlike the metering code above, this checks only
            // bMicEnabled and not micBuffer -- assumes micAudio->GetBuffer
            // always sets micBuffer when micAudio is non-null; verify.
            if(bMicEnabled)
                MixAudio(desktopBuffer, micBuffer, totalFloats, bForceMicMono);

            EncodeAudioSegment(desktopBuffer, totalFloats>>1, lastAudioTime);
        }

        //-----------------------------------------------

        if(!bRecievedFirstAudioFrame && pendingAudioFrames.Num())
            bRecievedFirstAudioFrame = true;
    }

    // Shutdown: reset meters, notify the UI once more, and drop pending audio.
    desktopMag = desktopMax = desktopPeak = VOL_MIN;
    micMag = micMax = micPeak = VOL_MIN;

    PostMessage(hwndMain, WM_COMMAND, MAKEWPARAM(ID_MICVOLUMEMETER, VOLN_METERED), 0);

    for(UINT i=0; i<pendingAudioFrames.Num(); i++)
        pendingAudioFrames[i].audioData.Clear();

    // Release the MMCSS priority acquired at the top of the function.
    AvRevertMmThreadCharacteristics(hTask);
}
// Stereo compressor audio callback.  Per sample: feeds the louder of the two
// channels into a ring buffer, computes a windowed RMS level, derives a
// gain-reduction amount GR (in dB) with separate attack/release smoothing,
// periodically publishes RMS/GR history for the UI, and writes the
// compressed signal mixed with the dry signal to the outputs.
void PowerJuiceX2Plugin::d_run(const float** inputs, float** outputs, uint32_t frames)
{
    const float* in1 = inputs[0];
    const float* in2 = inputs[1];
    float* out1 = outputs[0];
    float* out2 = outputs[1];

    float sum;
    float data;
    // NOTE(review): this outer `difference` is reset each iteration but is
    // shadowed by the local `difference` in the attack branch below, so it is
    // effectively unused.
    float difference;

    for (uint32_t i=0; i < frames; i++) {
        sum = 0.0f;
        data = 0.0f;
        difference = 0;

        //sanitizeDenormal(in1[i]); // FIXME - you cannot modify inputs
        //sanitizeDenormal(in2[i]); // FIXME - you cannot modify inputs

        /* compute last RMS */

        //store audio samples in an RMS buffer line
        RMSStack.data[RMSStack.start++] = std::max(in1[i], in2[i]);
        if (RMSStack.start == kFloatRMSStackCount)
            RMSStack.start = 0;

        //compute RMS over last kFloatRMSStackCount samples
        for (int j=0; j < kFloatRMSStackCount; ++j) {
            data = RMSStack.data[(RMSStack.start+j) % kFloatRMSStackCount];
            sum += data * data;
        }

        //root mean SQUARE
        float RMS = sqrt(sum / kFloatRMSStackCount);
        sanitizeDenormal(RMS);

        /* compute gain reduction if needed */
        float RMSDB = toDB(RMS);

        if (RMSDB>threshold) {
            //attack stage
            float difference = (RMSDB-threshold);
            //sanitizeDenormal(difference);

            targetGR = difference - difference/ratio;
            if (targetGR>difference/(ratio/4.0f)) {
                targetGR = difference - difference/(ratio*1.5f);
                //more power!
            }

            // NOTE(review): in the extracted source a stray "//" sat directly
            // before this conditional; it is reconstructed as active code here
            // because the braces only balance that way -- verify upstream.
            if (GR<targetGR) {
                //approach targetGR at attackSamples rate
                GR -= (GR-targetGR)/(attackSamples);
            } else {
                //approach targetGR at releaseSamples rate
                GR -= (GR-targetGR)/releaseSamples;
            }
            sanitizeDenormal(GR);
        } else {
            //release stage
            //approach targetGR at releaseSamples rate, targetGR = 0.0f
            GR -= GR/releaseSamples;
        }

        //store audio in lookahead buffer
        lookaheadStack.data[lookaheadStack.start++] = std::max(in1[i], in2[i]);
        //printf("rms\n");
        if (lookaheadStack.start == kFloatLookaheadStackCount)
            lookaheadStack.start = 0;

        // Every refreshSkip samples, publish metering data for the UI.
        if (++averageCounter >= refreshSkip) {
            //add relevant values to the shared memory
            rms.data[rms.start++] = RMSDB;
            gainReduction.data[gainReduction.start++] = GR;

            //rewind stack reading heads if needed
            if (rms.start == kFloatStackCount)
                rms.start = 0;
            if (gainReduction.start == kFloatStackCount)
                gainReduction.start = 0;

            //saving in gfx format, for speed
            //share memory
            for (int j=0; j < kFloatStackCount; ++j)
                history.rms[j] = -toIEC(rms.data[(rms.start+j) % kFloatStackCount])/200*h +h +y;

            for (int j=0; j < kFloatStackCount; ++j) {
                history.gainReduction[j] = -toIEC(-gainReduction.data[(gainReduction.start+j) % kFloatStackCount])/200*h +h +y;
            }

            // Request a repaint roughly every 6th history update.
            repaintSkip++;
            if (repaintSkip>5) {
                repaintSkip = 0;
                newRepaint = true;
            }

            averageCounter = 0;
            inputMax = 0.0f;
        }

        /* compress, mix, done. */
        // realGR is the linear gain corresponding to -GR dB; mix blends the
        // compressed (wet) signal with the dry input.
        float realGR = fromDB(-GR);
        float compressedSignal1 = in1[i]*realGR;
        float compressedSignal2 = in2[i]*realGR;
        out1[i] = (compressedSignal1*makeupFloat*mix)+in1[i]*(1-mix);
        out2[i] = (compressedSignal2*makeupFloat*mix)+in2[i]*(1-mix);
    }
}