void SSRunMixer(void)
{
    int bytes_copied;
    int bytestogo;
    int c, i;
    int pos = read_sound_int(&SOUNDSTATUS->samplepos);

    memset(tmp_sound_buffer, 0, sizeof(tmp_sound_buffer));
    const int len = 2 * SAMPLES_TO_BYTES(AUDIO_SIZE);

    // get data for all channels and add it to the mix
    for(c=0;c<SS_NUM_CHANNELS;c++)
    {
        if (channel[c].head == channel[c].tail)
            continue;

        bytestogo = len;
        mix_pos = 0;

        while(bytestogo > 0)
        {
            bytes_copied = AddBuffer(&channel[c], bytestogo);
            bytestogo -= bytes_copied;

            if (channel[c].head == channel[c].tail)
            {
                // ran out of chunks before buffer full
                // clear remaining portion of mixbuffer
                if (bytestogo)
                    memset(&mixbuffer[mix_pos], 0, bytestogo);

                break;
            }
        }

        MixAudio(tmp_sound_buffer, mixbuffer, len, channel[c].volume);
    }

    // tell any callbacks that had a chunk finish, that their chunk finished
    for(c=0;c<SS_NUM_CHANNELS;c++)
    {
        if (channel[c].FinishedCB)
        {
            for(i=0;i<channel[c].nFinishedChunks;i++)
            {
                //stat("Telling channel %d's handler that chunk %d finished", c, channel[c].FinishedChunkUserdata[i]);
                (*channel[c].FinishedCB)(c, channel[c].FinishedChunkUserdata[i]);
            }
        }

        channel[c].nFinishedChunks = 0;
    }

    memcpy4s(RING_BUF + pos, tmp_sound_buffer, SAMPLES_TO_BYTES(AUDIO_SIZE));
}
void ZeldaUCode::HandleMail_LightVersion(u32 mail)
{
    //ERROR_LOG(DSPHLE, "Light version mail %08X, list in progress: %s, step: %i/%i",
    //          mail, m_list_in_progress ? "yes":"no", m_step, m_num_steps);

    if (m_sync_cmd_pending)
    {
        DSP::GenerateDSPInterruptFromDSPEmu(DSP::INT_DSP);

        MixAudio();

        m_current_buffer++;
        if (m_current_buffer == m_num_buffers)
        {
            m_sync_cmd_pending = false;
            DEBUG_LOG(DSPHLE, "Update the SoundThread to be in sync");
        }
        return;
    }

    if (!m_list_in_progress)
    {
        switch ((mail >> 24) & 0x7F)
        {
        case 0x00: m_num_steps = 1; break; // dummy
        case 0x01: m_num_steps = 5; break; // DsetupTable
        case 0x02: m_num_steps = 3; break; // DsyncFrame

        default:
            {
                m_num_steps = 0;
                PanicAlert("Zelda uCode (light version): unknown/unsupported command %02X",
                           (mail >> 24) & 0x7F);
            }
            return;
        }

        m_list_in_progress = true;
        m_step = 0;
    }

    if (m_step >= sizeof(m_buffer) / 4)
        PanicAlert("m_step out of range");

    ((u32*)m_buffer)[m_step] = mail;
    m_step++;

    if (m_step >= m_num_steps)
    {
        ExecuteList();
        m_list_in_progress = false;
    }
}
void AOS4_sound_handler::mix(boost::int16_t* outSamples, boost::int16_t* inSamples,
                             unsigned int nSamples, float volume)
{
    if (!_closing)
    {
        unsigned int nBytes = nSamples * 2;

        boost::uint8_t* out = reinterpret_cast<boost::uint8_t*>(outSamples);
        boost::uint8_t* in  = reinterpret_cast<boost::uint8_t*>(inSamples);

        MixAudio(out, in, nBytes, static_cast<int>(MIX_MAXVOLUME * volume));
    }
}
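/*
 * Both SSRunMixer above and the AOS4 handler call MixAudio with an
 * SDL_MixAudio-style signature: (destination bytes, source bytes, byte count,
 * volume in 0..MIX_MAXVOLUME), where MIX_MAXVOLUME is 128. The sketch below is
 * only an illustration of that additive, volume-scaled, saturating mix for
 * interleaved signed 16-bit native-endian samples; it is not either project's
 * actual implementation, and the name MixAudioS16 is made up for this example.
 */
#include <stdint.h>
#include <string.h>

#define MIX_MAXVOLUME 128

static void MixAudioS16(uint8_t *dst, const uint8_t *src, unsigned len, int volume)
{
    unsigned i;
    for (i = 0; i + 1 < len; i += 2)
    {
        int16_t d, s;
        memcpy(&d, dst + i, sizeof d);   /* read one destination sample */
        memcpy(&s, src + i, sizeof s);   /* read one source sample */

        /* scale the source by volume/MIX_MAXVOLUME and add it to the destination */
        int mixed = d + (s * volume) / MIX_MAXVOLUME;

        /* saturate instead of wrapping around */
        if (mixed >  32767) mixed =  32767;
        if (mixed < -32768) mixed = -32768;

        d = (int16_t)mixed;
        memcpy(dst + i, &d, sizeof d);
    }
}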
void ZeldaUCode::HandleMail_SMSVersion(u32 mail)
{
    if (m_sync_in_progress)
    {
        if (m_sync_cmd_pending)
        {
            m_sync_flags[(m_num_sync_mail << 1)    ] = mail >> 16;
            m_sync_flags[(m_num_sync_mail << 1) + 1] = mail & 0xFFFF;

            m_num_sync_mail++;
            if (m_num_sync_mail == 2)
            {
                m_num_sync_mail = 0;
                m_sync_in_progress = false;

                MixAudio();

                m_current_buffer++;

                m_mail_handler.PushMail(DSP_SYNC);
                DSP::GenerateDSPInterruptFromDSPEmu(DSP::INT_DSP);
                m_mail_handler.PushMail(0xF355FF00 | m_current_buffer);

                if (m_current_buffer == m_num_buffers)
                {
                    m_mail_handler.PushMail(DSP_FRAME_END);
                    // DSP::GenerateDSPInterruptFromDSPEmu(DSP::INT_DSP);

                    m_sync_cmd_pending = false;
                }
            }
        }
        else
        {
            m_sync_in_progress = false;
        }

        return;
    }
void OBS::MainAudioLoop()
{
    const unsigned int audioSamplesPerSec = App->GetSampleRateHz();
    const unsigned int audioSampleSize = audioSamplesPerSec/100;

    DWORD taskID = 0;
    HANDLE hTask = AvSetMmThreadCharacteristics(TEXT("Pro Audio"), &taskID);

    bufferedAudioTimes.Clear();

    bPushToTalkOn = false;

    micMax = desktopMax = VOL_MIN;
    micPeak = desktopPeak = VOL_MIN;

    UINT audioFramesSinceMeterUpdate = 0;
    UINT audioFramesSinceMicMaxUpdate = 0;
    UINT audioFramesSinceDesktopMaxUpdate = 0;

    List<float> mixBuffer, levelsBuffer;
    mixBuffer.SetSize(audioSampleSize*2);
    levelsBuffer.SetSize(audioSampleSize*2);

    latestAudioTime = 0;

    //---------------------------------------------
    // the audio loop of doom

    while (true)
    {
        OSSleep(5); //screw it, just run it every 5ms

        if (!bRunning)
            break;

        //-----------------------------------------------

        float *desktopBuffer, *micBuffer;

        curDesktopVol = desktopVol * desktopBoost;

        if (bUsingPushToTalk)
            curMicVol = bPushToTalkOn ? micVol : 0.0f;
        else
            curMicVol = micVol;

        curMicVol *= micBoost;

        bool bDesktopMuted = (curDesktopVol < EPSILON);
        bool bMicEnabled   = (micAudio != NULL);

        while (QueryNewAudio())
        {
            QWORD timestamp = bufferedAudioTimes[0];
            bufferedAudioTimes.Remove(0);

            zero(mixBuffer.Array(),    audioSampleSize*2*sizeof(float));
            zero(levelsBuffer.Array(), audioSampleSize*2*sizeof(float));

            //----------------------------------------------------------------------------
            // get latest sample for calculating the volume levels

            float *latestDesktopBuffer = NULL, *latestMicBuffer = NULL;

            desktopAudio->GetBuffer(&desktopBuffer, timestamp);
            desktopAudio->GetNewestFrame(&latestDesktopBuffer);

            if (micAudio != NULL)
            {
                micAudio->GetBuffer(&micBuffer, timestamp);
                micAudio->GetNewestFrame(&latestMicBuffer);
            }

            //----------------------------------------------------------------------------
            // mix desktop samples

            if (desktopBuffer)
                MixAudio(mixBuffer.Array(), desktopBuffer, audioSampleSize*2, false);

            if (latestDesktopBuffer)
                MixAudio(levelsBuffer.Array(), latestDesktopBuffer, audioSampleSize*2, false);

            //----------------------------------------------------------------------------
            // get latest aux volume level samples and mix

            OSEnterMutex(hAuxAudioMutex);

            for (UINT i=0; i<auxAudioSources.Num(); i++)
            {
                float *latestAuxBuffer;
                if (auxAudioSources[i]->GetNewestFrame(&latestAuxBuffer))
                    MixAudio(levelsBuffer.Array(), latestAuxBuffer, audioSampleSize*2, false);
            }

            //----------------------------------------------------------------------------
            // mix output aux sound samples with the desktop

            for (UINT i=0; i<auxAudioSources.Num(); i++)
            {
                float *auxBuffer;
                if (auxAudioSources[i]->GetBuffer(&auxBuffer, timestamp))
                    MixAudio(mixBuffer.Array(), auxBuffer, audioSampleSize*2, false);
            }

            OSLeaveMutex(hAuxAudioMutex);

            //----------------------------------------------------------------------------
            // multiply samples by volume and compute RMS and max of samples
            // Use 1.0f instead of curDesktopVol, since aux audio sources already have
            // their volume set, and shouldn't be boosted anyway.

            float desktopRMS = 0, micRMS = 0, desktopMx = 0, micMx = 0;

            if (latestDesktopBuffer)
                CalculateVolumeLevels(levelsBuffer.Array(), audioSampleSize*2, 1.0f, desktopRMS, desktopMx);

            if (bMicEnabled && latestMicBuffer)
                CalculateVolumeLevels(latestMicBuffer, audioSampleSize*2, curMicVol, micRMS, micMx);

            //----------------------------------------------------------------------------
            // convert RMS and Max of samples to dB

            desktopRMS = toDB(desktopRMS);
            micRMS     = toDB(micRMS);
            desktopMx  = toDB(desktopMx);
            micMx      = toDB(micMx);

            //----------------------------------------------------------------------------
            // update max if sample max is greater or after 1 second

            float maxAlpha = 0.15f;
            UINT peakMeterDelayFrames = audioSamplesPerSec * 3;

            if (micMx > micMax)
                micMax = micMx;
            else
                micMax = maxAlpha * micMx + (1.0f - maxAlpha) * micMax;

            if (desktopMx > desktopMax)
                desktopMax = desktopMx;
            else
                desktopMax = maxAlpha * desktopMx + (1.0f - maxAlpha) * desktopMax;

            //----------------------------------------------------------------------------
            // update delayed peak meter

            if (micMax > micPeak || audioFramesSinceMicMaxUpdate > peakMeterDelayFrames)
            {
                micPeak = micMax;
                audioFramesSinceMicMaxUpdate = 0;
            }
            else
                audioFramesSinceMicMaxUpdate += audioSampleSize;

            if (desktopMax > desktopPeak || audioFramesSinceDesktopMaxUpdate > peakMeterDelayFrames)
            {
                desktopPeak = desktopMax;
                audioFramesSinceDesktopMaxUpdate = 0;
            }
            else
                audioFramesSinceDesktopMaxUpdate += audioSampleSize;

            //----------------------------------------------------------------------------
            // low pass the level sampling

            float rmsAlpha = 0.15f;
            desktopMag = rmsAlpha * desktopRMS + desktopMag * (1.0f - rmsAlpha);
            micMag     = rmsAlpha * micRMS     + micMag     * (1.0f - rmsAlpha);

            //----------------------------------------------------------------------------
            // update the meter about every 50ms

            audioFramesSinceMeterUpdate += audioSampleSize;
            if (audioFramesSinceMeterUpdate >= (audioSampleSize*5))
            {
                PostMessage(hwndMain, WM_COMMAND, MAKEWPARAM(ID_MICVOLUMEMETER, VOLN_METERED), 0);
                audioFramesSinceMeterUpdate = 0;
            }

            //----------------------------------------------------------------------------
            // mix mic and desktop sound
            // also, it's perfectly fine to just mix into the returned buffer

            if (bMicEnabled && micBuffer)
                MixAudio(mixBuffer.Array(), micBuffer, audioSampleSize*2, bForceMicMono);

            EncodeAudioSegment(mixBuffer.Array(), audioSampleSize, timestamp);
        }

        //-----------------------------------------------

        if (!bRecievedFirstAudioFrame && pendingAudioFrames.Num())
            bRecievedFirstAudioFrame = true;
    }

    desktopMag = desktopMax = desktopPeak = VOL_MIN;
    micMag = micMax = micPeak = VOL_MIN;

    PostMessage(hwndMain, WM_COMMAND, MAKEWPARAM(ID_MICVOLUMEMETER, VOLN_METERED), 0);

    for (UINT i=0; i<pendingAudioFrames.Num(); i++)
        pendingAudioFrames[i].audioData.Clear();

    AvRevertMmThreadCharacteristics(hTask);
}
void OBS::MainAudioLoop()
{
    DWORD taskID = 0;
    HANDLE hTask = AvSetMmThreadCharacteristics(TEXT("Pro Audio"), &taskID);

    bPushToTalkOn = false;

    micMax = desktopMax = VOL_MIN;
    micPeak = desktopPeak = VOL_MIN;

    UINT audioFramesSinceMeterUpdate = 0;
    UINT audioFramesSinceMicMaxUpdate = 0;
    UINT audioFramesSinceDesktopMaxUpdate = 0;

    List<float> mixedLatestDesktopSamples;

    List<float> blank10msSample;
    blank10msSample.SetSize(882);

    QWORD lastAudioTime = 0;

    while(TRUE)
    {
        OSSleep(5); //screw it, just run it every 5ms

        if(!bRunning)
            break;

        //-----------------------------------------------

        float *desktopBuffer, *micBuffer;
        UINT desktopAudioFrames = 0, micAudioFrames = 0;
        UINT latestDesktopAudioFrames = 0, latestMicAudioFrames = 0;

        curDesktopVol = desktopVol * desktopBoost;

        if(bUsingPushToTalk)
            curMicVol = bPushToTalkOn ? micVol : 0.0f;
        else
            curMicVol = micVol;

        curMicVol *= micBoost;

        bool bDesktopMuted = (curDesktopVol < EPSILON);
        bool bMicEnabled   = (micAudio != NULL);

        QWORD timestamp;
        while(QueryNewAudio(timestamp))
        {
            if (!lastAudioTime)
                lastAudioTime = App->GetSceneTimestamp();

            if (lastAudioTime < timestamp)
            {
                while ((lastAudioTime+=10) < timestamp)
                    EncodeAudioSegment(blank10msSample.Array(), 441, lastAudioTime);
            }

            //----------------------------------------------------------------------------
            // get latest sample for calculating the volume levels

            float *latestDesktopBuffer = NULL, *latestMicBuffer = NULL;

            desktopAudio->GetBuffer(&desktopBuffer, &desktopAudioFrames, timestamp-10);
            desktopAudio->GetNewestFrame(&latestDesktopBuffer, &latestDesktopAudioFrames);

            UINT totalFloats = desktopAudioFrames*2;
            if(bDesktopMuted)
            {
                // Clearing the desktop audio buffer before mixing in the auxiliary audio sources.
                zero(desktopBuffer, sizeof(*desktopBuffer)*totalFloats);
            }

            if(micAudio != NULL)
            {
                micAudio->GetBuffer(&micBuffer, &micAudioFrames, timestamp-10);
                micAudio->GetNewestFrame(&latestMicBuffer, &latestMicAudioFrames);
            }

            //----------------------------------------------------------------------------
            // get latest aux volume level samples and mix

            OSEnterMutex(hAuxAudioMutex);

            mixedLatestDesktopSamples.CopyArray(latestDesktopBuffer, latestDesktopAudioFrames*2);

            for(UINT i=0; i<auxAudioSources.Num(); i++)
            {
                float *latestAuxBuffer;
                if(auxAudioSources[i]->GetNewestFrame(&latestAuxBuffer, &latestDesktopAudioFrames))
                    MixAudio(mixedLatestDesktopSamples.Array(), latestAuxBuffer, latestDesktopAudioFrames*2, false);
            }

            //----------------------------------------------------------------------------
            // mix output aux sound samples with the desktop

            for(UINT i=0; i<auxAudioSources.Num(); i++)
            {
                float *auxBuffer;
                if(auxAudioSources[i]->GetBuffer(&auxBuffer, &desktopAudioFrames, timestamp-10))
                    MixAudio(desktopBuffer, auxBuffer, desktopAudioFrames*2, false);
            }

            OSLeaveMutex(hAuxAudioMutex);

            //----------------------------------------------------------------------------
            //UINT totalFloats = desktopAudioFrames*2;

            //----------------------------------------------------------------------------
            // multiply samples by volume and compute RMS and max of samples

            float desktopRMS = 0, micRMS = 0, desktopMx = 0, micMx = 0;

            // Use 1.0f instead of curDesktopVol, since aux audio sources already have
            // their volume set, and shouldn't be boosted anyway.
            if(latestDesktopBuffer)
                CalculateVolumeLevels(mixedLatestDesktopSamples.Array(), latestDesktopAudioFrames*2, 1.0f, desktopRMS, desktopMx);

            if(bMicEnabled && latestMicBuffer)
                CalculateVolumeLevels(latestMicBuffer, latestMicAudioFrames*2, curMicVol, micRMS, micMx);

            //----------------------------------------------------------------------------
            // convert RMS and Max of samples to dB

            desktopRMS = toDB(desktopRMS);
            micRMS     = toDB(micRMS);
            desktopMx  = toDB(desktopMx);
            micMx      = toDB(micMx);

            //----------------------------------------------------------------------------
            // update max if sample max is greater or after 1 second

            float maxAlpha = 0.15f;
            UINT peakMeterDelayFrames = 44100 * 3;

            if(micMx > micMax)
                micMax = micMx;
            else
                micMax = maxAlpha * micMx + (1.0f - maxAlpha) * micMax;

            if(desktopMx > desktopMax)
                desktopMax = desktopMx;
            else
                desktopMax = maxAlpha * desktopMx + (1.0f - maxAlpha) * desktopMax;

            //----------------------------------------------------------------------------
            // update delayed peak meter

            if(micMax > micPeak || audioFramesSinceMicMaxUpdate > peakMeterDelayFrames)
            {
                micPeak = micMax;
                audioFramesSinceMicMaxUpdate = 0;
            }
            else
                audioFramesSinceMicMaxUpdate += desktopAudioFrames;

            if(desktopMax > desktopPeak || audioFramesSinceDesktopMaxUpdate > peakMeterDelayFrames)
            {
                desktopPeak = desktopMax;
                audioFramesSinceDesktopMaxUpdate = 0;
            }
            else
                audioFramesSinceDesktopMaxUpdate += desktopAudioFrames;

            //----------------------------------------------------------------------------
            // low pass the level sampling

            float rmsAlpha = 0.15f;
            desktopMag = rmsAlpha * desktopRMS + desktopMag * (1.0f - rmsAlpha);
            micMag     = rmsAlpha * micRMS     + micMag     * (1.0f - rmsAlpha);

            //----------------------------------------------------------------------------
            // update the meter about every 50ms

            audioFramesSinceMeterUpdate += desktopAudioFrames;
            if(audioFramesSinceMeterUpdate >= 2205)
            {
                PostMessage(hwndMain, WM_COMMAND, MAKEWPARAM(ID_MICVOLUMEMETER, VOLN_METERED), 0);
                audioFramesSinceMeterUpdate = 0;
            }

            //----------------------------------------------------------------------------
            // mix mic and desktop sound, using SSE2 if available
            // also, it's perfectly fine to just mix into the returned buffer

            if(bMicEnabled)
                MixAudio(desktopBuffer, micBuffer, totalFloats, bForceMicMono);

            EncodeAudioSegment(desktopBuffer, totalFloats>>1, lastAudioTime);
        }

        //-----------------------------------------------

        if(!bRecievedFirstAudioFrame && pendingAudioFrames.Num())
            bRecievedFirstAudioFrame = true;
    }

    desktopMag = desktopMax = desktopPeak = VOL_MIN;
    micMag = micMax = micPeak = VOL_MIN;

    PostMessage(hwndMain, WM_COMMAND, MAKEWPARAM(ID_MICVOLUMEMETER, VOLN_METERED), 0);

    for(UINT i=0; i<pendingAudioFrames.Num(); i++)
        pendingAudioFrames[i].audioData.Clear();

    AvRevertMmThreadCharacteristics(hTask);
}
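/*
 * The two OBS audio loops above pass float buffers to MixAudio with the
 * signature (dest, source, totalFloats, bForceMono). The sketch below is a
 * plain-C++ illustration of that additive float mix under stated assumptions:
 * interleaved stereo samples in [-1, 1], bForceMono collapsing each source
 * stereo pair to its average, and saturation to [-1, 1]. It is not OBS's
 * actual (SSE-optimized) implementation, and MixAudioSketch is a made-up name.
 */
#include <algorithm>

static void MixAudioSketch(float *dest, const float *src, unsigned totalFloats, bool bForceMono)
{
    // totalFloats counts individual floats (frames * 2), matching the call sites above
    for (unsigned i = 0; i + 1 < totalFloats; i += 2)
    {
        float left  = src[i];
        float right = src[i + 1];

        if (bForceMono)
        {
            // collapse the stereo pair to its average and feed it to both channels
            float mono = (left + right) * 0.5f;
            left = right = mono;
        }

        // additive mix with clamping so the sum stays in the valid sample range
        dest[i]     = std::min(1.0f, std::max(-1.0f, dest[i]     + left));
        dest[i + 1] = std::min(1.0f, std::max(-1.0f, dest[i + 1] + right));
    }
}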