static ALvoid CalcListenerParams(ALlistener *Listener)
{
    ALfloat N[3], V[3], U[3];
    aluVector P;

    /* AT then UP */
    N[0] = Listener->Forward[0];
    N[1] = Listener->Forward[1];
    N[2] = Listener->Forward[2];
    aluNormalize(N);
    V[0] = Listener->Up[0];
    V[1] = Listener->Up[1];
    V[2] = Listener->Up[2];
    aluNormalize(V);
    /* Build and normalize right-vector */
    aluCrossproduct(N, V, U);
    aluNormalize(U);

    P = Listener->Position;

    aluMatrixSet(&Listener->Params.Matrix,
        U[0], V[0], -N[0], 0.0f,
        U[1], V[1], -N[1], 0.0f,
        U[2], V[2], -N[2], 0.0f,
        0.0f, 0.0f,  0.0f, 1.0f
    );
    aluMatrixVector(&P, &Listener->Params.Matrix);
    aluMatrixSetRow(&Listener->Params.Matrix, 3, -P.v[0], -P.v[1], -P.v[2], 1.0f);

    Listener->Params.Velocity = Listener->Velocity;
    aluMatrixVector(&Listener->Params.Velocity, &Listener->Params.Matrix);
}
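/* Note on the transform built by CalcListenerParams: aluMatrixVector appears
 * to treat vectors as row vectors (v' = v * M), so the right/up/at basis ends
 * up in the columns of the upper 3x3 and the negated, rotated listener
 * position in row 3.  Transforming a world-space point p by Params.Matrix
 * then yields listener-relative coordinates:
 *
 *     x' =  dot(p - ListenerPos, U)   (right)
 *     y' =  dot(p - ListenerPos, V)   (up)
 *     z' = -dot(p - ListenerPos, N)   (ahead is -Z)
 *
 * The listener velocity is transformed by the same matrix so it can later be
 * compared against source velocities in listener space.
 */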
ALUAPI ALvoid ALUAPIENTRY aluCalculateSourceParameters(ALuint source, ALuint freqOutput,
                                                        ALuint numOutputChannels,
                                                        ALfloat *drysend, ALfloat *wetsend,
                                                        ALfloat *pitch)
{
    ALfloat ListenerOrientation[6], ListenerPosition[3], ListenerVelocity[3];
    ALfloat InnerAngle, OuterAngle, OuterGain, Angle, Distance, DryMix, WetMix;
    ALfloat Direction[3], Position[3], Velocity[3], SourceToListener[3];
    ALfloat MinVolume, MaxVolume, MinDist, MaxDist, Rolloff;
    ALfloat Pitch, ConeVolume, SourceVolume, PanningFB, PanningLR, ListenerGain;
    ALuint NumBufferChannels;
    ALfloat U[3], V[3], N[3];
    ALfloat DopplerFactor, DopplerVelocity, flSpeedOfSound, flMaxVelocity;
    ALfloat flVSS, flVLS;
    ALuint DistanceModel;
    ALfloat Matrix[3][3];
    ALint HeadRelative;
    ALuint Buffer;
    ALenum Error;
    ALfloat flAttenuation;

    if (alIsSource(source))
    {
        //Get global properties
        alGetFloatv(AL_DOPPLER_FACTOR, &DopplerFactor);
        alGetIntegerv(AL_DISTANCE_MODEL, &DistanceModel);
        alGetFloatv(AL_DOPPLER_VELOCITY, &DopplerVelocity);
        alGetFloatv(AL_SPEED_OF_SOUND, &flSpeedOfSound);

        //Get listener properties
        alGetListenerfv(AL_GAIN, &ListenerGain);
        alGetListenerfv(AL_POSITION, ListenerPosition);
        alGetListenerfv(AL_VELOCITY, ListenerVelocity);
        alGetListenerfv(AL_ORIENTATION, ListenerOrientation);

        //Get source properties
        alGetSourcef(source, AL_PITCH, &Pitch);
        alGetSourcef(source, AL_GAIN, &SourceVolume);
        alGetSourcei(source, AL_BUFFER, &Buffer);
        alGetSourcefv(source, AL_POSITION, Position);
        alGetSourcefv(source, AL_VELOCITY, Velocity);
        alGetSourcefv(source, AL_DIRECTION, Direction);
        alGetSourcef(source, AL_MIN_GAIN, &MinVolume);
        alGetSourcef(source, AL_MAX_GAIN, &MaxVolume);
        alGetSourcef(source, AL_REFERENCE_DISTANCE, &MinDist);
        alGetSourcef(source, AL_MAX_DISTANCE, &MaxDist);
        alGetSourcef(source, AL_ROLLOFF_FACTOR, &Rolloff);
        alGetSourcef(source, AL_CONE_OUTER_GAIN, &OuterGain);
        alGetSourcef(source, AL_CONE_INNER_ANGLE, &InnerAngle);
        alGetSourcef(source, AL_CONE_OUTER_ANGLE, &OuterAngle);
        alGetSourcei(source, AL_SOURCE_RELATIVE, &HeadRelative);

        //Set working variables
        DryMix = (ALfloat)(1.0f);
        WetMix = (ALfloat)(0.0f);

        //Get buffer properties
        alGetBufferi(Buffer, AL_CHANNELS, &NumBufferChannels);

        //Only apply 3D calculations for mono buffers
        if (NumBufferChannels == 1)
        {
            //1. Translate Listener to origin (convert to head relative)
            if (HeadRelative == AL_FALSE)
            {
                Position[0] -= ListenerPosition[0];
                Position[1] -= ListenerPosition[1];
                Position[2] -= ListenerPosition[2];
            }

            //2. Calculate distance attenuation
            Distance = (ALfloat)sqrt(aluDotproduct(Position, Position));

            // Clamp to MinDist and MaxDist if appropriate
            if ((DistanceModel == AL_INVERSE_DISTANCE_CLAMPED) ||
                (DistanceModel == AL_LINEAR_DISTANCE_CLAMPED) ||
                (DistanceModel == AL_EXPONENT_DISTANCE_CLAMPED))
            {
                Distance = (Distance < MinDist ? MinDist : Distance);
                Distance = (Distance > MaxDist ? MaxDist : Distance);
            }

            flAttenuation = 1.0f;
            switch (DistanceModel)
            {
            case AL_INVERSE_DISTANCE:
                if (MinDist > 0.0f)
                {
                    if ((MinDist + (Rolloff * (Distance - MinDist))) > 0.0f)
                        flAttenuation = MinDist / (MinDist + (Rolloff * (Distance - MinDist)));
                    else
                        flAttenuation = 1000000;
                }
                break;

            case AL_INVERSE_DISTANCE_CLAMPED:
                if ((MaxDist >= MinDist) && (MinDist > 0.0f))
                {
                    if ((MinDist + (Rolloff * (Distance - MinDist))) > 0.0f)
                        flAttenuation = MinDist / (MinDist + (Rolloff * (Distance - MinDist)));
                    else
                        flAttenuation = 1000000;
                }
                break;

            case AL_LINEAR_DISTANCE:
                if (MaxDist != MinDist)
                    flAttenuation = 1.0f - (Rolloff*(Distance-MinDist)/(MaxDist - MinDist));
                break;

            case AL_LINEAR_DISTANCE_CLAMPED:
                if (MaxDist > MinDist)
                    flAttenuation = 1.0f - (Rolloff*(Distance-MinDist)/(MaxDist - MinDist));
                break;

            case AL_EXPONENT_DISTANCE:
                if ((Distance > 0.0f) && (MinDist > 0.0f))
                    flAttenuation = (ALfloat)pow(Distance/MinDist, -Rolloff);
                break;

            case AL_EXPONENT_DISTANCE_CLAMPED:
                if ((MaxDist >= MinDist) && (Distance > 0.0f) && (MinDist > 0.0f))
                    flAttenuation = (ALfloat)pow(Distance/MinDist, -Rolloff);
                break;

            case AL_NONE:
            default:
                flAttenuation = 1.0f;
                break;
            }

            // Source Gain + Attenuation
            DryMix = SourceVolume * flAttenuation;

            // Clamp to Min/Max Gain
            DryMix = __min(DryMix, MaxVolume);
            DryMix = __max(DryMix, MinVolume);
            WetMix = __min(WetMix, MaxVolume);
            WetMix = __max(WetMix, MinVolume);

            //3. Apply directional soundcones
            SourceToListener[0] = -Position[0];
            SourceToListener[1] = -Position[1];
            SourceToListener[2] = -Position[2];
            aluNormalize(Direction);
            aluNormalize(SourceToListener);
            Angle = (ALfloat)(180.0*acos(aluDotproduct(Direction, SourceToListener))/3.141592654f);
            if ((Angle >= InnerAngle) && (Angle <= OuterAngle))
                ConeVolume = (1.0f+(OuterGain-1.0f)*(Angle-InnerAngle)/(OuterAngle-InnerAngle));
            else if (Angle > OuterAngle)
                ConeVolume = (1.0f+(OuterGain-1.0f));
            else
                ConeVolume = 1.0f;

            //4. Calculate Velocity
            if (DopplerFactor != 0.0f)
            {
                flVLS = aluDotproduct(ListenerVelocity, SourceToListener);
                flVSS = aluDotproduct(Velocity, SourceToListener);

                flMaxVelocity = (DopplerVelocity * flSpeedOfSound) / DopplerFactor;

                if (flVSS >= flMaxVelocity)
                    flVSS = (flMaxVelocity - 1.0f);
                else if (flVSS <= -flMaxVelocity)
                    flVSS = -flMaxVelocity + 1.0f;

                if (flVLS >= flMaxVelocity)
                    flVLS = (flMaxVelocity - 1.0f);
                else if (flVLS <= -flMaxVelocity)
                    flVLS = -flMaxVelocity + 1.0f;

                pitch[0] = Pitch * ((flSpeedOfSound * DopplerVelocity) - (DopplerFactor * flVLS)) /
                                   ((flSpeedOfSound * DopplerVelocity) - (DopplerFactor * flVSS));
            }
            else
            {
                pitch[0] = Pitch;
            }

            //5. Align coordinate system axes
            aluCrossproduct(&ListenerOrientation[0], &ListenerOrientation[3], U); // Right-vector
            aluNormalize(U);                                // Normalized Right-vector
            memcpy(V, &ListenerOrientation[3], sizeof(V));  // Up-vector
            aluNormalize(V);                                // Normalized Up-vector
            memcpy(N, &ListenerOrientation[0], sizeof(N));  // At-vector
            aluNormalize(N);                                // Normalized At-vector
            Matrix[0][0] = U[0]; Matrix[0][1] = V[0]; Matrix[0][2] = -N[0];
            Matrix[1][0] = U[1]; Matrix[1][1] = V[1]; Matrix[1][2] = -N[1];
            Matrix[2][0] = U[2]; Matrix[2][1] = V[2]; Matrix[2][2] = -N[2];
            aluMatrixVector(Position, Matrix);

            //6. Convert normalized position into front/back panning
            if (Distance != 0.0f)
            {
                aluNormalize(Position);
                PanningLR = (0.5f + 0.5f*Position[0]);
                PanningFB = (0.5f + 0.5f*Position[2]);
            }
            else
            {
                PanningLR = 0.5f;
                PanningFB = 0.5f;
            }

            //7. Convert front/back panning into channel volumes
            switch (numOutputChannels)
            {
            case 1:
                drysend[0] = (ConeVolume*ListenerGain*DryMix*(ALfloat)1.0f); //Direct
                wetsend[0] = (           ListenerGain*WetMix*(ALfloat)1.0f); //Room
                break;
            case 2:
                drysend[0] = (ConeVolume*ListenerGain*DryMix*(ALfloat)sqrt((1.0f-PanningLR))); //FL Direct
                drysend[1] = (ConeVolume*ListenerGain*DryMix*(ALfloat)sqrt((     PanningLR))); //FR Direct
                wetsend[0] = (           ListenerGain*WetMix*(ALfloat)sqrt((1.0f-PanningLR))); //FL Room
                wetsend[1] = (           ListenerGain*WetMix*(ALfloat)sqrt((     PanningLR))); //FR Room
                break;
            default:
                break;
            }
        }
        else
        {
            //1. Stereo buffers always play from front left/front right
            switch (numOutputChannels)
            {
            case 1:
                drysend[0] = (SourceVolume*1.0f*ListenerGain);
                wetsend[0] = (SourceVolume*0.0f*ListenerGain);
                break;
            case 2:
                drysend[0] = (SourceVolume*1.0f*ListenerGain);
                drysend[1] = (SourceVolume*1.0f*ListenerGain);
                wetsend[0] = (SourceVolume*0.0f*ListenerGain);
                wetsend[1] = (SourceVolume*0.0f*ListenerGain);
                break;
            default:
                break;
            }
            pitch[0] = (ALfloat)(Pitch);
        }

        Error = alGetError();
    }
}
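/* Illustrative usage of the legacy entry point above (variable names and the
 * output frequency are hypothetical, shown only to document the out
 * parameters).  For a stereo output, the caller provides per-channel dry/wet
 * gain arrays and a pitch slot, which the function fills in once per source
 * update:
 *
 *     ALfloat dry[2], wet[2], pitch;
 *     aluCalculateSourceParameters(source, 44100, 2, dry, wet, &pitch);
 *     // dry[0]/dry[1]: front-left/front-right direct-path gains
 *     // wet[0]/wet[1]: front-left/front-right room (reverb) gains
 *     // pitch:         doppler-adjusted playback pitch
 */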
ALvoid CalcSourceParams(ALvoice *voice, const ALsource *ALSource, const ALCcontext *ALContext)
{
    ALCdevice *Device = ALContext->Device;
    aluVector Position, Velocity, Direction, SourceToListener;
    ALfloat InnerAngle,OuterAngle,Angle,Distance,ClampedDist;
    ALfloat MinVolume,MaxVolume,MinDist,MaxDist,Rolloff;
    ALfloat ConeVolume,ConeHF,SourceVolume,ListenerGain;
    ALfloat DopplerFactor, SpeedOfSound;
    ALfloat AirAbsorptionFactor;
    ALfloat RoomAirAbsorption[MAX_SENDS];
    ALbufferlistitem *BufferListItem;
    ALfloat Attenuation;
    ALfloat RoomAttenuation[MAX_SENDS];
    ALfloat MetersPerUnit;
    ALfloat RoomRolloffBase;
    ALfloat RoomRolloff[MAX_SENDS];
    ALfloat DecayDistance[MAX_SENDS];
    ALfloat DryGain;
    ALfloat DryGainHF;
    ALfloat DryGainLF;
    ALboolean DryGainHFAuto;
    ALfloat WetGain[MAX_SENDS];
    ALfloat WetGainHF[MAX_SENDS];
    ALfloat WetGainLF[MAX_SENDS];
    ALboolean WetGainAuto;
    ALboolean WetGainHFAuto;
    ALfloat Pitch;
    ALuint Frequency;
    ALint NumSends;
    ALint i, j;

    DryGainHF = 1.0f;
    DryGainLF = 1.0f;
    for(i = 0;i < MAX_SENDS;i++)
    {
        WetGainHF[i] = 1.0f;
        WetGainLF[i] = 1.0f;
    }

    /* Get context/device properties */
    DopplerFactor = ALContext->DopplerFactor * ALSource->DopplerFactor;
    SpeedOfSound  = ALContext->SpeedOfSound * ALContext->DopplerVelocity;
    NumSends  = Device->NumAuxSends;
    Frequency = Device->Frequency;

    /* Get listener properties */
    ListenerGain  = ALContext->Listener->Gain;
    MetersPerUnit = ALContext->Listener->MetersPerUnit;

    /* Get source properties */
    SourceVolume = ALSource->Gain;
    MinVolume    = ALSource->MinGain;
    MaxVolume    = ALSource->MaxGain;
    Pitch        = ALSource->Pitch;
    Position     = ALSource->Position;
    Direction    = ALSource->Direction;
    Velocity     = ALSource->Velocity;
    MinDist      = ALSource->RefDistance;
    MaxDist      = ALSource->MaxDistance;
    Rolloff      = ALSource->RollOffFactor;
    InnerAngle   = ALSource->InnerAngle;
    OuterAngle   = ALSource->OuterAngle;
    AirAbsorptionFactor = ALSource->AirAbsorptionFactor;
    DryGainHFAuto   = ALSource->DryGainHFAuto;
    WetGainAuto     = ALSource->WetGainAuto;
    WetGainHFAuto   = ALSource->WetGainHFAuto;
    RoomRolloffBase = ALSource->RoomRolloffFactor;

    voice->Direct.OutBuffer = Device->DryBuffer;
    voice->Direct.OutChannels = Device->NumChannels;
    for(i = 0;i < NumSends;i++)
    {
        ALeffectslot *Slot = ALSource->Send[i].Slot;

        if(!Slot && i == 0)
            Slot = Device->DefaultSlot;
        if(!Slot || Slot->EffectType == AL_EFFECT_NULL)
        {
            Slot = NULL;
            RoomRolloff[i] = 0.0f;
            DecayDistance[i] = 0.0f;
            RoomAirAbsorption[i] = 1.0f;
        }
        else if(Slot->AuxSendAuto)
        {
            RoomRolloff[i] = RoomRolloffBase;
            if(IsReverbEffect(Slot->EffectType))
            {
                RoomRolloff[i] += Slot->EffectProps.Reverb.RoomRolloffFactor;
                DecayDistance[i] = Slot->EffectProps.Reverb.DecayTime *
                                   SPEEDOFSOUNDMETRESPERSEC;
                RoomAirAbsorption[i] = Slot->EffectProps.Reverb.AirAbsorptionGainHF;
            }
            else
            {
                DecayDistance[i] = 0.0f;
                RoomAirAbsorption[i] = 1.0f;
            }
        }
        else
        {
            /* If the slot's auxiliary send auto is off, the data sent to the
             * effect slot is the same as the dry path, sans filter effects */
            RoomRolloff[i] = Rolloff;
            DecayDistance[i] = 0.0f;
            RoomAirAbsorption[i] = AIRABSORBGAINHF;
        }

        if(!Slot || Slot->EffectType == AL_EFFECT_NULL)
            voice->Send[i].OutBuffer = NULL;
        else
            voice->Send[i].OutBuffer = Slot->WetBuffer;
    }

    /* Transform source to listener space (convert to head relative) */
    if(ALSource->HeadRelative == AL_FALSE)
    {
        const aluMatrix *Matrix = &ALContext->Listener->Params.Matrix;
        /* Transform source vectors */
        aluMatrixVector(&Position, Matrix);
        aluMatrixVector(&Velocity, Matrix);
        aluMatrixVector(&Direction, Matrix);
    }
    else
    {
        const aluVector *lvelocity = &ALContext->Listener->Params.Velocity;
        /* Offset the source velocity to be relative to the listener velocity */
        Velocity.v[0] += lvelocity->v[0];
        Velocity.v[1] += lvelocity->v[1];
        Velocity.v[2] += lvelocity->v[2];
    }

    aluNormalize(Direction.v);
    SourceToListener.v[0] = -Position.v[0];
    SourceToListener.v[1] = -Position.v[1];
    SourceToListener.v[2] = -Position.v[2];
    SourceToListener.v[3] = 0.0f;
    Distance = aluNormalize(SourceToListener.v);

    /* Calculate distance attenuation */
    ClampedDist = Distance;

    Attenuation = 1.0f;
    for(i = 0;i < NumSends;i++)
        RoomAttenuation[i] = 1.0f;
    switch(ALContext->SourceDistanceModel ? ALSource->DistanceModel :
                                            ALContext->DistanceModel)
    {
        case InverseDistanceClamped:
            ClampedDist = clampf(ClampedDist, MinDist, MaxDist);
            if(MaxDist < MinDist)
                break;
            /*fall-through*/
        case InverseDistance:
            if(MinDist > 0.0f)
            {
                ALfloat dist = lerp(MinDist, ClampedDist, Rolloff);
                if(dist > 0.0f) Attenuation = MinDist / dist;
                for(i = 0;i < NumSends;i++)
                {
                    dist = lerp(MinDist, ClampedDist, RoomRolloff[i]);
                    if(dist > 0.0f) RoomAttenuation[i] = MinDist / dist;
                }
            }
            break;

        case LinearDistanceClamped:
            ClampedDist = clampf(ClampedDist, MinDist, MaxDist);
            if(MaxDist < MinDist)
                break;
            /*fall-through*/
        case LinearDistance:
            if(MaxDist != MinDist)
            {
                Attenuation = 1.0f - (Rolloff*(ClampedDist-MinDist)/(MaxDist - MinDist));
                Attenuation = maxf(Attenuation, 0.0f);
                for(i = 0;i < NumSends;i++)
                {
                    RoomAttenuation[i] = 1.0f - (RoomRolloff[i]*(ClampedDist-MinDist)/(MaxDist - MinDist));
                    RoomAttenuation[i] = maxf(RoomAttenuation[i], 0.0f);
                }
            }
            break;

        case ExponentDistanceClamped:
            ClampedDist = clampf(ClampedDist, MinDist, MaxDist);
            if(MaxDist < MinDist)
                break;
            /*fall-through*/
        case ExponentDistance:
            if(ClampedDist > 0.0f && MinDist > 0.0f)
            {
                Attenuation = powf(ClampedDist/MinDist, -Rolloff);
                for(i = 0;i < NumSends;i++)
                    RoomAttenuation[i] = powf(ClampedDist/MinDist, -RoomRolloff[i]);
            }
            break;

        case DisableDistance:
            ClampedDist = MinDist;
            break;
    }

    /* Source Gain + Attenuation */
    DryGain = SourceVolume * Attenuation;
    for(i = 0;i < NumSends;i++)
        WetGain[i] = SourceVolume * RoomAttenuation[i];

    /* Distance-based air absorption */
    if(AirAbsorptionFactor > 0.0f && ClampedDist > MinDist)
    {
        ALfloat meters = (ClampedDist-MinDist) * MetersPerUnit;
        DryGainHF *= powf(AIRABSORBGAINHF, AirAbsorptionFactor*meters);
        for(i = 0;i < NumSends;i++)
            WetGainHF[i] *= powf(RoomAirAbsorption[i], AirAbsorptionFactor*meters);
    }

    if(WetGainAuto)
    {
        ALfloat ApparentDist = 1.0f/maxf(Attenuation, 0.00001f) - 1.0f;

        /* Apply a decay-time transformation to the wet path, based on the
         * attenuation of the dry path.
         *
         * Using the apparent distance, based on the distance attenuation, the
         * initial decay of the reverb effect is calculated and applied to the
         * wet path.
         */
        for(i = 0;i < NumSends;i++)
        {
            if(DecayDistance[i] > 0.0f)
                WetGain[i] *= powf(0.001f/*-60dB*/, ApparentDist/DecayDistance[i]);
        }
    }

    /* Calculate directional soundcones */
    Angle = RAD2DEG(acosf(aluDotproduct(&Direction, &SourceToListener)) * ConeScale) * 2.0f;
    if(Angle > InnerAngle && Angle <= OuterAngle)
    {
        ALfloat scale = (Angle-InnerAngle) / (OuterAngle-InnerAngle);
        ConeVolume = lerp(1.0f, ALSource->OuterGain, scale);
        ConeHF = lerp(1.0f, ALSource->OuterGainHF, scale);
    }
    else if(Angle > OuterAngle)
    {
        ConeVolume = ALSource->OuterGain;
        ConeHF = ALSource->OuterGainHF;
    }
    else
    {
        ConeVolume = 1.0f;
        ConeHF = 1.0f;
    }

    DryGain *= ConeVolume;
    if(WetGainAuto)
    {
        for(i = 0;i < NumSends;i++)
            WetGain[i] *= ConeVolume;
    }
    if(DryGainHFAuto)
        DryGainHF *= ConeHF;
    if(WetGainHFAuto)
    {
        for(i = 0;i < NumSends;i++)
            WetGainHF[i] *= ConeHF;
    }

    /* Clamp to Min/Max Gain */
    DryGain = clampf(DryGain, MinVolume, MaxVolume);
    for(i = 0;i < NumSends;i++)
        WetGain[i] = clampf(WetGain[i], MinVolume, MaxVolume);

    /* Apply gain and frequency filters */
    DryGain   *= ALSource->Direct.Gain * ListenerGain;
    DryGainHF *= ALSource->Direct.GainHF;
    DryGainLF *= ALSource->Direct.GainLF;
    for(i = 0;i < NumSends;i++)
    {
        WetGain[i]   *= ALSource->Send[i].Gain * ListenerGain;
        WetGainHF[i] *= ALSource->Send[i].GainHF;
        WetGainLF[i] *= ALSource->Send[i].GainLF;
    }

    /* Calculate velocity-based doppler effect */
    if(DopplerFactor > 0.0f)
    {
        const aluVector *lvelocity = &ALContext->Listener->Params.Velocity;
        ALfloat VSS, VLS;

        if(SpeedOfSound < 1.0f)
        {
            DopplerFactor *= 1.0f/SpeedOfSound;
            SpeedOfSound = 1.0f;
        }

        VSS = aluDotproduct(&Velocity, &SourceToListener) * DopplerFactor;
        VLS = aluDotproduct(lvelocity, &SourceToListener) * DopplerFactor;

        Pitch *= clampf(SpeedOfSound-VLS, 1.0f, SpeedOfSound*2.0f - 1.0f) /
                 clampf(SpeedOfSound-VSS, 1.0f, SpeedOfSound*2.0f - 1.0f);
    }

    BufferListItem = ATOMIC_LOAD(&ALSource->queue);
    while(BufferListItem != NULL)
    {
        ALbuffer *ALBuffer;
        if((ALBuffer=BufferListItem->buffer) != NULL)
        {
            /* Calculate fixed-point stepping value, based on the pitch, buffer
             * frequency, and output frequency. */
            Pitch = Pitch * ALBuffer->Frequency / Frequency;
            if(Pitch > (ALfloat)MAX_PITCH)
                voice->Step = MAX_PITCH<<FRACTIONBITS;
            else
            {
                voice->Step = fastf2i(Pitch*FRACTIONONE);
                if(voice->Step == 0)
                    voice->Step = 1;
            }
            break;
        }
        BufferListItem = BufferListItem->next;
    }

    if(Device->Hrtf_Mode == FullHrtf)
    {
        /* Use a binaural HRTF algorithm for stereo headphone playback */
        aluVector dir = {{ 0.0f, 0.0f, -1.0f, 0.0f }};
        ALfloat ev = 0.0f, az = 0.0f;
        ALfloat radius = ALSource->Radius;
        ALfloat dirfact = 1.0f;

        voice->Direct.OutBuffer += voice->Direct.OutChannels;
        voice->Direct.OutChannels = 2;

        if(Distance > FLT_EPSILON)
        {
            dir.v[0] = -SourceToListener.v[0];
            dir.v[1] = -SourceToListener.v[1];
            dir.v[2] = -SourceToListener.v[2] * ZScale;

            /* Calculate elevation and azimuth only when the source is not at
             * the listener. This prevents +0 and -0 Z from producing
             * inconsistent panning. Also, clamp Y in case FP precision errors
             * cause it to land outside of -1..+1. */
            ev = asinf(clampf(dir.v[1], -1.0f, 1.0f));
            az = atan2f(dir.v[0], -dir.v[2]);
        }
        if(radius > 0.0f)
        {
            if(radius >= Distance)
                dirfact *= Distance / radius * 0.5f;
            else
                dirfact *= 1.0f - (asinf(radius / Distance) / F_PI);
        }

        /* Check to see if the HRIR is already moving. */
        if(voice->Direct.Moving)
        {
            ALfloat delta;
            delta = CalcFadeTime(voice->Direct.LastGain, DryGain,
                                 &voice->Direct.LastDir, &dir);
            /* If the delta is large enough, get the moving HRIR target
             * coefficients, target delays, stepping values, and counter. */
            if(delta > 0.000015f)
            {
                ALuint counter = GetMovingHrtfCoeffs(Device->Hrtf,
                                     ev, az, dirfact, DryGain, delta,
                                     voice->Direct.Counter,
                                     voice->Direct.Hrtf[0].Params.Coeffs,
                                     voice->Direct.Hrtf[0].Params.Delay,
                                     voice->Direct.Hrtf[0].Params.CoeffStep,
                                     voice->Direct.Hrtf[0].Params.DelayStep);
                voice->Direct.Counter = counter;
                voice->Direct.LastGain = DryGain;
                voice->Direct.LastDir = dir;
            }
        }
        else
        {
            /* Get the initial (static) HRIR coefficients and delays. */
            GetLerpedHrtfCoeffs(Device->Hrtf, ev, az, dirfact, DryGain,
                                voice->Direct.Hrtf[0].Params.Coeffs,
                                voice->Direct.Hrtf[0].Params.Delay);
            voice->Direct.Counter = 0;
            voice->Direct.Moving = AL_TRUE;
            voice->Direct.LastGain = DryGain;
            voice->Direct.LastDir = dir;
        }

        voice->IsHrtf = AL_TRUE;
    }
    else
    {
        MixGains *gains = voice->Direct.Gains[0];
        ALfloat dir[3] = { 0.0f, 0.0f, -1.0f };
        ALfloat radius = ALSource->Radius;
        ALfloat Target[MAX_OUTPUT_CHANNELS];

        /* Get the localized direction, and compute panned gains. */
        if(Distance > FLT_EPSILON)
        {
            dir[0] = -SourceToListener.v[0];
            dir[1] = -SourceToListener.v[1];
            dir[2] = -SourceToListener.v[2] * ZScale;
        }
        if(radius > 0.0f)
        {
            ALfloat dirfact;
            if(radius >= Distance)
                dirfact = Distance / radius * 0.5f;
            else
                dirfact = 1.0f - (asinf(radius / Distance) / F_PI);
            dir[0] *= dirfact;
            dir[1] *= dirfact;
            dir[2] *= dirfact;
        }
        ComputeDirectionalGains(Device, dir, DryGain, Target);

        for(j = 0;j < MAX_OUTPUT_CHANNELS;j++)
            gains[j].Target = Target[j];
        UpdateDryStepping(&voice->Direct, 1, (voice->Direct.Moving ? 64 : 0));
        voice->Direct.Moving = AL_TRUE;

        voice->IsHrtf = AL_FALSE;
    }
    for(i = 0;i < NumSends;i++)
    {
        voice->Send[i].Gain.Target = WetGain[i];
        UpdateWetStepping(&voice->Send[i], (voice->Send[i].Moving ? 64 : 0));
        voice->Send[i].Moving = AL_TRUE;
    }

    {
        ALfloat gainhf = maxf(0.01f, DryGainHF);
        ALfloat gainlf = maxf(0.01f, DryGainLF);
        ALfloat hfscale = ALSource->Direct.HFReference / Frequency;
        ALfloat lfscale = ALSource->Direct.LFReference / Frequency;
        voice->Direct.Filters[0].ActiveType = AF_None;
        if(gainhf != 1.0f) voice->Direct.Filters[0].ActiveType |= AF_LowPass;
        if(gainlf != 1.0f) voice->Direct.Filters[0].ActiveType |= AF_HighPass;
        ALfilterState_setParams(
            &voice->Direct.Filters[0].LowPass, ALfilterType_HighShelf, gainhf,
            hfscale, 0.0f
        );
        ALfilterState_setParams(
            &voice->Direct.Filters[0].HighPass, ALfilterType_LowShelf, gainlf,
            lfscale, 0.0f
        );
    }
    for(i = 0;i < NumSends;i++)
    {
        ALfloat gainhf = maxf(0.01f, WetGainHF[i]);
        ALfloat gainlf = maxf(0.01f, WetGainLF[i]);
        ALfloat hfscale = ALSource->Send[i].HFReference / Frequency;
        ALfloat lfscale = ALSource->Send[i].LFReference / Frequency;
        voice->Send[i].Filters[0].ActiveType = AF_None;
        if(gainhf != 1.0f) voice->Send[i].Filters[0].ActiveType |= AF_LowPass;
        if(gainlf != 1.0f) voice->Send[i].Filters[0].ActiveType |= AF_HighPass;
        ALfilterState_setParams(
            &voice->Send[i].Filters[0].LowPass, ALfilterType_HighShelf, gainhf,
            hfscale, 0.0f
        );
        ALfilterState_setParams(
            &voice->Send[i].Filters[0].HighPass, ALfilterType_LowShelf, gainlf,
            lfscale, 0.0f
        );
    }
}
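/* Note on voice->Step (set above and again in CalcNonAttnSourceParams below):
 * the resampler increment is stored in fixed point with FRACTIONBITS
 * fractional bits, FRACTIONONE presumably being 1<<FRACTIONBITS.  A step of
 * FRACTIONONE advances the source by exactly one buffer sample per output
 * sample, so the effective rate is pitch * BufferFreq / DeviceFreq, capped at
 * MAX_PITCH and floored to 1 so playback always makes forward progress.
 */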
ALvoid CalcNonAttnSourceParams(ALvoice *voice, const ALsource *ALSource, const ALCcontext *ALContext)
{
    static const struct ChanMap MonoMap[1] = {
        { FrontCenter, 0.0f, 0.0f }
    }, StereoMap[2] = {
        { FrontLeft,  DEG2RAD(-30.0f), DEG2RAD(0.0f) },
        { FrontRight, DEG2RAD( 30.0f), DEG2RAD(0.0f) }
    }, StereoWideMap[2] = {
        { FrontLeft,  DEG2RAD(-90.0f), DEG2RAD(0.0f) },
        { FrontRight, DEG2RAD( 90.0f), DEG2RAD(0.0f) }
    }, RearMap[2] = {
        { BackLeft,  DEG2RAD(-150.0f), DEG2RAD(0.0f) },
        { BackRight, DEG2RAD( 150.0f), DEG2RAD(0.0f) }
    }, QuadMap[4] = {
        { FrontLeft,  DEG2RAD( -45.0f), DEG2RAD(0.0f) },
        { FrontRight, DEG2RAD(  45.0f), DEG2RAD(0.0f) },
        { BackLeft,   DEG2RAD(-135.0f), DEG2RAD(0.0f) },
        { BackRight,  DEG2RAD( 135.0f), DEG2RAD(0.0f) }
    }, X51Map[6] = {
        { FrontLeft,   DEG2RAD( -30.0f), DEG2RAD(0.0f) },
        { FrontRight,  DEG2RAD(  30.0f), DEG2RAD(0.0f) },
        { FrontCenter, DEG2RAD(   0.0f), DEG2RAD(0.0f) },
        { LFE, 0.0f, 0.0f },
        { SideLeft,    DEG2RAD(-110.0f), DEG2RAD(0.0f) },
        { SideRight,   DEG2RAD( 110.0f), DEG2RAD(0.0f) }
    }, X61Map[7] = {
        { FrontLeft,   DEG2RAD(-30.0f), DEG2RAD(0.0f) },
        { FrontRight,  DEG2RAD( 30.0f), DEG2RAD(0.0f) },
        { FrontCenter, DEG2RAD(  0.0f), DEG2RAD(0.0f) },
        { LFE, 0.0f, 0.0f },
        { BackCenter,  DEG2RAD(180.0f), DEG2RAD(0.0f) },
        { SideLeft,    DEG2RAD(-90.0f), DEG2RAD(0.0f) },
        { SideRight,   DEG2RAD( 90.0f), DEG2RAD(0.0f) }
    }, X71Map[8] = {
        { FrontLeft,   DEG2RAD( -30.0f), DEG2RAD(0.0f) },
        { FrontRight,  DEG2RAD(  30.0f), DEG2RAD(0.0f) },
        { FrontCenter, DEG2RAD(   0.0f), DEG2RAD(0.0f) },
        { LFE, 0.0f, 0.0f },
        { BackLeft,    DEG2RAD(-150.0f), DEG2RAD(0.0f) },
        { BackRight,   DEG2RAD( 150.0f), DEG2RAD(0.0f) },
        { SideLeft,    DEG2RAD( -90.0f), DEG2RAD(0.0f) },
        { SideRight,   DEG2RAD(  90.0f), DEG2RAD(0.0f) }
    };

    ALCdevice *Device = ALContext->Device;
    ALfloat SourceVolume,ListenerGain,MinVolume,MaxVolume;
    ALbufferlistitem *BufferListItem;
    enum FmtChannels Channels;
    ALfloat DryGain, DryGainHF, DryGainLF;
    ALfloat WetGain[MAX_SENDS];
    ALfloat WetGainHF[MAX_SENDS];
    ALfloat WetGainLF[MAX_SENDS];
    ALuint NumSends, Frequency;
    ALboolean Relative;
    const struct ChanMap *chans = NULL;
    ALuint num_channels = 0;
    ALboolean DirectChannels;
    ALboolean isbformat = AL_FALSE;
    ALfloat Pitch;
    ALuint i, j, c;

    /* Get device properties */
    NumSends  = Device->NumAuxSends;
    Frequency = Device->Frequency;

    /* Get listener properties */
    ListenerGain = ALContext->Listener->Gain;

    /* Get source properties */
    SourceVolume   = ALSource->Gain;
    MinVolume      = ALSource->MinGain;
    MaxVolume      = ALSource->MaxGain;
    Pitch          = ALSource->Pitch;
    Relative       = ALSource->HeadRelative;
    DirectChannels = ALSource->DirectChannels;

    voice->Direct.OutBuffer = Device->DryBuffer;
    voice->Direct.OutChannels = Device->NumChannels;
    for(i = 0;i < NumSends;i++)
    {
        ALeffectslot *Slot = ALSource->Send[i].Slot;
        if(!Slot && i == 0)
            Slot = Device->DefaultSlot;
        if(!Slot || Slot->EffectType == AL_EFFECT_NULL)
            voice->Send[i].OutBuffer = NULL;
        else
            voice->Send[i].OutBuffer = Slot->WetBuffer;
    }

    /* Calculate the stepping value */
    Channels = FmtMono;
    BufferListItem = ATOMIC_LOAD(&ALSource->queue);
    while(BufferListItem != NULL)
    {
        ALbuffer *ALBuffer;
        if((ALBuffer=BufferListItem->buffer) != NULL)
        {
            Pitch = Pitch * ALBuffer->Frequency / Frequency;
            if(Pitch > (ALfloat)MAX_PITCH)
                voice->Step = MAX_PITCH<<FRACTIONBITS;
            else
            {
                voice->Step = fastf2i(Pitch*FRACTIONONE);
                if(voice->Step == 0)
                    voice->Step = 1;
            }

            Channels = ALBuffer->FmtChannels;
            break;
        }
        BufferListItem = BufferListItem->next;
    }

    /* Calculate gains */
    DryGain  = clampf(SourceVolume, MinVolume, MaxVolume);
    DryGain *= ALSource->Direct.Gain * ListenerGain;
    DryGainHF = ALSource->Direct.GainHF;
    DryGainLF = ALSource->Direct.GainLF;
    for(i = 0;i < NumSends;i++)
    {
        WetGain[i]  = clampf(SourceVolume, MinVolume, MaxVolume);
        WetGain[i] *= ALSource->Send[i].Gain * ListenerGain;
        WetGainHF[i] = ALSource->Send[i].GainHF;
        WetGainLF[i] = ALSource->Send[i].GainLF;
    }

    switch(Channels)
    {
    case FmtMono:
        chans = MonoMap;
        num_channels = 1;
        break;

    case FmtStereo:
        /* HACK: Place the stereo channels at +/-90 degrees when using non-
         * HRTF stereo output. This helps reduce the "monoization" caused
         * by them panning towards the center. */
        if(Device->FmtChans == DevFmtStereo && !Device->Hrtf)
            chans = StereoWideMap;
        else
            chans = StereoMap;
        num_channels = 2;
        break;

    case FmtRear:
        chans = RearMap;
        num_channels = 2;
        break;

    case FmtQuad:
        chans = QuadMap;
        num_channels = 4;
        break;

    case FmtX51:
        chans = X51Map;
        num_channels = 6;
        break;

    case FmtX61:
        chans = X61Map;
        num_channels = 7;
        break;

    case FmtX71:
        chans = X71Map;
        num_channels = 8;
        break;

    case FmtBFormat2D:
        num_channels = 3;
        isbformat = AL_TRUE;
        DirectChannels = AL_FALSE;
        break;

    case FmtBFormat3D:
        num_channels = 4;
        isbformat = AL_TRUE;
        DirectChannels = AL_FALSE;
        break;
    }

    if(isbformat)
    {
        ALfloat N[3], V[3], U[3];
        aluMatrix matrix;

        /* AT then UP */
        N[0] = ALSource->Orientation[0][0];
        N[1] = ALSource->Orientation[0][1];
        N[2] = ALSource->Orientation[0][2];
        aluNormalize(N);
        V[0] = ALSource->Orientation[1][0];
        V[1] = ALSource->Orientation[1][1];
        V[2] = ALSource->Orientation[1][2];
        aluNormalize(V);
        if(!Relative)
        {
            const aluMatrix *lmatrix = &ALContext->Listener->Params.Matrix;
            aluVector at, up;
            aluVectorSet(&at, N[0], N[1], N[2], 0.0f);
            aluVectorSet(&up, V[0], V[1], V[2], 0.0f);
            aluMatrixVector(&at, lmatrix);
            aluMatrixVector(&up, lmatrix);
            N[0] = at.v[0]; N[1] = at.v[1]; N[2] = at.v[2];
            V[0] = up.v[0]; V[1] = up.v[1]; V[2] = up.v[2];
        }
        /* Build and normalize right-vector */
        aluCrossproduct(N, V, U);
        aluNormalize(U);

        aluMatrixSet(&matrix,
            1.0f,  0.0f,  0.0f,  0.0f,
            0.0f, -N[2], -N[0],  N[1],
            0.0f,  U[2],  U[0], -U[1],
            0.0f, -V[2], -V[0],  V[1]
        );

        for(c = 0;c < num_channels;c++)
        {
            MixGains *gains = voice->Direct.Gains[c];
            ALfloat Target[MAX_OUTPUT_CHANNELS];

            ComputeBFormatGains(Device, matrix.m[c], DryGain, Target);
            for(i = 0;i < MAX_OUTPUT_CHANNELS;i++)
                gains[i].Target = Target[i];
        }
        UpdateDryStepping(&voice->Direct, num_channels, (voice->Direct.Moving ? 64 : 0));
        voice->Direct.Moving = AL_TRUE;

        voice->IsHrtf = AL_FALSE;
        for(i = 0;i < NumSends;i++)
            WetGain[i] *= 1.4142f;
    }
    else if(DirectChannels != AL_FALSE)
    {
        if(Device->Hrtf)
        {
            voice->Direct.OutBuffer += voice->Direct.OutChannels;
            voice->Direct.OutChannels = 2;
            for(c = 0;c < num_channels;c++)
            {
                MixGains *gains = voice->Direct.Gains[c];

                for(j = 0;j < MAX_OUTPUT_CHANNELS;j++)
                    gains[j].Target = 0.0f;

                if(chans[c].channel == FrontLeft)
                    gains[0].Target = DryGain;
                else if(chans[c].channel == FrontRight)
                    gains[1].Target = DryGain;
            }
        }
        else for(c = 0;c < num_channels;c++)
        {
            MixGains *gains = voice->Direct.Gains[c];
            int idx;

            for(j = 0;j < MAX_OUTPUT_CHANNELS;j++)
                gains[j].Target = 0.0f;
            if((idx=GetChannelIdxByName(Device, chans[c].channel)) != -1)
                gains[idx].Target = DryGain;
        }
        UpdateDryStepping(&voice->Direct, num_channels, (voice->Direct.Moving ? 64 : 0));
        voice->Direct.Moving = AL_TRUE;

        voice->IsHrtf = AL_FALSE;
    }
    else if(Device->Hrtf_Mode == FullHrtf)
    {
        voice->Direct.OutBuffer += voice->Direct.OutChannels;
        voice->Direct.OutChannels = 2;
        for(c = 0;c < num_channels;c++)
        {
            if(chans[c].channel == LFE)
            {
                /* Skip LFE */
                voice->Direct.Hrtf[c].Params.Delay[0] = 0;
                voice->Direct.Hrtf[c].Params.Delay[1] = 0;
                for(i = 0;i < HRIR_LENGTH;i++)
                {
                    voice->Direct.Hrtf[c].Params.Coeffs[i][0] = 0.0f;
                    voice->Direct.Hrtf[c].Params.Coeffs[i][1] = 0.0f;
                }
            }
            else
            {
                /* Get the static HRIR coefficients and delays for this
                 * channel. */
                GetLerpedHrtfCoeffs(Device->Hrtf,
                                    chans[c].elevation, chans[c].angle, 1.0f, DryGain,
                                    voice->Direct.Hrtf[c].Params.Coeffs,
                                    voice->Direct.Hrtf[c].Params.Delay);
            }
        }
        voice->Direct.Counter = 0;
        voice->Direct.Moving = AL_TRUE;

        voice->IsHrtf = AL_TRUE;
    }
    else
    {
        for(c = 0;c < num_channels;c++)
        {
            MixGains *gains = voice->Direct.Gains[c];
            ALfloat Target[MAX_OUTPUT_CHANNELS];

            /* Special-case LFE */
            if(chans[c].channel == LFE)
            {
                int idx;
                for(i = 0;i < MAX_OUTPUT_CHANNELS;i++)
                    gains[i].Target = 0.0f;
                if((idx=GetChannelIdxByName(Device, chans[c].channel)) != -1)
                    gains[idx].Target = DryGain;
                continue;
            }

            ComputeAngleGains(Device, chans[c].angle, chans[c].elevation, DryGain, Target);
            for(i = 0;i < MAX_OUTPUT_CHANNELS;i++)
                gains[i].Target = Target[i];
        }
        UpdateDryStepping(&voice->Direct, num_channels, (voice->Direct.Moving ? 64 : 0));
        voice->Direct.Moving = AL_TRUE;

        voice->IsHrtf = AL_FALSE;
    }

    for(i = 0;i < NumSends;i++)
    {
        voice->Send[i].Gain.Target = WetGain[i];
        UpdateWetStepping(&voice->Send[i], (voice->Send[i].Moving ? 64 : 0));
        voice->Send[i].Moving = AL_TRUE;
    }

    {
        ALfloat gainhf = maxf(0.01f, DryGainHF);
        ALfloat gainlf = maxf(0.01f, DryGainLF);
        ALfloat hfscale = ALSource->Direct.HFReference / Frequency;
        ALfloat lfscale = ALSource->Direct.LFReference / Frequency;
        for(c = 0;c < num_channels;c++)
        {
            voice->Direct.Filters[c].ActiveType = AF_None;
            if(gainhf != 1.0f) voice->Direct.Filters[c].ActiveType |= AF_LowPass;
            if(gainlf != 1.0f) voice->Direct.Filters[c].ActiveType |= AF_HighPass;
            ALfilterState_setParams(
                &voice->Direct.Filters[c].LowPass, ALfilterType_HighShelf, gainhf,
                hfscale, 0.0f
            );
            ALfilterState_setParams(
                &voice->Direct.Filters[c].HighPass, ALfilterType_LowShelf, gainlf,
                lfscale, 0.0f
            );
        }
    }
    for(i = 0;i < NumSends;i++)
    {
        ALfloat gainhf = maxf(0.01f, WetGainHF[i]);
        ALfloat gainlf = maxf(0.01f, WetGainLF[i]);
        ALfloat hfscale = ALSource->Send[i].HFReference / Frequency;
        ALfloat lfscale = ALSource->Send[i].LFReference / Frequency;
        for(c = 0;c < num_channels;c++)
        {
            voice->Send[i].Filters[c].ActiveType = AF_None;
            if(gainhf != 1.0f) voice->Send[i].Filters[c].ActiveType |= AF_LowPass;
            if(gainlf != 1.0f) voice->Send[i].Filters[c].ActiveType |= AF_HighPass;
            ALfilterState_setParams(
                &voice->Send[i].Filters[c].LowPass, ALfilterType_HighShelf, gainhf,
                hfscale, 0.0f
            );
            ALfilterState_setParams(
                &voice->Send[i].Filters[c].HighPass, ALfilterType_LowShelf, gainlf,
                lfscale, 0.0f
            );
        }
    }
}