// Per-frame game update: checks exit conditions, advances the scrolling
// background and its four water-animation layers, then updates the level.
//
// delta: frame time step in ms (currently unused by this body).
//
// Fix: removed three locals (Spritesheet*, Texture*, Animation*) that were
// declared but never used, plus a dead commented-out debug dump.
void FrogApp::Update(uint32_t delta)
{
    // Quit on ESC, or when the player has run out of health.
    if (keyboard[SDL_SCANCODE_ESCAPE])
        Stop();
    if (player->GetHealth() == 0)
        Stop();

    // update background and tilemap
    UpdateBackground(1);
    layer1wanim.Step();
    layer2wanim.Step();
    layer3wanim.Step();
    layer4wanim.Step();
    UpdateLevel();
}
// Scrolls the camera horizontally by |Length| pixels.
//
// Returns 0 when the absolute camera position has reached the end of the
// current level (no further scrolling possible), 1 otherwise.
//
// Relies on file/global state: Abs_Camera (absolute position in the level),
// Camera (position within the working buffer), buffer, LevelSprite,
// CurrentLevel.
int MoveCamera(int Length)
{
    Level *level;
    level = GetCurrentLevel(CurrentLevel);
    // Right edge of the level, in pixels, is LevelSprite->w * level->length.
    if(Abs_Camera.x + Camera.w >= LevelSprite->w * level->length)return 0;//we are done already.
    Abs_Camera.x += Length;
    Camera.x += Length;
    // Once we have scrolled a full camera-width past the buffer origin,
    // shift the buffer contents back and stream in the next level slice.
    if(Camera.x > Camera.w)
    {
        // NOTE(review): source and destination surface are the same here;
        // SDL_BlitSurface on overlapping regions of one surface is not
        // guaranteed to be safe — confirm this works on the target SDL build.
        SDL_BlitSurface(buffer,&Camera,buffer,NULL);
        UpdateLevel(level);
        Camera.x = 0;
    }
    return 1;
}
// Advances every managed obstacle by deltaTime, resolving edge collisions
// and removing obstacles whose position has become invalid, then updates
// the level itself.
void sv::ObstacleManager::Update(ulong deltaTime)
{
    // Advance the iterator *before* touching the obstacle, so that a
    // DeleteObstacle() on the current element cannot invalidate our position.
    for (Iterator it = First(); it; )
    {
        Obstacle* current = Get(it);
        it = Next(it);

        current->Update(deltaTime);
        if (current->IsEdge())
            HandleCollision(current);
        if (current->GetPos() == Grid::InvalidPos)
            DeleteObstacle(current);
    }

    UpdateLevel(deltaTime);
}
// Executes the player-upgrade database write for the notification packet
// carried in m_pUserData, recording the outcome in m_bSuccess.
//
// poDBConn: open database connection supplied by the DB layer.
//
// Fix: the original `if (FALSE == UpdateLevel(...)) m_bSuccess = FALSE;
// else m_bSuccess = TRUE;` was redundant — m_bSuccess simply mirrors the
// UpdateLevel() result, so it is collapsed to one assignment.
VOID CPlayUpgrateCMD::OnExecuteSql(SGDP::ISDDBConnection *poDBConn)
{
    m_bSuccess = FALSE;

    PKT_GSLS_PLAYER_UPGRADE_NTF* pstNtf =
        (PKT_GSLS_PLAYER_UPGRADE_NTF*)m_pUserData;
    if (NULL == pstNtf)
        return;  // No packet attached; leave m_bSuccess = FALSE.

    // Success mirrors the result of the level update in the DB.
    m_bSuccess = UpdateLevel(poDBConn, pstNtf->dwPlayerID, pstNtf->wLevel)
                     ? TRUE : FALSE;
}
// Processes one PART_LEN-sample block of the acoustic echo canceller:
// transforms far- and near-end signals to the frequency domain (Ooura FFT),
// tracks power and noise estimates, runs the partitioned-block adaptive
// filter to obtain an echo estimate, forms the error signal, adapts the
// filter, and hands the error to the non-linear processor.
//
// aec      - canceller state (buffers, filter taps, counters); mutated.
// farend   - far-end (render/loudspeaker) block, PART_LEN shorts.
// nearend  - near-end (capture/microphone) block, PART_LEN shorts.
// nearendH - near-end high band; only read when sampFreq == 32000.
// output   - echo-suppressed output block, PART_LEN shorts; written.
// outputH  - high-band output; written for 32 kHz operation.
static void ProcessBlock(aec_t *aec, const short *farend,
                         const short *nearend, const short *nearendH,
                         short *output, short *outputH)
{
    int i;
    float d[PART_LEN], y[PART_LEN], e[PART_LEN], dH[PART_LEN];
    short eInt16[PART_LEN];
    float scale;

    float fft[PART_LEN2];
    float xf[2][PART_LEN1], yf[2][PART_LEN1], ef[2][PART_LEN1];
    complex_t df[PART_LEN1];
    int ip[IP_LEN];
    float wfft[W_LEN];

    // Smoothing weights (old, new) for the running power estimates.
    const float gPow[2] = {0.9f, 0.1f};

    // Noise estimate constants.
    const int noiseInitBlocks = 500 * aec->mult;
    const float step = 0.1f;
    const float ramp = 1.0002f;
    const float gInitNoise[2] = {0.999f, 0.001f};

#ifdef AEC_DEBUG
    fwrite(farend, sizeof(short), PART_LEN, aec->farFile);
    fwrite(nearend, sizeof(short), PART_LEN, aec->nearFile);
#endif

    memset(dH, 0, sizeof(dH));

    // ---------- Ooura fft ----------
    // Concatenate old and new farend blocks.
    for (i = 0; i < PART_LEN; i++) {
        aec->xBuf[i + PART_LEN] = (float)farend[i];
        d[i] = (float)nearend[i];
    }

    if (aec->sampFreq == 32000) {
        for (i = 0; i < PART_LEN; i++) {
            dH[i] = (float)nearendH[i];
        }
    }

    memcpy(fft, aec->xBuf, sizeof(float) * PART_LEN2);
    memcpy(aec->dBuf + PART_LEN, d, sizeof(float) * PART_LEN);
    // For H band
    if (aec->sampFreq == 32000) {
        memcpy(aec->dBufH + PART_LEN, dH, sizeof(float) * PART_LEN);
    }

    // Setting this on the first call initializes work arrays.
    ip[0] = 0;
    aec_rdft_128(1, fft, ip, wfft);

    // Far fft: unpack Ooura's packed real-FFT layout (DC in fft[0],
    // Nyquist in fft[1], then interleaved re/im pairs) into xf[re/im][bin].
    xf[1][0] = 0;
    xf[1][PART_LEN] = 0;
    xf[0][0] = fft[0];
    xf[0][PART_LEN] = fft[1];

    for (i = 1; i < PART_LEN; i++) {
        xf[0][i] = fft[2 * i];
        xf[1][i] = fft[2 * i + 1];
    }

    // Near fft (same unpacking into the complex_t array df).
    memcpy(fft, aec->dBuf, sizeof(float) * PART_LEN2);
    aec_rdft_128(1, fft, ip, wfft);
    df[0][1] = 0;
    df[PART_LEN][1] = 0;
    df[0][0] = fft[0];
    df[PART_LEN][0] = fft[1];

    for (i = 1; i < PART_LEN; i++) {
        df[i][0] = fft[2 * i];
        df[i][1] = fft[2 * i + 1];
    }

    // Power smoothing: exponential averages of far and near PSDs.
    for (i = 0; i < PART_LEN1; i++) {
        aec->xPow[i] = gPow[0] * aec->xPow[i] + gPow[1] * NR_PART *
            (xf[0][i] * xf[0][i] + xf[1][i] * xf[1][i]);
        aec->dPow[i] = gPow[0] * aec->dPow[i] + gPow[1] *
            (df[i][0] * df[i][0] + df[i][1] * df[i][1]);
    }

    // Estimate noise power. Wait until dPow is more stable.
    if (aec->noiseEstCtr > 50) {
        for (i = 0; i < PART_LEN1; i++) {
            // Minimum-statistics style tracker: follow dips quickly,
            // drift upward slowly via the |ramp| factor.
            if (aec->dPow[i] < aec->dMinPow[i]) {
                aec->dMinPow[i] = (aec->dPow[i] + step * (aec->dMinPow[i] -
                    aec->dPow[i])) * ramp;
            }
            else {
                aec->dMinPow[i] *= ramp;
            }
        }
    }

    // Smooth increasing noise power from zero at the start,
    // to avoid a sudden burst of comfort noise.
    if (aec->noiseEstCtr < noiseInitBlocks) {
        aec->noiseEstCtr++;
        for (i = 0; i < PART_LEN1; i++) {
            if (aec->dMinPow[i] > aec->dInitMinPow[i]) {
                aec->dInitMinPow[i] = gInitNoise[0] * aec->dInitMinPow[i] +
                    gInitNoise[1] * aec->dMinPow[i];
            }
            else {
                aec->dInitMinPow[i] = aec->dMinPow[i];
            }
        }
        aec->noisePow = aec->dInitMinPow;
    }
    else {
        aec->noisePow = aec->dMinPow;
    }

    // Update the xfBuf block position (circular buffer of far spectra,
    // newest partition first).
    aec->xfBufBlockPos--;
    if (aec->xfBufBlockPos == -1) {
        aec->xfBufBlockPos = NR_PART - 1;
    }

    // Buffer xf
    memcpy(aec->xfBuf[0] + aec->xfBufBlockPos * PART_LEN1, xf[0],
           sizeof(float) * PART_LEN1);
    memcpy(aec->xfBuf[1] + aec->xfBufBlockPos * PART_LEN1, xf[1],
           sizeof(float) * PART_LEN1);

    // Clears both the real and imaginary planes of yf (they are contiguous).
    memset(yf[0], 0, sizeof(float) * (PART_LEN1 * 2));

    // Filter far: accumulate filter taps * far spectra into yf.
    WebRtcAec_FilterFar(aec, yf);

    // Inverse fft to obtain echo estimate and error.
    fft[0] = yf[0][0];
    fft[1] = yf[0][PART_LEN];
    for (i = 1; i < PART_LEN; i++) {
        fft[2 * i] = yf[0][i];
        fft[2 * i + 1] = yf[1][i];
    }
    aec_rdft_128(-1, fft, ip, wfft);

    scale = 2.0f / PART_LEN2;
    for (i = 0; i < PART_LEN; i++) {
        // Second half of the inverse transform holds the usable samples.
        y[i] = fft[PART_LEN + i] * scale; // fft scaling
    }

    // Error = near-end minus echo estimate.
    for (i = 0; i < PART_LEN; i++) {
        e[i] = d[i] - y[i];
    }

    // Error fft (zero-padded in the first half).
    memcpy(aec->eBuf + PART_LEN, e, sizeof(float) * PART_LEN);
    memset(fft, 0, sizeof(float) * PART_LEN);
    memcpy(fft + PART_LEN, e, sizeof(float) * PART_LEN);
    aec_rdft_128(1, fft, ip, wfft);

    ef[1][0] = 0;
    ef[1][PART_LEN] = 0;
    ef[0][0] = fft[0];
    ef[0][PART_LEN] = fft[1];
    for (i = 1; i < PART_LEN; i++) {
        ef[0][i] = fft[2 * i];
        ef[1][i] = fft[2 * i + 1];
    }

    // Scale error signal inversely with far power.
    WebRtcAec_ScaleErrorSignal(aec, ef);
#ifdef G167
    if (aec->adaptToggle) {
#endif
        // Filter adaptation
        WebRtcAec_FilterAdaptation(aec, fft, ef, ip, wfft);
#ifdef G167
    }
#endif

    NonLinearProcessing(aec, ip, wfft, output, outputH);

#if defined(AEC_DEBUG) || defined(G167)
    // Saturate the linear error to 16-bit for debug output / NLP bypass.
    for (i = 0; i < PART_LEN; i++) {
        eInt16[i] = (short)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, e[i],
            WEBRTC_SPL_WORD16_MIN);
    }
#endif
#ifdef G167
    if (aec->nlpToggle == 0) {
        // NLP disabled: emit the raw linear-filter error instead.
        memcpy(output, eInt16, sizeof(eInt16));
    }
#endif

    if (aec->metricsMode == 1) {
        for (i = 0; i < PART_LEN; i++) {
            eInt16[i] = (short)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, e[i],
                WEBRTC_SPL_WORD16_MIN);
        }

        // Update power levels and echo metrics
        UpdateLevel(&aec->farlevel, farend);
        UpdateLevel(&aec->nearlevel, nearend);
        UpdateLevel(&aec->linoutlevel, eInt16);
        UpdateLevel(&aec->nlpoutlevel, output);
        UpdateMetrics(aec);
    }

#ifdef AEC_DEBUG
    fwrite(eInt16, sizeof(short), PART_LEN, aec->outLpFile);
    fwrite(output, sizeof(short), PART_LEN, aec->outFile);
#endif
}
// Ticks this latent level-streaming action: advances the streaming request
// one step and, once it reports completion, fires the latent callback
// described by LatentInfo.
void FStreamLevelAction::UpdateOperation(FLatentResponse& Response)
{
    ULevelStreaming* StreamingLevel = Level; // to avoid confusion.
    const bool bFinished = UpdateLevel(StreamingLevel);

    Response.FinishAndTriggerIf(bFinished,
                                LatentInfo.ExecutionFunction,
                                LatentInfo.Linkage,
                                LatentInfo.CallbackTarget);
}
// Shifts this node's level by a relative offset and refreshes dependent state.
void SceneNode::MoveLevel(int level)
{
    m_level = m_level + level;
    UpdateLevel();
}
// Assigns this node an absolute level and refreshes dependent state.
void SceneNode::SetLevel(int level)
{
    m_level = level;
    UpdateLevel();
}
// Non-linear processing (echo suppression) stage of the AEC. Estimates the
// coherence between near-end, error and far-end spectra, derives a per-bin
// suppression gain hNl, applies overdriven suppression plus comfort noise,
// and overlap-adds the result back to the time domain.
//
// aec     - canceller state (PSD trackers, suppression state); mutated.
// output  - suppressed low-band output, PART_LEN shorts; written.
// outputH - suppressed high-band output (32 kHz only); written.
static void NonLinearProcessing(aec_t *aec, short *output, short *outputH)
{
    float efw[2][PART_LEN1], dfw[2][PART_LEN1], xfw[2][PART_LEN1];
    complex_t comfortNoiseHband[PART_LEN1];
    float fft[PART_LEN2];
    float scale, dtmp;
    float nlpGainHband;
    int i, j, pos;

    // Coherence and non-linear filter
    float cohde[PART_LEN1], cohxd[PART_LEN1];
    float hNlDeAvg, hNlXdAvg;
    float hNl[PART_LEN1];
    float hNlPref[PREF_BAND_SIZE];
    float hNlFb = 0, hNlFbLow = 0;
    const float prefBandQuant = 0.75f, prefBandQuantLow = 0.5f;
    const int prefBandSize = PREF_BAND_SIZE / aec->mult;
    const int minPrefBand = 4 / aec->mult;

    // Near and error power sums
    float sdSum = 0, seSum = 0;

    // Power estimate smoothing coefficients
    const float gCoh[2][2] = {{0.9f, 0.1f}, {0.93f, 0.07f}};
    const float *ptrGCoh = gCoh[aec->mult - 1];

    // Filter energy
    float wfEnMax = 0, wfEn = 0;
    const int delayEstInterval = 10 * aec->mult;

    float* xfw_ptr = NULL;

    aec->delayEstCtr++;
    if (aec->delayEstCtr == delayEstInterval) {
        aec->delayEstCtr = 0;
    }

    // initialize comfort noise for H band
    memset(comfortNoiseHband, 0, sizeof(comfortNoiseHband));
    nlpGainHband = (float)0.0;
    dtmp = (float)0.0;

    // Measure energy in each filter partition to determine delay.
    // The partition with the most filter energy is taken as the echo delay.
    // TODO: Spread by computing one partition per block?
    if (aec->delayEstCtr == 0) {
        wfEnMax = 0;
        aec->delayIdx = 0;
        for (i = 0; i < NR_PART; i++) {
            pos = i * PART_LEN1;
            wfEn = 0;
            for (j = 0; j < PART_LEN1; j++) {
                wfEn += aec->wfBuf[0][pos + j] * aec->wfBuf[0][pos + j] +
                    aec->wfBuf[1][pos + j] * aec->wfBuf[1][pos + j];
            }
            if (wfEn > wfEnMax) {
                wfEnMax = wfEn;
                aec->delayIdx = i;
            }
        }
    }

    // We should always have at least one element stored in |far_buf|.
    assert(WebRtc_available_read(aec->far_buf_windowed) > 0);
    // NLP
    WebRtc_ReadBuffer(aec->far_buf_windowed, (void**) &xfw_ptr, &xfw[0][0], 1);

    // TODO(bjornv): Investigate if we can reuse |far_buf_windowed| instead of
    // |xfwBuf|.
    // Buffer far.
    memcpy(aec->xfwBuf, xfw_ptr, sizeof(float) * 2 * PART_LEN1);

    // Use delayed far: pick the far spectrum |delayIdx| partitions back.
    memcpy(xfw, aec->xfwBuf + aec->delayIdx * PART_LEN1, sizeof(xfw));

    // Windowed near fft (sqrt-Hanning analysis window).
    for (i = 0; i < PART_LEN; i++) {
        fft[i] = aec->dBuf[i] * sqrtHanning[i];
        fft[PART_LEN + i] = aec->dBuf[PART_LEN + i] * sqrtHanning[PART_LEN - i];
    }
    aec_rdft_forward_128(fft);

    dfw[1][0] = 0;
    dfw[1][PART_LEN] = 0;
    dfw[0][0] = fft[0];
    dfw[0][PART_LEN] = fft[1];
    for (i = 1; i < PART_LEN; i++) {
        dfw[0][i] = fft[2 * i];
        dfw[1][i] = fft[2 * i + 1];
    }

    // Windowed error fft
    for (i = 0; i < PART_LEN; i++) {
        fft[i] = aec->eBuf[i] * sqrtHanning[i];
        fft[PART_LEN + i] = aec->eBuf[PART_LEN + i] * sqrtHanning[PART_LEN - i];
    }
    aec_rdft_forward_128(fft);
    efw[1][0] = 0;
    efw[1][PART_LEN] = 0;
    efw[0][0] = fft[0];
    efw[0][PART_LEN] = fft[1];
    for (i = 1; i < PART_LEN; i++) {
        efw[0][i] = fft[2 * i];
        efw[1][i] = fft[2 * i + 1];
    }

    // Smoothed PSD estimates: sd (near), se (error), sx (far), and the
    // cross spectra sde (near*error) and sxd (near*far).
    for (i = 0; i < PART_LEN1; i++) {
        aec->sd[i] = ptrGCoh[0] * aec->sd[i] + ptrGCoh[1] *
            (dfw[0][i] * dfw[0][i] + dfw[1][i] * dfw[1][i]);
        aec->se[i] = ptrGCoh[0] * aec->se[i] + ptrGCoh[1] *
            (efw[0][i] * efw[0][i] + efw[1][i] * efw[1][i]);
        // We threshold here to protect against the ill-effects of a zero farend.
        // The threshold is not arbitrarily chosen, but balances protection and
        // adverse interaction with the algorithm's tuning.
        // TODO: investigate further why this is so sensitive.
        aec->sx[i] = ptrGCoh[0] * aec->sx[i] + ptrGCoh[1] *
            WEBRTC_SPL_MAX(xfw[0][i] * xfw[0][i] + xfw[1][i] * xfw[1][i], 15);

        aec->sde[i][0] = ptrGCoh[0] * aec->sde[i][0] + ptrGCoh[1] *
            (dfw[0][i] * efw[0][i] + dfw[1][i] * efw[1][i]);
        aec->sde[i][1] = ptrGCoh[0] * aec->sde[i][1] + ptrGCoh[1] *
            (dfw[0][i] * efw[1][i] - dfw[1][i] * efw[0][i]);

        aec->sxd[i][0] = ptrGCoh[0] * aec->sxd[i][0] + ptrGCoh[1] *
            (dfw[0][i] * xfw[0][i] + dfw[1][i] * xfw[1][i]);
        aec->sxd[i][1] = ptrGCoh[0] * aec->sxd[i][1] + ptrGCoh[1] *
            (dfw[0][i] * xfw[1][i] - dfw[1][i] * xfw[0][i]);

        sdSum += aec->sd[i];
        seSum += aec->se[i];
    }

    // Divergent filter safeguard: if the error carries more energy than the
    // near-end, the adaptive filter has diverged — fall back to near-end.
    if (aec->divergeState == 0) {
        if (seSum > sdSum) {
            aec->divergeState = 1;
        }
    }
    else {
        if (seSum * 1.05f < sdSum) {
            aec->divergeState = 0;
        }
    }

    if (aec->divergeState == 1) {
        memcpy(efw, dfw, sizeof(efw));
    }

    // Reset if error is significantly larger than nearend (13 dB).
    if (seSum > (19.95f * sdSum)) {
        memset(aec->wfBuf, 0, sizeof(aec->wfBuf));
    }

    // Subband coherence: cohde = coherence(near, error),
    // cohxd = coherence(far, near). 1e-10f guards against divide-by-zero.
    for (i = 0; i < PART_LEN1; i++) {
        cohde[i] = (aec->sde[i][0] * aec->sde[i][0] + aec->sde[i][1] * aec->sde[i][1]) /
            (aec->sd[i] * aec->se[i] + 1e-10f);
        cohxd[i] = (aec->sxd[i][0] * aec->sxd[i][0] + aec->sxd[i][1] * aec->sxd[i][1]) /
            (aec->sx[i] * aec->sd[i] + 1e-10f);
    }

    // Average (1 - cohxd) over the preferred band: high when far and near
    // are uncorrelated (no echo).
    hNlXdAvg = 0;
    for (i = minPrefBand; i < prefBandSize + minPrefBand; i++) {
        hNlXdAvg += cohxd[i];
    }
    hNlXdAvg /= prefBandSize;
    hNlXdAvg = 1 - hNlXdAvg;

    hNlDeAvg = 0;
    for (i = minPrefBand; i < prefBandSize + minPrefBand; i++) {
        hNlDeAvg += cohde[i];
    }
    hNlDeAvg /= prefBandSize;

    if (hNlXdAvg < 0.75f && hNlXdAvg < aec->hNlXdAvgMin) {
        aec->hNlXdAvgMin = hNlXdAvg;
    }

    // Near-end-only speech detection (hysteresis between the two thresholds).
    if (hNlDeAvg > 0.98f && hNlXdAvg > 0.9f) {
        aec->stNearState = 1;
    }
    else if (hNlDeAvg < 0.95f || hNlXdAvg < 0.8f) {
        aec->stNearState = 0;
    }

    // Choose the suppression gain curve hNl based on echo/near-end state.
    if (aec->hNlXdAvgMin == 1) {
        aec->echoState = 0;
        aec->overDrive = aec->minOverDrive;

        if (aec->stNearState == 1) {
            memcpy(hNl, cohde, sizeof(hNl));
            hNlFb = hNlDeAvg;
            hNlFbLow = hNlDeAvg;
        }
        else {
            for (i = 0; i < PART_LEN1; i++) {
                hNl[i] = 1 - cohxd[i];
            }
            hNlFb = hNlXdAvg;
            hNlFbLow = hNlXdAvg;
        }
    }
    else {
        if (aec->stNearState == 1) {
            aec->echoState = 0;
            memcpy(hNl, cohde, sizeof(hNl));
            hNlFb = hNlDeAvg;
            hNlFbLow = hNlDeAvg;
        }
        else {
            aec->echoState = 1;
            for (i = 0; i < PART_LEN1; i++) {
                hNl[i] = WEBRTC_SPL_MIN(cohde[i], 1 - cohxd[i]);
            }

            // Select an order statistic from the preferred bands.
            // TODO: Using quicksort now, but a selection algorithm may be preferred.
            memcpy(hNlPref, &hNl[minPrefBand], sizeof(float) * prefBandSize);
            qsort(hNlPref, prefBandSize, sizeof(float), CmpFloat);
            hNlFb = hNlPref[(int)floor(prefBandQuant * (prefBandSize - 1))];
            hNlFbLow = hNlPref[(int)floor(prefBandQuantLow * (prefBandSize - 1))];
        }
    }

    // Track the local filter minimum to determine suppression overdrive.
    if (hNlFbLow < 0.6f && hNlFbLow < aec->hNlFbLocalMin) {
        aec->hNlFbLocalMin = hNlFbLow;
        aec->hNlFbMin = hNlFbLow;
        aec->hNlNewMin = 1;
        aec->hNlMinCtr = 0;
    }
    // Let the tracked minima decay back toward 1 over time.
    aec->hNlFbLocalMin = WEBRTC_SPL_MIN(aec->hNlFbLocalMin + 0.0008f / aec->mult, 1);
    aec->hNlXdAvgMin = WEBRTC_SPL_MIN(aec->hNlXdAvgMin + 0.0006f / aec->mult, 1);

    if (aec->hNlNewMin == 1) {
        aec->hNlMinCtr++;
    }
    if (aec->hNlMinCtr == 2) {
        aec->hNlNewMin = 0;
        aec->hNlMinCtr = 0;
        aec->overDrive = WEBRTC_SPL_MAX(aec->targetSupp /
            ((float)log(aec->hNlFbMin + 1e-10f) + 1e-10f), aec->minOverDrive);
    }

    // Smooth the overdrive.
    if (aec->overDrive < aec->overDriveSm) {
        aec->overDriveSm = 0.99f * aec->overDriveSm + 0.01f * aec->overDrive;
    }
    else {
        aec->overDriveSm = 0.9f * aec->overDriveSm + 0.1f * aec->overDrive;
    }

    WebRtcAec_OverdriveAndSuppress(aec, hNl, hNlFb, efw);

    // Add comfort noise.
    ComfortNoise(aec, efw, comfortNoiseHband, aec->noisePow, hNl);

    // TODO(bjornv): Investigate how to take the windowing below into account if
    // needed.
    if (aec->metricsMode == 1) {
        // Note that we have a scaling by two in the time domain |eBuf|.
        // In addition the time domain signal is windowed before transformation,
        // losing half the energy on the average. We take care of the first
        // scaling only in UpdateMetrics().
        UpdateLevel(&aec->nlpoutlevel, efw);
    }
    // Inverse error fft.
    fft[0] = efw[0][0];
    fft[1] = efw[0][PART_LEN];
    for (i = 1; i < PART_LEN; i++) {
        fft[2*i] = efw[0][i];
        // Sign change required by Ooura fft.
        fft[2*i + 1] = -efw[1][i];
    }
    aec_rdft_inverse_128(fft);

    // Overlap and add to obtain output.
    scale = 2.0f / PART_LEN2;
    for (i = 0; i < PART_LEN; i++) {
        fft[i] *= scale; // fft scaling
        fft[i] = fft[i]*sqrtHanning[i] + aec->outBuf[i];

        // Saturation protection
        output[i] = (short)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, fft[i],
            WEBRTC_SPL_WORD16_MIN);

        // Second half of the windowed block is carried to the next call.
        fft[PART_LEN + i] *= scale; // fft scaling
        aec->outBuf[i] = fft[PART_LEN + i] * sqrtHanning[PART_LEN - i];
    }

    // For H band
    if (aec->sampFreq == 32000) {

        // H band gain
        // average nlp over low band: average over second half of freq spectrum
        // (4->8khz)
        GetHighbandGain(hNl, &nlpGainHband);

        // Inverse comfort_noise
        if (flagHbandCn == 1) {
            fft[0] = comfortNoiseHband[0][0];
            fft[1] = comfortNoiseHband[PART_LEN][0];
            for (i = 1; i < PART_LEN; i++) {
                fft[2*i] = comfortNoiseHband[i][0];
                fft[2*i + 1] = comfortNoiseHband[i][1];
            }
            aec_rdft_inverse_128(fft);
            scale = 2.0f / PART_LEN2;
        }

        // compute gain factor
        for (i = 0; i < PART_LEN; i++) {
            dtmp = (float)aec->dBufH[i];
            dtmp = (float)dtmp * nlpGainHband; // for variable gain

            // add some comfort noise where Hband is attenuated
            if (flagHbandCn == 1) {
                fft[i] *= scale; // fft scaling
                dtmp += cnScaleHband * fft[i];
            }

            // Saturation protection
            outputH[i] = (short)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, dtmp,
                WEBRTC_SPL_WORD16_MIN);
        }
    }

    // Copy the current block to the old position.
    memcpy(aec->dBuf, aec->dBuf + PART_LEN, sizeof(float) * PART_LEN);
    memcpy(aec->eBuf, aec->eBuf + PART_LEN, sizeof(float) * PART_LEN);

    // Copy the current block to the old position for H band
    if (aec->sampFreq == 32000) {
        memcpy(aec->dBufH, aec->dBufH + PART_LEN, sizeof(float) * PART_LEN);
    }

    // Shift the buffered far spectra one partition down (overlap allowed,
    // hence memmove).
    memmove(aec->xfwBuf + PART_LEN1, aec->xfwBuf,
            sizeof(aec->xfwBuf) - sizeof(complex_t) * PART_LEN1);
}
// Processes one PART_LEN-sample block of the echo canceller. Unlike the
// older variant, all input blocks are pulled from the ring buffers inside
// |aec| (near/far frame buffers) rather than passed as arguments, and the
// far spectrum arrives pre-transformed via |far_buf|.
//
// aec - canceller state; all buffers, filter taps and metrics are mutated.
static void ProcessBlock(aec_t* aec) {
    int i;
    float d[PART_LEN], y[PART_LEN], e[PART_LEN], dH[PART_LEN];
    float scale;

    float fft[PART_LEN2];
    float xf[2][PART_LEN1], yf[2][PART_LEN1], ef[2][PART_LEN1];
    float df[2][PART_LEN1];
    float far_spectrum = 0.0f;
    float near_spectrum = 0.0f;
    float abs_far_spectrum[PART_LEN1];
    float abs_near_spectrum[PART_LEN1];

    // Smoothing weights (old, new) for the running power estimates.
    const float gPow[2] = {0.9f, 0.1f};

    // Noise estimate constants.
    const int noiseInitBlocks = 500 * aec->mult;
    const float step = 0.1f;
    const float ramp = 1.0002f;
    const float gInitNoise[2] = {0.999f, 0.001f};

    int16_t nearend[PART_LEN];
    int16_t* nearend_ptr = NULL;
    int16_t output[PART_LEN];
    int16_t outputH[PART_LEN];

    float* xf_ptr = NULL;

    memset(dH, 0, sizeof(dH));
    if (aec->sampFreq == 32000) {
        // Get the upper band first so we can reuse |nearend|.
        WebRtc_ReadBuffer(aec->nearFrBufH,
                          (void**) &nearend_ptr,
                          nearend,
                          PART_LEN);
        for (i = 0; i < PART_LEN; i++) {
            dH[i] = (float) (nearend_ptr[i]);
        }
        memcpy(aec->dBufH + PART_LEN, dH, sizeof(float) * PART_LEN);
    }
    WebRtc_ReadBuffer(aec->nearFrBuf, (void**) &nearend_ptr, nearend, PART_LEN);

    // ---------- Ooura fft ----------
    // Concatenate old and new nearend blocks.
    for (i = 0; i < PART_LEN; i++) {
        d[i] = (float) (nearend_ptr[i]);
    }
    memcpy(aec->dBuf + PART_LEN, d, sizeof(float) * PART_LEN);

#ifdef WEBRTC_AEC_DEBUG_DUMP
    {
        int16_t farend[PART_LEN];
        int16_t* farend_ptr = NULL;
        WebRtc_ReadBuffer(aec->far_time_buf, (void**) &farend_ptr, farend, 1);
        (void)fwrite(farend_ptr, sizeof(int16_t), PART_LEN, aec->farFile);
        (void)fwrite(nearend_ptr, sizeof(int16_t), PART_LEN, aec->nearFile);
    }
#endif

    // We should always have at least one element stored in |far_buf|.
    assert(WebRtc_available_read(aec->far_buf) > 0);
    // The far block is already in the frequency domain; xf_ptr holds
    // PART_LEN1 real parts followed by PART_LEN1 imaginary parts.
    WebRtc_ReadBuffer(aec->far_buf, (void**) &xf_ptr, &xf[0][0], 1);

    // Near fft
    memcpy(fft, aec->dBuf, sizeof(float) * PART_LEN2);
    TimeToFrequency(fft, df, 0);

    // Power smoothing
    for (i = 0; i < PART_LEN1; i++) {
        far_spectrum = (xf_ptr[i] * xf_ptr[i]) +
            (xf_ptr[PART_LEN1 + i] * xf_ptr[PART_LEN1 + i]);
        aec->xPow[i] = gPow[0] * aec->xPow[i] + gPow[1] * NR_PART * far_spectrum;
        // Calculate absolute spectra
        abs_far_spectrum[i] = sqrtf(far_spectrum);

        near_spectrum = df[0][i] * df[0][i] + df[1][i] * df[1][i];
        aec->dPow[i] = gPow[0] * aec->dPow[i] + gPow[1] * near_spectrum;
        // Calculate absolute spectra
        abs_near_spectrum[i] = sqrtf(near_spectrum);
    }

    // Estimate noise power. Wait until dPow is more stable.
    if (aec->noiseEstCtr > 50) {
        for (i = 0; i < PART_LEN1; i++) {
            // Minimum-statistics tracker: follow dips quickly, creep up via
            // the |ramp| factor otherwise.
            if (aec->dPow[i] < aec->dMinPow[i]) {
                aec->dMinPow[i] = (aec->dPow[i] + step * (aec->dMinPow[i] -
                                                          aec->dPow[i])) * ramp;
            } else {
                aec->dMinPow[i] *= ramp;
            }
        }
    }

    // Smooth increasing noise power from zero at the start,
    // to avoid a sudden burst of comfort noise.
    if (aec->noiseEstCtr < noiseInitBlocks) {
        aec->noiseEstCtr++;
        for (i = 0; i < PART_LEN1; i++) {
            if (aec->dMinPow[i] > aec->dInitMinPow[i]) {
                aec->dInitMinPow[i] = gInitNoise[0] * aec->dInitMinPow[i] +
                    gInitNoise[1] * aec->dMinPow[i];
            } else {
                aec->dInitMinPow[i] = aec->dMinPow[i];
            }
        }
        aec->noisePow = aec->dInitMinPow;
    } else {
        aec->noisePow = aec->dMinPow;
    }

    // Block wise delay estimation used for logging
    if (aec->delay_logging_enabled) {
        int delay_estimate = 0;
        // Estimate the delay
        delay_estimate = WebRtc_DelayEstimatorProcessFloat(aec->delay_estimator,
                                                           abs_far_spectrum,
                                                           abs_near_spectrum,
                                                           PART_LEN1);
        if (delay_estimate >= 0) {
            // Update delay estimate buffer.
            aec->delay_histogram[delay_estimate]++;
        }
    }

    // Update the xfBuf block position (circular buffer of far spectra,
    // newest partition first).
    aec->xfBufBlockPos--;
    if (aec->xfBufBlockPos == -1) {
        aec->xfBufBlockPos = NR_PART - 1;
    }

    // Buffer xf
    memcpy(aec->xfBuf[0] + aec->xfBufBlockPos * PART_LEN1, xf_ptr,
           sizeof(float) * PART_LEN1);
    memcpy(aec->xfBuf[1] + aec->xfBufBlockPos * PART_LEN1, &xf_ptr[PART_LEN1],
           sizeof(float) * PART_LEN1);

    memset(yf, 0, sizeof(yf));

    // Filter far: accumulate filter taps * buffered far spectra into yf.
    WebRtcAec_FilterFar(aec, yf);

    // Inverse fft to obtain echo estimate and error.
    fft[0] = yf[0][0];
    fft[1] = yf[0][PART_LEN];
    for (i = 1; i < PART_LEN; i++) {
        fft[2 * i] = yf[0][i];
        fft[2 * i + 1] = yf[1][i];
    }
    aec_rdft_inverse_128(fft);

    scale = 2.0f / PART_LEN2;
    for (i = 0; i < PART_LEN; i++) {
        // Second half of the inverse transform holds the usable samples.
        y[i] = fft[PART_LEN + i] * scale; // fft scaling
    }

    // Error = near-end minus echo estimate.
    for (i = 0; i < PART_LEN; i++) {
        e[i] = d[i] - y[i];
    }

    // Error fft (zero-padded in the first half).
    memcpy(aec->eBuf + PART_LEN, e, sizeof(float) * PART_LEN);
    memset(fft, 0, sizeof(float) * PART_LEN);
    memcpy(fft + PART_LEN, e, sizeof(float) * PART_LEN);
    // TODO(bjornv): Change to use TimeToFrequency().
    aec_rdft_forward_128(fft);

    ef[1][0] = 0;
    ef[1][PART_LEN] = 0;
    ef[0][0] = fft[0];
    ef[0][PART_LEN] = fft[1];
    for (i = 1; i < PART_LEN; i++) {
        ef[0][i] = fft[2 * i];
        ef[1][i] = fft[2 * i + 1];
    }

    if (aec->metricsMode == 1) {
        // Note that the first PART_LEN samples in fft (before transformation) are
        // zero. Hence, the scaling by two in UpdateLevel() should not be
        // performed. That scaling is taken care of in UpdateMetrics() instead.
        UpdateLevel(&aec->linoutlevel, ef);
    }

    // Scale error signal inversely with far power.
    WebRtcAec_ScaleErrorSignal(aec, ef);
    WebRtcAec_FilterAdaptation(aec, fft, ef);
    NonLinearProcessing(aec, output, outputH);

    if (aec->metricsMode == 1) {
        // Update power levels and echo metrics
        UpdateLevel(&aec->farlevel, (float (*)[PART_LEN1]) xf_ptr);
        UpdateLevel(&aec->nearlevel, df);
        UpdateMetrics(aec);
    }

    // Store the output block.
    WebRtc_WriteBuffer(aec->outFrBuf, output, PART_LEN);
    // For H band
    if (aec->sampFreq == 32000) {
        WebRtc_WriteBuffer(aec->outFrBufH, outputH, PART_LEN);
    }

#ifdef WEBRTC_AEC_DEBUG_DUMP
    {
        int16_t eInt16[PART_LEN];
        for (i = 0; i < PART_LEN; i++) {
            eInt16[i] = (int16_t)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, e[i],
                WEBRTC_SPL_WORD16_MIN);
        }

        (void)fwrite(eInt16, sizeof(int16_t), PART_LEN, aec->outLinearFile);
        (void)fwrite(output, sizeof(int16_t), PART_LEN, aec->outFile);
    }
#endif
}
/**
 * Records a win for this avatar: bumps the win counter, awards 50 XP,
 * and refreshes the avatar's level from the new XP total.
 */
void Avatar::Incrementwins()
{
    ++_numWins;
    _xp += 50;
    UpdateLevel();
}