/// Accept one full frame of audio samples from the flowgraph for playback.
///
/// @param numSamples  Number of samples in @p samples; must equal
///                    mSamplesPerFrame (only full frames are supported).
/// @param samples     Sample data to copy into mAudioFrame; may be NULL,
///                    in which case the current frame buffer is left as-is.
/// @param frameTime   Frame timestamp (currently unused by this driver).
/// @return OS_SUCCESS on success, OS_FAILED if the device is disabled or
///         if numSamples is not a full frame.
OsStatus MpodOss::pushFrame(unsigned int numSamples,
                            const MpAudioSample* samples,
                            MpFrameTime frameTime)
{
   if (!isEnabled())
      return OS_FAILED;

   // Currently only full frame supported
   if (numSamples != mSamplesPerFrame)
   {
      OsSysLog::add(FAC_MP, PRI_ERR,
                    "MpodOss::pushFrame (%p) given %d samples, expected full frame of: %d samples",
                    this, numSamples, (int)mSamplesPerFrame);
      OsSysLog::flush();
      assert(numSamples == mSamplesPerFrame);
      // The assert above is compiled out in release builds (NDEBUG); bail
      // out here instead of falling through to the memcpy below, which
      // would overrun mAudioFrame (sized for exactly one frame) whenever
      // numSamples > mSamplesPerFrame.
      return OS_FAILED;
   }

   RTL_BLOCK("MpodOss::pushFrame");

   // NULL samples means "nothing new to play"; keep the existing buffer.
   if (samples != NULL)
   {
      memcpy(mAudioFrame, samples, numSamples * sizeof(MpAudioSample));
   }

   return OS_SUCCESS;
}
/* //////////////////////////// PROTECTED ///////////////////////////////// */ void MpidOss::pushFrame() { RTL_BLOCK("MpidOss::pushFrame"); mpInputDeviceManager->pushFrame(mDeviceId, mSamplesPerFrame, mAudioFrame, mCurrentFrameTime); mCurrentFrameTime += getFramePeriod(); }
/// Accept one frame of audio for playback, copying it into the buffer
/// that Android's AudioTrack handed to us via the ticker callback.
///
/// This pushFrame() is a shim adapting Android's AudioTrack to our driver
/// framework: it is invoked from the ticker callback, at which point
/// mpSampleBuffer points into the AudioTrack-provided buffer, so a plain
/// memcpy/memset into it is all that is required.
///
/// @param numSamples  Sample count; either a full frame or 0 (with NULL
///                    samples) to request a silent frame.
/// @param samples     Samples to play, or NULL for silence.
/// @param frameTime   Frame timestamp, used only for RTL audio tracing.
/// @return OS_SUCCESS, or OS_FAILED if the device is disabled.
OsStatus MpodAndroid::pushFrame(unsigned int numSamples,
                                const MpAudioSample* samples,
                                MpFrameTime frameTime)
{
   RTL_BLOCK("MpodAndroid::pushFrame");

   if (!isEnabled())
   {
      return OS_FAILED;
   }

   // Only full frames are supported right now.
//   LOGV("MpodAndroid::pushFrame(numSamples=%d, samples=%p)", numSamples, samples);
   assert((mSamplesPerFrame == numSamples) || (numSamples == 0 && samples == NULL));

   const size_t frameBytes = mSamplesPerFrame * sizeof(MpAudioSample);

   if (samples == NULL)
   {
      // No data supplied: insert one full frame of silence.
      if (mpSampleBuffer != NULL)
      {
         memset(mpSampleBuffer, 0, frameBytes);
#ifdef RTL_AUDIO_ENABLED
         RTL_RAW_AUDIO("MpodAndroid_pushFrame", mSamplesPerSec, mSamplesPerFrame,
                       mpSampleBuffer,
                       frameTime*(mSamplesPerSec/1000)/mSamplesPerFrame);
#endif
      }
   }
   else
   {
      // Real audio data: trace it, then copy into the AudioTrack buffer.
#ifdef RTL_AUDIO_ENABLED
      RTL_RAW_AUDIO("MpodAndroid_pushFrame", mSamplesPerSec, numSamples, samples,
                    frameTime*(mSamplesPerSec/1000)/mSamplesPerFrame);
#endif
      if (mpSampleBuffer != NULL)
      {
         memcpy(mpSampleBuffer, samples, frameBytes);
      }
   }

   return OS_SUCCESS;
}
/// AudioTrack callback: fill Android's output buffer from mpSampleBuffer.
///
/// Runs on Android's audio thread. On each EVENT_MORE_DATA it copies as
/// much of the pending frame as the supplied buffer will hold; once a
/// whole frame has been consumed it signals the ticker notification,
/// which in turn drives pushFrame() to refill mpSampleBuffer. It also
/// advances the driver state machine and wakes any thread blocked on
/// mWaitCbkCond.
///
/// @param event  Callback reason; only EVENT_MORE_DATA is handled.
/// @param user   Opaque pointer back to the owning MpodAndroid instance.
/// @param info   For EVENT_MORE_DATA, an MpAndroidAudioTrack::Buffer to fill.
void MpodAndroid::audioCallback(int event, void* user, void *info)
{
   //LOGV("MpodAndroid::audioCallback(event=%d)\n", event);
   RTL_BLOCK("MpodAndroid::audioCallback");
   bool lSignal = false;
   // Ignore everything except data requests.
   if (event != MpAndroidAudioTrack::EVENT_MORE_DATA)
   {
      LOGV("MpodAndroid::audioCallback(event=%d)\n", event);
      return;
   }
   MpAndroidAudioTrack::Buffer *buffer = static_cast<MpAndroidAudioTrack::Buffer *>(info);
   MpodAndroid *pDriver = static_cast<MpodAndroid *>(user);
   // Start accessing non-atomic member variables
   AutoMutex autoLock(pDriver->mLock);

   // The AudioTrack buffer may be smaller than our frame; copy only what
   // fits (or only what remains of the current frame).
   int samplesToCopy = sipx_min(buffer->frameCount,
                                pDriver->mSamplesPerFrame-pDriver->mSampleBufferIndex);
#ifdef ENABLE_FRAME_TIME_LOGGING
   LOGV("MpodAndroid::audioCallback() buffer=%p samples=%d size=%d toCopy=%d\n",
        buffer->i16, buffer->frameCount, buffer->size, samplesToCopy);
#endif
   RTL_EVENT("MpodAndroid::audioCallback_bufsize", samplesToCopy);

   // Copy data to buffer
   memcpy(buffer->i16, pDriver->mpSampleBuffer+pDriver->mSampleBufferIndex, samplesToCopy*sizeof(short));
   // Report back to AudioTrack how much we actually provided.
   buffer->frameCount = samplesToCopy;
   buffer->size = samplesToCopy*sizeof(short);
   pDriver->mSampleBufferIndex += samplesToCopy;

#ifdef ENABLE_FILE_LOGGING
   fwrite(buffer->i16, 1, buffer->frameCount*sizeof(short), sgOutFile);
#endif // ENABLE_FILE_LOGGING

   // Whole frame consumed: rewind and ask the flowgraph for the next one.
   if (pDriver->mSampleBufferIndex >= pDriver->mSamplesPerFrame)
   {
      RTL_BLOCK("MpodAndroid::audioCallback_tick");
      // Index should never overshoot the frame; log if it somehow does.
      if(pDriver->mSampleBufferIndex > pDriver->mSamplesPerFrame)
      {
         LOGE("MpodAndroid::audioCallback() sample index (%d) > samples/frame (%d)\n",
              (int)pDriver->mSampleBufferIndex, (int)pDriver->mSamplesPerFrame);
      }
      // Return index to the beginning
      pDriver->mSampleBufferIndex = 0;

      // Fire callback. It will call our pushFrame() in turn.
#ifdef ENABLE_FRAME_TIME_LOGGING
      LOGV("MpodAndroid::audioCallback() signal ticker, time %"PRIi64"ns\n", systemTime(SYSTEM_TIME_REALTIME));
#endif
      pDriver->mpTickerNotification->signal(pDriver->mSamplesPerFrame);

      // Update frame time.
      pDriver->mCurFrameTime += pDriver->mSamplesPerFrame;
   }

   // Step forward state
   switch (pDriver->mState)
   {
   case DRIVER_STARTING:
      // First callback after start(): we are now actually playing.
      pDriver->mState = DRIVER_PLAYING;
      lSignal = true;
      break;
   case DRIVER_STOPPING:
//      pDriver->mState = DRIVER_STOPPED;
//      break;
      // NOTE: deliberate fall-through — STOPPING is handled the same as
      // STOPPED (the intermediate transition above is commented out).
   case DRIVER_STOPPED:
      LOGV("MpodAndroid::audioCallback() stopping Track\n");
      pDriver->mpAudioTrack->stop();
      LOGV("MpodAndroid::audioCallback() stopped Track\n");
      buffer->size = 0;
      pDriver->mState = DRIVER_IDLE;
      lSignal = true;
      break;
   default:
      break;
   }

   // Wake any thread waiting on a state transition (enable/disable paths).
   if (lSignal)
   {
      LOGV("MpodAndroid::audioCallback signalling\n");
      pDriver->mWaitCbkCond.signal();
      LOGV("MpodAndroid::audioCallback signalled\n");
   }
}
void MpidAndroid::audioCallback(int event, void* user, void *info) { bool lSignal = false; if (event != MpAndroidAudioRecord::EVENT_MORE_DATA) { RTL_BLOCK("MpidAndroid::audioCallback_nondata"); LOGV("MpidAndroid::audioCallback(event=%d)\n", event); return; } RTL_BLOCK("MpidAndroid::audioCallback"); #ifdef ENABLE_FRAME_TIME_LOGGING LOGV("MpidAndroid::audioCallback() time %"PRIi64"ns\n", systemTime(SYSTEM_TIME_REALTIME)); #endif MpAndroidAudioRecord::Buffer *buffer = static_cast<MpAndroidAudioRecord::Buffer *>(info); MpidAndroid *pDriver = static_cast<MpidAndroid *>(user); #ifdef ENABLE_FILE_LOGGING fwrite(buffer->i16, 1, buffer->frameCount*sizeof(short), sgOutFile); #endif // ENABLE_FILE_LOGGING // Start accessing non-atomic member variables AutoMutex autoLock(pDriver->mLock); #ifdef ENABLE_FRAME_TIME_LOGGING LOGV("MpidAndroid::audioCallback() frameCount=%d size=%d state=%d\n", buffer->frameCount, buffer->size, pDriver->mState); #endif // Only process if we're enabled.. if(pDriver->mIsEnabled) { if (buffer->frameCount + pDriver->mBufInternalSamples < pDriver->mSamplesPerFrameInternal) { #ifdef ENABLE_FRAME_TIME_LOGGING LOGV("frameCount=%d mBufInternalSamples=%d (sum=%d) mSamplesPerFrameInternal=%d", buffer->frameCount, pDriver->mBufInternalSamples, buffer->frameCount + pDriver->mBufInternalSamples, pDriver->mSamplesPerFrameInternal); #endif memcpy(pDriver->mpBufInternal+pDriver->mBufInternalSamples, buffer->i16, buffer->frameCount*sizeof(short)); pDriver->mBufInternalSamples += buffer->frameCount; } else { // Copy samples to the temp buffer if needed. 
MpAudioSample *origSamples; int origSamplesConsumed; if (pDriver->mBufInternalSamples > 0) { origSamplesConsumed = sipx_min(pDriver->mSamplesPerFrameInternal-pDriver->mBufInternalSamples, buffer->frameCount); memcpy(pDriver->mpBufInternal+pDriver->mBufInternalSamples, buffer->i16, origSamplesConsumed*sizeof(short)); pDriver->mBufInternalSamples += origSamplesConsumed; origSamples = pDriver->mpBufInternal; } else { origSamples = buffer->i16; origSamplesConsumed = pDriver->mSamplesPerFrameInternal; } // Resample is needed. MpAudioSample *pushSamples = origSamples; if (pDriver->mpResampler != NULL) { uint32_t samplesProcessed; uint32_t samplesWritten; LOGV("origSamples: %d mSamplesPerFrameInternal: %d samplesProcessed: %d mpResampleBuf: %d mSamplesPerFrame: %d samplesWritten: %d\n", pDriver->mBufInternalSamples, pDriver->mSamplesPerFrameInternal, samplesProcessed, pDriver->mpResampleBuf, pDriver->mSamplesPerFrame, samplesWritten); LOGV("pDriver->mpResampler->getInputRate(): %d pDriver->mpResampler->getOutputRate(): %d\n", pDriver->mpResampler->getInputRate(), pDriver->mpResampler->getOutputRate()); OsStatus status = pDriver->mpResampler->resample(0, pDriver->mpBufInternal, pDriver->mSamplesPerFrameInternal, samplesProcessed, pDriver->mpResampleBuf, pDriver->mSamplesPerFrame, samplesWritten); assert(status == OS_SUCCESS); if(pDriver->mSamplesPerFrameInternal != samplesProcessed || pDriver->mSamplesPerFrame != samplesWritten) { LOGE("mSamplesPerFrameInternal: %d samplesProcessed: %d mSamplesPerFrame: %d samplesWritten: %d\n", pDriver->mSamplesPerFrameInternal, samplesProcessed, pDriver->mSamplesPerFrame, samplesWritten); printf("mSamplesPerFrameInternal: %d samplesProcessed: %d mSamplesPerFrame: %d samplesWritten: %d\n", pDriver->mSamplesPerFrameInternal, samplesProcessed, pDriver->mSamplesPerFrame, samplesWritten); } assert(pDriver->mSamplesPerFrameInternal == samplesProcessed && pDriver->mSamplesPerFrame == samplesWritten); pushSamples = pDriver->mpResampleBuf; } 
pDriver->mpInputDeviceManager->pushFrame(pDriver->mDeviceId, pDriver->mSamplesPerFrame, pushSamples, pDriver->mCurrentFrameTime); // Copy remaining samples to temp buffer if anything left. pDriver->mBufInternalSamples = sipx_min(buffer->frameCount-origSamplesConsumed, pDriver->mSamplesPerFrameInternal); if (pDriver->mBufInternalSamples > 0) { memcpy(pDriver->mpBufInternal, buffer->i16+origSamplesConsumed, pDriver->mBufInternalSamples*sizeof(short)); if (buffer->frameCount-origSamplesConsumed >= pDriver->mSamplesPerFrameInternal) { LOGW("TOO BIG FRAMES FROM MIC: %d", buffer->frameCount); } } // Ok, we have received and pushed a frame to the manager, // Now we advance the frame time. pDriver->mCurrentFrameTime += (pDriver->mSamplesPerFrame*1000)/pDriver->mSamplesPerSec; } } switch (pDriver->mState) { case DRIVER_STARTING: pDriver->mState = DRIVER_RECORDING; lSignal = true; break; case DRIVER_STOPPING: // pDriver->mState = DRIVER_STOPPED; // break; case DRIVER_STOPPED: pDriver->mpAudioRecord->stop(); buffer->size = 0; pDriver->mState = DRIVER_IDLE; lSignal = true; break; default: break; } if (lSignal) { LOGV("MpidAndroid::audioCallback() signaling condition state=%d\n", pDriver->mState); pDriver->mWaitCbkCond.signal(); } // LOGV("MpidAndroid::audioCallback() done.\n"); }
/// Process one frame for this RTP input connection: decode incoming RTP,
/// run inband DTMF detection, track remote silence/activity, and push the
/// decoded buffer downstream.
///
/// @return Result of the decoder's (and DTMF detector's) doProcessFrame,
///         or FALSE if there is no decoder.
UtlBoolean MpRtpInputAudioConnection::processFrame(void)
{
   // Initialize to FALSE so a defined value is returned even when
   // mpDecode is NULL in a release build (the assert below compiles out
   // under NDEBUG and previously left 'result' indeterminate).
   UtlBoolean result = FALSE;

#ifdef RTL_ENABLED
   RTL_BLOCK((UtlString)*this);
#endif

   assert(mpDecode);
   if(mpDecode)
   {
      // call doProcessFrame to do any "real" work
      result = mpDecode->doProcessFrame(mpInBufs, mpOutBufs,
                                        mMaxInputs, mMaxOutputs,
                                        mpDecode->isEnabled(),
                                        m_samplesPerFrame, m_samplesPerSec);
      if(mpDtmfDetector)
      {
         result &= mpDtmfDetector->doProcessFrame(mpOutBufs, // OutBufs from mpDecoder = InBuf for InBand
                                                  NULL, // No outBufs needed
                                                  mMaxOutputs, // Again MaxOutputs from decoder = maxInput for InBand
                                                  0, // 0 output
                                                  mpDtmfDetector->isEnabled(),
                                                  m_samplesPerFrame, m_samplesPerSec);
      }
   }

   // Track remote activity: comfort noise frames indicate silence from
   // the remote side; anything else means live audio.
   if (mpOutBufs)
   {
      MpAudioBufPtr audioBufPtr = *mpOutBufs;
      if (audioBufPtr.isValid())
      {
         MpSpeechType speechType = audioBufPtr->getSpeechType();
         if (speechType == MP_SPEECH_COMFORT_NOISE)
         {
            // comfort noise was generated, no frame was received
            m_inactiveFrameCount++;
            if (m_inactiveFrameCount >= ms_maxInactiveFrameCount)
            {
               // fire notification
               sendConnectionNotification(MP_NOTIFICATION_REMOTE_SILENT,
                                          m_inactiveFrameCount / FRAMES_PER_SECOND);
               m_inactiveFrameCount = 0;
               m_bAudioReceived = FALSE; // reset flag, so that we get notification when audio is received again
            }
         }
         else
         {
            m_inactiveFrameCount = 0;
            if (!m_bAudioReceived)
            {
               m_bAudioReceived = TRUE;
               // fire notification
               sendConnectionNotification(MP_NOTIFICATION_REMOTE_ACTIVE, 0);
            }
         }
      }
   }

   // No input buffers to release
   assert(mMaxInputs == 0);

   // Push the output buffer to the next resource
   assert(mMaxOutputs == 1);
   pushBufferDownsream(0, mpOutBufs[0]); // (sic: "Downsream" is the project method name)

   // release the output buffer
   mpOutBufs[0].release();

   return(result);
}