Code example #1
File: MprFromMic.cpp  Project: Konnekt/lib-sipx
UtlBoolean MprFromMic::doProcessFrame(MpBufPtr inBufs[],
                                     MpBufPtr outBufs[],
                                     int inBufsSize,
                                     int outBufsSize,
                                     UtlBoolean isEnabled,
                                     int samplesPerFrame,
                                     int samplesPerSecond)
{
	MpBufPtr        out = NULL ;
	MpBufferMsg*    pMsg;

	if (0 == outBufsSize) 
	{
		return FALSE;
	}
	

	// Clear the number of empty frames every 512 frames
	mNumFrames++;
	if (0 == (mNumFrames & 0x1ff)) 
	{
		mNumEmpties = 0;
	}

	if (isEnabled) 
	{
		// If the microphone queue (holds unprocessed mic data) has more than
		// the max_mic_buffers threshold, drain the queue until it is back in range.
		OsMsgQ* pMicOutQ;
		pMicOutQ = MpMisc.pMicQ;
		while (pMicOutQ && MpMisc.max_mic_buffers < pMicOutQ->numMsgs()) 
		{
			if (OS_SUCCESS == pMicOutQ->receive((OsMsg*&) pMsg,
					OsTime::NO_WAIT))
			{
				MpBuf_delRef(pMsg->getTag());
				MpBuf_delRef(pMsg->getTag(1));
				pMsg->releaseMsg();
			}
		}

		if (pMicOutQ && pMicOutQ->numMsgs() <= 0)
		{
//			osPrintf("MprFromMic: No data available (total frames=%d, starved frames=%d)\n", 
//					mNumFrames, mNumEmpties);
		}
		else
		{
			if (pMicOutQ && OS_SUCCESS == pMicOutQ->receive((OsMsg*&) pMsg, 
					OsTime::NO_WAIT)) 
			{
				out = pMsg->getTag();
				pMsg->releaseMsg();
				
				if (NULL != out) 
				{
#ifdef REAL_SILENCE_DETECTION /* [ */
					Sample* shpTmpFrame;
					MpBufPtr tpBuf;
					int n;
#endif /* REAL_SILENCE_DETECTION ] */

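					// Classify the captured frame: tone frames pass through untouched,
					// muted frames are marked silent, and anything else goes through
					// the silence/activity detection below.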
					switch(MpBuf_getSpeech(out)) 
					{
						case MP_SPEECH_TONE:
							break;
						case MP_SPEECH_MUTED:
							MpBuf_setSpeech(out, MP_SPEECH_SILENT);
							break;
						default:
#ifdef REAL_SILENCE_DETECTION /* [ */
							Sample *shpSamples;
							n = MpBuf_getNumSamples(out);
							shpSamples = MpBuf_getSamples(out);

							tpBuf = MpBuf_getBuf(MpMisc.UcbPool, n, 0, MP_FMT_T12);
							assert(NULL != tpBuf);
							shpTmpFrame = MpBuf_getSamples(tpBuf);
							highpass_filter800(shpSamples, shpTmpFrame, n);

							if(0 == speech_detected(shpTmpFrame,n)) 
							{
								MpBuf_setSpeech(out, MP_SPEECH_SILENT);
							}
							else 
							{
								MpBuf_setSpeech(out, MP_SPEECH_ACTIVE);
							}
							MpBuf_delRef(tpBuf);
#else /* REAL_SILENCE_DETECTION ] [ */
							// 24 April 2001 (HZM)  I am disabling this because it takes
							// too long to recognize the beginning of a talk spurt, and
							// causes the bridge mixer to drop the start of each word.
							MpBuf_isActiveAudio(out);
#endif /* REAL_SILENCE_DETECTION ] */
							break;
					}
				}
			}
		} 

#ifdef INSERT_SAWTOOTH /* [ */
		if (NULL == out)
		{
			out = MpBuf_getBuf(MpMisc.UcbPool, MpMisc.frameSamples, 0, MP_FMT_T12);
		}
		MpBuf_insertSawTooth(out);
		MpBuf_setSpeech(out, MP_SPEECH_ACTIVE);
#endif /* INSERT_SAWTOOTH ] */

		if (s_fnMicDataHook)
		{
			//
			// Allow an external identity to source microphone data.  Ideally,
			// this should probably become a different resource, but abstracting
			// a new CallFlowGraph is a lot of work.
			//

			if (NULL == out) 
			{
				out = MpBuf_getBuf(MpMisc.UcbPool, MpMisc.frameSamples, 0, MP_FMT_T12);
			}
			
			if (NULL != out) 
			{
				int n = 0;
				Sample* s = NULL;

				s = MpBuf_getSamples(out);
				n = MpBuf_getNumSamples(out);
				
				s_fnMicDataHook(n, s) ;

				MpBuf_setSpeech(out, MP_SPEECH_UNKNOWN);
				MpBuf_isActiveAudio(out);
			}
		}

		if (NULL == out)
		{
			out = MpBuf_getFgSilence();
		}
	}

	*outBufs = out;

	return TRUE;
}
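
The hook path above calls s_fnMicDataHook(n, s) with the frame's sample count and a pointer to its sample buffer, so an external source only has to overwrite the samples in place. Below is a minimal sketch of such a hook, assuming Sample is the 16-bit PCM type this code base uses and an 8 kHz capture rate; how the hook pointer gets registered is not shown in either listing, so the function here is purely illustrative.

// Sketch of a microphone data hook matching the call site above:
// s_fnMicDataHook(n, s) passes the frame's sample count and a pointer
// to its sample buffer, which the hook overwrites in place.
//
// Assumptions (not taken from the listing): Sample is 16-bit PCM and the
// capture rate is 8 kHz; adjust both to the actual build.

#include <math.h>

typedef short Sample;                 // assumed PCM sample type

void testToneMicDataHook(const int nSamples, Sample* pSamples)
{
    static double phase = 0.0;
    const double kPi = 3.14159265358979323846;
    const double step = 2.0 * kPi * 440.0 / 8000.0;   // 440 Hz at 8 kHz

    for (int i = 0; i < nSamples; i++)
    {
        pSamples[i] = (Sample)(0.25 * 32767.0 * sin(phase));
        phase += step;
        if (phase >= 2.0 * kPi)
        {
            phase -= 2.0 * kPi;
        }
    }
}

After the hook returns, the frame is marked MP_SPEECH_UNKNOWN and re-checked for activity, as the listing above shows.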
Code example #2
File: MprFromMic.cpp  Project: John-Chan/sipXtapi
UtlBoolean MprFromMic::doProcessFrame(MpBufPtr inBufs[],
                                      MpBufPtr outBufs[],
                                      int inBufsSize,
                                      int outBufsSize,
                                      UtlBoolean isEnabled,
                                      int samplesPerFrame,
                                      int samplesPerSecond)
{
   MpAudioBufPtr   out;
   MpBufferMsg*    pMsg;

   // We need one output buffer
   if (outBufsSize != 1) 
      return FALSE;

   // Don't waste the time if output is not connected
   if (!isOutputConnected(0))
       return TRUE;

   // One more frame processed
   mNumFrames++;

#ifdef RTL_ENABLED
   RTL_EVENT("FromMic queue", mpMicQ->numMsgs());
#endif

   if (isEnabled) 
   {
      // If the microphone queue (holds unprocessed mic data) has more than
      // the max_mic_buffers threshold, drain the queue until it is back in range.
      while (mpMicQ && mpMicQ->numMsgs() > MpMisc.max_mic_buffers) 
      {
         if (mpMicQ->receive((OsMsg*&)pMsg, OsTime::NO_WAIT_TIME) == OS_SUCCESS) 
         {
            pMsg->releaseMsg();
                osPrintf( "mpMicQ drained. %d msgs in queue now\n"
                        , mpMicQ->numMsgs());
         }
      }

      if (mpMicQ && mpMicQ->numMsgs() > 0)
      {
         if (mpMicQ->receive((OsMsg*&)pMsg, OsTime::NO_WAIT_TIME) == OS_SUCCESS) 
         {
//                osPrintf( "mpMicQ->receive() succeed, %d msgs in queue\n"
//                        , mpMicQ->numMsgs());
            out = pMsg->getBuffer();
            pMsg->releaseMsg();
         }
      }
      else
      {
//         osPrintf("MprFromMic: No data available (total frames=%d)\n", 
//               mNumFrames);
      }

#ifdef INSERT_SAWTOOTH /* [ */
      if (!out.isValid())
      {
         out = MpMisc.RawAudioPool->getBuffer();
         if (!out.isValid())
            return FALSE;
         out->setSamplesNumber(MpMisc.frameSamples);
      }
      MpBuf_insertSawTooth(out);
      out->setSpeechType(MP_SPEECH_ACTIVE);
#endif /* INSERT_SAWTOOTH ] */

      if (s_fnMicDataHook)
      {
         // 
         // Allow an external identity to source microphone data.  Ideally,
         // this should probably become a different resource, but abstracting
         // a new CallFlowGraph is a lot of work.
         //

         if (!out.isValid())
         {
            out = MpMisc.RawAudioPool->getBuffer();
            if (!out.isValid())
               return FALSE;
            out->setSamplesNumber(MpMisc.frameSamples);
         }
         
         if (out.isValid()) 
         {
            int n = 0;
            MpAudioSample* s = NULL;

            s = out->getSamplesWritePtr();
            n = out->getSamplesNumber();
            
            s_fnMicDataHook(n, (short*)s) ;

            out->setSpeechType(MP_SPEECH_UNKNOWN);
         }
      }

      if (out.isValid())
      {
         switch(out->getSpeechType()) 
         {
            case MP_SPEECH_TONE:
               break;
            case MP_SPEECH_MUTED:
               out->setSpeechType(MP_SPEECH_SILENT);
               break;
            default:
               {
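                  // Unlike example #1, silence detection always runs here (no
                  // REAL_SILENCE_DETECTION guard) and speech_detected() returns
                  // the speech type that is stored directly on the buffer.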
                  MpAudioSample* shpTmpFrame;
                  MpAudioBufPtr tpBuf;

                  MpAudioSample *shpSamples;
                  int n = out->getSamplesNumber();
                  shpSamples = out->getSamplesWritePtr();

                  tpBuf = MpMisc.RawAudioPool->getBuffer();
                  if (!tpBuf.isValid())
                     return FALSE;
                  tpBuf->setSamplesNumber(n);
                  shpTmpFrame = tpBuf->getSamplesWritePtr();
                  highpass_filter800(shpSamples, shpTmpFrame, n);

                  out->setSpeechType(speech_detected(shpTmpFrame,n));
               }
               break;
         }
      }
   }
   else
   {
      out = inBufs[0];
   }

   outBufs[0] = out;

   return TRUE;
}
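
Both versions begin by draining the microphone queue down to MpMisc.max_mic_buffers before pulling a frame; the difference is that example #1 must MpBuf_delRef the buffers it throws away, while the newer buffer API above hands out reference-counted buffers, so releasing the message is enough. The helper below factors that drain loop into a standalone function as a sketch only: it uses just the OsMsgQ and MpBufferMsg calls that appear in the listings, assumes the same sipX headers that MprFromMic.cpp already includes, and its name and return value are invented for the example.

// Illustrative helper (not part of MprFromMic): drop queued microphone
// frames until at most maxBuffers messages remain. Uses only the calls
// that appear in the listings above; name and return convention are
// invented for this sketch.
static int drainMicQueue(OsMsgQ* pMicQ, int maxBuffers)
{
   MpBufferMsg* pMsg;
   int dropped = 0;

   while (pMicQ && pMicQ->numMsgs() > maxBuffers)
   {
      if (pMicQ->receive((OsMsg*&)pMsg, OsTime::NO_WAIT_TIME) != OS_SUCCESS)
      {
         break;               // queue emptied out from under us; stop
      }
      pMsg->releaseMsg();     // message (and its buffer reference) goes back;
                              // compare example #1, which needed MpBuf_delRef
      dropped++;
   }
   return dropped;
}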