Ejemplo n.º 1
0
void UlawToPcmFilter::AudioChunkIn(AudioChunkRef& inputAudioChunk)
{
	// Decode one chunk of G.711 u-law audio into 16-bit linear PCM.
	// The decoded chunk is left in m_outputAudioChunk; it stays empty
	// when the input is absent, empty or of an unsupported payload type.
	m_outputAudioChunk.reset();

	// Nothing to do for a missing or empty input chunk.
	if(inputAudioChunk.get() == NULL || inputAudioChunk->GetNumSamples() == 0)
	{
		return;
	}

	AudioChunkDetails outputDetails = *inputAudioChunk->GetDetails();
	if(SupportsInputRtpPayloadType(outputDetails.m_rtpPayloadType) == false)
	{
		return;
	}

	// Allocate the output chunk: same sample count, 2 bytes per PCM sample.
	int sampleCount = inputAudioChunk->GetNumSamples();
	m_outputAudioChunk.reset(new AudioChunk());
	outputDetails.m_rtpPayloadType = -1;		// this filter consumes the RTP payload type
	outputDetails.m_encoding = PcmAudio;
	outputDetails.m_numBytes = sampleCount*2;

	short* pcmOut = (short*)m_outputAudioChunk->CreateBuffer(outputDetails);
	char* ulawIn = (char*)inputAudioChunk->m_pBuffer;

	// One u-law byte expands to one 16-bit linear sample.
	for(int idx=0; idx<sampleCount; idx++)
	{
		pcmOut[idx] = (short)ulaw2linear(ulawIn[idx]);
	}
}
Ejemplo n.º 2
0
bool BatchProcessing::SkipChunk(AudioTapeRef& audioTapeRef, AudioChunkRef& chunkRef, int& channelToSkip)
{
	// Decide whether this audio chunk should be dropped, based on which
	// direction of the call the tape wants to keep and which side of the
	// call is the local party.  Returns true when the chunk must be
	// skipped, and reports the chunk's channel in channelToSkip.
	AudioChunkDetails details = *chunkRef->GetDetails();
	bool skip = false;

	channelToSkip = 0;

	if(audioTapeRef->m_audioKeepDirectionEnum == CaptureEvent::AudioKeepDirectionNone)
	{
		// Neither direction is kept: every chunk is skipped.
		skip = true;
	}
	else if(audioTapeRef->m_audioKeepDirectionEnum == CaptureEvent::AudioKeepDirectionLocal)
	{
		// Keep only the local party: drop the channel that is NOT the
		// local side.  Unknown/both local side keeps everything.
		if(audioTapeRef->m_localSide == CaptureEvent::LocalSideSide1)
		{
			skip = (details.m_channel != 1);
		}
		else if(audioTapeRef->m_localSide == CaptureEvent::LocalSideSide2)
		{
			skip = (details.m_channel != 2);
		}
	}
	else if(audioTapeRef->m_audioKeepDirectionEnum == CaptureEvent::AudioKeepDirectionRemote)
	{
		// Keep only the remote party: drop the channel that IS the
		// local side.  When both sides are local, nothing is remote,
		// so everything is skipped.
		if(audioTapeRef->m_localSide == CaptureEvent::LocalSideSide1)
		{
			skip = (details.m_channel == 1);
		}
		else if(audioTapeRef->m_localSide == CaptureEvent::LocalSideSide2)
		{
			skip = (details.m_channel == 2);
		}
		else if(audioTapeRef->m_localSide == CaptureEvent::LocalSideBoth)
		{
			skip = true;
		}
	}
	// AudioKeepDirectionBoth, AudioKeepDirectionInvalid and any other
	// value keep the chunk (skip stays false).

	if(skip)
	{
		channelToSkip = details.m_channel;
	}

	return skip;
}
Ejemplo n.º 3
0
// Worker-thread entry point for batch transcoding.  Pops captured tapes off
// the shared queue, decodes each chunk (per RTP payload type), applies gain,
// mixes RTP streams, and writes the result to the configured output format.
// Loops until the daemon asks the thread to stop.
void BatchProcessing::ThreadHandler(void *args)
{
	SetThreadName("orka:batch");

	CStdString debug;
	CStdString logMsg;

	// Obtain this thread's own BatchProcessing processor instance.
	CStdString processorName("BatchProcessing");
	TapeProcessorRef batchProcessing = TapeProcessorRegistry::instance()->GetNewTapeProcessor(processorName);
	if(batchProcessing.get() == NULL)
	{
		LOG4CXX_ERROR(LOG.batchProcessingLog, "Could not instanciate BatchProcessing");
		return;
	}
	BatchProcessing* pBatchProcessing = (BatchProcessing*)(batchProcessing->Instanciate().get());

	pBatchProcessing->SetQueueSize(CONFIG.m_batchProcessingQueueSize);

	// Allocate a unique thread id for logging, under the processor mutex.
	int threadId = 0;
	{
		MutexSentinel sentinel(pBatchProcessing->m_mutex);
		threadId = pBatchProcessing->m_threadCount++;
	}
	CStdString threadIdString = IntToString(threadId);
	debug.Format("thread Th%s starting - queue size:%d", threadIdString, CONFIG.m_batchProcessingQueueSize);
	LOG4CXX_INFO(LOG.batchProcessingLog, debug);

	bool stop = false;

	// Main work loop: one iteration per tape popped from the queue.
	for(;stop == false;)
	{
		AudioFileRef fileRef;
		AudioFileRef outFileRef, outFileSecondaryRef;
		AudioTapeRef audioTapeRef;
		CStdString trackingId = "[no-trk]";

		try
		{
			audioTapeRef = pBatchProcessing->m_audioTapeQueue.pop();
			if(audioTapeRef.get() == NULL)
			{
				// A null tape is the queue's way of waking us up; check
				// whether the daemon wants us to shut down.
				if(Daemon::Singleton()->IsStopping())
				{
					stop = true;
				}
				if(Daemon::Singleton()->GetShortLived())
				{
					Daemon::Singleton()->Stop();
				}
			}
			else
			{
				fileRef = audioTapeRef->GetAudioFileRef();
				trackingId = audioTapeRef->m_trackingId;

				audioTapeRef->m_audioOutputPath = CONFIG.m_audioOutputPath;

				// Let's work on the tape we have pulled
				//CStdString threadIdString = IntToString(threadId);
				LOG4CXX_INFO(LOG.batchProcessingLog, "[" + trackingId + "] Th" + threadIdString + " processing " + audioTapeRef->GetIdentifier() + " localside:" + CaptureEvent::LocalSideToString(audioTapeRef->m_localSide) + " audiokeepdirection:" + CaptureEvent::AudioKeepDirectionToString(audioTapeRef->m_audioKeepDirectionEnum));
				if(audioTapeRef->m_audioKeepDirectionEnum == CaptureEvent::AudioKeepDirectionInvalid)
				{
					LOG4CXX_WARN(LOG.batchProcessingLog, "[" + trackingId + 
						"] Th" + threadIdString + 
						" invalid audiokeepdirection:" + 
						IntToString(audioTapeRef->m_audioKeepDirectionEnum));
				}



				//fileRef->MoveOrig();	// #### could do this only when original and output file have the same extension. Irrelevant for now as everything is captured as mcf file
				fileRef->Open(AudioFile::READ);

				AudioChunkRef chunkRef;
				AudioChunkRef tmpChunkRef, tmpChunkSecondaryRef;
				unsigned int frameSleepCounter;

				frameSleepCounter = 0;

				// Pick the output container/encoding from configuration.
				switch(CONFIG.m_storageAudioFormat)
				{
				case FfUlaw:
					outFileRef.reset(new LibSndFileFile(SF_FORMAT_ULAW | SF_FORMAT_WAV));
					break;
				case FfAlaw:
					outFileRef.reset(new LibSndFileFile(SF_FORMAT_ALAW | SF_FORMAT_WAV));
					break;
				case FfGsm:
					outFileRef.reset(new LibSndFileFile(SF_FORMAT_GSM610 | SF_FORMAT_WAV));
					break;
				case FfPcmWav:
				default:
					outFileRef.reset(new LibSndFileFile(SF_FORMAT_PCM_16 | SF_FORMAT_WAV));
				}

				if(CONFIG.m_stereoRecording == true)
				{
					outFileRef->SetNumOutputChannels(2);
				}

				FilterRef rtpMixer, rtpMixerSecondary;
				FilterRef decoder1;
				FilterRef decoder2;
				FilterRef decoder;
				FilterRef audiogain;

				// Pre-instantiate one decoder per RTP payload type and per
				// channel, so stateful codecs keep separate state per side.
				std::bitset<RTP_PAYLOAD_TYPE_MAX> seenRtpPayloadTypes;
				std::vector<FilterRef> decoders1;
				std::vector<FilterRef> decoders2;
				for(int pt=0; pt<RTP_PAYLOAD_TYPE_MAX; pt++)
				{
					decoder1 = FilterRegistry::instance()->GetNewFilter(pt);
					decoders1.push_back(decoder1);
					decoder2 = FilterRegistry::instance()->GetNewFilter(pt);
					decoders2.push_back(decoder2);
				}

				bool firstChunk = true;
				bool voIpSession = false;

				size_t numSamplesS1 = 0;
				size_t numSamplesS2 = 0;
				size_t numSamplesOut = 0;

				CStdString filterName("AudioGain");

				audiogain = FilterRegistry::instance()->GetNewFilter(filterName);
				if(audiogain.get() == NULL)
				{
					debug = "Could not instanciate AudioGain rtpMixer";
					throw(debug);
				}

				// Once a chunk of channel 1 has been skipped, remaining
				// chunks are forced onto channel 1 (see SkipChunk handling).
				bool forceChannel1 = false;

				while(fileRef->ReadChunkMono(chunkRef))
				{
					// ############ HACK
					//ACE_Time_Value yield;
					//yield.set(0,1);
					//ACE_OS::sleep(yield);
					// ############ HACK

					AudioChunkDetails details = *chunkRef->GetDetails();
					int channelToSkip = 0;
					if(CONFIG.m_directionLookBack == true)					//if DirectionLookBack is not enable, DirectionSelector Tape should have taken care everything
					{
						if(BatchProcessing::SkipChunk(audioTapeRef, chunkRef, channelToSkip) == true)
						{
							LOG4CXX_DEBUG(LOG.batchProcessingLog, "[" + trackingId +
	                                                "] Th" + threadIdString +
	                                                " skipping chunk of channel:" +
							IntToString(details.m_channel));

							if(forceChannel1 == false)
							{
								if(channelToSkip == 1)
								{
									forceChannel1 = true;
								}
							}

							continue;
						}
					}

					if(forceChannel1 == true)
					{
						details.m_channel = 1;
						chunkRef->SetDetails(&details);
					}

					decoder.reset();

					// -1 means raw/non-RTP audio; anything else must index
					// into the decoder vectors, so bound-check it.
					if(details.m_rtpPayloadType < -1 || details.m_rtpPayloadType >= RTP_PAYLOAD_TYPE_MAX)
					{
						logMsg.Format("RTP payload type out of bound:%d", details.m_rtpPayloadType);
						LOG4CXX_DEBUG(LOG.batchProcessingLog, "[" + trackingId + "] Th" + threadIdString + " " + logMsg);
						continue;
					}

					// Instanciate any decoder we might need during a VoIP session
					if(details.m_rtpPayloadType != -1)
					{
						voIpSession = true;

						if(details.m_channel == 2)
						{
							decoder2 = decoders2.at(details.m_rtpPayloadType);
							decoder = decoder2;
						}
						else
						{
							decoder1 = decoders1.at(details.m_rtpPayloadType);
							decoder = decoder1;
						}

						bool ptAlreadySeen = seenRtpPayloadTypes.test(details.m_rtpPayloadType);
						seenRtpPayloadTypes.set(details.m_rtpPayloadType);

						if(decoder.get() == NULL)
						{
							if(ptAlreadySeen == false)
							{
								// First time we see a particular unsupported payload type in this session, log it
								CStdString rtpPayloadType = IntToString(details.m_rtpPayloadType);
								LOG4CXX_ERROR(LOG.batchProcessingLog, "[" + trackingId + "] Th" + threadIdString + " unsupported RTP payload type:" + rtpPayloadType);
							}
							// We cannot decode this chunk due to unknown codec, go to next chunk
							continue;
						}
						else if(ptAlreadySeen == false)
						{
							// First time we see a particular supported payload type in this session, log it
							CStdString rtpPayloadType = IntToString(details.m_rtpPayloadType);
							LOG4CXX_INFO(LOG.batchProcessingLog, "[" + trackingId + "] Th" + threadIdString + " RTP payload type:" + rtpPayloadType);
						}
					}
					// Lazily create the mixer and open output file(s) on the
					// first usable chunk (or immediately for non-VoIP audio).
					if(!voIpSession || (firstChunk && decoder.get()))
					{
						firstChunk = false;

						// At this point, we know we have a working codec, create an RTP mixer and open the output file
						if(voIpSession)
						{
							CStdString filterName("RtpMixer");
							rtpMixer = FilterRegistry::instance()->GetNewFilter(filterName);
							if(rtpMixer.get() == NULL)
							{
								debug = "Could not instanciate RTP mixer";
								throw(debug);
							}
							if(CONFIG.m_stereoRecording == true)
							{
								rtpMixer->SetNumOutputChannels(2);
							}
							rtpMixer->SetSessionInfo(trackingId);

							//create another rtpmixer to store stereo audio
							if(CONFIG.m_audioOutputPathSecondary.length() > 3)
							{
								outFileSecondaryRef.reset(new LibSndFileFile(SF_FORMAT_PCM_16 | SF_FORMAT_WAV));
								outFileSecondaryRef->SetNumOutputChannels(2);
								rtpMixerSecondary = FilterRegistry::instance()->GetNewFilter(filterName);
								if(rtpMixerSecondary.get() == NULL)
								{
									debug = "Could not instanciate RTP mixer";
									throw(debug);
								}
								rtpMixerSecondary->SetNumOutputChannels(2);
								rtpMixerSecondary->SetSessionInfo(trackingId);

							}

						}

						CStdString path = audioTapeRef->m_audioOutputPath + "/" + audioTapeRef->GetPath();
						FileRecursiveMkdir(path, CONFIG.m_audioFilePermissions, CONFIG.m_audioFileOwner, CONFIG.m_audioFileGroup, audioTapeRef->m_audioOutputPath);

						CStdString file = path + "/" + audioTapeRef->GetIdentifier();
						outFileRef->Open(file, AudioFile::WRITE, false, fileRef->GetSampleRate());

						if(CONFIG.m_audioOutputPathSecondary.length() > 3)
						{
							path = CONFIG.m_audioOutputPathSecondary + "/" + audioTapeRef->GetPath();
							FileRecursiveMkdir(path, CONFIG.m_audioFilePermissions, CONFIG.m_audioFileOwner, CONFIG.m_audioFileGroup, CONFIG.m_audioOutputPathSecondary);
							CStdString storageFile = path + "/" + audioTapeRef->GetIdentifier();
							outFileSecondaryRef->Open(storageFile, AudioFile::WRITE, false, fileRef->GetSampleRate());
						}

					}
					// Decode this chunk through the per-channel decoder,
					// then run it through gain and the mixer.
					if(voIpSession)
					{
						if(details.m_channel == 2)
						{
							decoder2->AudioChunkIn(chunkRef);
							decoder2->AudioChunkOut(tmpChunkRef);
							if(tmpChunkRef.get())
							{
								numSamplesS2 += tmpChunkRef->GetNumSamples();
							}

							if(rtpMixerSecondary.get() != NULL)
							{
								decoder2->AudioChunkOut(tmpChunkSecondaryRef);
							}
						}
						else
						{
							decoder1->AudioChunkIn(chunkRef);
							decoder1->AudioChunkOut(tmpChunkRef);
							if(tmpChunkRef.get())
							{
								numSamplesS1 += tmpChunkRef->GetNumSamples();
							}

							if(rtpMixerSecondary.get() != NULL)
							{
								decoder1->AudioChunkOut(tmpChunkSecondaryRef);
							}
						}

						audiogain->AudioChunkIn(tmpChunkRef);
						audiogain->AudioChunkOut(tmpChunkRef);
						rtpMixer->AudioChunkIn(tmpChunkRef);
						rtpMixer->AudioChunkOut(tmpChunkRef);
						if(rtpMixerSecondary.get() != NULL)
						{
							rtpMixerSecondary->AudioChunkIn(tmpChunkSecondaryRef);
							rtpMixerSecondary->AudioChunkOut(tmpChunkSecondaryRef);
						}

					} else {
						// Non-VoIP audio: apply gain only, no mixing needed.
						audiogain->AudioChunkIn(tmpChunkRef);
						audiogain->AudioChunkOut(tmpChunkRef);
					}

					outFileRef->WriteChunk(tmpChunkRef);
					if(rtpMixerSecondary.get() != NULL)
					{
						outFileSecondaryRef->WriteChunk(tmpChunkSecondaryRef);
					}

					if(tmpChunkRef.get())
					{
						numSamplesOut += tmpChunkRef->GetNumSamples();
					}

					if(CONFIG.m_batchProcessingEnhancePriority == false)
					{
						// Give up CPU between every audio buffer to make sure the actual recording always has priority
						//ACE_Time_Value yield;
						//yield.set(0,1);	// 1 us
						//ACE_OS::sleep(yield);

						// Use this instead, even if it still seems this holds the whole process under Linux instead of this thread only.
						struct timespec ts;
						ts.tv_sec = 0;
						ts.tv_nsec = 1;
						ACE_OS::nanosleep (&ts, NULL);
					}
					
					// Optional throttling: sleep every N transcoded frames.
					if(CONFIG.m_transcodingSleepEveryNumFrames > 0 && CONFIG.m_transcodingSleepUs > 0)
					{
						if(frameSleepCounter >= (unsigned int)CONFIG.m_transcodingSleepEveryNumFrames)
						{
							frameSleepCounter = 0;
							struct timespec ts;
							ts.tv_sec = 0;
							ts.tv_nsec = CONFIG.m_transcodingSleepUs*1000;
							ACE_OS::nanosleep (&ts, NULL);
						}
						else
						{
							frameSleepCounter += 1;
						}
					}
				}

				if(voIpSession && !firstChunk)
				{
					// Flush the RTP mixer
					AudioChunkRef stopChunk(new AudioChunk());
					stopChunk->GetDetails()->m_marker = MEDIA_CHUNK_EOS_MARKER;
					rtpMixer->AudioChunkIn(stopChunk);
					rtpMixer->AudioChunkOut(tmpChunkRef);
					if(rtpMixerSecondary.get() != NULL)
					{
						rtpMixerSecondary->AudioChunkOut(tmpChunkSecondaryRef);
					}

					// Drain all remaining mixed chunks into the output files.
					while(tmpChunkRef.get())
					{
						outFileRef->WriteChunk(tmpChunkRef);
						numSamplesOut += tmpChunkRef->GetNumSamples();
						rtpMixer->AudioChunkOut(tmpChunkRef);
					}
					while(tmpChunkSecondaryRef.get())
					{
						outFileSecondaryRef->WriteChunk(tmpChunkSecondaryRef);
						rtpMixerSecondary->AudioChunkOut(tmpChunkSecondaryRef);
					}
				}

				fileRef->Close();
				outFileRef->Close();
				if(rtpMixerSecondary.get() != NULL)
				{
					outFileSecondaryRef->Close();
				}
				// NOTE(review): %u with size_t arguments relies on 32-bit size_t;
				// on LP64 this is technically a format mismatch — confirm CStdString::Format behavior.
				logMsg.Format("[%s] Th%s stop: num samples: s1:%u s2:%u out:%u queueSize:%d", trackingId, threadIdString, numSamplesS1, numSamplesS2, numSamplesOut, pBatchProcessing->m_audioTapeQueue.numElements());
				LOG4CXX_INFO(LOG.batchProcessingLog, logMsg);

				// Apply configured permissions/ownership to the output files.
				CStdString audioFilePath = audioTapeRef->m_audioOutputPath + "/" + audioTapeRef->GetPath();
				CStdString audioFileName;
				CStdString storageFilePath, storageFileName;
				if(CONFIG.m_audioOutputPathSecondary.length() > 3)
				{
					storageFilePath = CONFIG.m_audioOutputPathSecondary + "/" + audioTapeRef->GetPath();
					storageFileName = storageFilePath + "/" + audioTapeRef->GetIdentifier() + outFileRef->GetExtension();
				}

				audioFileName = audioFilePath + "/" + audioTapeRef->GetIdentifier() + outFileRef->GetExtension();
				if(CONFIG.m_audioFilePermissions) {
					if(FileSetPermissions(audioFileName, CONFIG.m_audioFilePermissions))
					{
						CStdString logMsg;

						logMsg.Format("Error setting permissions of %s to %o: %s", audioFileName.c_str(), CONFIG.m_audioFilePermissions, strerror(errno));
						LOG4CXX_ERROR(LOG.batchProcessingLog, "[" + trackingId + "] Th" + threadIdString + " " + logMsg);
					}
					if(storageFileName.length() > 5)
					{
						if(FileSetPermissions(storageFileName, CONFIG.m_audioFilePermissions))
						{
							CStdString logMsg;
							logMsg.Format("Error setting permissions of %s to %o: %s", storageFileName.c_str(), CONFIG.m_audioFilePermissions, strerror(errno));
							LOG4CXX_ERROR(LOG.batchProcessingLog, "[" + trackingId + "] Th" + threadIdString + " " + logMsg);
						}
					}
				}

				if(CONFIG.m_audioFileGroup.size() && CONFIG.m_audioFileOwner.size()) {
					if(FileSetOwnership(audioFileName, CONFIG.m_audioFileOwner, CONFIG.m_audioFileGroup))
					{
						logMsg.Format("Error setting ownership and group of %s to %s:%s: %s", audioFileName.c_str(), CONFIG.m_audioFileOwner, CONFIG.m_audioFileGroup, strerror(errno));
						LOG4CXX_ERROR(LOG.batchProcessingLog, "[" + trackingId + "] Th" + threadIdString + " " + logMsg);
					}
					if(storageFileName.length() > 5)
					{
						if(FileSetOwnership(storageFileName, CONFIG.m_audioFileOwner, CONFIG.m_audioFileGroup))
						{
							logMsg.Format("Error setting ownership and group of %s to %s:%s: %s", storageFileName.c_str(), CONFIG.m_audioFileOwner, CONFIG.m_audioFileGroup, strerror(errno));
							LOG4CXX_ERROR(LOG.batchProcessingLog, "[" + trackingId + "] Th" + threadIdString + " " + logMsg);
						}
					}
				}
				
				// Clean up the native capture file: delete on success, or
				// delete both files when transcoding yielded no samples.
				if(CONFIG.m_deleteNativeFile && numSamplesOut)
				{
					fileRef->Delete();
					LOG4CXX_INFO(LOG.batchProcessingLog, "[" + trackingId + "] Th" + threadIdString + " deleting native: " + audioTapeRef->GetIdentifier());
				}
				else if(CONFIG.m_deleteFailedCaptureFile)
				{
					fileRef->Delete();
					if(outFileRef.get()) 
					{
						outFileRef->Close();
						outFileRef->Delete();
					}
					LOG4CXX_INFO(LOG.batchProcessingLog, "[" + trackingId + "] Th" + threadIdString + " deleting native that could not be transcoded: " + audioTapeRef->GetIdentifier());
				}

				// Finished processing the tape, pass on to next processor
				if(numSamplesOut)
				{
					pBatchProcessing->RunNextProcessor(audioTapeRef);
				}
			}
		}
		catch (CStdString& e)
		{
			// On failure, close (and optionally delete) whatever was open.
			LOG4CXX_ERROR(LOG.batchProcessingLog, "[" + trackingId + "] Th" + threadIdString + " " + e);
			if(fileRef.get()) {fileRef->Close();}
			if(outFileRef.get()) {outFileRef->Close();}
			if(CONFIG.m_deleteFailedCaptureFile && fileRef.get() != NULL)
			{
				LOG4CXX_INFO(LOG.batchProcessingLog, "[" + trackingId + "] Th" + threadIdString + " deleting native and transcoded");
				if(fileRef.get()) {fileRef->Delete();}
				if(outFileRef.get()) {outFileRef->Delete();}
			}
		}
		//catch(...)
		//{
		//	LOG4CXX_ERROR(LOG.batchProcessingLog, CStdString("unknown exception"));
		//}
	}
	LOG4CXX_INFO(LOG.batchProcessingLog, CStdString("Exiting thread Th" + threadIdString));
}
Ejemplo n.º 4
0
// Accumulate PCM audio into whole Opus frames (PCM_SAMPLES_IN_FRAME samples
// each) and hand the complete frames to EncodeChunks.  Samples that do not
// fill a whole frame are parked in m_extraPcmBuf / m_extraStreoPcmBuf until
// the next chunk arrives.  Handles both mono buffers (m_pBuffer) and
// interleaving of per-channel buffers when two channels are present.
void OggOpusFile::WriteChunk(AudioChunkRef chunkRef)
{
	if(chunkRef.get() == NULL) return;

	if(chunkRef->GetDetails()->m_numBytes == 0) return;

    if(fout == NULL) return;

    int numSamples = chunkRef->GetNumSamples() + m_extraSamplesFromLastChunk;   // total number of samples, this chunk + bytes from previous chunk which was cut out because it wont make into frames (multiple of 160 samples)
    if(numSamples < PCM_SAMPLES_IN_FRAME)
    {
        //Too less samples, store them in extraPcmBuf
        if(chunkRef->m_numChannels > 0 && chan == 2)
        {
            // Two channels: interleave sample x of each channel into the
            // stereo carry-over buffer.
            short *wrPtr = NULL;
            wrPtr = (short*)&m_extraStreoPcmBuf[m_extraPcmBufLen];
            for(int x = 0; x < chunkRef->GetNumSamples(); x++)
            {
                for(int i = 0; i < chunkRef->m_numChannels; i++)
                {
                    *wrPtr++ = (short)*((short*)(chunkRef->m_pChannelAudio[i])+x);
                }
            }

            // 4 bytes per sample position: 2 channels x 2 bytes.
            m_extraPcmBufLen += chunkRef->GetNumSamples()*4;
        }
        else
        {
            memcpy(&m_extraPcmBuf[m_extraPcmBufLen], chunkRef->m_pBuffer, chunkRef->GetNumBytes());
            m_extraPcmBufLen += chunkRef->GetNumBytes();
        }
        // NOTE(review): numSamples already includes m_extraSamplesFromLastChunk
        // (see its computation above), so adding it again here appears to
        // double-count the carried-over samples — confirm against callers.
        m_extraSamplesFromLastChunk = numSamples + m_extraSamplesFromLastChunk;
        return;
    }

     int extraBytesPos = 0;  //The index of the chunkRef->m_pBuffer that will be put into the extra buffer. The bytes before this index will combine with bytes already in extra buffer into main buffer
    //Opus encoder take 160 samples at a time only. Frame=160samples
    int numFullFrames = numSamples/PCM_SAMPLES_IN_FRAME;     //number of complete frame(multiple of 160) from this chunk + previous chunks(if any)
    int numSamplesInFullFrames = numFullFrames*PCM_SAMPLES_IN_FRAME; //number of samples in all compelete frames, can be differ from total number of samples
    int numSamplesExtra = numSamples - numSamplesInFullFrames;  //number of samples did not make into complete frame, will combine with next chunk's samples
    int numBytesInFullFrames = numSamplesInFullFrames*2;
    int numBytesExtra = numSamplesExtra*2;
   
    extraBytesPos = chunkRef->GetNumBytes() - numBytesExtra;    //Index in chunk pcm payload bytes no longer fix to pcmBuf(multiple of 160 samples), from here samples will be put into extraBuf
    int extraSamplesPos = extraBytesPos/2;  //index in chunk pcm payload samples no longer fix to pcmBuf, from here samples will be put into extraBuf

    if(m_extraSamplesFromLastChunk > 0)
    {
        //This chunk has/accumulated more than x160 samples
        //If the ExtraPcmBuf has samples from previous chunk, put them into the first available space of main pcmBuf
        //Put the extra samples/bytes into extraBuf
        if(chunkRef->m_numChannels > 0 && chan == 2)
        {
            // Carry-over first, then interleave this chunk's frame-aligned part.
            memcpy(m_pcmBuf, m_extraStreoPcmBuf, m_extraPcmBufLen);
            short *wrPtr = NULL;
            wrPtr = (short*)&m_pcmBuf[m_extraPcmBufLen];
            for(int x = 0; x < extraSamplesPos; x++)
            {
                for(int i = 0; i < chunkRef->m_numChannels; i++)
                {
                    *wrPtr++ = (short)*((short*)(chunkRef->m_pChannelAudio[i])+x);
                }
            }
            memset(m_extraStreoPcmBuf, 0, m_extraPcmBufLen);
        }
        else
        {
            memcpy(m_pcmBuf, m_extraPcmBuf, m_extraPcmBufLen);
            memcpy(&m_pcmBuf[m_extraPcmBufLen], chunkRef->m_pBuffer, extraBytesPos); 
            memset(m_extraPcmBuf, 0, m_extraPcmBufLen); 
        }
       
        // Carry-over buffer has been consumed.
        m_extraPcmBufLen = 0;
        
    }
    else
    {
        //Put chunk pcm payload x160 samples to the main buffer, the rest will be put in extraPcmBuf in next step
        if(chunkRef->m_numChannels > 0 && chan == 2)
        {
            short *wrPtr = NULL;
            wrPtr = (short*)&m_pcmBuf;
            for(int x = 0; x < numBytesInFullFrames/2; x++)  //chunkRef->GetNumSamples()
            {
                for(int i = 0; i < chunkRef->m_numChannels; i++)
                {
                    *wrPtr++ = (short)*((short*)(chunkRef->m_pChannelAudio[i])+x);
                }
            }
        }
        else
        {
            memcpy(m_pcmBuf, chunkRef->m_pBuffer, numBytesInFullFrames); //would be chunkRef->GetNumBytes
        }       
    }
    if(numSamplesExtra > 0)
    {
        //Extra bytes/samples more than x160 samples will be put in extraPcmBuf
        if(chunkRef->m_numChannels > 0 && chan == 2)
        {
            short *wrPtr = NULL;
            wrPtr = (short*)&m_extraStreoPcmBuf;
            for(int x = extraSamplesPos; x < (extraSamplesPos+numBytesExtra/2); x++)
            {
                for(int i = 0; i < chunkRef->m_numChannels; i++)
                {
                    *wrPtr++ = (short)*((short*)(chunkRef->m_pChannelAudio[i])+x);
                }
            }
            // Stereo doubles the byte count of the carried-over samples.
            m_extraPcmBufLen += numBytesExtra*2;
        }
        else
        {
            memcpy(m_extraPcmBuf, (char*)chunkRef->m_pBuffer + extraBytesPos, numBytesExtra);     //(char*) for VC++ compilation
            m_extraPcmBufLen += numBytesExtra;
        }      
    }
    //update "last" number for next chunk to signal extraPcmBuf has chunk to move to main buffer before the arrival chunk payload
    m_extraSamplesFromLastChunk = numSamplesExtra;

    // Propagate the end-of-stream marker so the encoder can finalize.
    bool lastChunk = false;
    if(chunkRef->GetDetails()->m_marker == MEDIA_CHUNK_EOS_MARKER)
    {
        lastChunk = true;
    } 
    
    EncodeChunks(&m_pcmBuf, numSamplesInFullFrames, lastChunk); 
}
Ejemplo n.º 5
0
// Decode a chunk of iLBC audio into 16-bit linear PCM.  iLBC comes in two
// frame sizes: 50-byte frames decoding to 240 samples (30ms mode) and
// 38-byte frames decoding to 160 samples (20ms mode).  The frame size is
// inferred from the chunk length; chunks that fit neither size are rejected.
void IlbcToPcmFilter::AudioChunkIn(AudioChunkRef& inputAudioChunk)
{
	int r_samples = 0, fs = 0, i = 0, pos = 0, o_samples = 0, j = 0;
	float ilbcf[240];	// scratch buffer for one decoded frame (max 240 samples)

	m_outputAudioChunk.reset();

	if(inputAudioChunk.get() == NULL) {
		return;
	}

	if(inputAudioChunk->GetNumSamples() == 0) {
		return;
	}

	AudioChunkDetails outputDetails = *inputAudioChunk->GetDetails();

	if(SupportsInputRtpPayloadType(outputDetails.m_rtpPayloadType) == false)
	{
		return;
	}

	// NOTE: despite the name, r_samples is treated as a count of encoded
	// bytes below (compared against the 50/38-byte frame sizes).
	r_samples = inputAudioChunk->GetNumSamples();
	if((r_samples % 50) && (r_samples % 38)) {
		/* Strange iLBC frame that is not a multiple of 50 bytes
		 * (30ms frame) and neither is it a multiple of 38 bytes
		 * (20ms frame). We should probably log something? */
                LOG4CXX_ERROR(this->s_ilbclog, "Error, received iLBC frame is not a multiple of 50 or 38!");
		return;
	}

	// Pick the frame size (fs) and the total decoded sample count.
	// 50-byte frames are preferred when the length divides both.
	if(!(r_samples % 50)) {
		i = r_samples / 50;
		o_samples = i * 240;
		fs = 50;
#if 0
		LOG4CXX_INFO(this->s_ilbclog, "Frame size is 50 bytes");
#endif
	} else {
		i = r_samples / 38;
		o_samples = i * 160;
		fs = 38;
#if 0
		LOG4CXX_INFO(this->s_ilbclog, "Frame size is 38 bytes");
#endif
	}

	m_outputAudioChunk.reset(new AudioChunk());
	outputDetails.m_rtpPayloadType = -1;	// this filter consumes the RTP payload type
	outputDetails.m_encoding = PcmAudio;

	outputDetails.m_numBytes = (o_samples * 2);	// 2 bytes per PCM sample
	short* outputBuffer = (short*)m_outputAudioChunk->CreateBuffer(outputDetails);
	unsigned char* inputBuffer = (unsigned char*)inputAudioChunk->m_pBuffer;

	// Decode frame by frame, advancing fs input bytes per iteration and
	// appending 240 or 160 PCM samples to the output.
	for(i = 0; i < r_samples; i += fs) {
		if((pos+((fs == 50) ? 240 : 160)) <= o_samples) {
			if(fs == 50) {
				// 30ms mode decoder state (dec30).
				iLBC_decode(ilbcf, inputBuffer+i, &dec30, 1);

				for(j = 0; j < 240; j++) {
					outputBuffer[pos + j] = (short)ilbcf[j];
				}

				pos += 240;
			} else {
				// 20ms mode decoder state (dec20).
				iLBC_decode(ilbcf, inputBuffer+i, &dec20, 1);

				for(j = 0; j < 160; j++) {
                                        outputBuffer[pos + j] = (short)ilbcf[j];
                                }

                                pos += 160;
			}
		} else {
			/* This should ordinarily never happen.
			 * Log something? */
			CStdString logMsg;

			logMsg.Format("Strange, I ran out of space: pos=%d, o_samples=%d, r_samples=%d, i=%d, "
					"(pos+((fs == 50) ? 240 : 160))=%d",
					pos, o_samples, r_samples, i, (pos+((fs == 50) ? 240 : 160)));
			LOG4CXX_ERROR(this->s_ilbclog, logMsg);
			return;
		}
	}
}
Ejemplo n.º 6
0
// Worker-thread entry point for direction selection.  Pops tapes off the
// queue and, guided by the tape's audio-direction marks (and an optional
// default derived from the remote party's area code), blanks out the audio
// channel that must not be kept.  Works on a renamed .tmp copy of the mcf
// file and rewrites the original path, then passes the tape downstream.
void DirectionSelector::ThreadHandler(void *args)
{
	SetThreadName("orka:ds");

	CStdString debug;
	CStdString logMsg;

	// Obtain this thread's own DirectionSelector processor instance.
	CStdString processorName("DirectionSelector");
	TapeProcessorRef directionSelector = TapeProcessorRegistry::instance()->GetNewTapeProcessor(processorName);
	if(directionSelector.get() == NULL)
	{
		LOG4CXX_ERROR(LOG.directionSelectorLog, "Could not instanciate DirectionSelector");
		return;
	}
	DirectionSelector* pDirectionSelector = (DirectionSelector*)(directionSelector->Instanciate().get());

	pDirectionSelector->SetQueueSize(CONFIG.m_directionSelectorQueueSize);

	// Allocate a unique thread id for logging, under the processor mutex.
	int threadId = 0;
	{
		MutexSentinel sentinel(pDirectionSelector->m_mutex);
		threadId = pDirectionSelector->m_threadCount++;
	}
	CStdString threadIdString = IntToString(threadId);
	debug.Format("thread Th%s starting - queue size:%d", threadIdString, CONFIG.m_directionSelectorQueueSize);
	LOG4CXX_INFO(LOG.directionSelectorLog, debug);

	// Area code -> kept-side mapping used to derive a default direction.
	pDirectionSelector->LoadAreaCodesMap();

	bool stop = false;

	for(;stop == false;)
	{
		AudioFileRef fileRef;
		oreka::shared_ptr<MediaChunkFile> outFile (new MediaChunkFile());
		AudioTapeRef audioTapeRef;
		CStdString trackingId = "[no-trk]";
		int numSamplesOutv = 0;

		AudioChunkRef chunkRef;

		try
		{
			audioTapeRef = pDirectionSelector->m_audioTapeQueue.pop();
			if(audioTapeRef.get() == NULL)
			{
				// A null tape is the queue's way of waking us up; check
				// whether the daemon wants us to shut down.
				if(Daemon::Singleton()->IsStopping())
				{
					stop = true;
				}
				if(Daemon::Singleton()->GetShortLived())
				{
					Daemon::Singleton()->Stop();
				}
			}
			else
			{
			//Iterating through area codes map to check which side will be retain
				bool found = false;
				int foundPos = -1;
				CStdString side;
				std::map<CStdString, CStdString>::iterator it;
				for(it = pDirectionSelector->m_areaCodesMap.begin(); it!= pDirectionSelector->m_areaCodesMap.end() && found == false; it++)
				{
					//For now, we dont consider local party which has nothing to do with area codes
//					foundPos = audioTapeRef->m_localParty.find(it->first);
//					if(foundPos == 0)
//					{
//						side = it->second;
//						found = true;
//						break;
//					}

					// Match when the remote party starts with the area code.
					foundPos = audioTapeRef->m_remoteParty.find(it->first);
					if(foundPos == 0)
					{
						side = it->second;
						found = true;
						break;
					}
				}

				if(found == true)
				{
					// Prepend a default kept-side mark so chunks that predate
					// any API-reported mark still get a direction applied.
					AudioDirectionMarksRef defaultKeptSide(new AudioDirectionMarks());
					defaultKeptSide->m_timestamp = 1;		//make sure it the first event in the chain of event in term of timestamp
					if(audioTapeRef->m_audioDirectionMarks.size() > 0)
					{
						std::vector<AudioDirectionMarksRef>::iterator it;
						it = audioTapeRef->m_audioDirectionMarks.begin();
						defaultKeptSide->m_nextTimestampMark = (*it)->m_timestamp;	//next mark, will be the first api called, if any
					}

					defaultKeptSide->m_audioKeepDirectionEnum = (CaptureEvent::AudioKeepDirectionEnum)CaptureEvent::AudioKeepDirectionToEnum(side);
					audioTapeRef->m_audioDirectionMarks.insert(audioTapeRef->m_audioDirectionMarks.begin(),defaultKeptSide);

				}

				// Rename the original .mcf to a .tmp working copy; the
				// processed output is rewritten under the original name.
				CStdString mcfExt, tmpExt, tmpFileName, origFileName, origFileNameWoExt;
				mcfExt = ".mcf";
				tmpExt = ".tmp";

				audioTapeRef->SetExtension(mcfExt); 		//the real extension at this point
				origFileName = audioTapeRef->m_audioOutputPath + "/"+ audioTapeRef->GetFilename();
				origFileNameWoExt = audioTapeRef->m_audioOutputPath + "/" + audioTapeRef->GetPath() + audioTapeRef->GetIdentifier();
				//copy a temporary file for processing
				audioTapeRef->SetExtension(tmpExt);
				tmpFileName = audioTapeRef->m_audioOutputPath + "/"+ audioTapeRef->GetFilename();

				if(ACE_OS::rename((PCSTR)origFileName, (PCSTR)tmpFileName) != 0){
					LOG4CXX_ERROR(LOG.directionSelectorLog, "Can not rename audio file for processing");
				}

				fileRef = audioTapeRef->GetAudioFileRef();
				trackingId = audioTapeRef->m_trackingId;
				fileRef->SetFilename(tmpFileName);			//audioTapeRef->SetExtension(fullfilename) does not take affect on audiofileRef,

				fileRef->Open(AudioFile::READ);

				outFile->Open(origFileNameWoExt, AudioFile::WRITE);
				while(fileRef->ReadChunkMono(chunkRef))
				{
					AudioChunkDetails details = *chunkRef->GetDetails();

					// Find the direction mark whose time window contains this
					// chunk, and zero the channel that is not kept.
					std::vector<AudioDirectionMarksRef>::iterator it;
					for(it = audioTapeRef->m_audioDirectionMarks.begin(); it != audioTapeRef->m_audioDirectionMarks.end(); it++)
					{
						if(((*it)->m_timestamp == 0))
						{
							continue;
						}

						if((details.m_arrivalTimestamp >= (*it)->m_timestamp) && ((details.m_arrivalTimestamp < (*it)->m_nextTimestampMark) || ((*it)->m_nextTimestampMark == 0)))	//this audio chunk is in between 2 kept-direction reports marks
						{
							if(audioTapeRef->m_localSide == CaptureEvent::LocalSideSide1)
							{
								if(((*it)->m_audioKeepDirectionEnum == CaptureEvent::AudioKeepDirectionLocal) && (details.m_channel == 2))
								{
									memset(chunkRef->m_pBuffer, 0, details.m_numBytes);		//blank side 2
								}
								else if(((*it)->m_audioKeepDirectionEnum == CaptureEvent::AudioKeepDirectionRemote) && (details.m_channel == 1))
								{
									memset(chunkRef->m_pBuffer, 0, details.m_numBytes);		//blank side 1
								}
							}
							else if(audioTapeRef->m_localSide == CaptureEvent::LocalSideSide2)
							{
								// Mirror of the above for local side 2.
								if(((*it)->m_audioKeepDirectionEnum == CaptureEvent::AudioKeepDirectionLocal) && (details.m_channel == 1))
								{
									memset(chunkRef->m_pBuffer, 0, details.m_numBytes);
								}
								else if(((*it)->m_audioKeepDirectionEnum == CaptureEvent::AudioKeepDirectionRemote) && (details.m_channel == 2))
								{
									memset(chunkRef->m_pBuffer, 0, details.m_numBytes);
								}

							}

						}
					}

					outFile->WriteChunk(chunkRef);
				}
				outFile->Close();

				// Drop the temporary working copy and restore the tape's
				// metadata to point at the rewritten .mcf file.
				ACE_OS::unlink((PCSTR)tmpFileName);
				fileRef->Close();

				audioTapeRef->SetExtension(mcfExt);		//return back to mcf ext

				fileRef->SetFilename(origFileName);		// weird here, but it needs to be done, otherwise audioTapeRef will associate with tmp File
				pDirectionSelector->RunNextProcessor(audioTapeRef);
			}
		}

		catch (CStdString& e)
		{
			LOG4CXX_ERROR(LOG.directionSelectorLog, "[" + trackingId + "] Th" + threadIdString + " " + e);
			if(fileRef.get()) {fileRef->Close();}
		}
	}
	LOG4CXX_INFO(LOG.directionSelectorLog, CStdString("Exiting thread Th" + threadIdString));
}
Ejemplo n.º 7
0
void AudioGainFilter::AudioChunkIn(AudioChunkRef& inputAudioChunk)
{
	// Apply the configured gain (global and/or per-channel, in dB) to a
	// chunk of 16-bit linear PCM, clamping results to the short range.
	// Non-PCM chunks are passed through untouched (with a one-time warning).
	AudioChunkDetails chunkDetails;

	m_outputAudioChunk.reset();
	if(inputAudioChunk.get() == NULL) {
		return;
	}

	if(inputAudioChunk->GetNumSamples() == 0) {
		return;
	}

	chunkDetails = *inputAudioChunk->GetDetails();
	int sampleCount = inputAudioChunk->GetNumSamples();
	m_outputAudioChunk.reset(new AudioChunk());

	if(inputAudioChunk->GetEncoding() != PcmAudio)
	{
		// Gain only makes sense on linear PCM; warn once, then forward as-is.
		if(m_numEncodingErrors == 0)
		{
			CStdString logMsg;

			logMsg.Format("Unexpected encoding:%d expected:%d (PcmAudio), gain not applied", inputAudioChunk->GetEncoding(), PcmAudio);
			LOG4CXX_WARN(m_log, logMsg);
		}
		m_numEncodingErrors++;
		m_outputAudioChunk->SetBuffer(inputAudioChunk->m_pBuffer, chunkDetails);
		return;
	}

	short* dst = (short*)m_outputAudioChunk->CreateBuffer(chunkDetails);
	short* src = (short*)inputAudioChunk->m_pBuffer;

	// Convert the configured dB values to linear amplitude multipliers
	// once per chunk (loop-invariant).
	double gainAll = pow(10, (CONFIG.m_audioGainDb / 20.0));
	double gainCh1 = pow(10, (CONFIG.m_audioGainChannel1Db / 20.0));
	double gainCh2 = pow(10, (CONFIG.m_audioGainChannel2Db / 20.0));

	for(int idx = 0; idx < sampleCount; idx++) {
		int sample = src[idx];

		// Gains are applied sequentially, truncating to int after each
		// step, exactly as configured gains are meant to stack.
		if(CONFIG.m_audioGainDb != 0)
		{
			sample = sample * gainAll;
		}
		if(CONFIG.m_audioGainChannel1Db != 0 && chunkDetails.m_channel == 1)
		{
			sample = sample * gainCh1;
		}
		if(CONFIG.m_audioGainChannel2Db != 0 && chunkDetails.m_channel == 2)
		{
			sample = sample * gainCh2;
		}

		// Clamp to the 16-bit signed sample range.
		if(sample < -32768)
		{
			sample = -32768;
		}
		else if(sample > 32767)
		{
			sample = 32767;
		}

		dst[idx] = sample;
	}
}