/*
 * Run
 *	Smoother thread loop: pops scheduled RTP packets from the queue,
 *	multiplexes each one and then paces delivery by sleeping until the
 *	packet's scheduled sending time, measured relative to the previous
 *	marked (end-of-frame) packet.
 *	Returns 1 when the loop exits (i.e. when 'inited' becomes false).
 */
int RTPMultiplexerSmoother::Run()
{
	timeval prev;
	timespec wait;
	DWORD sendingTime = 0;

	//Initialize the reference time used for the pacing timeouts
	getUpdDifTime(&prev);

	Log(">RTPMultiplexerSmoother run\n");

	while(inited)
	{
		//Wait for a new packet to arrive in the queue (0 = no timeout)
		if (!queue.Wait(0))
			//Woken without data; re-check 'inited' and wait again
			continue;
		//Pop the next scheduled packet
		RTPPacketSched *sched = queue.Pop();
		//Queue may have been emptied/cancelled between Wait and Pop
		if (!sched)
			//Nothing to send; loop again
			continue;
		//Multiplex it (RTPPacketSched is handled as its RTPPacket base here)
		Multiplex(*(RTPPacket*)sched);
		//Scheduled sending offset for this packet, in ms relative to 'prev'
		sendingTime = sched->GetSendingTime();
		//Lock the mutex protecting the condition variable
		pthread_mutex_lock(&mutex);
		//Absolute deadline = prev + sendingTime
		calcAbsTimeout(&wait,&prev,sendingTime);
		//Sleep until the deadline, or earlier if the cond is signaled
		//NOTE(review): presumably the cond is signaled on shutdown so the
		//loop can re-check 'inited' promptly — confirm against End/Stop code
		pthread_cond_timedwait(&cond,&mutex,&wait);
		//Unlock
		pthread_mutex_unlock(&mutex);
		//If this was the last packet of a frame (RTP marker bit set)
		if (sched->GetMark())
			//Restart the pacing reference at the frame boundary
			getUpdDifTime(&prev);
		//Packet ownership is ours; release it
		delete(sched);
	}

	Log("<RTPMultiplexerSmoother run\n");

	return 1;
}
/*
 * onRTPPacket
 *	Forward an incoming RTP packet through our RTP session, rebasing its
 *	timestamp onto this endpoint's own timeline. Packets are dropped while
 *	not sending or when their media type does not match this endpoint.
 */
void RTPEndpoint::onRTPPacket(RTPPacket &packet)
{
	//Drop everything while we are not sending
	if (!sending)
		return;

	//Media type carried by this packet
	MediaFrame::Type media = packet.GetMedia();

	//Only forward packets of our own media type
	if (type!=media)
		return;

	//Has the payload codec changed since the last forwarded packet?
	if (packet.GetCodec()!=codec)
	{
		//Remember the new codec
		codec = packet.GetCodec();
		//Propagate it to the RTP session for the right media
		if (media==MediaFrame::Audio)
			RTPSession::SetSendingAudioCodec((AudioCodec::Type)codec);
		else if (media==MediaFrame::Video)
			RTPSession::SetSendingVideoCodec((VideoCodec::Type)codec);
		else if (media==MediaFrame::Text)
			RTPSession::SetSendingTextCodec((TextCodec::Type)codec);
	}

	//Wall-clock time elapsed since the previous packet (also updates prev)
	QWORD elapsed = getUpdDifTime(&prev);

	if (reseted)
	{
		//After a reset, advance our timeline by the elapsed wall-clock
		//time converted to clock-rate units
		timestamp += elapsed*freq/1000;
		//Reset handled
		reseted = false;
	} else {
		//Normal case: advance by the RTP timestamp delta of the source
		timestamp += packet.GetTimestamp()-prevts;
	}

	//Remember the last source RTP timestamp seen
	prevts = packet.GetTimestamp();

	//Send the packet with our rebased timestamp
	RTPSession::SendPacket(packet,timestamp);
}
int PipeTextInput::Init() { //Protegemos pthread_mutex_lock(&mutex); //Iniciamos inited = true; //Set first timestamp getUpdDifTime(&first); //Desprotegemos pthread_mutex_unlock(&mutex); return true; }
bool MediaBridgeSession::Init() { //We are started inited = true; //Wait for first Iframe waitVideo = true; //Init rtp rtpAudio.Init(); rtpVideo.Init(); rtpText.Init(); //Init smoother for video smoother.Init(&rtpVideo); //Set first timestamp getUpdDifTime(&first); return true; }
int RTPEndpoint::Init() { //Check if (inited) //Exit return false; //Start rtp session RTPSession::Init(); //Inited inited = true; //Reset reseted = true; //No time timestamp = 0; //Init time getUpdDifTime(&prev); }
int FLVEncoder::EncodeVideo() { timeval prev; //Start Log(">FLVEncoder encode video\n"); //Allocate media frame RTMPVideoFrame frame(0,262143); //Check codec switch(videoCodec) { case VideoCodec::SORENSON: //Ser Video codec frame.SetVideoCodec(RTMPVideoFrame::FLV1); break; case VideoCodec::H264: //Ser Video codec frame.SetVideoCodec(RTMPVideoFrame::AVC); //Set NAL type frame.SetAVCType(RTMPVideoFrame::AVCNALU); //No delay frame.SetAVCTS(0); break; default: return Error("-Wrong codec type %d\n",videoCodec); } //Create the encoder VideoEncoder *encoder = VideoCodecFactory::CreateEncoder(videoCodec,videoProperties); ///Set frame rate encoder->SetFrameRate(fps,bitrate,intra); //Set dimensions encoder->SetSize(width,height); //Start capturing videoInput->StartVideoCapture(width,height,fps); //The time of the first one gettimeofday(&prev,NULL); //No wait for first DWORD frameTime = 0; Log(">FLVEncoder encode vide\n"); //Mientras tengamos que capturar while(encodingVideo) { //Nos quedamos con el puntero antes de que lo cambien BYTE* pic=videoInput->GrabFrame(frameTime); //Ensure we are still encoding if (!encodingVideo) break; //Check pic if (!pic) continue; //Check if we need to send intra if (sendFPU) { //Set it encoder->FastPictureUpdate(); //Do not send anymore sendFPU = false; } //Encode next frame VideoFrame *encoded = encoder->EncodeFrame(pic,videoInput->GetBufferSize()); //Check if (!encoded) break; //Check size if (frame.GetMaxMediaSize()<encoded->GetLength()) { //Not enougth space Error("Not enought space to copy FLV encodec frame [frame:%d,encoded:%d",frame.GetMaxMediaSize(),encoded->GetLength()); //NExt continue; } //Check if (frameTime) { timespec ts; //Lock pthread_mutex_lock(&mutex); //Calculate timeout calcAbsTimeout(&ts,&prev,frameTime); //Wait next or stopped int canceled = !pthread_cond_timedwait(&cond,&mutex,&ts); //Unlock pthread_mutex_unlock(&mutex); //Check if we have been canceled if (canceled) //Exit break; } //Set sending time of previous frame 
getUpdDifTime(&prev); //Set timestamp encoded->SetTimestamp(getDifTime(&first)/1000); //Set next one frameTime = 1000/fps; //Set duration encoded->SetDuration(frameTime); //Get full frame frame.SetVideoFrame(encoded->GetData(),encoded->GetLength()); //Set buffer size frame.SetMediaSize(encoded->GetLength()); //Check type if (encoded->IsIntra()) //Set type frame.SetFrameType(RTMPVideoFrame::INTRA); else //Set type frame.SetFrameType(RTMPVideoFrame::INTER); //If we need desc but yet not have it if (!frameDesc && encoded->IsIntra() && videoCodec==VideoCodec::H264) { //Create new description AVCDescriptor desc; //Set values desc.SetConfigurationVersion(1); desc.SetAVCProfileIndication(0x42); desc.SetProfileCompatibility(0x80); desc.SetAVCLevelIndication(0x0C); desc.SetNALUnitLength(3); //Get encoded data BYTE *data = encoded->GetData(); //Get size DWORD size = encoded->GetLength(); //get from frame desc.AddParametersFromFrame(data,size); //Crete desc frame frameDesc = new RTMPVideoFrame(getDifTime(&first)/1000,desc); //Lock pthread_mutex_lock(&mutex); //Send it SendMediaFrame(frameDesc); //unlock pthread_mutex_unlock(&mutex); } //Lock pthread_mutex_lock(&mutex); //Set timestamp frame.SetTimestamp(encoded->GetTimeStamp()); //Publish it SendMediaFrame(&frame); //For each listener for(MediaFrameListeners::iterator it = mediaListeners.begin(); it!=mediaListeners.end(); ++it) //Send it (*it)->onMediaFrame(RTMPMediaStream::id,*encoded); //unlock pthread_mutex_unlock(&mutex); } Log("-FLVEncoder encode video end of loop\n"); //Stop the capture videoInput->StopVideoCapture(); //Check if (encoder) //Exit delete(encoder); Log("<FLVEncoder encode vide\n"); //Exit return 1; }
int FLVEncoder::StartEncoding() { Log(">Start encoding FLV [id:%d]\n",id); //Si estabamos mandando tenemos que parar if (encodingAudio || encodingVideo) //paramos StopEncoding(); //Set init time getUpdDifTime(&first); //Check if got old meta if (meta) //Delete delete(meta); //Create metadata object meta = new RTMPMetaData(0); //Set name meta->AddParam(new AMFString(L"@setDataFrame")); //Set name meta->AddParam(new AMFString(L"onMetaData")); //Create properties string AMFEcmaArray *prop = new AMFEcmaArray(); //Set audio properties switch(audioCodec) { case AudioCodec::SPEEX16: prop->AddProperty(L"audiocodecid" ,(float)RTMPAudioFrame::SPEEX ); //Number Audio codec ID used in the file (see E.4.2.1 for available SoundFormat values) prop->AddProperty(L"audiosamplerate" ,(float)16000.0 ); // Number Frequency at which the audio stream is replayed break; case AudioCodec::NELLY11: prop->AddProperty(L"audiocodecid" ,(float)RTMPAudioFrame::NELLY ); //Number Audio codec ID used in the file (see E.4.2.1 for available SoundFormat values) prop->AddProperty(L"audiosamplerate" ,(float)11025.0 ); // Number Frequency at which the audio stream is replayed break; case AudioCodec::NELLY8: prop->AddProperty(L"audiocodecid" ,(float)RTMPAudioFrame::NELLY8khz ); //Number Audio codec ID used in the file (see E.4.2.1 for available SoundFormat values) prop->AddProperty(L"audiosamplerate" ,(float)8000.0 ); // Number Frequency at which the audio stream is replayed break; } prop->AddProperty(L"stereo" ,new AMFBoolean(false) ); // Boolean Indicating stereo audio prop->AddProperty(L"audiodelay" ,0.0 ); // Number Delay introduced by the audio codec in seconds //Set video codecs if (videoCodec==VideoCodec::SORENSON) //Set number prop->AddProperty(L"videocodecid" ,(float)RTMPVideoFrame::FLV1 ); // Number Video codec ID used in the file (see E.4.3.1 for available CodecID values) else if (videoCodec==VideoCodec::H264) //AVC prop->AddProperty(L"videocodecid" ,new AMFString(L"avc1") ); // Number Video 
codec ID used in the file (see E.4.3.1 for available CodecID values) prop->AddProperty(L"framerate" ,(float)fps ); // Number Number of frames per second prop->AddProperty(L"height" ,(float)height ); // Number Height of the video in pixels prop->AddProperty(L"videodatarate" ,(float)bitrate ); // Number Video bit rate in kilobits per second prop->AddProperty(L"width" ,(float)width ); // Number Width of the video in pixels prop->AddProperty(L"canSeekToEnd" ,new AMFBoolean(false) ); // Boolean Indicating the last video frame is a key frame //Add param meta->AddParam(prop); //Send metadata SendMetaData(meta); //If got audio if (audioInput) { //We are enconding encodingAudio = 1; //Start thread createPriorityThread(&encodingAudioThread,startEncodingAudio,this,1); } //If got video if (videoInput) { //We are enconding encodingVideo = 1; //Start thread createPriorityThread(&encodingVideoThread,startEncodingVideo,this,1); } Log("<Stop encoding FLV [%d]\n",encodingAudio); return 1; }
/************************************** * ProcessRequest * Procesa una peticion *************************************/ int XmlHandler::ProcessRequest(TRequestInfo *req,TSession * const ses) { xmlrpc_env env; int inputLen; char *method; xmlrpc_value *params = NULL; timeval tv; Log(">ProcessRequest [uri:%s]\n",req->uri); //Init timer getUpdDifTime(&tv); //Creamos un enviroment xmlrpc_env_init(&env); //Si no es post if (req->method != m_post) //Mandamos error return XmlRpcServer::SendError(ses, 405, "Only POST allowed"); //Obtenemos el content type const char * content_type = RequestHeaderValue(ses, (char*)"content-type"); //Si no es el bueno if (content_type == NULL || strcmp(content_type, "text/xml") != 0) return XmlRpcServer::SendError(ses, 400, "Wrong content-type"); //Obtenemos el content length const char * content_length = RequestHeaderValue(ses, (char*)"content-length"); //Si no hay if (content_length == NULL) return XmlRpcServer::SendError(ses,411,"No content-length"); //Obtenemos el entero inputLen = atoi(content_length); //Tiene que ser mayor que cero if ((inputLen < 0) || (inputLen > xmlrpc_limit_get(XMLRPC_XML_SIZE_LIMIT_ID))) return XmlRpcServer::SendError(ses,400,"Size limit"); //Creamos un buffer para el body char * buffer = (char *) malloc(inputLen); if (!XmlRpcServer::GetBody(ses,buffer,inputLen)) { //LIberamos el buffer free(buffer); //Y salimos sin devolver nada Log("Operation timedout\n"); return 1; } //Get method name xmlrpc_parse_call(&env,buffer,inputLen,(const char**)&method,¶ms); Log("-ProcessRequest [method:%s]\n",method); //Free name and params free(method); xmlrpc_DECREF(params); //Generamos la respuesta xmlrpc_mem_block *output = xmlrpc_registry_process_call( &env, registry, NULL, buffer, inputLen ); //Si todo ha ido bien if (!env.fault_occurred) { //POnemos el content type ResponseContentType(ses, (char*)"text/xml; charset=\"utf-8\""); //Y mandamos la respuesta XmlRpcServer::SendResponse(ses,200,XMLRPC_MEMBLOCK_CONTENTS(char, output), 
XMLRPC_MEMBLOCK_SIZE(char, output)); } else
/**************************************** * RecVideo * Obtiene los packetes y los muestra *****************************************/ int VideoStream::RecVideo() { VideoDecoder* videoDecoder = NULL; VideoCodec::Type type; timeval before; timeval lastFPURequest; DWORD lostCount=0; DWORD frameTime = (DWORD)-1; DWORD lastSeq = RTPPacket::MaxExtSeqNum; bool waitIntra = false; Log(">RecVideo\n"); //Get now gettimeofday(&before,NULL); //Not sent FPU yet setZeroTime(&lastFPURequest); //Mientras tengamos que capturar while(receivingVideo) { //Get RTP packet RTPPacket* packet = rtp.GetPacket(); //Check if (!packet) //Next continue; //Get extended sequence number and timestamp DWORD seq = packet->GetExtSeqNum(); DWORD ts = packet->GetTimestamp(); //Get packet data BYTE* buffer = packet->GetMediaData(); DWORD size = packet->GetMediaLength(); //Get type type = (VideoCodec::Type)packet->GetCodec(); //Lost packets since last DWORD lost = 0; //If not first if (lastSeq!=RTPPacket::MaxExtSeqNum) //Calculate losts lost = seq-lastSeq-1; //Increase total lost count lostCount += lost; //Update last sequence number lastSeq = seq; //If lost some packets or still have not got an iframe if(lostCount || waitIntra) { //Check if we got listener and more than 1/2 second have elapsed from last request if (listener && getDifTime(&lastFPURequest)>minFPUPeriod) { //Debug Debug("-Requesting FPU lost %d\n",lostCount); //Reset count lostCount = 0; //Request it listener->onRequestFPU(); //Request also over rtp rtp.RequestFPU(); //Update time getUpdDifTime(&lastFPURequest); //Waiting for refresh waitIntra = true; } } //Check if it is a redundant packet if (type==VideoCodec::RED) { //Get redundant packet RTPRedundantPacket* red = (RTPRedundantPacket*)packet; //Get primary codec type = (VideoCodec::Type)red->GetPrimaryCodec(); //Check it is not ULPFEC redundant packet if (type==VideoCodec::ULPFEC) { //Delete packet delete(packet); //Skip continue; } //Update primary redundant payload buffer = 
red->GetPrimaryPayloadData(); size = red->GetPrimaryPayloadSize(); } //Check codecs if ((videoDecoder==NULL) || (type!=videoDecoder->type)) { //If we already got one if (videoDecoder!=NULL) //Delete it delete videoDecoder; //Create video decorder for codec videoDecoder = VideoCodecFactory::CreateDecoder(type); //Check if (videoDecoder==NULL) { Error("Error creando nuevo decodificador de video [%d]\n",type); //Delete packet delete(packet); //Next continue; } } //Check if we have lost the last packet from the previous frame by comparing both timestamps if (ts>frameTime) { Debug("-lost mark packet ts:%u frameTime:%u\n",ts,frameTime); //Try to decode what is in the buffer videoDecoder->DecodePacket(NULL,0,1,1); //Get picture BYTE *frame = videoDecoder->GetFrame(); DWORD width = videoDecoder->GetWidth(); DWORD height = videoDecoder->GetHeight(); //Check values if (frame && width && height) { //Set frame size videoOutput->SetVideoSize(width,height); //Check if muted if (!muted) //Send it videoOutput->NextFrame(frame); } } //Update frame time frameTime = ts; //Decode packet if(!videoDecoder->DecodePacket(buffer,size,lost,packet->GetMark())) { //Check if we got listener and more than 1/2 seconds have elapsed from last request if (listener && getDifTime(&lastFPURequest)>minFPUPeriod) { //Debug Log("-Requesting FPU decoder error\n"); //Reset count lostCount = 0; //Request it listener->onRequestFPU(); //Request also over rtp rtp.RequestFPU(); //Update time getUpdDifTime(&lastFPURequest); //Waiting for refresh waitIntra = true; } } //Check if it is the last packet of a frame if(packet->GetMark()) { if (videoDecoder->IsKeyFrame()) Debug("-Got Intra\n"); //No frame time yet for next frame frameTime = (DWORD)-1; //Get picture BYTE *frame = videoDecoder->GetFrame(); DWORD width = videoDecoder->GetWidth(); DWORD height = videoDecoder->GetHeight(); //Check values if (frame && width && height) { //Set frame size videoOutput->SetVideoSize(width,height); //Check if muted if (!muted) 
//Send it videoOutput->NextFrame(frame); } //Check if we got the waiting refresh if (waitIntra && videoDecoder->IsKeyFrame()) //Do not wait anymore waitIntra = false; } //Delete packet delete(packet); } //Delete encoder delete videoDecoder; Log("<RecVideo\n"); }
/******************************************* * SendVideo * Capturamos el video y lo mandamos *******************************************/ int VideoStream::SendVideo() { timeval prev; timeval lastFPU; DWORD num = 0; QWORD overslept = 0; Acumulator bitrateAcu(1000); Acumulator fpsAcu(1000); Log(">SendVideo [width:%d,size:%d,bitrate:%d,fps:%d,intra:%d]\n",videoGrabWidth,videoGrabHeight,videoBitrate,videoFPS,videoIntraPeriod); //Creamos el encoder VideoEncoder* videoEncoder = VideoCodecFactory::CreateEncoder(videoCodec,videoProperties); //Comprobamos que se haya creado correctamente if (videoEncoder == NULL) //error return Error("Can't create video encoder\n"); //Comrpobamos que tengamos video de entrada if (videoInput == NULL) return Error("No video input"); //Iniciamos el tama�o del video if (!videoInput->StartVideoCapture(videoGrabWidth,videoGrabHeight,videoFPS)) return Error("Couldn't set video capture\n"); //Start at 80% int current = videoBitrate*0.8; //Send at higher bitrate first frame, but skip frames after that so sending bitrate is kept videoEncoder->SetFrameRate(videoFPS,current*5,videoIntraPeriod); //No wait for first QWORD frameTime = 0; //Iniciamos el tamama�o del encoder videoEncoder->SetSize(videoGrabWidth,videoGrabHeight); //The time of the previos one gettimeofday(&prev,NULL); //Fist FPU gettimeofday(&lastFPU,NULL); //Started Log("-Sending video\n"); //Mientras tengamos que capturar while(sendingVideo) { //Nos quedamos con el puntero antes de que lo cambien BYTE *pic = videoInput->GrabFrame(frameTime/1000); //Check picture if (!pic) //Exit continue; //Check if we need to send intra if (sendFPU) { //Do not send anymore sendFPU = false; //Do not send if we just send one (100ms) if (getDifTime(&lastFPU)/1000>minFPUPeriod) { //Send at higher bitrate first frame, but skip frames after that so sending bitrate is kept videoEncoder->SetFrameRate(videoFPS,current*5,videoIntraPeriod); //Reste frametime so it is calcualted afterwards frameTime = 0; //Set it 
videoEncoder->FastPictureUpdate(); //Update last FPU getUpdDifTime(&lastFPU); } } //Calculate target bitrate int target = current; //Check temporal limits for estimations if (bitrateAcu.IsInWindow()) { //Get real sent bitrate during last second and convert to kbits DWORD instant = bitrateAcu.GetInstantAvg()/1000; //If we are in quarentine if (videoBitrateLimitCount) //Limit sending bitrate target = videoBitrateLimit; //Check if sending below limits else if (instant<videoBitrate) //Increase a 8% each second or fps kbps target += (DWORD)(target*0.08/videoFPS)+1; } //Check target bitrate agains max conf bitrate if (target>videoBitrate*1.2) //Set limit to max bitrate allowing a 20% overflow so instant bitrate can get closer to target target = videoBitrate*1.2; //Check limits counter if (videoBitrateLimitCount>0) //One frame less of limit videoBitrateLimitCount--; //Check if we have a new bitrate if (target && target!=current) { //Reset bitrate videoEncoder->SetFrameRate(videoFPS,target,videoIntraPeriod); //Upate current current = target; } //Procesamos el frame VideoFrame *videoFrame = videoEncoder->EncodeFrame(pic,videoInput->GetBufferSize()); //If was failed if (!videoFrame) //Next continue; //Increase frame counter fpsAcu.Update(getTime()/1000,1); //Check if (frameTime) { timespec ts; //Lock pthread_mutex_lock(&mutex); //Calculate slept time QWORD sleep = frameTime; //Remove extra sleep from prev if (overslept<sleep) //Remove it sleep -= overslept; else //Do not overflow sleep = 1; //Calculate timeout calcAbsTimeoutNS(&ts,&prev,sleep); //Wait next or stopped int canceled = !pthread_cond_timedwait(&cond,&mutex,&ts); //Unlock pthread_mutex_unlock(&mutex); //Check if we have been canceled if (canceled) //Exit break; //Get differencence QWORD diff = getDifTime(&prev); //If it is biffer if (diff>frameTime) //Get what we have slept more overslept = diff-frameTime; else //No oversletp (shoulddn't be possible) overslept = 0; } //Increase frame counter 
fpsAcu.Update(getTime()/1000,1); //If first if (!frameTime) { //Set frame time, slower frameTime = 5*1000000/videoFPS; //Restore bitrate videoEncoder->SetFrameRate(videoFPS,current,videoIntraPeriod); } else { //Set frame time frameTime = 1000000/videoFPS; } //Add frame size in bits to bitrate calculator bitrateAcu.Update(getDifTime(&ini)/1000,videoFrame->GetLength()*8); //Set frame timestamp videoFrame->SetTimestamp(getDifTime(&ini)/1000); //Check if we have mediaListener if (mediaListener) //Call it mediaListener->onMediaFrame(*videoFrame); //Set sending time of previous frame getUpdDifTime(&prev); //Calculate sending times based on bitrate DWORD sendingTime = videoFrame->GetLength()*8/current; //Adjust to maximum time if (sendingTime>frameTime/1000) //Cap it sendingTime = frameTime/1000; //If it was a I frame if (videoFrame->IsIntra()) //Clean rtp rtx buffer rtp.FlushRTXPackets(); //Send it smoothly smoother.SendFrame(videoFrame,sendingTime); //Dump statistics if (num && ((num%videoFPS*10)==0)) { Debug("-Send bitrate target=%d current=%d avg=%llf rate=[%llf,%llf] fps=[%llf,%llf] limit=%d\n",target,current,bitrateAcu.GetInstantAvg()/1000,bitrateAcu.GetMinAvg()/1000,bitrateAcu.GetMaxAvg()/1000,fpsAcu.GetMinAvg(),fpsAcu.GetMaxAvg(),videoBitrateLimit); bitrateAcu.ResetMinMax(); fpsAcu.ResetMinMax(); } num++; } Log("-SendVideo out of loop\n"); //Terminamos de capturar videoInput->StopVideoCapture(); //Check if (videoEncoder) //Borramos el encoder delete videoEncoder; //Salimos Log("<SendVideo [%d]\n",sendingVideo); return 0; }
/***********************************
* MixAudio
*	Mixing thread loop: every ~10 ms sums all audio sources into a common
*	mix buffer, then hands each participant the mix minus its own signal.
************************************/
int AudioMixer::MixAudio()
{
	timeval tv;
	Audios::iterator it;
	DWORD step = 10;	//Mixing period, in milliseconds
	QWORD curr = 0;		//Current loop time, ms since 'tv'
	QWORD prev = 0;		//Previous loop time, ms since 'tv'

	//Log
	Log(">MixAudio\n");

	//Initialize the reference timestamp
	getUpdDifTime(&tv);

	//While we are mixing
	while(mixingAudio)
	{
		//Zero the mix buffer
		memset(mixer_buffer, 0, MIXER_BUFFER_SIZE*sizeof(WORD));

		//Sleep until the next period, discounting the time already spent
		//NOTE(review): if an iteration takes longer than 'step' ms this
		//argument goes negative — verify msleep() clamps such values
		msleep(step*1000-(getDifTime(&tv)-curr*1000));

		//Get new time, ms since start
		curr = getDifTime(&tv)/1000;

		//Time elapsed since the previous iteration
		QWORD diff = curr-prev;

		//Update
		prev = curr;

		//Number of samples to mix at 8 kHz (8 samples per ms)
		DWORD numSamples = diff*8;

		//Block the participant list against concurrent changes
		lstAudiosUse.WaitUnusedAndLock();

		//Clamp to the mix buffer capacity
		if (numSamples>MIXER_BUFFER_SIZE)
			//Set it at most (should never happen)
			numSamples = MIXER_BUFFER_SIZE;

		//First pass: iterate through the audio inputs and accumulate the sum of all streams
		for(it = lstAudios.begin(); it != lstAudios.end(); it++)
		{
			//Get the source
			AudioSource *audio = it->second;
			//And its audio buffer
			WORD *buffer = audio->buffer;
			//Get the available samples from the fifo (may be fewer than requested)
			audio->len = audio->output->GetSamples(buffer,numSamples);
			//Mix the audio into the common buffer
			for(int i = 0; i < audio->len; i++)
				//MIX
				mixer_buffer[i] += buffer[i];
		}

		//Second pass: calculate each stream's output
		for(it = lstAudios.begin(); it != lstAudios.end(); it++)
		{
			//Get the source
			AudioSource *audio = it->second;
			//Check audio
			if (!audio)
				//Next
				continue;
			//And its audio buffer
			WORD *buffer = audio->buffer;
			//Compute the result in place
			for(int i=0; i<audio->len; i++)
				//We don't want to hear our own signal
				buffer[i] = mixer_buffer[i] - buffer[i];
			//If this source contributed fewer samples than the mix length
			if (audio->len<numSamples)
				//Fill the tail straight from the mix (nothing to subtract)
				memcpy(((BYTE*)buffer)+audio->len*sizeof(WORD),((BYTE*)mixer_buffer)+audio->len*sizeof(WORD),(numSamples-audio->len)*sizeof(WORD));
			//Deliver the output to this participant
			audio->input->PutSamples(buffer,numSamples);
		}

		//Unblock the list
		lstAudiosUse.Unlock();
	}

	//Log
	Log("<MixAudio\n");

	return 1;
}
/*
 * PlayLoop
 *	Streaming loop: schedules the audio, video and text tracks by always
 *	serving whichever has the earliest next-frame time, sleeping on the
 *	condition variable until that time is reached. Starts from the file
 *	beginning or from the 'seeked' position. Calls listener->onEnd() when
 *	all tracks are exhausted (unless the loop was stopped externally).
 *	Returns 1.
 */
int MP4Streamer::PlayLoop()
{
	QWORD audioNext = MP4_INVALID_TIMESTAMP;
	QWORD videoNext = MP4_INVALID_TIMESTAMP;
	QWORD textNext = MP4_INVALID_TIMESTAMP;
	timeval tv;
	timespec ts;

	Log(">MP4Streamer::PlayLoop()\n");

	//If we are playing from the beginning
	if (!seeked)
	{
		// If we have audio
		if (audio)
		{
			//Reset the track reader
			audio->Reset();
			//Get time of the first audio frame
			audioNext = audio->GetNextFrameTime();
		}
		// If we have video
		if (video)
		{
			//Reset the track reader
			video->Reset();
			//Get time of the first video frame
			videoNext = video->GetNextFrameTime();
		}
		// If we have text
		if (text)
		{
			//Reset the track reader
			text->Reset();
			//Get the next frame time
			textNext = text->GetNextFrameTime();
		}
	} else {
		//If we have video
		if (video)
			//Seek to the nearest sync (I) frame
			videoNext = video->SeekNearestSyncFrame(seeked);
		//If we have audio
		if (audio)
			//Seek to the nearest frame
			audioNext = audio->Seek(seeked);
		//If we have text
		if (text)
			//Seek to the nearest frame
			textNext = text->Seek(seeked);
	}

	//If the first text frame is not sent immediately
	if (text && textNext!=seeked)
		//Send the previous text subtitle so something is displayed
		text->ReadPrevious(seeked,listener);

	// Record the start time; all waits are relative to it
	getUpdDifTime(&tv);

	//Lock (the condvar waits below release it while sleeping)
	pthread_mutex_lock(&mutex);

	//Time counter
	QWORD t = 0;

	// Run until stopped or until all three tracks are exhausted
	// (a track with no more frames keeps MP4_INVALID_TIMESTAMP)
	while ( opened && playing && (!(audioNext==MP4_INVALID_TIMESTAMP && videoNext==MP4_INVALID_TIMESTAMP && textNext==MP4_INVALID_TIMESTAMP)))
	{
		// t = minimum of the three next-frame times
		if (audioNext<videoNext)
		{
			if (audioNext<textNext)
				t = audioNext;
			else
				t = textNext;
		} else {
			if (videoNext<textNext)
				t = videoNext;
			else
				t = textNext;
		}

		// Current position on the media timeline (ms since start + seek offset)
		QWORD now = (QWORD)getDifTime(&tv)/1000+seeked;

		//If the next frame is still in the future
		if (t>now)
		{
			//Absolute deadline for the next frame
			calcAbsTimeout(&ts,&tv,t-seeked);
			//Sleep until then, or until signaled (e.g. stop/seek control)
			pthread_cond_timedwait(&cond,&mutex,&ts);
			//Re-evaluate everything after waking
			continue;
		}

		// Serve every track whose time has come; Read() returns the
		// track's next frame time (invalid timestamp at end of track).
		// A track with an invalid (max) time can never satisfy <=t here,
		// so its NULL reader pointer is never dereferenced.
		if (audioNext<=t)
			audioNext = audio->Read(listener);
		// or video
		if (videoNext<=t)
			videoNext = video->Read(listener);
		// or text
		if (textNext<=t)
			textNext = text->Read(listener);
	}

	Log("-MP4Streamer::PlayLoop()\n");

	//Check if we were stopped externally (vs reaching end of file)
	bool stoped = !opened || !playing;

	//Not playing anymore
	playing = 0;

	//Unlock
	pthread_mutex_unlock(&mutex);

	//Only a natural end of file is reported to the listener
	if (!stoped && listener)
		//End of file
		listener->onEnd();

	Log("<MP4Streamer::PlayLoop()\n");

	return 1;
}