void ExternalOutput::writeAudioData(char* buf, int len) {
  RTPHeader* head = reinterpret_cast<RTPHeader*>(buf);
  if (initTimeAudio_ == -1) {
    initTimeAudio_ = head->getTimestamp();
  }

  timeval time;
  gettimeofday(&time, NULL);
  unsigned long long millis = (time.tv_sec * 1000) + (time.tv_usec / 1000);
  if (millis - lastFullIntraFrameRequest_ > FIR_INTERVAL_MS) {
    this->sendFirPacket();
    lastFullIntraFrameRequest_ = millis;
  }

  // Figure out our audio codec.
  if (context_->oformat->audio_codec == AV_CODEC_ID_NONE) {
    // We don't need any other payload at this time.
    if (head->getPayloadType() == PCMU_8000_PT) {
      context_->oformat->audio_codec = AV_CODEC_ID_PCM_MULAW;
    } else if (head->getPayloadType() == OPUS_48000_PT) {
      context_->oformat->audio_codec = AV_CODEC_ID_OPUS;
    }
  }

  // Check if we can initialize our context.
  this->initContext();

  if (audio_stream_ == NULL) {
    // Not yet.
    return;
  }

  int ret = inputProcessor_->unpackageAudio(reinterpret_cast<unsigned char*>(buf), len,
                                            unpackagedAudioBuffer_);
  if (ret <= 0)
    return;

  // ELOG_DEBUG("Writing audio frame %d with timestamp %u, input timebase: %d/%d, target timebase: %d/%d",
  //            head->getSeqNumber(), head->getTimestamp(),
  //            audio_stream_->codec->time_base.num, audio_stream_->codec->time_base.den,   // timebase we requested
  //            audio_stream_->time_base.num, audio_stream_->time_base.den);                // actual timebase

  long long currentTimestamp = head->getTimestamp();
  if (currentTimestamp - initTimeAudio_ < 0) {
    // We wrapped. Add 2^32 to correct this. We only handle a single wrap-around,
    // since that's 13 hours of recording, minimum.
    currentTimestamp += 0xFFFFFFFF;
  }

  AVPacket avpkt;
  av_init_packet(&avpkt);
  avpkt.data = unpackagedAudioBuffer_;
  avpkt.size = ret;
  avpkt.pts = (currentTimestamp - initTimeAudio_) / (audio_stream_->codec->time_base.den / audio_stream_->time_base.den);
  avpkt.stream_index = 1;
  av_write_frame(context_, &avpkt);
  av_free_packet(&avpkt);
}
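// The pts computation above subtracts the first RTP timestamp seen, corrects a single
// 32-bit wrap, and rescales from the RTP clock to the container stream's timebase.
// Below is a minimal, standalone sketch of that arithmetic (not the Licode code); it
// assumes the codec timebase denominator equals the RTP clock rate (48000 for Opus,
// 8000 for PCMU), and the helper name rtpToPts is hypothetical.
#include <cstdint>
#include <iostream>

int64_t rtpToPts(uint32_t rtpTs, int64_t initTs, int64_t clockRate, int64_t timeBaseDen) {
  int64_t delta = static_cast<int64_t>(rtpTs) - initTs;
  if (delta < 0) {
    // The 32-bit RTP timestamp wrapped; add 2^32 once. A single wrap is hours of
    // recording at these clock rates, so one correction is enough for this sketch.
    delta += 0x100000000LL;
  }
  // Integer rescale, mirroring the division done in writeAudioData.
  return delta / (clockRate / timeBaseDen);
}

int main() {
  int64_t init = 0xFFFFFF00;      // first RTP timestamp we saw, just before the wrap
  uint32_t current = 0x00000100;  // timestamp observed after the 32-bit wrap
  // 512 ticks at 48 kHz in a 1/1000 timebase: prints 10 (about 10 ms of audio).
  std::cout << rtpToPts(current, init, 48000, 1000) << std::endl;
  return 0;
}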
int InputProcessor::unpackageVideo(unsigned char* inBuff, int inBuffLen, unsigned char* outBuff,
                                   int* gotFrame, int* estimatedFps, double* videoTs, bool* KFrame) {
  if (videoUnpackager == 0) {
    ELOG_DEBUG("Unpackager not correctly initialized");
    return -1;
  }

  bool tempKeyFrame2 = false;
  int inBuffOffset = 0;
  *gotFrame = 0;
  RTPHeader* head = reinterpret_cast<RTPHeader*>(inBuff);

  // if (head->getSSRC() != 55543 /*&& head->payloadtype != 101*/) {
  //   return -1;
  // }
  if (head->getPayloadType() != 100) {
    return -1;
  }

  int l = inBuffLen - head->getHeaderLength();
  inBuffOffset += head->getHeaderLength();

  erizo::RTPPayloadVP8* parsed = pars.parseVP8((unsigned char*) &inBuff[inBuffOffset], l, &tempKeyFrame2);
  memcpy(outBuff, parsed->data, parsed->dataLength);

  if (tempKeyFrame2)
    tempKeyFrame = true;
  // if (tempKeyFrame) {
  //   ELOG_WARN("GOT KEYFRAME");
  // }
  *KFrame = tempKeyFrame;

  if (head->getMarker()) {
    *estimatedFps = 0;
    if (lastVideoTs_) {
      *estimatedFps = 90000 / (head->getTimestamp() - lastVideoTs_);
    }
    lastVideoTs_ = head->getTimestamp();
    *videoTs = lastVideoTs_;
    *gotFrame = 1;
    tempKeyFrame = false;
  }

  int ret = parsed->dataLength;
  delete parsed;
  return ret;
}
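// The frame-rate estimate above relies on VP8 over RTP using a 90 kHz clock: the
// timestamp difference between two consecutive marker packets (frame boundaries)
// gives 90000 / delta frames per second. A standalone sketch of that calculation
// (not erizo code; the function name estimateFps is hypothetical):
#include <cstdint>
#include <iostream>

int estimateFps(uint32_t currentTs, uint32_t previousTs) {
  // Unsigned subtraction also yields the right delta across a 32-bit wrap.
  uint32_t delta = currentTs - previousTs;
  if (delta == 0) {
    // Guard against a zero delta; the code above only guards the very first frame
    // via lastVideoTs_.
    return 0;
  }
  return 90000 / static_cast<int>(delta);
}

int main() {
  // Two frames 3000 ticks apart on the 90 kHz clock: prints 30 (fps).
  std::cout << estimateFps(903000, 900000) << std::endl;
  return 0;
}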
void ExternalOutput::writeVideoData(char* buf, int len) {
  RTPHeader* head = reinterpret_cast<RTPHeader*>(buf);
  if (head->getPayloadType() == RED_90000_PT) {
    int totalLength = head->getHeaderLength();
    int rtpHeaderLength = totalLength;
    redheader* redhead = (redheader*) (buf + totalLength);
    if (redhead->payloadtype == VP8_90000_PT) {
      while (redhead->follow) {
        totalLength += redhead->getLength() + 4;  // RED header
        redhead = (redheader*) (buf + totalLength);
      }
      // Parse the RED packet into a VP8 packet.
      // Copy the RTP header.
      memcpy(deliverMediaBuffer_, buf, rtpHeaderLength);
      // Copy the payload data.
      memcpy(deliverMediaBuffer_ + totalLength, buf + totalLength + 1, len - totalLength - 1);
      // Copy the payload type.
      RTPHeader* mediahead = reinterpret_cast<RTPHeader*>(deliverMediaBuffer_);
      mediahead->setPayloadType(redhead->payloadtype);
      buf = reinterpret_cast<char*>(deliverMediaBuffer_);
      len = len - 1 - totalLength + rtpHeaderLength;
    }
  }

  if (initTimeVideo_ == -1) {
    initTimeVideo_ = head->getTimestamp();
  }

  int gotUnpackagedFrame = false;
  int ret = inputProcessor_->unpackageVideo(reinterpret_cast<unsigned char*>(buf), len,
                                            unpackagedBufferpart_, &gotUnpackagedFrame);
  if (ret < 0)
    return;

  this->initContext();

  if (video_stream_ == NULL) {
    // Could not initialize our context yet.
    return;
  }

  unpackagedSize_ += ret;
  unpackagedBufferpart_ += ret;

  if (gotUnpackagedFrame) {
    unpackagedBufferpart_ -= unpackagedSize_;

    // ELOG_DEBUG("Writing video frame %d with timestamp %u, length %d, input timebase: %d/%d, target timebase: %d/%d",
    //            head->getSeqNumber(), head->getTimestamp(), unpackagedSize_,
    //            video_stream_->codec->time_base.num, video_stream_->codec->time_base.den,   // timebase we requested
    //            video_stream_->time_base.num, video_stream_->time_base.den);                // actual timebase

    long long currentTimestamp = head->getTimestamp();
    if (currentTimestamp - initTimeVideo_ < 0) {
      // We wrapped. Add 2^32 to correct this. We only handle a single wrap-around,
      // since that's ~13 hours of recording, minimum.
      currentTimestamp += 0xFFFFFFFF;
    }

    // All of our video offerings use a 90 kHz clock.
    long long timestampToWrite = (currentTimestamp - initTimeVideo_) / (90000 / video_stream_->time_base.den);

    AVPacket avpkt;
    av_init_packet(&avpkt);
    avpkt.data = unpackagedBufferpart_;
    avpkt.size = unpackagedSize_;
    avpkt.pts = timestampToWrite;
    avpkt.stream_index = 0;
    av_write_frame(context_, &avpkt);
    av_free_packet(&avpkt);

    unpackagedSize_ = 0;
    unpackagedBufferpart_ = unpackagedBuffer_;
  }
}
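// The RED branch above inspects RFC 2198 block headers: the first bit of each header
// byte is a "follow" flag and the remaining 7 bits are the block's payload type. When
// follow is 0 the header is a single byte and the primary payload starts right after
// it, which is why the copy above skips totalLength + 1 bytes. A standalone sketch of
// reading that one-byte header (the struct below is an assumption for illustration,
// not the erizo redheader definition):
#include <cstdint>
#include <cstdio>

struct RedBlockHeader {
  uint8_t raw;
  bool follow() const { return (raw & 0x80) != 0; }    // more blocks follow?
  uint8_t payloadType() const { return raw & 0x7F; }   // payload type of this block
};

int main() {
  // A primary-only RED block wrapping VP8 (payload type 100 in this pipeline).
  RedBlockHeader h{0x64};
  std::printf("follow=%d pt=%u\n", h.follow() ? 1 : 0,
              static_cast<unsigned>(h.payloadType()));  // prints: follow=0 pt=100
  return 0;
}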