void RTMPSession::sendSetChunkSize(int32_t chunkSize) {
    m_jobQueue.enqueue([&, chunkSize] {
        int streamId = 0;
        std::vector<uint8_t> buff;

        put_byte(buff, 2);                                      // chunk stream ID 2
        put_be24(buff, 0);                                      // ts
        put_be24(buff, 4);                                      // size (4 bytes)
        put_byte(buff, RTMP_PT_CHUNK_SIZE);                     // chunk type
        put_buff(buff, (uint8_t*)&streamId, sizeof(int32_t));   // msg stream id is little-endian

        put_be32(buff, chunkSize);

        write(&buff[0], buff.size());

        m_outChunkSize = chunkSize;
    });
}
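// Note: the C++ snippets in this collection rely on a small family of byte-writing
// helpers (put_byte, put_be16, put_be24, put_be32, put_buff) whose definitions are
// not shown here. The sketch below is an assumption inferred from the call sites,
// not the project's actual implementation: each helper appends to the vector, with
// multi-byte values written in big-endian (network) order, while put_buff copies
// raw bytes as-is (which is how the little-endian message stream id is emitted).

#include <cstddef>
#include <cstdint>
#include <vector>

static inline void put_byte(std::vector<uint8_t>& buf, uint8_t val) {
    buf.push_back(val);
}
static inline void put_be16(std::vector<uint8_t>& buf, uint16_t val) {
    buf.push_back(uint8_t(val >> 8));
    buf.push_back(uint8_t(val));
}
static inline void put_be24(std::vector<uint8_t>& buf, uint32_t val) {
    buf.push_back(uint8_t(val >> 16));
    buf.push_back(uint8_t(val >> 8));
    buf.push_back(uint8_t(val));
}
static inline void put_be32(std::vector<uint8_t>& buf, uint32_t val) {
    buf.push_back(uint8_t(val >> 24));
    buf.push_back(uint8_t(val >> 16));
    buf.push_back(uint8_t(val >> 8));
    buf.push_back(uint8_t(val));
}
static inline void put_buff(std::vector<uint8_t>& buf, const uint8_t* src, size_t len) {
    buf.insert(buf.end(), src, src + len); // raw copy, no byte swapping
}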
static int gxf_write_media_preamble(ByteIOContext *pb, GXFContext *ctx, AVPacket *pkt, int size)
{
    GXFStreamContext *sc = &ctx->streams[pkt->stream_index];
    int64_t dts = av_rescale_rnd(pkt->dts, ctx->sample_rate, sc->codec->time_base.den, AV_ROUND_UP);

    put_byte(pb, sc->media_type);
    put_byte(pb, sc->index);
    put_be32(pb, dts);
    if (sc->codec->codec_type == CODEC_TYPE_AUDIO) {
        put_be16(pb, 0);
        put_be16(pb, size / 2);
    } else if (sc->codec->codec_id == CODEC_ID_MPEG2VIDEO) {
        int frame_type = gxf_parse_mpeg_frame(sc, pkt->data, pkt->size);
        if (frame_type == FF_I_TYPE) {
            put_byte(pb, 0x0d);
            sc->iframes++;
        } else if (frame_type == FF_B_TYPE) {
            put_byte(pb, 0x0f);
            sc->bframes++;
        } else {
            put_byte(pb, 0x0e);
            sc->pframes++;
        }
        put_be24(pb, size);
    } else if (sc->codec->codec_id == CODEC_ID_DVVIDEO) {
        put_byte(pb, size / 4096);
        put_be24(pb, 0);
    } else
        put_be32(pb, size);
    put_be32(pb, dts);
    put_byte(pb, 1); /* flags */
    put_byte(pb, 0); /* reserved */
    return 16;
}
static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    ByteIOContext *pb = &s->pb;
    AVCodecContext *enc = s->streams[pkt->stream_index]->codec;
    FLVContext *flv = s->priv_data;
    int size= pkt->size;
    int flags;

//    av_log(s, AV_LOG_DEBUG, "type:%d pts: %"PRId64" size:%d\n", enc->codec_type, timestamp, size);

    if (enc->codec_type == CODEC_TYPE_VIDEO) {
        put_byte(pb, 9);
        flags = 2; // choose h263
        flags |= pkt->flags & PKT_FLAG_KEY ? 0x10 : 0x20; // add keyframe indicator
    } else {
        assert(enc->codec_type == CODEC_TYPE_AUDIO);
        flags = get_audio_flags(enc);

        assert(size);

        put_byte(pb, 8);
    }

    put_be24(pb,size+1); // include flags
    put_be24(pb,pkt->pts);
    put_be32(pb,flv->reserved);
    put_byte(pb,flags);
    put_buffer(pb, pkt->data, size);
    put_be32(pb,size+1+11); // previous tag size
    flv->duration = pkt->pts + pkt->duration;

    put_flush_packet(pb);
    return 0;
}
static void put_avc_eos_tag(ByteIOContext *pb, unsigned ts)
{
    put_byte(pb, FLV_TAG_TYPE_VIDEO);
    put_be24(pb, 5);                 /* Tag Data Size */
    put_be24(pb, ts);                /* lower 24 bits of timestamp in ms */
    put_byte(pb, (ts >> 24) & 0x7F); /* MSB of ts in ms */
    put_be24(pb, 0);                 /* StreamId = 0 */
    put_byte(pb, 23);                /* ub[4] FrameType = 1, ub[4] CodecId = 7 */
    put_byte(pb, 2);                 /* AVC end of sequence */
    put_be24(pb, 0);                 /* Always 0 for AVC EOS. */
    put_be32(pb, 16);                /* Size of FLV tag */
}
static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    ByteIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[pkt->stream_index]->codec;
    FLVContext *flv = s->priv_data;
    int size= pkt->size;
    int flags, flags_size;

//    av_log(s, AV_LOG_DEBUG, "type:%d pts: %"PRId64" size:%d\n", enc->codec_type, timestamp, size);

    if(enc->codec_id == CODEC_ID_VP6 || enc->codec_id == CODEC_ID_VP6F)
        flags_size= 2;
    else
        flags_size= 1;

    if (enc->codec_type == CODEC_TYPE_VIDEO) {
        put_byte(pb, FLV_TAG_TYPE_VIDEO);

        flags = enc->codec_tag;
        if(flags == 0) {
            av_log(enc, AV_LOG_ERROR, "video codec %X not compatible with flv\n",enc->codec_id);
            return -1;
        }

        flags |= pkt->flags & PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER;
    } else {
        assert(enc->codec_type == CODEC_TYPE_AUDIO);
        flags = get_audio_flags(enc);

        assert(size);

        put_byte(pb, FLV_TAG_TYPE_AUDIO);
    }

    put_be24(pb,size + flags_size);
    put_be24(pb,pkt->pts);
    put_byte(pb,pkt->pts >> 24);
    put_be24(pb,flv->reserved);
    put_byte(pb,flags);
    if (enc->codec_id == CODEC_ID_VP6)
        put_byte(pb,0);
    if (enc->codec_id == CODEC_ID_VP6F)
        put_byte(pb, enc->extradata_size ? enc->extradata[0] : 0);
    put_buffer(pb, pkt->data, size);
    put_be32(pb,size+flags_size+11); // previous tag size
    flv->duration = pkt->pts + pkt->duration;

    put_flush_packet(pb);
    return 0;
}
static int gxf_write_media_preamble(AVFormatContext *s, AVPacket *pkt, int size)
{
    GXFContext *gxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st = s->streams[pkt->stream_index];
    GXFStreamContext *sc = st->priv_data;
    unsigned field_nb;
    /* If the video is frame-encoded, the frame numbers shall be represented by
     * even field numbers.
     * see SMPTE360M-2004  6.4.2.1.3 Media field number */
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        field_nb = gxf->nb_fields;
    } else {
        field_nb = av_rescale_rnd(pkt->dts, gxf->time_base.den,
                                  (int64_t)48000*gxf->time_base.num, AV_ROUND_UP);
    }

    put_byte(pb, sc->media_type);
    put_byte(pb, st->index);
    put_be32(pb, field_nb);
    if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
        put_be16(pb, 0);
        put_be16(pb, size / 2);
    } else if (st->codec->codec_id == CODEC_ID_MPEG2VIDEO) {
        int frame_type = gxf_parse_mpeg_frame(sc, pkt->data, pkt->size);
        if (frame_type == FF_I_TYPE) {
            put_byte(pb, 0x0d);
            sc->iframes++;
        } else if (frame_type == FF_B_TYPE) {
            put_byte(pb, 0x0f);
            sc->bframes++;
        } else {
            put_byte(pb, 0x0e);
            sc->pframes++;
        }
        put_be24(pb, size);
    } else if (st->codec->codec_id == CODEC_ID_DVVIDEO) {
        put_byte(pb, size / 4096);
        put_be24(pb, 0);
    } else
        put_be32(pb, size);
    put_be32(pb, field_nb);
    put_byte(pb, 1); /* flags */
    put_byte(pb, 0); /* reserved */
    return 16;
}
void RTMPSession::sendSetBufferTime(int milliseconds) {
    m_jobQueue.enqueue([=]{
        int streamId = 0;
        std::vector<uint8_t> buff;

        put_byte(buff, 2);
        put_be24(buff, 0);
        put_be24(buff, 10);
        put_byte(buff, RTMP_PT_PING);
        put_buff(buff, (uint8_t*)&streamId, sizeof(int32_t));

        put_be16(buff, 3); // SetBufferTime
        put_be32(buff, m_streamId);
        put_be32(buff, milliseconds);

        write(&buff[0], buff.size());
    });
}
static int flac_write_block_padding(ByteIOContext *pb, unsigned int n_padding_bytes,
                                    int last_block)
{
    put_byte(pb, last_block ? 0x81 : 0x01);
    put_be24(pb, n_padding_bytes);
    while (n_padding_bytes > 0) {
        put_byte(pb, 0);
        n_padding_bytes--;
    }
    return 0;
}
void RTMPSession::sendPong() {
    m_jobQueue.enqueue([&] {
        int streamId = 0;
        std::vector<uint8_t> buff;

        put_byte(buff, 2);                                      // chunk stream ID 2
        put_be24(buff, 0);                                      // ts
        put_be24(buff, 6);                                      // size (6 bytes)
        put_byte(buff, RTMP_PT_PING);                           // chunk type
        put_buff(buff, (uint8_t*)&streamId, sizeof(int32_t));   // msg stream id is little-endian

        put_be16(buff, 7);
        put_be16(buff, 0);
        put_be16(buff, 0);

        write(&buff[0], buff.size());
    });
}
void CMp4Builder::put_AudioMediaBox()
{
    PRINT_FUNCTION_NAME;

    //MediaBox
    put_be32(AudioMediaBox_SIZE);            //uint32 size
    put_boxtype("mdia");                     //'mdia'

    //MediaHeaderBox
    put_be32(MediaHeaderBox_SIZE);           //uint32 size
    put_boxtype("mdhd");                     //'mdhd'
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    //uint32 creation_time [version==0] uint64 creation_time [version==1]
    put_be32(_create_time);
    //uint32 modification_time [version==0] uint64 modification_time [version==1]
    put_be32(_create_time);
    //Audio's timescale is the same as Video, 90000
    put_be32(mH264Info.scale);               //uint32 timescale
    //uint32 duration [version==0] uint64 duration [version==1]
    put_be32(mAudioDuration);
    put_be16(0);                             //bits5 language[3]  //ISO-639-2/T language code
    put_be16(0);                             //uint16 pre_defined

    //HandlerReferenceBox
    put_be32(AudioHandlerReferenceBox_SIZE); //uint32 size
    put_boxtype("hdlr");                     //'hdlr'
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    put_be32(0);                             //uint32 pre_defined
    put_boxtype("soun");                     //'soun':audio track
    put_be32(0);                             //uint32 reserved[3]
    put_be32(0);
    put_be32(0);
    //char name[], name[0] is actual length
    put_byte(AUDIO_HANDLER_NAME_LEN);
    put_buffer((AM_U8 *)AUDIO_HANDLER_NAME, AUDIO_HANDLER_NAME_LEN-1);

    put_AudioMediaInformationBox();
}
void CMp4Builder::put_VideoMediaBox(AM_UINT Duration)
{
    PRINT_FUNCTION_NAME;
    INFO("Video duration is %lu", mVideoDuration);

    //MediaBox
    put_be32(VideoMediaBox_SIZE);            //uint32 size
    put_boxtype("mdia");                     //'mdia'

    //MediaHeaderBox
    put_be32(MediaHeaderBox_SIZE);           //uint32 size
    put_boxtype("mdhd");                     //'mdhd'
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    //uint32 creation_time [version==0] uint64 creation_time [version==1]
    put_be32(_create_time);
    //uint32 modification_time [version==0] uint64 modification_time [version==1]
    put_be32(_create_time);
    put_be32(mH264Info.scale);               //uint32 timescale
    //uint32 duration [version==0] uint64 duration [version==1]
    put_be32(Duration);
    put_be16(0);                             //bits5 language[3]  //ISO-639-2/T language code
    put_be16(0);                             //uint16 pre_defined

    //HandlerReferenceBox
    put_be32(VideoHandlerReferenceBox_SIZE); //uint32 size
    put_boxtype("hdlr");                     //'hdlr'
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    put_be32(0);                             //uint32 pre_defined
    put_boxtype("vide");                     //'vide'
    put_be32(0);                             //uint32 reserved[3]
    put_be32(0);
    put_be32(0);
    put_byte(VIDEO_HANDLER_NAME_LEN);        //char name[], name[0] is actual length
    put_buffer((AM_U8 *)VIDEO_HANDLER_NAME, VIDEO_HANDLER_NAME_LEN-1);

    put_VideoMediaInformationBox();
}
void CMp4Builder::put_VideoTrackBox(AM_UINT TrackId, AM_UINT Duration)
{
    PRINT_FUNCTION_NAME;

    //TrackBox
    put_be32(VideoTrackBox_SIZE);            //uint32 size
    put_boxtype("trak");                     //'trak'

    //TrackHeaderBox
    put_be32(TrackHeaderBox_SIZE);           //uint32 size
    put_boxtype("tkhd");                     //'tkhd'
    put_byte(0);                             //uint8 version
    //0x01:track_enabled, 0x02:track_in_movie, 0x04:track_in_preview
    put_be24(0x07);                          //bits24 flags
    //uint32 creation_time [version==0] uint64 creation_time [version==1]
    put_be32(_create_time);
    //uint32 modification_time [version==0] uint64 modification_time [version==1]
    put_be32(_create_time);
    put_be32(TrackId);                       //uint32 track_ID
    put_be32(0);                             //uint32 reserved
    //uint32 duration [version==0] uint64 duration [version==1]
    put_be32(Duration);
    put_be32(0);                             //uint32 reserved[2]
    put_be32(0);
    put_be16(0);                             //int16 layer
    put_be16(0);                             //int16 alternate_group
    put_be16(0x0000);                        //int16 volume
    put_be16(0);                             //uint16 reserved
    put_be32(0x00010000);                    //int32 matrix[9]
    put_be32(0);
    put_be32(0);
    put_be32(0);
    put_be32(0x00010000);
    put_be32(0);
    put_be32(0);
    put_be32(0);
    put_be32(0x40000000);
    put_be32(mH264Info.width<<16);           //uint32 width  //16.16 fixed-point
    put_be32(mH264Info.height<<16);          //uint32 height //16.16 fixed-point

    put_VideoMediaBox(Duration);
}
void CMp4Builder::put_VideoMediaInformationBox()
{
    PRINT_FUNCTION_NAME;

    //MediaInformationBox
    put_be32(VideoMediaInformationBox_SIZE); //uint32 size
    put_boxtype("minf");                     //'minf'

    //VideoMediaHeaderBox
    put_be32(VideoMediaHeaderBox_SIZE);      //uint32 size
    put_boxtype("vmhd");                     //'vmhd'
    put_byte(0);                             //uint8 version
    //This is a compatibility flag that allows QuickTime to distinguish
    // between movies created with QuickTime 1.0 and newer movies.
    // You should always set this flag to 1, unless you are creating a movie
    // intended for playback using version 1.0 of QuickTime
    put_be24(1);                             //bits24 flags
    put_be16(0);                             //uint16 graphicsmode //0=copy over the existing image
    put_be16(0);                             //uint16 opcolor[3] //(red, green, blue)
    put_be16(0);
    put_be16(0);

    //DataInformationBox
    put_be32(DataInformationBox_SIZE);       //uint32 size
    put_boxtype("dinf");                     //'dinf'
    //DataReferenceBox
    put_be32(DataReferenceBox_SIZE);         //uint32 size
    put_boxtype("dref");                     //'dref'
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    put_be32(1);                             //uint32 entry_count
    put_be32(12);                            //uint32 size
    put_boxtype("url");                      //'url '
    put_byte(0);                             //uint8 version
    put_be24(1);                             //bits24 flags 1=media data is in the same file as the MediaBox

    //SampleTableBox
    put_be32(VideoSampleTableBox_SIZE);      //uint32 size
    put_boxtype("stbl");                     //'stbl'

    //SampleDescriptionBox
    put_be32(VideoSampleDescriptionBox_SIZE);//uint32 size
    put_boxtype("stsd");                     //'stsd'
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    put_be32(1);                             //uint32 entry_count
    //VisualSampleEntry
    put_be32(VisualSampleEntry_SIZE);        //uint32 size
    put_boxtype("avc1");                     //'avc1'
    put_byte(0);                             //uint8 reserved[6]
    put_byte(0);
    put_byte(0);
    put_byte(0);
    put_byte(0);
    put_byte(0);
    put_be16(1);                             //uint16 data_reference_index
    put_be16(0);                             //uint16 pre_defined
    put_be16(0);                             //uint16 reserved
    put_be32(0);                             //uint32 pre_defined[3]
    put_be32(0);
    put_be32(0);
    put_be16(mH264Info.width);               //uint16 width
    put_be16(mH264Info.height);              //uint16 height
    put_be32(0x00480000);                    //uint32 horizresolution 72dpi
    put_be32(0x00480000);                    //uint32 vertresolution 72dpi
    put_be32(0);                             //uint32 reserved
    put_be16(1);                             //uint16 frame_count
    AM_U8 EncoderName[32]="\012AVC Coding";  //Compressorname
    put_buffer(EncoderName,32);
    put_be16(0x0018);                        //uint16 depth //0x0018=images are in colour with no alpha
    put_be16(-1);                            //int16 pre_defined

    //AvcConfigurationBox
    put_be32(AvcConfigurationBox_SIZE);      //uint32 size
    put_boxtype("avcC");                     //'avcC'
    put_byte(1);                             //uint8 configurationVersion
    put_byte(_sps[1]);                       //uint8 AVCProfileIndication
    put_byte(_sps[2]);                       //uint8 profile_compatibility
    put_byte(_sps[3]);                       //uint8 level
    //uint8 nal_length //(nal_length&0x03)+1 [reserved:6, lengthSizeMinusOne:2]
    put_byte(0xFF);
    //uint8 sps_count //sps_count&0x1f [reserved:3, numOfSequenceParameterSets:5]
    put_byte(0xE1);
    //uint16 sps_size //sequenceParameterSetLength
    put_be16(_sps_size);
    //uint8 sps[sps_size] //sequenceParameterSetNALUnit
    put_buffer(_sps, _sps_size);
    put_byte(1);                             //uint8 pps_count //numOfPictureParameterSets
    put_be16(_pps_size);                     //uint16 pps_size //pictureParameterSetLength
    put_buffer(_pps, _pps_size);             //uint8 pps[pps_size] //pictureParameterSetNALUnit

    /*
    //BitrateBox
    put_be32(BitrateBox_SIZE);               //uint32 size
    put_boxtype("btrt");                     //'btrt'
    put_be32(0);                             //uint32 buffer_size
    put_be32(0);                             //uint32 max_bitrate
    put_be32(0);                             //uint32 avg_bitrate
    */

    //DecodingTimeToSampleBox
    put_be32(VideoDecodingTimeToSampleBox_SIZE); //uint32 size
    put_boxtype("stts");                     //'stts'
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    put_be32(1);                             //uint32 entry_count
    put_be32(mVideoCnt);                     //uint32 sample_count
    put_be32(mH264Info.rate);                //uint32 sample_delta

    //CompositionTimeToSampleBox
    put_be32(CompositionTimeToSampleBox_SIZE); //uint32 size
    put_boxtype("ctts");                     //'ctts'
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    put_be32(mVideoCnt);                     //uint32 entry_count
    put_buffer((AM_U8 *)_ctts, mVideoCnt * 2 *sizeof(_ctts[0]));

    //SampleToChunkBox
    put_be32(SampleToChunkBox_SIZE);         //uint32 size
    put_boxtype("stsc");                     //'stsc'
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    put_be32(1);                             //uint32 entry_count
    put_be32(1);                             //uint32 first_chunk
    put_be32(1);                             //uint32 samples_per_chunk
    put_be32(1);                             //uint32 sample_description_index

    //SampleSizeBox
    put_be32(VideoSampleSizeBox_SIZE);       //uint32 size
    put_boxtype("stsz");                     //'stsz'
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    put_be32(0);                             //uint32 sampleSize
    put_be32(mVideoCnt);                     //uint32 sample_count
    put_buffer((AM_U8 *)_v_stsz, mVideoCnt * sizeof(_v_stsz[0]));

    //ChunkOffsetBox
    put_be32(VideoChunkOffsetBox_SIZE);      //uint32 size
    put_boxtype("stco");                     //'stco'
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    put_be32(mVideoCnt);                     //uint32 entry_count
    put_buffer((AM_U8 *)_v_stco, mVideoCnt * sizeof(_v_stco[0]));

    //SyncSampleBox
    put_be32(SyncSampleBox_SIZE);            //uint32 size
    put_boxtype("stss");                     //'stss'
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    put_be32(mStssCnt);                      //uint32 entry_count
    put_buffer((AM_U8*)_stss, mStssCnt*sizeof(_stss[0]));
}
static int flv_write_packet( AVFormatContext *s, AVPacket *pkt )
{
    LogStr("Init");

    //Fernando:
    //printf("-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\n");
    //printf("************* proc: %s - line: %d\n", __func__, __LINE__);
    //dumpPacket(&pkt);
    //getchar();

    ByteIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[pkt->stream_index]->codec;
    FLVContext *flv = s->priv_data;
    unsigned ts;
    int size = pkt->size;
    int flags, flags_size;

//    av_log(s, AV_LOG_DEBUG, "type:%d pts: %"PRId64" size:%d\n", enc->codec_type, timestamp, size);

    if (enc->codec_id == CODEC_ID_VP6 || enc->codec_id == CODEC_ID_VP6F ||
        enc->codec_id == CODEC_ID_AAC) {
        flags_size = 2;
    } else if (enc->codec_id == CODEC_ID_H264) {
        flags_size = 5;
    } else {
        flags_size = 1;
    }

    if (enc->codec_type == CODEC_TYPE_VIDEO) {
        put_byte(pb, FLV_TAG_TYPE_VIDEO);

        flags = enc->codec_tag;
        if (flags == 0) {
            av_log(enc, AV_LOG_ERROR, "video codec %X not compatible with flv\n", enc->codec_id);
            LogStr("Exit");
            return -1;
        }

        flags |= pkt->flags & PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER;
    } else {
        assert(enc->codec_type == CODEC_TYPE_AUDIO);
        flags = get_audio_flags(enc);

        assert(size);

        put_byte(pb, FLV_TAG_TYPE_AUDIO);
    }

    if (enc->codec_id == CODEC_ID_H264 &&
        /* check if extradata looks like mp4 formated */
        enc->extradata_size > 0 && *(uint8_t*) enc->extradata != 1) {
        if (ff_avc_parse_nal_units(pkt->data, &pkt->data, &pkt->size) < 0) {
            LogStr("Exit");
            return -1;
        }

        assert(pkt->size);
        size = pkt->size;

        /* cast needed to get negative value */
        if (!flv->delay && pkt->dts < 0) {
            flv->delay = -pkt->dts;
        }
    }

    ts = pkt->dts + flv->delay; // add delay to force positive dts

    put_be24(pb, size + flags_size);
    put_be24(pb, ts);
    put_byte(pb, (ts >> 24) & 0x7F); // timestamps are 32bits _signed_
    put_be24(pb, flv->reserved);
    put_byte(pb, flags);

    if (enc->codec_id == CODEC_ID_VP6) {
        put_byte(pb, 0);
    }

    if (enc->codec_id == CODEC_ID_VP6F) {
        put_byte(pb, enc->extradata_size ? enc->extradata[0] : 0);
    } else if (enc->codec_id == CODEC_ID_AAC) {
        put_byte(pb, 1); // AAC raw
    } else if (enc->codec_id == CODEC_ID_H264) {
        put_byte(pb, 1); // AVC NALU
        put_be24(pb, pkt->pts - pkt->dts);
    }

    put_buffer(pb, pkt->data, size);
    put_be32(pb, size + flags_size + 11); // previous tag size
    flv->duration = FFMAX(flv->duration, pkt->pts + flv->delay + pkt->duration);

    put_flush_packet(pb);

    LogStr("Exit");
    return 0;
}
void H264Packetizer::pushBuffer(const uint8_t* const inBuffer, size_t inSize, IMetadata& inMetadata)
{
    std::vector<uint8_t>& outBuffer = m_outbuffer;

    outBuffer.clear();

    uint8_t nal_type = inBuffer[4] & 0x1F;
    int flags = 0;
    const int flags_size = 5;
    const int ts = inMetadata.timestampDelta;
    bool is_config = (nal_type == 7 || nal_type == 8);

    flags = FLV_CODECID_H264;

    auto output = m_output.lock();

    RTMPMetadata_t outMeta(inMetadata.timestampDelta);

    switch(nal_type) {
        case 7: // SPS
            if(m_sps.size() == 0) {
                m_sps.resize(inSize-4);
                memcpy(&m_sps[0], inBuffer+4, inSize-4);
            }
            return;
        case 8: // PPS
            if(m_pps.size() == 0) {
                m_pps.resize(inSize-4);
                memcpy(&m_pps[0], inBuffer+4, inSize-4);
            }
            flags |= FLV_FRAME_KEY;
            break;
        case 5: // IDR slice
            flags |= FLV_FRAME_KEY;
            break;
        default:
            flags |= FLV_FRAME_INTER;
            break;
    }

    if(output) {
        std::vector<uint8_t> conf;

        if(is_config && m_sps.size() > 0 && m_pps.size() > 0) {
            conf = configurationFromSpsAndPps();
            inSize = conf.size();
        }
        outBuffer.reserve(inSize + flags_size);

        put_byte(outBuffer, flags);
        put_byte(outBuffer, !is_config);
        put_be24(outBuffer, 0);

        if(is_config) {
            // create modified SPS/PPS buffer
            if(m_sps.size() > 0 && m_pps.size() > 0 && !m_sentConfig) {
                put_buff(outBuffer, &conf[0], conf.size());
                m_sentConfig = true;
            } else {
                return;
            }
        } else {
            put_buff(outBuffer, inBuffer, inSize);
        }

        static auto prev_time = std::chrono::steady_clock::now();
        auto now = std::chrono::steady_clock::now();
        auto m_micros = std::chrono::duration_cast<std::chrono::microseconds>(now - prev_time).count();
        static uint64_t total = 0;
        static uint64_t count = 0;
        total += m_micros;
        count++;
        prev_time = now;

        outMeta.setData(ts, static_cast<int>(outBuffer.size()), FLV_TAG_TYPE_VIDEO, kVideoChannelStreamId);

        output->pushBuffer(&outBuffer[0], outBuffer.size(), outMeta);
    }
}
void RTMPSession::pushBuffer(const uint8_t* const data, size_t size, IMetadata& metadata)
{
    if(m_ending) {
        return;
    }

    std::shared_ptr<Buffer> buf = std::make_shared<Buffer>(size);
    buf->put(const_cast<uint8_t*>(data), size);

    const RTMPMetadata_t inMetadata = static_cast<const RTMPMetadata_t&>(metadata);

    m_jobQueue.enqueue([=]() {

        if(!this->m_ending) {
            static int c_count = 0;
            c_count++;

            auto packetTime = std::chrono::steady_clock::now();

            std::vector<uint8_t> chunk;
            std::shared_ptr<std::vector<uint8_t>> outb = std::make_shared<std::vector<uint8_t>>();
            outb->reserve(size + 64);

            size_t len = buf->size();
            size_t tosend = std::min(len, m_outChunkSize);
            uint8_t* p;
            buf->read(&p, buf->size());
            uint64_t ts = inMetadata.getData<kRTMPMetadataTimestamp>();
            const int streamId = inMetadata.getData<kRTMPMetadataMsgStreamId>();

#ifndef RTMP_CHUNK_TYPE_0_ONLY
            auto it = m_previousChunkData.find(streamId);
            if(it == m_previousChunkData.end()) {
#endif
                // Type 0.
                put_byte(chunk, (streamId & 0x1F));
                put_be24(chunk, static_cast<uint32_t>(ts));
                put_be24(chunk, inMetadata.getData<kRTMPMetadataMsgLength>());
                put_byte(chunk, inMetadata.getData<kRTMPMetadataMsgTypeId>());
                put_buff(chunk, (uint8_t*)&m_streamId, sizeof(int32_t)); // msg stream id is little-endian
#ifndef RTMP_CHUNK_TYPE_0_ONLY
            } else {
                // Type 1.
                put_byte(chunk, RTMP_CHUNK_TYPE_1 | (streamId & 0x1F));
                put_be24(chunk, static_cast<uint32_t>(ts - it->second)); // timestamp delta
                put_be24(chunk, inMetadata.getData<kRTMPMetadataMsgLength>());
                put_byte(chunk, inMetadata.getData<kRTMPMetadataMsgTypeId>());
            }
#endif
            m_previousChunkData[streamId] = ts;
            put_buff(chunk, p, tosend);

            outb->insert(outb->end(), chunk.begin(), chunk.end());

            len -= tosend;
            p += tosend;

            while(len > 0) {
                tosend = std::min(len, m_outChunkSize);
                p[-1] = RTMP_CHUNK_TYPE_3 | (streamId & 0x1F);

                outb->insert(outb->end(), p-1, p+tosend);

                p += tosend;
                len -= tosend;
                //this->write(&outb[0], outb.size(), packetTime);
                //outb.clear();
            }

            this->write(&(*outb)[0], outb->size(), packetTime, inMetadata.getData<kRTMPMetadataIsKeyframe>());
        }
    });
}
void H264Packetizer::pushBuffer(const uint8_t* const inBuffer, size_t inSize, IMetadata& inMetadata)
{
    std::vector<uint8_t>& outBuffer = m_outbuffer;

    outBuffer.clear();

    uint8_t nal_type = inBuffer[4] & 0x1F;
    int flags = 0;
    const int flags_size = 5;
    int dts = inMetadata.dts;
    int pts = inMetadata.pts + m_ctsOffset; // correct for pts < dts which some players (ffmpeg) don't like

    dts = dts > 0 ? dts : pts - m_ctsOffset;

    bool is_config = (nal_type == 7 || nal_type == 8);

    flags = FLV_CODECID_H264;

    auto output = m_output.lock();

    switch(nal_type) {
        case 7: // SPS
            if(m_sps.size() == 0) {
                m_sps.resize(inSize-4);
                memcpy(&m_sps[0], inBuffer+4, inSize-4);
            }
            break;
        case 8: // PPS
            if(m_pps.size() == 0) {
                m_pps.resize(inSize-4);
                memcpy(&m_pps[0], inBuffer+4, inSize-4);
            }
            flags |= FLV_FRAME_KEY;
            break;
        case 5: // IDR slice
            flags |= FLV_FRAME_KEY;
            break;
        default:
            flags |= FLV_FRAME_INTER;
            break;
    }

    if(output) {
        RTMPMetadata_t outMeta(dts);
        std::vector<uint8_t> conf;

        if(is_config && m_sps.size() > 0 && m_pps.size() > 0) {
            conf = configurationFromSpsAndPps();
            inSize = conf.size();
        }
        outBuffer.reserve(inSize + flags_size);

        put_byte(outBuffer, flags);
        put_byte(outBuffer, !is_config);
        put_be24(outBuffer, pts - dts); // Decoder delay

        if(is_config) {
            // create modified SPS/PPS buffer
            if(m_sps.size() > 0 && m_pps.size() > 0 && !m_sentConfig && (flags & FLV_FRAME_KEY)) {
                put_buff(outBuffer, &conf[0], conf.size());
                m_sentConfig = true;
            } else {
                return;
            }
        } else {
            put_buff(outBuffer, inBuffer, inSize);
        }

        outMeta.setData(dts, static_cast<int>(outBuffer.size()), RTMP_PT_VIDEO, kVideoChannelStreamId, nal_type == 5);

        output->pushBuffer(&outBuffer[0], outBuffer.size(), outMeta);
    }
}
static int flv_write_header(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    FLVContext *flv = s->priv_data;
    int i, width, height, samplerate, samplesize, channels, audiocodecid, videocodecid;
    double framerate = 0.0;
    int metadata_size_pos, data_size;

    flv->hasAudio = 0;
    flv->hasVideo = 0;

    for(i=0; i<s->nb_streams; i++){
        AVCodecContext *enc = s->streams[i]->codec;
        if (enc->codec_type == CODEC_TYPE_VIDEO) {
            width = enc->width;
            height = enc->height;
            if (s->streams[i]->r_frame_rate.den && s->streams[i]->r_frame_rate.num) {
                framerate = av_q2d(s->streams[i]->r_frame_rate);
            } else {
                framerate = 1/av_q2d(s->streams[i]->codec->time_base);
            }
            flv->hasVideo=1;

            videocodecid = enc->codec_tag;
            if(videocodecid == 0) {
                av_log(enc, AV_LOG_ERROR, "video codec not compatible with flv\n");
                return -1;
            }
        } else {
            flv->hasAudio=1;
            samplerate = enc->sample_rate;
            channels = enc->channels;
            audiocodecid = enc->codec_tag;
            samplesize = (enc->codec_id == CODEC_ID_PCM_S8) ? 8 : 16;
            if(get_audio_flags(enc)<0)
                return -1;
        }
        av_set_pts_info(s->streams[i], 24, 1, 1000); /* 24 bit pts in ms */
    }
    put_tag(pb,"FLV");
    put_byte(pb,1);
    put_byte(pb,   FLV_HEADER_FLAG_HASAUDIO * flv->hasAudio
                 + FLV_HEADER_FLAG_HASVIDEO * flv->hasVideo);
    put_be32(pb,9);
    put_be32(pb,0);

    for(i=0; i<s->nb_streams; i++){
        if(s->streams[i]->codec->codec_tag == 5){
            put_byte(pb,8);  // message type
            put_be24(pb,0);  // include flags
            put_be24(pb,0);  // time stamp
            put_be32(pb,0);  // reserved
            put_be32(pb,11); // size
            flv->reserved=5;
        }
    }

    /* write meta_tag */
    put_byte(pb, 18);         // tag type META
    metadata_size_pos= url_ftell(pb);
    put_be24(pb, 0);          // size of data part (sum of all parts below)
    put_be24(pb, 0);          // time stamp
    put_be32(pb, 0);          // reserved

    /* now data of data_size size */

    /* first event name as a string */
    put_byte(pb, AMF_DATA_TYPE_STRING);
    put_amf_string(pb, "onMetaData"); // 12 bytes

    /* mixed array (hash) with size and string/type/data tuples */
    put_byte(pb, AMF_DATA_TYPE_MIXEDARRAY);
    put_be32(pb, 5*flv->hasVideo + 4*flv->hasAudio + 2); // +2 for duration and file size

    put_amf_string(pb, "duration");
    flv->duration_offset= url_ftell(pb);
    put_amf_double(pb, 0); // delayed write

    if(flv->hasVideo){
        put_amf_string(pb, "width");
        put_amf_double(pb, width);

        put_amf_string(pb, "height");
        put_amf_double(pb, height);

        put_amf_string(pb, "videodatarate");
        put_amf_double(pb, s->bit_rate / 1024.0);

        put_amf_string(pb, "framerate");
        put_amf_double(pb, framerate);

        put_amf_string(pb, "videocodecid");
        put_amf_double(pb, videocodecid);
    }

    if(flv->hasAudio){
        put_amf_string(pb, "audiosamplerate");
        put_amf_double(pb, samplerate);

        put_amf_string(pb, "audiosamplesize");
        put_amf_double(pb, samplesize);

        put_amf_string(pb, "stereo");
        put_amf_bool(pb, (channels == 2));

        put_amf_string(pb, "audiocodecid");
        put_amf_double(pb, audiocodecid);
    }

    put_amf_string(pb, "filesize");
    flv->filesize_offset= url_ftell(pb);
    put_amf_double(pb, 0); // delayed write

    put_amf_string(pb, "");
    put_byte(pb, AMF_END_OF_OBJECT);

    /* write total size of tag */
    data_size= url_ftell(pb) - metadata_size_pos - 10;
    url_fseek(pb, metadata_size_pos, SEEK_SET);
    put_be24(pb, data_size);
    url_fseek(pb, data_size + 10 - 3, SEEK_CUR);
    put_be32(pb, data_size + 11);

    return 0;
}
AM_ERR CMp4Builder::put_MovieBox()
{
    PRINT_FUNCTION_NAME;

    //MovieBox
    int ret = 0;
    ret += put_be32(MovieBox_SIZE);          //uint32 size
    ret += put_boxtype("moov");              //'moov'

    //MovieHeaderBox
    ret += put_be32(MovieHeaderBox_SIZE);    //uint32 size
    ret += put_boxtype("mvhd");              //'mvhd'
    ret += put_byte(0);                      //uint8 version
    put_be24(0);                             //bits24 flags
    //uint32 creation_time [version==0] uint64 creation_time [version==1]
    put_be32(_create_time);
    //uint32 modification_time [version==0] uint64 modification_time [version==1]
    put_be32(_create_time);
    put_be32(mH264Info.scale);               //uint32 timescale
    //uint32 duration [version==0] uint64 duration [version==1]
    put_be32(mVideoDuration);
    put_be32(0x00010000);                    //int32 rate
    put_be16(0x0100);                        //int16 volume
    put_be16(0);                             //bits16 reserved
    put_be32(0);                             //uint32 reserved[2]
    put_be32(0);
    put_be32(0x00010000);                    //int32 matrix[9]
    put_be32(0);
    put_be32(0);
    put_be32(0);
    put_be32(0x00010000);
    put_be32(0);
    put_be32(0);
    put_be32(0);
    put_be32(0x40000000);
    put_be32(0);                             //bits32 pre_defined[6]
    put_be32(0);
    put_be32(0);
    put_be32(0);
    put_be32(0);
    put_be32(0);
    put_be32(3);                             //uint32 next_track_ID

    /*
    put_be32(ObjDescrpBox_SIZE);             //uint32 size
    put_boxtype("iods");                     //'iods'
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    put_be32(0x10808080);
    put_be32(0x07004FFF);
    put_be32(0xFF0F7FFF);
    */

    //UserDataBox
    put_be32(UserDataBox_SIZE);              //uint32 size
    put_boxtype("udta");                     //'udta'
    //AMBABox
    put_be32(AMBABox_SIZE);                  //uint32 size
    put_boxtype("Amba");                     //'Amba'
    put_be16(mH264Info.M);                   //uint16 M
    put_be16(mH264Info.N);                   //uint16 N
    put_be32(mH264Info.scale);               //uint32 scale
    put_be32(mH264Info.rate);                //uint32 rate
    put_be32(_audio_info.sampleRate);        //uint32 audio's samplefreq
    put_be32(_audio_info.channels);          //uint32 audio's channel number
    put_be32(0);                             //reserved

    if (mVideoDuration) {
        put_VideoTrackBox(1, mVideoDuration);
    }
    if (mAudioDuration) {
        put_AudioTrackBox(2, mAudioDuration);
    }

    UpdateIdxBox();
    return ME_OK;
}
void CMp4Builder::put_AudioMediaInformationBox()
{
    PRINT_FUNCTION_NAME;

    //MediaInformationBox
    put_be32(AudioMediaInformationBox_SIZE); //uint32 size
    put_boxtype("minf");                     //'minf'

    //SoundMediaHeaderBox
    put_be32(SoundMediaHeaderBox_SIZE);      //uint32 size
    put_boxtype("smhd");                     //'smhd'
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    put_be16(0);                             //int16 balance
    put_be16(0);                             //uint16 reserved

    //DataInformationBox
    put_be32(DataInformationBox_SIZE);       //uint32 size
    put_boxtype("dinf");                     //'dinf'
    //DataReferenceBox
    put_be32(DataReferenceBox_SIZE);         //uint32 size
    put_boxtype("dref");                     //'dref'
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    put_be32(1);                             //uint32 entry_count
    put_be32(12);                            //uint32 size
    put_boxtype("url");                      //'url '
    put_byte(0);                             //uint8 version
    //1=media data is in the same file as the MediaBox
    put_be24(1);                             //bits24 flags

    //SampleTableBox
    put_be32(AudioSampleTableBox_SIZE);      //uint32 size
    put_boxtype("stbl");                     //'stbl'

    //SampleDescriptionBox
    put_be32(AudioSampleDescriptionBox_SIZE);//uint32 size
    put_boxtype("stsd");                     //uint32 type
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    put_be32(1);                             //uint32 entry_count
    //AudioSampleEntry
    put_be32(AudioSampleEntry_SIZE);         //uint32 size
    put_boxtype("mp4a");                     //'mp4a'
    put_byte(0);                             //uint8 reserved[6]
    put_byte(0);
    put_byte(0);
    put_byte(0);
    put_byte(0);
    put_byte(0);
    put_be16(1);                             //uint16 data_reference_index
    put_be32(0);                             //uint32 reserved[2]
    put_be32(0);
    put_be16(_audio_info.channels);          //uint16 channelcount
    put_be16(_audio_info.sampleSize * 8);    //uint16 samplesize
    //for QT sound
    put_be16(0xfffe);                        //uint16 pre_defined
    put_be16(0);                             //uint16 reserved
    //= (timescale of media << 16)
    put_be32(_audio_info.sampleRate<<16);    //uint32 samplerate

    //ElementaryStreamDescriptorBox
    put_be32(ElementaryStreamDescriptorBox_SIZE); //uint32 size
    put_boxtype("esds");                     //'esds'
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    //ES descriptor takes 38 bytes
    put_byte(3);                             //ES descriptor type tag
    put_be16(0x8080);
    put_byte(34);                            //descriptor type length
    put_be16(0);                             //ES ID
    put_byte(0);                             //stream priority
    //Decoder config descriptor takes 26 bytes (include decoder specific info)
    put_byte(4);                             //decoder config descriptor type tag
    put_be16(0x8080);
    put_byte(22);                            //descriptor type length
    put_byte(0x40);                          //object type ID MPEG-4 audio=64 AAC
    //stream type:6, upstream flag:1, reserved flag:1 (audio=5) Audio stream
    put_byte(0x15);
    put_be24(8192);                          // buffer size
    put_be32(128000);                        // max bitrate
    put_be32(128000);                        // avg bitrate
    //Decoder specific info descriptor takes 9 bytes
    //decoder specific descriptor type tag
    put_byte(5);
    put_be16(0x8080);
    put_byte(5);                             //descriptor type length
    put_be16((AM_UINT)_audio_spec_config);
    put_be16(0x0000);
    put_byte(0x00);
    //SL descriptor takes 5 bytes
    put_byte(6);                             //SL config descriptor type tag
    put_be16(0x8080);
    put_byte(1);                             //descriptor type length
    put_byte(2);                             //SL value

    //DecodingTimeToSampleBox
    put_be32(AudioDecodingTimeToSampleBox_SIZE); //uint32 size
    put_boxtype("stts");                     //'stts'
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    put_be32(1);                             //uint32 entry_count
    put_be32(mAudioCnt);                     //uint32 sample_count
    put_be32(_audio_info.pktPtsIncr);        //uint32 sample_delta

    //SampleToChunkBox
    put_be32(SampleToChunkBox_SIZE);         //uint32 size
    put_boxtype("stsc");                     //'stsc'
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    put_be32(1);                             //uint32 entry_count
    put_be32(1);                             //uint32 first_chunk
    put_be32(1);                             //uint32 samples_per_chunk
    put_be32(1);                             //uint32 sample_description_index

    //SampleSizeBox
    put_be32(AudioSampleSizeBox_SIZE);       //uint32 size
    put_boxtype("stsz");                     //'stsz'
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    put_be32(0);                             //uint32 sampleSize
    put_be32(mAudioCnt);                     //uint32 sample_count
    put_buffer((AM_U8 *)_a_stsz, mAudioCnt*sizeof(_a_stsz[0]));

    //ChunkOffsetBox
    put_be32(AudioChunkOffsetBox_SIZE);      //uint32 size
    put_boxtype("stco");
    put_byte(0);                             //uint8 version
    put_be24(0);                             //bits24 flags
    put_be32(mAudioCnt);                     //uint32 entry_count
    put_buffer((AM_U8 *)_a_stco, mAudioCnt * sizeof(_a_stco[0]));
}
static int flv_write_header(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    FLVContext *flv = s->priv_data;
    AVCodecContext *audio_enc = NULL, *video_enc = NULL;
    int i;
    double framerate = 0.0;
    int64_t metadata_size_pos, data_size;

    for(i=0; i<s->nb_streams; i++) {
        AVCodecContext *enc = s->streams[i]->codec;
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            if (s->streams[i]->r_frame_rate.den && s->streams[i]->r_frame_rate.num) {
                framerate = av_q2d(s->streams[i]->r_frame_rate);
            } else {
                framerate = 1/av_q2d(s->streams[i]->codec->time_base);
            }
            video_enc = enc;
            if(enc->codec_tag == 0) {
                av_log(enc, AV_LOG_ERROR, "video codec not compatible with flv\n");
                return -1;
            }
        } else {
            audio_enc = enc;
            if(get_audio_flags(enc)<0)
                return -1;
        }
        av_set_pts_info(s->streams[i], 32, 1, 1000); /* 32 bit pts in ms */
    }
    put_tag(pb,"FLV");
    put_byte(pb,1);
    put_byte(pb,   FLV_HEADER_FLAG_HASAUDIO * !!audio_enc
                 + FLV_HEADER_FLAG_HASVIDEO * !!video_enc);
    put_be32(pb,9);
    put_be32(pb,0);

    for(i=0; i<s->nb_streams; i++) {
        if(s->streams[i]->codec->codec_tag == 5) {
            put_byte(pb,8);  // message type
            put_be24(pb,0);  // include flags
            put_be24(pb,0);  // time stamp
            put_be32(pb,0);  // reserved
            put_be32(pb,11); // size
            flv->reserved=5;
        }
    }

    /* write meta_tag */
    put_byte(pb, 18);         // tag type META
    metadata_size_pos= url_ftell(pb);
    put_be24(pb, 0);          // size of data part (sum of all parts below)
    put_be24(pb, 0);          // time stamp
    put_be32(pb, 0);          // reserved

    /* now data of data_size size */

    /* first event name as a string */
    put_byte(pb, AMF_DATA_TYPE_STRING);
    put_amf_string(pb, "onMetaData"); // 12 bytes

    /* mixed array (hash) with size and string/type/data tuples */
    put_byte(pb, AMF_DATA_TYPE_MIXEDARRAY);
    put_be32(pb, 5*!!video_enc + 5*!!audio_enc + 2); // +2 for duration and file size

    put_amf_string(pb, "duration");
    flv->duration_offset= url_ftell(pb);
    put_amf_double(pb, s->duration / AV_TIME_BASE); // fill in the guessed duration, it'll be corrected later if incorrect

    if(video_enc) {
        put_amf_string(pb, "width");
        put_amf_double(pb, video_enc->width);

        put_amf_string(pb, "height");
        put_amf_double(pb, video_enc->height);

        put_amf_string(pb, "videodatarate");
        put_amf_double(pb, video_enc->bit_rate / 1024.0);

        put_amf_string(pb, "framerate");
        put_amf_double(pb, framerate);

        put_amf_string(pb, "videocodecid");
        put_amf_double(pb, video_enc->codec_tag);
    }

    if(audio_enc) {
        put_amf_string(pb, "audiodatarate");
        put_amf_double(pb, audio_enc->bit_rate / 1024.0);

        put_amf_string(pb, "audiosamplerate");
        put_amf_double(pb, audio_enc->sample_rate);

        put_amf_string(pb, "audiosamplesize");
        put_amf_double(pb, audio_enc->codec_id == CODEC_ID_PCM_U8 ? 8 : 16);

        put_amf_string(pb, "stereo");
        put_amf_bool(pb, audio_enc->channels == 2);

        put_amf_string(pb, "audiocodecid");
        put_amf_double(pb, audio_enc->codec_tag);
    }

    put_amf_string(pb, "filesize");
    flv->filesize_offset= url_ftell(pb);
    put_amf_double(pb, 0); // delayed write

    put_amf_string(pb, "");
    put_byte(pb, AMF_END_OF_OBJECT);

    /* write total size of tag */
    data_size= url_ftell(pb) - metadata_size_pos - 10;
    url_fseek(pb, metadata_size_pos, SEEK_SET);
    put_be24(pb, data_size);
    url_fseek(pb, data_size + 10 - 3, SEEK_CUR);
    put_be32(pb, data_size + 11);

    for (i = 0; i < s->nb_streams; i++) {
        AVCodecContext *enc = s->streams[i]->codec;
        if (enc->codec_id == CODEC_ID_AAC || enc->codec_id == CODEC_ID_H264) {
            int64_t pos;
            put_byte(pb, enc->codec_type == AVMEDIA_TYPE_VIDEO ?
                     FLV_TAG_TYPE_VIDEO : FLV_TAG_TYPE_AUDIO);
            put_be24(pb, 0); // size patched later
            put_be24(pb, 0); // ts
            put_byte(pb, 0); // ts ext
            put_be24(pb, 0); // streamid
            pos = url_ftell(pb);
            if (enc->codec_id == CODEC_ID_AAC) {
                put_byte(pb, get_audio_flags(enc));
                put_byte(pb, 0); // AAC sequence header
                put_buffer(pb, enc->extradata, enc->extradata_size);
            } else {
                put_byte(pb, enc->codec_tag | FLV_FRAME_KEY); // flags
                put_byte(pb, 0); // AVC sequence header
                put_be24(pb, 0); // composition time
                ff_isom_write_avcc(pb, enc->extradata, enc->extradata_size);
            }
            data_size = url_ftell(pb) - pos;
            url_fseek(pb, -data_size - 10, SEEK_CUR);
            put_be24(pb, data_size);
            url_fseek(pb, data_size + 10 - 3, SEEK_CUR);
            put_be32(pb, data_size + 11); // previous tag size
        }
    }

    return 0;
}
static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    ByteIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[pkt->stream_index]->codec;
    FLVContext *flv = s->priv_data;
    unsigned ts;
    int size= pkt->size;
    uint8_t *data= NULL;
    int flags, flags_size;

//    av_log(s, AV_LOG_DEBUG, "type:%d pts: %"PRId64" size:%d\n", enc->codec_type, timestamp, size);

    if(enc->codec_id == CODEC_ID_VP6 || enc->codec_id == CODEC_ID_VP6F ||
       enc->codec_id == CODEC_ID_AAC)
        flags_size= 2;
    else if(enc->codec_id == CODEC_ID_H264)
        flags_size= 5;
    else
        flags_size= 1;

    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        put_byte(pb, FLV_TAG_TYPE_VIDEO);

        flags = enc->codec_tag;
        if(flags == 0) {
            av_log(enc, AV_LOG_ERROR, "video codec %X not compatible with flv\n",enc->codec_id);
            return -1;
        }

        flags |= pkt->flags & AV_PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER;
    } else {
        assert(enc->codec_type == AVMEDIA_TYPE_AUDIO);
        flags = get_audio_flags(enc);

        assert(size);

        put_byte(pb, FLV_TAG_TYPE_AUDIO);
    }

    if (enc->codec_id == CODEC_ID_H264) {
        /* check if extradata looks like mp4 formated */
        if (enc->extradata_size > 0 && *(uint8_t*)enc->extradata != 1) {
            if (ff_avc_parse_nal_units_buf(pkt->data, &data, &size) < 0)
                return -1;
        }
        if (!flv->delay && pkt->dts < 0)
            flv->delay = -pkt->dts;
    }

    ts = pkt->dts + flv->delay; // add delay to force positive dts

    put_be24(pb,size + flags_size);
    put_be24(pb,ts);
    put_byte(pb,(ts >> 24) & 0x7F); // timestamps are 32bits _signed_
    put_be24(pb,flv->reserved);
    put_byte(pb,flags);
    if (enc->codec_id == CODEC_ID_VP6)
        put_byte(pb,0);
    if (enc->codec_id == CODEC_ID_VP6F)
        put_byte(pb, enc->extradata_size ? enc->extradata[0] : 0);
    else if (enc->codec_id == CODEC_ID_AAC)
        put_byte(pb,1); // AAC raw
    else if (enc->codec_id == CODEC_ID_H264) {
        put_byte(pb,1); // AVC NALU
        put_be24(pb,pkt->pts - pkt->dts);
    }

    put_buffer(pb, data ? data : pkt->data, size);
    put_be32(pb,size+flags_size+11); // previous tag size
    flv->duration = FFMAX(flv->duration, pkt->pts + flv->delay + pkt->duration);

    put_flush_packet(pb);

    av_free(data);

    return 0;
}
static int flv_write_header(AVFormatContext *s)
{
    ByteIOContext *pb = &s->pb;
    FLVContext *flv = s->priv_data;
    int i, width, height, samplerate;
    double framerate = 0.0;
    int metadata_size_pos, data_size;

    flv->hasAudio = 0;
    flv->hasVideo = 0;

    put_tag(pb,"FLV");
    put_byte(pb,1);
    put_byte(pb,0); // delayed write
    put_be32(pb,9);
    put_be32(pb,0);

    for(i=0; i<s->nb_streams; i++){
        AVCodecContext *enc = s->streams[i]->codec;
        if (enc->codec_type == CODEC_TYPE_VIDEO) {
            width = enc->width;
            height = enc->height;
            if (s->streams[i]->r_frame_rate.den && s->streams[i]->r_frame_rate.num) {
                framerate = av_q2d(s->streams[i]->r_frame_rate);
            } else {
                framerate = 1/av_q2d(s->streams[i]->codec->time_base);
            }
            flv->hasVideo=1;
        } else {
            flv->hasAudio=1;
            samplerate = enc->sample_rate;
        }
        av_set_pts_info(s->streams[i], 24, 1, 1000); /* 24 bit pts in ms */

        if(enc->codec_tag == 5){
            put_byte(pb,8);  // message type
            put_be24(pb,0);  // include flags
            put_be24(pb,0);  // time stamp
            put_be32(pb,0);  // reserved
            put_be32(pb,11); // size
            flv->reserved=5;
        }
        if(enc->codec_type == CODEC_TYPE_AUDIO && get_audio_flags(enc)<0)
            return -1;
    }

    /* write meta_tag */
    put_byte(pb, 18);         // tag type META
    metadata_size_pos= url_ftell(pb);
    put_be24(pb, 0);          // size of data part (sum of all parts below)
    put_be24(pb, 0);          // time stamp
    put_be32(pb, 0);          // reserved

    /* now data of data_size size */

    /* first event name as a string */
    put_byte(pb, AMF_STRING); // 1 byte
    put_amf_string(pb, "onMetaData"); // 12 bytes

    /* mixed array (hash) with size and string/type/data tuples */
    put_byte(pb, AMF_MIXED_ARRAY);
    put_be32(pb, 4*flv->hasVideo + flv->hasAudio + 2); // +2 for duration and file size

    put_amf_string(pb, "duration");
    flv->duration_offset= url_ftell(pb);
    put_amf_double(pb, 0); // delayed write

    if(flv->hasVideo){
        put_amf_string(pb, "width");
        put_amf_double(pb, width);

        put_amf_string(pb, "height");
        put_amf_double(pb, height);

        put_amf_string(pb, "videodatarate");
        put_amf_double(pb, s->bit_rate / 1024.0);

        put_amf_string(pb, "framerate");
        put_amf_double(pb, framerate);
    }

    if(flv->hasAudio){
        put_amf_string(pb, "audiosamplerate");
        put_amf_double(pb, samplerate);
    }

    put_amf_string(pb, "filesize");
    flv->filesize_offset= url_ftell(pb);
    put_amf_double(pb, 0); // delayed write

    put_amf_string(pb, "");
    put_byte(pb, 9); // end marker 1 byte

    /* write total size of tag */
    data_size= url_ftell(pb) - metadata_size_pos - 10;
    url_fseek(pb, metadata_size_pos, SEEK_SET);
    put_be24(pb, data_size);
    url_fseek(pb, data_size + 10 - 3, SEEK_CUR);
    put_be32(pb, data_size + 11);

    return 0;
}