static int latm_parse_packet(AVFormatContext *ctx, PayloadContext *data,
                             AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                             const uint8_t *buf, int len, int flags)
{
	int ret, cur_len;

	if (buf)
	{
		if (!data->dyn_buf || data->timestamp != *timestamp)
		{
			av_freep(&data->buf);
			if (data->dyn_buf)
				avio_close_dyn_buf(data->dyn_buf, &data->buf);
			data->dyn_buf = NULL;
			av_freep(&data->buf);

			data->timestamp = *timestamp;
			if ((ret = avio_open_dyn_buf(&data->dyn_buf)) < 0)
				return ret;
		}
		avio_write(data->dyn_buf, buf, len);

		if (!(flags & RTP_FLAG_MARKER))
			return AVERROR(EAGAIN);
		av_free(data->buf);
		data->len = avio_close_dyn_buf(data->dyn_buf, &data->buf);
		data->dyn_buf = NULL;
		data->pos = 0;
	}

	if (!data->buf)
	{
		av_log(ctx, AV_LOG_ERROR, "No data available yet\n");
		return AVERROR(EIO);
	}

	cur_len = 0;
	while (data->pos < data->len)
	{
		uint8_t val = data->buf[data->pos++];
		cur_len += val;
		if (val != 0xff)
			break;
	}
	if (data->pos + cur_len > data->len)
	{
		av_log(ctx, AV_LOG_ERROR, "Malformed LATM packet\n");
		return AVERROR(EIO);
	}

	if ((ret = av_new_packet(pkt, cur_len)) < 0)
		return ret;
	memcpy(pkt->data, data->buf + data->pos, cur_len);
	data->pos += cur_len;
	pkt->stream_index = st->index;
	return data->pos < data->len;
}
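The accumulate-and-drain idiom above (open a dynamic buffer, append each RTP fragment, then close it into a single flat allocation once the marker bit arrives) is the pattern most of the following examples revolve around. Below is a minimal, self-contained sketch of the avio_open_dyn_buf / avio_write / avio_close_dyn_buf lifecycle, with invented payload bytes and error handling trimmed to the essentials; it illustrates the API and is not code from any of the projects quoted here.

#include <libavformat/avio.h>

/* Minimal sketch of the dynamic-buffer lifecycle: everything written to the
 * AVIOContext is kept in memory, and avio_close_dyn_buf() hands it back as
 * one allocation that the caller must release with av_free(). */
static int collect_fragments(uint8_t **out, int *out_size)
{
    AVIOContext *dyn = NULL;
    static const uint8_t frag1[] = { 0x01, 0x02 }; /* invented payload */
    static const uint8_t frag2[] = { 0x03, 0x04 };
    int ret;

    if ((ret = avio_open_dyn_buf(&dyn)) < 0)
        return ret;

    avio_write(dyn, frag1, sizeof(frag1));
    avio_write(dyn, frag2, sizeof(frag2));

    *out_size = avio_close_dyn_buf(dyn, out); /* returns the byte count */
    return 0;
}

The caller frees *out with av_free() once the data has been consumed, exactly as latm_parse_packet above eventually releases data->buf.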
Example #2
int ff_mov_add_hinted_packet(AVFormatContext *s, AVPacket *pkt,
                             int track_index, int sample,
                             uint8_t *sample_data, int sample_size)
{
    MOVMuxContext *mov = s->priv_data;
    MOVTrack *trk = &mov->tracks[track_index];
    AVFormatContext *rtp_ctx = trk->rtp_ctx;
    uint8_t *buf = NULL;
    int size;
    AVIOContext *hintbuf = NULL;
    AVPacket hint_pkt;
    int ret = 0, count;

    if (!rtp_ctx)
        return AVERROR(ENOENT);
    if (!rtp_ctx->pb)
        return AVERROR(ENOMEM);

    if (sample_data)
        sample_queue_push(&trk->sample_queue, sample_data, sample_size, sample);
    else
        sample_queue_push(&trk->sample_queue, pkt->data, pkt->size, sample);

    /* Feed the packet to the RTP muxer */
    ff_write_chained(rtp_ctx, 0, pkt, s);

    /* Fetch the output from the RTP muxer, open a new output buffer
     * for next time. */
    size = avio_close_dyn_buf(rtp_ctx->pb, &buf);
    if ((ret = ffio_open_dyn_packet_buf(&rtp_ctx->pb,
                                        RTP_MAX_PACKET_SIZE)) < 0)
        goto done;

    if (size <= 0)
        goto done;

    /* Open a buffer for writing the hint */
    if ((ret = avio_open_dyn_buf(&hintbuf)) < 0)
        goto done;
    av_init_packet(&hint_pkt);
    count = write_hint_packets(hintbuf, buf, size, trk, &hint_pkt.dts);
    av_freep(&buf);

    /* Write the hint data into the hint track */
    hint_pkt.size = size = avio_close_dyn_buf(hintbuf, &buf);
    hint_pkt.data = buf;
    hint_pkt.pts  = hint_pkt.dts;
    hint_pkt.stream_index = track_index;
    if (pkt->flags & AV_PKT_FLAG_KEY)
        hint_pkt.flags |= AV_PKT_FLAG_KEY;
    if (count > 0)
        ff_mov_write_packet(s, &hint_pkt);
done:
    av_free(buf);
    sample_queue_retain(&trk->sample_queue);
    return ret;
}
Example #3
File: id3v2.c Project: genesi/libav
/**
 * Decode characters to UTF-8 according to encoding type. The decoded buffer is
 * always null terminated. Stop reading when either *maxread bytes are read from
 * pb or U+0000 character is found.
 *
 * @param dst Pointer where the address of the buffer with the decoded bytes is
 * stored. Buffer must be freed by caller.
 * @param maxread Pointer to maximum number of characters to read from the
 * AVIOContext. After execution the value is decremented by the number of bytes
 * actually read.
 * @returns 0 if no error occurred, dst is uninitialized on error
 */
static int decode_str(AVFormatContext *s, AVIOContext *pb, int encoding,
                      uint8_t **dst, int *maxread)
{
    int ret;
    uint8_t tmp;
    uint32_t ch = 1;
    int left = *maxread;
    unsigned int (*get)(AVIOContext*) = avio_rb16;
    AVIOContext *dynbuf;

    if ((ret = avio_open_dyn_buf(&dynbuf)) < 0) {
        av_log(s, AV_LOG_ERROR, "Error opening memory stream\n");
        return ret;
    }

    switch (encoding) {

    case ID3v2_ENCODING_ISO8859:
        while (left && ch) {
            ch = avio_r8(pb);
            PUT_UTF8(ch, tmp, avio_w8(dynbuf, tmp);)
            left--;
        }
        break;

    case ID3v2_ENCODING_UTF16BOM:
        if ((left -= 2) < 0) {
            av_log(s, AV_LOG_ERROR, "Cannot read BOM value, input too short\n");
            avio_close_dyn_buf(dynbuf, dst);
            av_freep(dst);
            return AVERROR_INVALIDDATA;
        }
        switch (avio_rb16(pb)) {
        case 0xfffe:
            get = avio_rl16;
        case 0xfeff:
            break;
        default:
            av_log(s, AV_LOG_ERROR, "Incorrect BOM value\n");
            avio_close_dyn_buf(dynbuf, dst);
            av_freep(dst);
            *maxread = left;
            return AVERROR_INVALIDDATA;
        }
        // fall-through

    case ID3v2_ENCODING_UTF16BE:
        while ((left > 1) && ch) {
            GET_UTF16(ch, ((left -= 2) >= 0 ? get(pb) : 0), break;)
            PUT_UTF8(ch, tmp, avio_w8(dynbuf, tmp);)
        }
        break;

    case ID3v2_ENCODING_UTF8:
        while (left && ch) {
            ch = avio_r8(pb);
            avio_w8(dynbuf, ch);
            left--;
        }
        break;
    default:
        av_log(s, AV_LOG_WARNING, "Unknown encoding\n");
    }

    if (ch)
        avio_w8(dynbuf, 0);

    avio_close_dyn_buf(dynbuf, dst);
    *maxread = left;

    return 0;
}
void FFMPEG_Wrapper::get_trailer(std::vector<uint8_t>& trailer)
{
  std::size_t size;
  uint8_t* output_buf;

  boost::mutex::scoped_lock lock(frame_mutex_);

  if (avio_open_dyn_buf(&ffmpeg_format_context_->pb) >= 0)
  {
    ffmpeg_format_context_->pb->seekable = 0;

    if (av_write_trailer(ffmpeg_format_context_) < 0)
    {
      fprintf(stderr, "Error occurred when writing the trailer\n");
      // close and free the dynamic buffer so it is not leaked on the error path
      avio_close_dyn_buf(ffmpeg_format_context_->pb, &output_buf);
      av_free(output_buf);
      return;
    }

    size = avio_close_dyn_buf(ffmpeg_format_context_->pb, &output_buf);

    // copy trailer buffer to vector
    trailer.resize(size);
    memcpy(&trailer[0], output_buf, size);

    av_free(output_buf);
  }
}
void FFMPEG_Wrapper::get_header(std::vector<uint8_t>& header)
{
  boost::mutex::scoped_lock lock(frame_mutex_);

  std::size_t size;
  uint8_t* output_buf;

  // define meta data
  av_dict_set(&ffmpeg_format_context_->metadata, "author"   , "ROS topic http streamer"   , 0);
  av_dict_set(&ffmpeg_format_context_->metadata, "comment"  , "this is awesome"  , 0);
  av_dict_set(&ffmpeg_format_context_->metadata, "copyright", "BSD", 0);
  av_dict_set(&ffmpeg_format_context_->metadata, "title"    , "ROS Topic Stream" , 0);

  if (avio_open_dyn_buf(&ffmpeg_format_context_->pb) >= 0)
  {
    ffmpeg_format_context_->pb->seekable = 0;

    if (avformat_write_header(ffmpeg_format_context_, NULL) < 0)
    {
      fprintf(stderr, "Error occurred when writing the header\n");
      // close and free the dynamic buffer so it is not leaked on the error path
      avio_close_dyn_buf(ffmpeg_format_context_->pb, &output_buf);
      av_free(output_buf);
      return;
    }

    //av_dict_free(&ffmpeg_format_context_->metadata);

    size = avio_close_dyn_buf(ffmpeg_format_context_->pb, &output_buf);

    // copy header buffer to vector
    header.resize(size);
    memcpy(&header[0], output_buf, size);

    av_free(output_buf);
  }
}
Example #6
static int sff_write_header(AVFormatContext *ctx)
{
	uint32_t size = 0;
	uint8_t *buf = NULL;
	int ret = 0;
	AVIOContext *pb = NULL, *old = NULL;

	if(!ctx){
		return -1;
	}
	old = ctx->pb;

	if(avio_open_dyn_buf(&pb) < 0){
		return -1;
	}
	ctx->pb = pb;
	if(avformat_write_header(ctx, NULL) < 0){
		/* don't leak the dynamic buffer on failure */
		avio_close_dyn_buf(pb, &buf);
		av_freep(&buf);
		ctx->pb = old;
		return -1;
	}

	size = avio_close_dyn_buf(ctx->pb, &buf);
	ctx->pb = old;
	if(size > 0)
		ret = sff_write_block(ctx->pb, 1, buf, size);
	av_freep(&buf);
	return ret;
}
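sff_write_block() itself is not part of this excerpt. Judging only from how examples #6 and #27 call it (type 1 for the buffered header, type 2 for a data block), a hypothetical sketch of such a helper could frame the payload as a type byte plus a 32-bit length; the real project may well use a different layout.

/* Hypothetical sketch of sff_write_block(), which is not shown in these
 * excerpts: frame the buffered muxer output as
 * [1-byte type][4-byte big-endian length][payload]. */
static int sff_write_block(AVIOContext *pb, uint8_t type,
                           const uint8_t *buf, uint32_t size)
{
    if (!pb || !buf || !size)
        return -1;
    avio_w8(pb, type);
    avio_wb32(pb, size);
    avio_write(pb, buf, size);
    avio_flush(pb);
    return 0;
}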
Example #7
// Destructor  //call flush before any encoder gets deleted
EncoderFfmpegCore::~EncoderFfmpegCore() {

    qDebug() << "EncoderFfmpegCore::~EncoderFfmpegCore()";
    av_freep(&m_pSamples);
    av_freep(&m_pFltSamples);

    avio_open_dyn_buf(&m_pEncodeFormatCtx->pb);
    if (av_write_trailer(m_pEncodeFormatCtx) != 0) {
        qDebug() << "Multiplexer: failed to write a trailer.";
    } else {
        unsigned char *l_strBuffer = NULL;
        int l_iBufferLen = 0;
        l_iBufferLen = avio_close_dyn_buf(m_pEncodeFormatCtx->pb,
                                          (uint8_t**)(&l_strBuffer));
        m_pCallback->write(NULL, l_strBuffer, 0, l_iBufferLen);
        av_free(l_strBuffer);
    }


    if (m_pStream != NULL) {
        avcodec_close(m_pStream->codec);
    }

    if (m_pEncodeFormatCtx != NULL) {
        av_free(m_pEncodeFormatCtx);
    }

    // Close buffer
    delete m_pResample;
}
Example #8
static int chunk_end(AVFormatContext *s)
{
    WebMChunkContext *wc = s->priv_data;
    AVFormatContext *oc = wc->avf;
    int ret;
    int buffer_size;
    uint8_t *buffer;
    AVIOContext *pb;
    char filename[MAX_FILENAME_SIZE];

    if (wc->chunk_start_index == wc->chunk_index)
        return 0;
    // Flush the cluster in WebM muxer.
    oc->oformat->write_packet(oc, NULL);
    buffer_size = avio_close_dyn_buf(oc->pb, &buffer);
    ret = get_chunk_filename(s, 0, filename);
    if (ret < 0)
        goto fail;
    ret = avio_open2(&pb, filename, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL);
    if (ret < 0)
        goto fail;
    avio_write(pb, buffer, buffer_size);
    ret = avio_close(pb);
    if (ret < 0)
        goto fail;
    oc->pb = NULL;
fail:
    av_free(buffer);
    return (ret < 0) ? ret : 0;
}
Example #9
static void prepare_packet(AVPacket *pkt, PayloadContext *vp8, int stream)
{
    av_init_packet(pkt);
    pkt->stream_index = stream;
    pkt->size         = avio_close_dyn_buf(vp8->data, &pkt->data);
    pkt->destruct     = av_destruct_packet;
    vp8->data         = NULL;
}
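prepare_packet() transfers ownership of the dynamic buffer's memory to the AVPacket: avio_close_dyn_buf() hands over the allocation, and pkt->destruct = av_destruct_packet lets av_free_packet() release it later. A hypothetical depacketizer loop built around it might look like the sketch below; the start_of_frame / end_of_frame flags stand in for whatever the real payload header signals, and the PayloadContext layout is assumed to match the snippet above.

/* Hypothetical caller sketch: accumulate RTP payload fragments in vp8->data
 * and emit one AVPacket per complete frame via prepare_packet() above. */
static int vp8_handle_fragment(PayloadContext *vp8, AVPacket *pkt,
                               const uint8_t *payload, int len,
                               int start_of_frame, int end_of_frame,
                               int stream_index)
{
    int ret;

    if (start_of_frame) {
        if (vp8->data) {                 /* drop an incomplete previous frame */
            uint8_t *tmp;
            avio_close_dyn_buf(vp8->data, &tmp);
            av_free(tmp);
            vp8->data = NULL;
        }
        if ((ret = avio_open_dyn_buf(&vp8->data)) < 0)
            return ret;
    }
    if (!vp8->data)                      /* fragment without a frame start */
        return AVERROR(EAGAIN);

    avio_write(vp8->data, payload, len);

    if (end_of_frame) {
        prepare_packet(pkt, vp8, stream_index);
        return 0;                        /* packet is ready */
    }
    return AVERROR(EAGAIN);              /* wait for more fragments */
}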
Example #10
AVFormatContext *ff_rtp_chain_mux_open(AVFormatContext *s, AVStream *st,
                                       URLContext *handle, int packet_size)
{
    AVFormatContext *rtpctx;
    int ret;
    AVOutputFormat *rtp_format = av_guess_format("rtp", NULL, NULL);

    if (!rtp_format)
        return NULL;

    /* Allocate an AVFormatContext for each output stream */
    rtpctx = avformat_alloc_context();
    if (!rtpctx)
        return NULL;

    rtpctx->oformat = rtp_format;
    if (!av_new_stream(rtpctx, 0)) {
        av_free(rtpctx);
        return NULL;
    }
    /* Copy the max delay setting; the rtp muxer reads this. */
    rtpctx->max_delay = s->max_delay;
    /* Copy other stream parameters. */
    rtpctx->streams[0]->sample_aspect_ratio = st->sample_aspect_ratio;
    rtpctx->flags |= s->flags & AVFMT_FLAG_MP4A_LATM;

    av_set_parameters(rtpctx, NULL);
    /* Copy the rtpflags values straight through */
    if (s->oformat->priv_class &&
        av_find_opt(s->priv_data, "rtpflags", NULL, 0, 0))
        av_set_int(rtpctx->priv_data, "rtpflags",
                   av_get_int(s->priv_data, "rtpflags", NULL));

    /* Set the synchronized start time. */
    rtpctx->start_time_realtime = s->start_time_realtime;

    avcodec_copy_context(rtpctx->streams[0]->codec, st->codec);

    if (handle) {
        ffio_fdopen(&rtpctx->pb, handle);
    } else
        ffio_open_dyn_packet_buf(&rtpctx->pb, packet_size);
    ret = avformat_write_header(rtpctx, NULL);

    if (ret) {
        if (handle) {
            avio_close(rtpctx->pb);
        } else {
            uint8_t *ptr;
            avio_close_dyn_buf(rtpctx->pb, &ptr);
            av_free(ptr);
        }
        avformat_free_context(rtpctx);
        return NULL;
    }

    return rtpctx;
}
Example #11
static void prepare_packet(AVPacket *pkt, PayloadContext *vp8, int stream)
{
    av_init_packet(pkt);
    pkt->stream_index = stream;
    pkt->flags        = vp8->is_keyframe ? AV_PKT_FLAG_KEY : 0;
    pkt->size         = avio_close_dyn_buf(vp8->data, &pkt->data);
    pkt->destruct     = av_destruct_packet;
    vp8->data         = NULL;
}
Example #12
static inline void free_frame_if_needed(PayloadContext *jpeg)
{
    if (jpeg->frame) {
        uint8_t *p;
        avio_close_dyn_buf(jpeg->frame, &p);
        av_free(p);
        jpeg->frame = NULL;
    }
}
Example #13
static void vp8_free_context(PayloadContext *vp8)
{
    if (vp8->data) {
        uint8_t *tmp;
        avio_close_dyn_buf(vp8->data, &tmp);
        av_free(tmp);
    }
    av_free(vp8);
}
Example #14
static void write_header_chunk(AVIOContext *pb, AVIOContext *dpb, unsigned id)
{
    uint8_t *dyn_buf;
    int dyn_size= avio_close_dyn_buf(dpb, &dyn_buf);
    avio_wb32(pb, id);
    avio_wb32(pb, dyn_size);
    avio_write(pb, dyn_buf, dyn_size);
    av_free(dyn_buf);
}
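write_header_chunk() consumes the dynamic buffer passed to it: it closes dpb, writes the id and a 32-bit size in front of the payload, and frees the memory. A hypothetical caller sketch follows; the 'EXMP' id and the two fields are invented for illustration.

/* Hypothetical caller: serialize one header's fields into a throwaway dynamic
 * buffer, then let write_header_chunk() emit it as [id][size][payload]. */
static int write_example_chunk(AVIOContext *pb)
{
    AVIOContext *dyn = NULL;
    int ret = avio_open_dyn_buf(&dyn);
    if (ret < 0)
        return ret;

    avio_wb32(dyn, 640); /* e.g. a width field  */
    avio_wb32(dyn, 480); /* e.g. a height field */

    /* write_header_chunk() closes and frees the dynamic buffer itself */
    write_header_chunk(pb, dyn, MKBETAG('E', 'X', 'M', 'P'));
    return 0;
}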
Example #15
static void svq3_extradata_free(PayloadContext *sv)
{
    if (sv->pktbuf) {
        uint8_t *buf;
        avio_close_dyn_buf(sv->pktbuf, &buf);
        av_free(buf);
    }
    av_free(sv);
}
Example #16
AVFormatContext *ff_rtp_chain_mux_open(AVFormatContext *s, AVStream *st,
                                       URLContext *handle, int packet_size)
{
    AVFormatContext *rtpctx;
    int ret;
    AVOutputFormat *rtp_format = av_guess_format("rtp", NULL, NULL);
    uint8_t *rtpflags;
    AVDictionary *opts = NULL;

    if (!rtp_format)
        return NULL;

    /* Allocate an AVFormatContext for each output stream */
    rtpctx = avformat_alloc_context();
    if (!rtpctx)
        return NULL;

    rtpctx->oformat = rtp_format;
    if (!av_new_stream(rtpctx, 0)) {
        av_free(rtpctx);
        return NULL;
    }
    /* Copy the max delay setting; the rtp muxer reads this. */
    rtpctx->max_delay = s->max_delay;
    /* Copy other stream parameters. */
    rtpctx->streams[0]->sample_aspect_ratio = st->sample_aspect_ratio;

    if (av_opt_get(s, "rtpflags", AV_OPT_SEARCH_CHILDREN, &rtpflags) >= 0)
        av_dict_set(&opts, "rtpflags", rtpflags, AV_DICT_DONT_STRDUP_VAL);

    /* Set the synchronized start time. */
    rtpctx->start_time_realtime = s->start_time_realtime;

    avcodec_copy_context(rtpctx->streams[0]->codec, st->codec);

    if (handle) {
        ffio_fdopen(&rtpctx->pb, handle);
    } else
        ffio_open_dyn_packet_buf(&rtpctx->pb, packet_size);
    ret = avformat_write_header(rtpctx, &opts);
    av_dict_free(&opts);

    if (ret) {
        if (handle) {
            avio_close(rtpctx->pb);
        } else {
            uint8_t *ptr;
            avio_close_dyn_buf(rtpctx->pb, &ptr);
            av_free(ptr);
        }
        avformat_free_context(rtpctx);
        return NULL;
    }

    return rtpctx;
}
Example #17
static char *choose_pix_fmts(OutputStream *ost)
{
    AVDictionaryEntry *strict_dict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
    if(strict_dict)
        // used by choose_pixel_fmt() and below
    {
        av_opt_set(ost->enc_ctx, "strict", strict_dict->value, 0);
    }
    
    if(ost->keep_pix_fmt)
    {
        if(ost->filter)
            avfilter_graph_set_auto_convert(ost->filter->graph->graph,
                                            AVFILTER_AUTO_CONVERT_NONE);
        if(ost->enc_ctx->pix_fmt == AV_PIX_FMT_NONE)
        {
            return NULL;
        }
        return av_strdup(av_get_pix_fmt_name(ost->enc_ctx->pix_fmt));
    }
    if(ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE)
    {
        return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc_ctx, ost->enc, ost->enc_ctx->pix_fmt)));
    }
    else if(ost->enc && ost->enc->pix_fmts)
    {
        const enum AVPixelFormat *p;
        AVIOContext *s = NULL;
        uint8_t *ret;
        int len;
        
        if(avio_open_dyn_buf(&s) < 0)
        {
            exit_program(1);
        }
        
        p = ost->enc->pix_fmts;
        if(ost->enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL)
        {
            p = get_compliance_unofficial_pix_fmts(ost->enc_ctx->codec_id, p);
        }
        
        for(; *p != AV_PIX_FMT_NONE; p++)
        {
            const char *name = av_get_pix_fmt_name(*p);
            avio_printf(s, "%s|", name);
        }
        len = avio_close_dyn_buf(s, &ret);
        ret[len - 1] = 0;
        return ret;
    }
    else
    {
        return NULL;
    }
}
Example #18
int ff_rtp_finalize_packet(AVPacket *pkt, AVIOContext **dyn_buf, int stream_idx)
{
    av_init_packet(pkt);

    pkt->size         = avio_close_dyn_buf(*dyn_buf, &pkt->data);
    pkt->stream_index = stream_idx;
    pkt->destruct     = av_destruct_packet;
    *dyn_buf          = NULL;
    return pkt->size;
}
static void asfrtp_free_context(PayloadContext *asf)
{
    if (asf->pktbuf) {
        uint8_t *p = NULL;
        avio_close_dyn_buf(asf->pktbuf, &p);
        asf->pktbuf = NULL;
        av_free(p);
    }
    av_freep(&asf->buf);
    av_free(asf);
}
Example #20
static void rsfec_free_context(PayloadContext *data)
{
    if (!data)
        return;
    if (data->buf) {
        uint8_t *p;
        avio_close_dyn_buf(data->buf, &p);
        av_free(p);
    }
    av_free(data);
}
Example #21
File: avc.c Project: AVbin/libav
int ff_avc_parse_nal_units_buf(const uint8_t *buf_in, uint8_t **buf, int *size)
{
    AVIOContext *pb;
    int ret = avio_open_dyn_buf(&pb);
    if(ret < 0)
        return ret;

    ff_avc_parse_nal_units(pb, buf_in, *size);

    av_freep(buf);
    *size = avio_close_dyn_buf(pb, buf);
    return 0;
}
Example #22
static void put_str16(AVIOContext *s, const char *tag)
{
    int len;
    uint8_t *pb;
    AVIOContext *dyn_buf;
    if (avio_open_dyn_buf(&dyn_buf) < 0)
        return;

    avio_put_str16le(dyn_buf, tag);
    len = avio_close_dyn_buf(dyn_buf, &pb);
    avio_wl16(s, len);
    avio_write(s, pb, len);
    av_freep(&pb);
}
Example #23
void ff_rtp_send_punch_packets(URLContext *rtp_handle)
{
    AVIOContext *pb;
    uint8_t *buf;
    int len;

    /* Send a small RTP packet */
    if (avio_open_dyn_buf(&pb) < 0)
        return;

    avio_w8(pb, (RTP_VERSION << 6));
    avio_w8(pb, 0); /* Payload type */
    avio_wb16(pb, 0); /* Seq */
    avio_wb32(pb, 0); /* Timestamp */
    avio_wb32(pb, 0); /* SSRC */

    avio_flush(pb);
    len = avio_close_dyn_buf(pb, &buf);
    if ((len > 0) && buf)
        ffurl_write(rtp_handle, buf, len);
    av_free(buf);

    /* Send a minimal RTCP RR */
    if (avio_open_dyn_buf(&pb) < 0)
        return;

    avio_w8(pb, (RTP_VERSION << 6));
    avio_w8(pb, RTCP_RR); /* receiver report */
    avio_wb16(pb, 1); /* length in words - 1 */
    avio_wb32(pb, 0); /* our own SSRC */

    avio_flush(pb);
    len = avio_close_dyn_buf(pb, &buf);
    if ((len > 0) && buf)
        ffurl_write(rtp_handle, buf, len);
    av_free(buf);
}
Example #24
void ff_mov_close_hinting(MOVTrack *track) {
    AVFormatContext* rtp_ctx = track->rtp_ctx;
    uint8_t *ptr;

    av_freep(&track->enc);
    sample_queue_free(&track->sample_queue);
    if (!rtp_ctx)
        return;
    if (rtp_ctx->pb) {
        av_write_trailer(rtp_ctx);
        avio_close_dyn_buf(rtp_ctx->pb, &ptr);
        av_free(ptr);
    }
    avformat_free_context(rtp_ctx);
}
Example #25
bool MediaConverter::imgs2media(LPRImage *pRawImages[], size_t imgCount, EventMedia &eventMedia)
{
	if (imgCount == 0)
	{
		printf("Input is empty.\n");
		return true;
	}
	//////////////////////////////////////////////////////////////////////////
	if(!initialize(pRawImages[0], "temp.avi"))
	{
		printf("Failed to initialize.\n");
		return false;
	}
	//////////////////////////////////////////////////////////////////////////
	if (!(mOutputFormatCtxPtr->flags & AVFMT_NOFILE))
	{
		/*if (avio_open(&mOutputFormatCtxPtr->pb, mediaName.c_str(), AVIO_FLAG_WRITE) < 0)
		{
		printf("Could not open %s.\n", mediaName.c_str());
		return false;
		}*/
		if (avio_open_dyn_buf(&mOutputFormatCtxPtr->pb) < 0)
		{
			printf("Could not open avio buff.\n");
			return false;
		}
	}
	//////////////////////////////////////////////////////////////////////////
	// Output
	avformat_write_header(mOutputFormatCtxPtr, NULL);
	for (size_t i = 0; i < imgCount; ++ i)
		outputFrame(pRawImages[i]);
	flushFrames();
	av_write_trailer(mOutputFormatCtxPtr);
	//////////////////////////////////////////////////////////////////////////
	if (!(mOutputFormatCtxPtr->flags & AVFMT_NOFILE))
	{
		//avio_close(mOutputFormatCtxPtr->pb);
		eventMedia.mBufferSize = avio_close_dyn_buf(mOutputFormatCtxPtr->pb, &eventMedia.mBufferPtr);
	}
	//////////////////////////////////////////////////////////////////////////
	// clean up the encoder/muxer state
	uninitialize();

	return true;
}
Example #26
static int asf_write_markers(AVFormatContext *s)
{
    ASFContext *asf = s->priv_data;
    AVIOContext *pb = s->pb;
    int i;
    AVRational scale = {1, 10000000};
    int64_t hpos = put_header(pb, &ff_asf_marker_header);

    ff_put_guid(pb, &ff_asf_reserved_4);// ASF spec mandates this reserved value
    avio_wl32(pb, s->nb_chapters);     // markers count
    avio_wl16(pb, 0);                  // ASF spec mandates 0 for this
    avio_wl16(pb, 0);                  // name length 0, no name given

    for (i = 0; i < s->nb_chapters; i++) {
        AVChapter *c = s->chapters[i];
        AVDictionaryEntry *t = av_dict_get(c->metadata, "title", NULL, 0);
        int64_t pres_time = av_rescale_q(c->start, c->time_base, scale);
        uint64_t offset;
        int32_t send_time = get_send_time(asf, pres_time, &offset);
        int len = 0;
        uint8_t *buf;
        AVIOContext *dyn_buf;
        if (t) {
            if (avio_open_dyn_buf(&dyn_buf) < 0)
                return AVERROR(ENOMEM);
            avio_put_str16le(dyn_buf, t->value);
            len = avio_close_dyn_buf(dyn_buf, &buf);
        }
        avio_wl64(pb, offset);            // offset of the packet with send_time
        avio_wl64(pb, pres_time + PREROLL_TIME * 10000); // presentation time
        avio_wl16(pb, 12 + len);          // entry length
        avio_wl32(pb, send_time);         // send time
        avio_wl32(pb, 0);                 // flags, should be 0
        avio_wl32(pb, len / 2);           // marker desc length in WCHARS!
        if (t) {
            avio_write(pb, buf, len);     // marker desc
            av_freep(&buf);
        }
    }
    end_header(pb, hpos);
    return 0;
}
Example #27
static int sff_write_packet(AVFormatContext *ctx, AVPacket *pkt)
{
	AVIOContext *pb = NULL, *old = NULL;
	uint8_t *buf = NULL;
	int size = 0, frame_size = -1, ret = -1;
	static int seq = 0;
	++seq;

	if(!ctx || !ctx->pb)return 0;
	if(avio_open_dyn_buf(&pb) < 0){
		return -1;
	}
	pb->seekable = 0;
	old = ctx->pb;
	ctx->pb = pb;
	
	/*add more pkt info if needed, here stream_index only for illustration.*/
	if(pkt){
		avio_wb32(pb, pkt->stream_index);
		ret = av_interleaved_write_frame(ctx, pkt);
	}else{
		avio_wb32(pb, (uint32_t)-1);
		ret = av_write_trailer(ctx);
	}
	size = avio_close_dyn_buf(pb, &buf);
	frame_size = size - 4;
	ctx->pb = old;

	if(frame_size <= 0){
		//printf("write frame fail err %x %d:%d\n", ret, seq, frame_size);
		goto fail; /* not an error: no frame data was produced */
	}
	if(ret < 0){
		printf("write frame fail err %x %d:%d\n", ret, seq, frame_size);
		goto fail;
	}
	ret = sff_write_block(ctx->pb, 2, buf, size);
fail:
	av_freep(&buf);	
	return ret;
}
Example #28
static int tcp_write_packet(AVFormatContext *s, RTSPStream *rtsp_st)
{
    RTSPState *rt = s->priv_data;
    AVFormatContext *rtpctx = rtsp_st->transport_priv;
    uint8_t *buf, *ptr;
    int size;
    uint8_t *interleave_header, *interleaved_packet;

    size = avio_close_dyn_buf(rtpctx->pb, &buf);
    ptr = buf;
    while (size > 4) {
        uint32_t packet_len = AV_RB32(ptr);
        int id;
        /* The interleaving header is exactly 4 bytes, which happens to be
         * the same size as the packet length header from
         * ffio_open_dyn_packet_buf. So by writing the interleaving header
         * over these bytes, we get a consecutive interleaved packet
         * that can be written in one call. */
        interleaved_packet = interleave_header = ptr;
        ptr += 4;
        size -= 4;
        if (packet_len > size || packet_len < 2)
            break;
        if (ptr[1] >= RTCP_SR && ptr[1] <= RTCP_APP)
            id = rtsp_st->interleaved_max; /* RTCP */
        else
            id = rtsp_st->interleaved_min; /* RTP */
        interleave_header[0] = '$';
        interleave_header[1] = id;
        AV_WB16(interleave_header + 2, packet_len);
        ffurl_write(rt->rtsp_hd_out, interleaved_packet, 4 + packet_len);
        ptr += packet_len;
        size -= packet_len;
    }
    av_free(buf);
    ffio_open_dyn_packet_buf(&rtpctx->pb, RTSP_TCP_MAX_PACKET_SIZE);
    return 0;
}
Example #29
static int ogg_write_page(AVFormatContext *s, OGGPage *page, int extra_flags)
{
    OGGStreamContext *oggstream = (OGGStreamContext *)s->streams[page->stream_index]->priv_data;
    AVIOContext *pb;
    int64_t crc_offset;
    int ret, size;
    uint8_t *buf;

    ret = avio_open_dyn_buf(&pb);
    if (ret < 0)
        return ret;
    ffio_init_checksum(pb, ff_crc04C11DB7_update, 0);
    ffio_wfourcc(pb, (const uint8_t *)"OggS");
    avio_w8(pb, 0);
    avio_w8(pb, page->flags | extra_flags);
    avio_wl64(pb, page->granule);
    avio_wl32(pb, oggstream->serial_num);
    avio_wl32(pb, oggstream->page_counter++);
    crc_offset = avio_tell(pb);
    avio_wl32(pb, 0); // crc
    avio_w8(pb, page->segments_count);
    avio_write(pb, page->segments, page->segments_count);
    avio_write(pb, page->data, page->size);

    ogg_update_checksum(s, pb, crc_offset);
    avio_flush(pb);

    size = avio_close_dyn_buf(pb, &buf);
    if (size < 0)
        return size;

    avio_write(s->pb, buf, size);
    avio_flush(s->pb);
    av_free(buf);
    oggstream->page_count--;
    return 0;
}
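ogg_update_checksum() is not included in this excerpt. Given the ffio_init_checksum() call and the crc_offset reserved above, it presumably reads back the CRC accumulated so far and patches it into the page header. The following sketch is only an assumption about what that helper does, written against the ffio checksum API.

/* Sketch (assumed, not from the excerpt): fetch the CRC accumulated since
 * ffio_init_checksum(), seek back to the reserved slot, patch it in, and
 * restore the write position. */
static void ogg_update_checksum(AVFormatContext *s, AVIOContext *pb,
                                int64_t crc_offset)
{
    int64_t  pos      = avio_tell(pb);
    uint32_t checksum = ffio_get_checksum(pb);

    avio_seek(pb, crc_offset, SEEK_SET);
    avio_wb32(pb, checksum);
    avio_seek(pb, pos, SEEK_SET);
}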
Example #30
/** return 0 on packet, <0 on partial packet or error... */
static int svq3_parse_packet (AVFormatContext *s, PayloadContext *sv,
                              AVStream *st, AVPacket *pkt,
                              uint32_t *timestamp,
                              const uint8_t *buf, int len, uint16_t seq,
                              int flags)
{
    int config_packet, start_packet, end_packet;

    if (len < 2)
        return AVERROR_INVALIDDATA;

    config_packet = buf[0] & 0x40;
    start_packet  = buf[0] & 0x20;
    end_packet    = buf[0] & 0x10;
    buf += 2;     // ignore buf[1]
    len -= 2;

    if (config_packet) {

        av_freep(&st->codec->extradata);
        st->codec->extradata_size = 0;

        if (len < 2 || ff_alloc_extradata(st->codec, len + 8))
            return AVERROR_INVALIDDATA;

        memcpy(st->codec->extradata, "SEQH", 4);
        AV_WB32(st->codec->extradata + 4, len);
        memcpy(st->codec->extradata + 8, buf, len);

        /* We set codec_id to AV_CODEC_ID_NONE initially to
         * delay decoder initialization since extradata is
         * carried within the RTP stream, not SDP. Here,
         * by setting codec_id to AV_CODEC_ID_SVQ3, we are signalling
         * to the decoder that it is OK to initialize. */
        st->codec->codec_id = AV_CODEC_ID_SVQ3;

        return AVERROR(EAGAIN);
    }

    if (start_packet) {
        int res;

        if (sv->pktbuf) {
            uint8_t *tmp;
            avio_close_dyn_buf(sv->pktbuf, &tmp);
            av_free(tmp);
        }
        if ((res = avio_open_dyn_buf(&sv->pktbuf)) < 0)
            return res;
        sv->timestamp   = *timestamp;
    }

    if (!sv->pktbuf)
        return AVERROR_INVALIDDATA;

    avio_write(sv->pktbuf, buf, len);

    if (end_packet) {
        int ret = ff_rtp_finalize_packet(pkt, &sv->pktbuf, st->index);
        if (ret < 0)
            return ret;

        *timestamp        = sv->timestamp;
        return 0;
    }

    return AVERROR(EAGAIN);
}