Code Example #1
File: rtpenc_chain.c  Project: eugenehp/ffmbc
AVFormatContext *ff_rtp_chain_mux_open(AVFormatContext *s, AVStream *st,
                                       URLContext *handle, int packet_size)
{
    AVFormatContext *rtpctx;
    int ret;
    AVOutputFormat *rtp_format = av_guess_format("rtp", NULL, NULL);

    if (!rtp_format)
        return NULL;

    /* Allocate an AVFormatContext for each output stream */
    rtpctx = avformat_alloc_context();
    if (!rtpctx)
        return NULL;

    rtpctx->oformat = rtp_format;
    if (!av_new_stream(rtpctx, 0)) {
        av_free(rtpctx);
        return NULL;
    }
    /* Copy the max delay setting; the rtp muxer reads this. */
    rtpctx->max_delay = s->max_delay;
    /* Copy other stream parameters. */
    rtpctx->streams[0]->sample_aspect_ratio = st->sample_aspect_ratio;
    rtpctx->flags |= s->flags & AVFMT_FLAG_MP4A_LATM;

    av_set_parameters(rtpctx, NULL);
    /* Copy the rtpflags values straight through */
    if (s->oformat->priv_class &&
        av_find_opt(s->priv_data, "rtpflags", NULL, 0, 0))
        av_set_int(rtpctx->priv_data, "rtpflags",
                   av_get_int(s->priv_data, "rtpflags", NULL));

    /* Set the synchronized start time. */
    rtpctx->start_time_realtime = s->start_time_realtime;

    avcodec_copy_context(rtpctx->streams[0]->codec, st->codec);

    if (handle) {
        ffio_fdopen(&rtpctx->pb, handle);
    } else
        ffio_open_dyn_packet_buf(&rtpctx->pb, packet_size);
    ret = avformat_write_header(rtpctx, NULL);

    if (ret) {
        if (handle) {
            avio_close(rtpctx->pb);
        } else {
            uint8_t *ptr;
            avio_close_dyn_buf(rtpctx->pb, &ptr);
            av_free(ptr);
        }
        avformat_free_context(rtpctx);
        return NULL;
    }

    return rtpctx;
}
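
Usage sketch (not from the original project): a caller such as libavformat's RTSP or SAP muxer drives the chained context by rescaling each packet into it and tearing it down at the end. The helper below is hypothetical; it assumes the four-argument ff_write_chained() of trees from this era, and that handle was opened with ffurl_open() by the caller.

static int chain_one_packet(AVFormatContext *s, AVStream *st,
                            URLContext *handle, AVPacket *pkt)
{
    /* 1472 is a typical RTP packet budget for a 1500-byte MTU. */
    AVFormatContext *rtpctx = ff_rtp_chain_mux_open(s, st, handle, 1472);
    int ret;
    if (!rtpctx)
        return AVERROR(EIO);
    /* Rescales pkt from st's time base to the chained stream 0
     * and feeds it to the rtp muxer. */
    ret = ff_write_chained(rtpctx, 0, pkt, s);
    av_write_trailer(rtpctx);
    avio_close(rtpctx->pb); /* also closes the underlying URLContext */
    avformat_free_context(rtpctx);
    return ret;
}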
Code Example #2
File: movenchint.c  Project: AVLeo/libav
int ff_mov_add_hinted_packet(AVFormatContext *s, AVPacket *pkt,
                             int track_index, int sample,
                             uint8_t *sample_data, int sample_size)
{
    MOVMuxContext *mov = s->priv_data;
    MOVTrack *trk = &mov->tracks[track_index];
    AVFormatContext *rtp_ctx = trk->rtp_ctx;
    uint8_t *buf = NULL;
    int size;
    AVIOContext *hintbuf = NULL;
    AVPacket hint_pkt;
    int ret = 0, count;

    if (!rtp_ctx)
        return AVERROR(ENOENT);
    if (!rtp_ctx->pb)
        return AVERROR(ENOMEM);

    if (sample_data)
        sample_queue_push(&trk->sample_queue, sample_data, sample_size, sample);
    else
        sample_queue_push(&trk->sample_queue, pkt->data, pkt->size, sample);

    /* Feed the packet to the RTP muxer */
    ff_write_chained(rtp_ctx, 0, pkt, s);

    /* Fetch the output from the RTP muxer, open a new output buffer
     * for next time. */
    size = avio_close_dyn_buf(rtp_ctx->pb, &buf);
    if ((ret = ffio_open_dyn_packet_buf(&rtp_ctx->pb,
                                        RTP_MAX_PACKET_SIZE)) < 0)
        goto done;

    if (size <= 0)
        goto done;

    /* Open a buffer for writing the hint */
    if ((ret = avio_open_dyn_buf(&hintbuf)) < 0)
        goto done;
    av_init_packet(&hint_pkt);
    count = write_hint_packets(hintbuf, buf, size, trk, &hint_pkt.dts);
    av_freep(&buf);

    /* Write the hint data into the hint track */
    hint_pkt.size = size = avio_close_dyn_buf(hintbuf, &buf);
    hint_pkt.data = buf;
    hint_pkt.pts  = hint_pkt.dts;
    hint_pkt.stream_index = track_index;
    if (pkt->flags & AV_PKT_FLAG_KEY)
        hint_pkt.flags |= AV_PKT_FLAG_KEY;
    if (count > 0)
        ff_mov_write_packet(s, &hint_pkt);
done:
    av_free(buf);
    sample_queue_retain(&trk->sample_queue);
    return ret;
}
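
The close-then-reopen sequence around ffio_open_dyn_packet_buf() is the standard way to harvest a dynamic buffer while keeping the muxer context writable. A minimal sketch of the same pattern using only the public dyn-buf API (the internal packetized variant additionally length-prefixes each packet):

/* Hedged sketch: drain everything written so far into *data (the
 * caller owns it and must av_free() it) and open a fresh buffer so
 * the context can keep writing. */
static int drain_and_reopen(AVIOContext **pb, uint8_t **data, int *size)
{
    *size = avio_close_dyn_buf(*pb, data); /* returns the byte count */
    *pb   = NULL;
    return avio_open_dyn_buf(pb);          /* fresh buffer for next time */
}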
Code Example #3
File: rtpenc_chain.c  Project: ttrask/unf
AVFormatContext *ff_rtp_chain_mux_open(AVFormatContext *s, AVStream *st,
                                       URLContext *handle, int packet_size)
{
    AVFormatContext *rtpctx;
    int ret;
    AVOutputFormat *rtp_format = av_guess_format("rtp", NULL, NULL);
    uint8_t *rtpflags;
    AVDictionary *opts = NULL;

    if (!rtp_format)
        return NULL;

    /* Allocate an AVFormatContext for each output stream */
    rtpctx = avformat_alloc_context();
    if (!rtpctx)
        return NULL;

    rtpctx->oformat = rtp_format;
    if (!av_new_stream(rtpctx, 0)) {
        av_free(rtpctx);
        return NULL;
    }
    /* Copy the max delay setting; the rtp muxer reads this. */
    rtpctx->max_delay = s->max_delay;
    /* Copy other stream parameters. */
    rtpctx->streams[0]->sample_aspect_ratio = st->sample_aspect_ratio;

    if (av_opt_get(s, "rtpflags", AV_OPT_SEARCH_CHILDREN, &rtpflags) >= 0)
        av_dict_set(&opts, "rtpflags", rtpflags, AV_DICT_DONT_STRDUP_VAL);

    /* Set the synchronized start time. */
    rtpctx->start_time_realtime = s->start_time_realtime;

    avcodec_copy_context(rtpctx->streams[0]->codec, st->codec);

    if (handle) {
        ffio_fdopen(&rtpctx->pb, handle);
    } else
        ffio_open_dyn_packet_buf(&rtpctx->pb, packet_size);
    ret = avformat_write_header(rtpctx, &opts);
    av_dict_free(&opts);

    if (ret) {
        if (handle) {
            avio_close(rtpctx->pb);
        } else {
            uint8_t *ptr;
            avio_close_dyn_buf(rtpctx->pb, &ptr);
            av_free(ptr);
        }
        avformat_free_context(rtpctx);
        return NULL;
    }

    return rtpctx;
}
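
Compared with example #1, the private-option plumbing here uses the AVOption/AVDictionary API: av_opt_get() with AV_OPT_SEARCH_CHILDREN also searches s->priv_data, and AV_DICT_DONT_STRDUP_VAL hands the returned string straight to the dictionary. The same idiom, isolated into a hypothetical helper:

/* Hedged sketch: forward one named option from an outer (de)muxer
 * context into an options dictionary for a chained muxer. Ownership
 * of the string from av_opt_get() passes to the dictionary, so there
 * is no leak and no extra copy. */
static void forward_opt(AVFormatContext *src, const char *name,
                        AVDictionary **opts)
{
    uint8_t *val;
    if (av_opt_get(src, name, AV_OPT_SEARCH_CHILDREN, &val) >= 0)
        av_dict_set(opts, name, val, AV_DICT_DONT_STRDUP_VAL);
}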
Code Example #4
File: rtspenc.c  Project: simock85/libav
static int tcp_write_packet(AVFormatContext *s, RTSPStream *rtsp_st)
{
    RTSPState *rt = s->priv_data;
    AVFormatContext *rtpctx = rtsp_st->transport_priv;
    uint8_t *buf, *ptr;
    int size;
    uint8_t *interleave_header, *interleaved_packet;

    size = avio_close_dyn_buf(rtpctx->pb, &buf);
    ptr = buf;
    while (size > 4) {
        uint32_t packet_len = AV_RB32(ptr);
        int id;
        /* The interleaving header is exactly 4 bytes, which happens to be
         * the same size as the packet length header from
         * ffio_open_dyn_packet_buf. So by writing the interleaving header
         * over these bytes, we get a consecutive interleaved packet
         * that can be written in one call. */
        interleaved_packet = interleave_header = ptr;
        ptr += 4;
        size -= 4;
        if (packet_len > size || packet_len < 2)
            break;
        if (ptr[1] >= RTCP_SR && ptr[1] <= RTCP_APP)
            id = rtsp_st->interleaved_max; /* RTCP */
        else
            id = rtsp_st->interleaved_min; /* RTP */
        interleave_header[0] = '$';
        interleave_header[1] = id;
        AV_WB16(interleave_header + 2, packet_len);
        ffurl_write(rt->rtsp_hd_out, interleaved_packet, 4 + packet_len);
        ptr += packet_len;
        size -= packet_len;
    }
    av_free(buf);
    ffio_open_dyn_packet_buf(&rtpctx->pb, RTSP_TCP_MAX_PACKET_SIZE);
    return 0;
}
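
The 4-byte header written over the length prefix is the RTSP interleaved framing of RFC 2326 §10.12: a '$' magic byte, the channel id negotiated in SETUP, and a big-endian 16-bit payload length. Framing a single packet in isolation (a hypothetical standalone helper):

#include <string.h>
#include "libavutil/intreadwrite.h"

/* Hedged sketch: out must have room for 4 + len bytes. */
static int frame_interleaved(uint8_t *out, int channel,
                             const uint8_t *pkt, uint16_t len)
{
    out[0] = '$';           /* interleaved-frame magic */
    out[1] = channel;       /* RTP or RTCP channel id */
    AV_WB16(out + 2, len);  /* payload length, big endian */
    memcpy(out + 4, pkt, len);
    return 4 + len;
}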
Code Example #5
int ff_rtp_chain_mux_open(AVFormatContext **out, AVFormatContext *s,
                          AVStream *st, URLContext *handle, int packet_size,
                          int idx)
{
    AVFormatContext *rtpctx = NULL;
    int ret;
    AVOutputFormat *rtp_format = av_guess_format("rtp", NULL, NULL);
    uint8_t *rtpflags;
    AVDictionary *opts = NULL;

    if (!rtp_format) {
        ret = AVERROR(ENOSYS);
        goto fail;
    }

    /* Allocate an AVFormatContext for each output stream */
    rtpctx = avformat_alloc_context();
    if (!rtpctx) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    rtpctx->oformat = rtp_format;
    if (!avformat_new_stream(rtpctx, NULL)) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    /* Pass the interrupt callback on */
    rtpctx->interrupt_callback = s->interrupt_callback;
    /* Copy the max delay setting; the rtp muxer reads this. */
    rtpctx->max_delay = s->max_delay;
    /* Copy other stream parameters. */
    rtpctx->streams[0]->sample_aspect_ratio = st->sample_aspect_ratio;
    rtpctx->flags |= s->flags & AVFMT_FLAG_BITEXACT;

    /* Get the payload type from the codec */
    if (st->id < RTP_PT_PRIVATE)
        rtpctx->streams[0]->id =
            ff_rtp_get_payload_type(s, st->codecpar, idx);
    else
        rtpctx->streams[0]->id = st->id;


    if (av_opt_get(s, "rtpflags", AV_OPT_SEARCH_CHILDREN, &rtpflags) >= 0)
        av_dict_set(&opts, "rtpflags", rtpflags, AV_DICT_DONT_STRDUP_VAL);

    /* Set the synchronized start time. */
    rtpctx->start_time_realtime = s->start_time_realtime;

    avcodec_parameters_copy(rtpctx->streams[0]->codecpar, st->codecpar);
    rtpctx->streams[0]->time_base = st->time_base;

    if (handle) {
        ret = ffio_fdopen(&rtpctx->pb, handle);
        if (ret < 0)
            ffurl_close(handle);
    } else
        ret = ffio_open_dyn_packet_buf(&rtpctx->pb, packet_size);
    if (!ret)
        ret = avformat_write_header(rtpctx, &opts);
    av_dict_free(&opts);

    if (ret) {
        if (handle && rtpctx->pb) {
            avio_closep(&rtpctx->pb);
        } else if (rtpctx->pb) {
            ffio_free_dyn_buf(&rtpctx->pb);
        }
        avformat_free_context(rtpctx);
        return ret;
    }

    *out = rtpctx;
    return 0;

fail:
    avformat_free_context(rtpctx);
    if (handle)
        ffurl_close(handle);
    return ret;
}
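
This newest variant reports errors instead of returning NULL and consumes handle on every path, so the caller only has to check the return code. A hedged caller sketch (the helper name and the idx value of 0 are assumptions):

static int setup_transport(AVFormatContext *s, AVStream *st,
                           URLContext *handle, AVFormatContext **rtpctx)
{
    int ret = ff_rtp_chain_mux_open(rtpctx, s, st, handle,
                                    RTSP_TCP_MAX_PACKET_SIZE, 0);
    if (ret < 0)
        return ret; /* handle already closed; *rtpctx not set */
    /* ... feed packets via ff_write_chained(), tear down with
     * av_write_trailer() + avformat_free_context() ... */
    return 0;
}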
Code Example #6
File: rtspserver.cpp  Project: 3009420/gaminganywhere
static int
rtp_new_av_stream(RTSPContext *ctx, struct sockaddr_in *sin, int streamid, enum AVCodecID codecid) {
	AVOutputFormat *fmt = NULL;
	AVFormatContext *fmtctx = NULL;
	AVStream *stream = NULL;
	AVCodecContext *encoder = NULL;
	uint8_t *dummybuf = NULL;
	//
	if(streamid > VIDEO_SOURCE_CHANNEL_MAX) {
		ga_error("invalid stream index (%d > %d)\n",
			streamid, VIDEO_SOURCE_CHANNEL_MAX);
		return -1;
	}
	if(codecid != rtspconf->video_encoder_codec->id
	&& codecid != rtspconf->audio_encoder_codec->id) {
		ga_error("invalid codec (%d)\n", codecid);
		return -1;
	}
	if(ctx->fmtctx[streamid] != NULL) {
		ga_error("duplicated setup to an existing stream (%d)\n",
			streamid);
		return -1;
	}
	if((fmt = av_guess_format("rtp", NULL, NULL)) == NULL) {
		ga_error("RTP not supported.\n");
		return -1;
	}
	if((fmtctx = avformat_alloc_context()) == NULL) {
		ga_error("create avformat context failed.\n");
		return -1;
	}
	fmtctx->oformat = fmt;
	if(ctx->mtu > 0) {
		if(fmtctx->packet_size > 0) {
			fmtctx->packet_size =
				ctx->mtu < fmtctx->packet_size ? ctx->mtu : fmtctx->packet_size;
		} else {
			fmtctx->packet_size = ctx->mtu;
		}
		ga_error("RTP: packet size set to %d (configured: %d)\n",
			fmtctx->packet_size, ctx->mtu);
	}
#ifdef HOLE_PUNCHING
	if(ffio_open_dyn_packet_buf(&fmtctx->pb, ctx->mtu) < 0) {
		ga_error("cannot open dynamic packet buffer\n");
		return -1;
	}
	ga_error("RTP: Dynamic buffer opened, max_packet_size=%d.\n",
		(int) fmtctx->pb->max_packet_size);
	if(ctx->lower_transport[streamid] == RTSP_LOWER_TRANSPORT_UDP) {
		if(rtp_open_ports(ctx, streamid) < 0) {
			ga_error("RTP: open ports failed - %s\n", strerror(errno));
			return -1;
		}
	}
#else
	if(ctx->lower_transport[streamid] == RTSP_LOWER_TRANSPORT_UDP) {
		snprintf(fmtctx->filename, sizeof(fmtctx->filename),
			"rtp://%s:%d", inet_ntoa(sin->sin_addr), ntohs(sin->sin_port));
		if(avio_open(&fmtctx->pb, fmtctx->filename, AVIO_FLAG_WRITE) < 0) {
			ga_error("cannot open URL: %s\n", fmtctx->filename);
			return -1;
		}
		ga_error("RTP/UDP: URL opened [%d]: %s, max_packet_size=%d\n",
			streamid, fmtctx->filename, fmtctx->pb->max_packet_size);
	} else if(ctx->lower_transport[streamid] == RTSP_LOWER_TRANSPORT_TCP) {
		// XXX: should we use avio_open_dyn_buf(&fmtctx->pb)?
		if(ffio_open_dyn_packet_buf(&fmtctx->pb, ctx->mtu) < 0) {
			ga_error("cannot open dynamic packet buffer\n");
			return -1;
		}
		ga_error("RTP/TCP: Dynamic buffer opened, max_packet_size=%d.\n",
			(int) fmtctx->pb->max_packet_size);
	}
#endif
	fmtctx->pb->seekable = 0;
	//
	if((stream = ga_avformat_new_stream(fmtctx, 0,
			codecid == rtspconf->video_encoder_codec->id ?
				rtspconf->video_encoder_codec : rtspconf->audio_encoder_codec)) == NULL) {
		ga_error("Cannot create new stream (%d)\n", codecid);
		return -1;
	}
	//
	if(codecid == rtspconf->video_encoder_codec->id) {
		encoder = ga_avcodec_vencoder_init(
				stream->codec,
				rtspconf->video_encoder_codec,
				video_source_out_width(streamid),
				video_source_out_height(streamid),
				rtspconf->video_fps,
				rtspconf->vso);
	} else if(codecid == rtspconf->audio_encoder_codec->id) {
		encoder = ga_avcodec_aencoder_init(
				stream->codec,
				rtspconf->audio_encoder_codec,
				rtspconf->audio_bitrate,
				rtspconf->audio_samplerate,
				rtspconf->audio_channels,
				rtspconf->audio_codec_format,
				rtspconf->audio_codec_channel_layout);
	}
	if(encoder == NULL) {
		ga_error("Cannot init encoder\n");
		return -1;
	}
	//
	ctx->encoder[streamid] = encoder;
	ctx->stream[streamid] = stream;
	ctx->fmtctx[streamid] = fmtctx;
	// write header
	if(avformat_write_header(ctx->fmtctx[streamid], NULL) < 0) {
		ga_error("Cannot write stream id %d.\n", streamid);
		return -1;
	}
#ifdef HOLE_PUNCHING
	avio_close_dyn_buf(ctx->fmtctx[streamid]->pb, &dummybuf);
	av_free(dummybuf);
#else
	if(ctx->lower_transport[streamid] == RTSP_LOWER_TRANSPORT_TCP) {
		/*int rlen;
		rlen =*/ avio_close_dyn_buf(ctx->fmtctx[streamid]->pb, &dummybuf);
		av_free(dummybuf);
	}
#endif
	return 0;
}
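
In the HOLE_PUNCHING build, the header packets produced by avformat_write_header() are deliberately discarded (dummybuf), and each later harvest of the dynamic packet buffer yields packets framed with a 32-bit big-endian length prefix, exactly what tcp_write_packet() in example #4 walks over. A hedged sketch of splitting such a buffer into individual datagrams; send_one() is a hypothetical callback around sendto():

/* Hedged sketch: buf/size come from avio_close_dyn_buf() on a context
 * opened with ffio_open_dyn_packet_buf(). */
static void split_packets(const uint8_t *buf, int size,
                          void (*send_one)(const uint8_t *pkt, int len))
{
    const uint8_t *ptr = buf;
    while (size > 4) {
        uint32_t len = AV_RB32(ptr); /* per-packet length prefix */
        ptr  += 4;
        size -= 4;
        if (len == 0 || len > (uint32_t)size)
            break;                   /* corrupt or truncated buffer */
        send_one(ptr, len);
        ptr  += len;
        size -= len;
    }
}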