Example #1
static int tcp_write_packet(AVFormatContext *s, RTSPStream *rtsp_st)
{
    RTSPState *rt = s->priv_data;
    AVFormatContext *rtpctx = rtsp_st->transport_priv;
    uint8_t *buf, *ptr;
    int size;
    uint8_t interleave_header[4];

    size = url_close_dyn_buf(rtpctx->pb, &buf);
    ptr = buf;
    while (size > 4) {
        uint32_t packet_len = AV_RB32(ptr);
        int id;
        ptr += 4;
        size -= 4;
        if (packet_len > size || packet_len < 2)
            break;
        if (ptr[1] >= 200 && ptr[1] <= 204)
            id = rtsp_st->interleaved_max; /* RTCP */
        else
            id = rtsp_st->interleaved_min; /* RTP */
        interleave_header[0] = '$';
        interleave_header[1] = id;
        AV_WB16(interleave_header + 2, packet_len);
        url_write(rt->rtsp_hd, interleave_header, 4);
        url_write(rt->rtsp_hd, ptr, packet_len);
        ptr += packet_len;
        size -= packet_len;
    }
    av_free(buf);
    url_open_dyn_packet_buf(&rtpctx->pb, RTSP_TCP_MAX_PACKET_SIZE);
    return 0;
}
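The loop above consumes the layout produced by url_open_dyn_packet_buf: each packet the muxer wrote is prefixed with its length as a 32-bit big-endian value. Every packet is then re-framed for RTSP interleaved (TCP) transport per RFC 2326 section 10.12: a '$' byte, a one-byte channel id, and a 16-bit big-endian length. A minimal self-contained sketch of that framing, with AV_WB16 expanded by hand:

#include <stdint.h>
#include <string.h>

/* Frame one RTP/RTCP packet for RTSP interleaved transport:
 * '$' | channel | 16-bit big-endian length | payload.
 * out must have room for len + 4 bytes; returns bytes written. */
static size_t frame_interleaved(uint8_t *out, int channel,
                                const uint8_t *payload, uint16_t len)
{
    out[0] = '$';
    out[1] = (uint8_t)channel;
    out[2] = (uint8_t)(len >> 8);   /* what AV_WB16 does */
    out[3] = (uint8_t)(len & 0xff);
    memcpy(out + 4, payload, len);
    return (size_t)len + 4;
}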
Example #2
AVFormatContext *ff_rtp_chain_mux_open(AVFormatContext *s, AVStream *st,
                                       URLContext *handle, int packet_size)
{
    AVFormatContext *rtpctx;
    int ret;
    AVOutputFormat *rtp_format = av_guess_format("rtp", NULL, NULL);

    if (!rtp_format)
        return NULL;

    /* Allocate an AVFormatContext for each output stream */
    rtpctx = avformat_alloc_context();
    if (!rtpctx)
        return NULL;

    rtpctx->oformat = rtp_format;
    if (!av_new_stream(rtpctx, 0)) {
        av_free(rtpctx);
        return NULL;
    }
    /* Copy the max delay setting; the rtp muxer reads this. */
    rtpctx->max_delay = s->max_delay;
    /* Copy other stream parameters. */
    rtpctx->streams[0]->sample_aspect_ratio = st->sample_aspect_ratio;

    /* Set the synchronized start time. */
    rtpctx->start_time_realtime = s->start_time_realtime;

    /* Remove the local codec, link to the original codec
     * context instead, to give the rtp muxer access to
     * codec parameters. */
    av_free(rtpctx->streams[0]->codec);
    rtpctx->streams[0]->codec = st->codec;

    if (handle) {
        url_fdopen(&rtpctx->pb, handle);
    } else
        url_open_dyn_packet_buf(&rtpctx->pb, packet_size);
    ret = av_write_header(rtpctx);

    if (ret) {
        if (handle) {
            url_fclose(rtpctx->pb);
        } else {
            uint8_t *ptr;
            url_close_dyn_buf(rtpctx->pb, &ptr);
            av_free(ptr);
        }
        av_free(rtpctx->streams[0]);
        av_free(rtpctx);
        return NULL;
    }

    /* Copy the RTP AVStream timebase back to the original AVStream */
    st->time_base = rtpctx->streams[0]->time_base;
    return rtpctx;
}
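A hedged sketch of how a caller might drive this chained muxer; the helper name and the 1450-byte packet size are illustrative, and only functions that already appear in these examples are used. This is a one-shot flow: a long-lived caller would reopen the dynamic buffer after draining it, as Example #1 does.

/* Illustrative caller (not from the original source). */
static int mux_one_packet(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
    uint8_t *data;
    int len;
    AVFormatContext *rtpctx = ff_rtp_chain_mux_open(s, st, NULL, 1450);
    if (!rtpctx)
        return -1;
    /* ff_write_chained (see Example #3) remaps the stream index and
     * rescales timestamps before writing to the chained muxer. */
    ff_write_chained(rtpctx, 0, pkt, s);
    /* Drain the length-prefixed RTP packets from the dynamic buffer. */
    len = url_close_dyn_buf(rtpctx->pb, &data);
    /* ... send or store data[0..len) ... */
    av_free(data);
    /* Teardown of rtpctx omitted for brevity. */
    return 0;
}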
Example #3
int ff_mov_add_hinted_packet(AVFormatContext *s, AVPacket *pkt,
                             int track_index, int sample)
{
    MOVMuxContext *mov = s->priv_data;
    MOVTrack *trk = &mov->tracks[track_index];
    AVFormatContext *rtp_ctx = trk->rtp_ctx;
    uint8_t *buf = NULL;
    int size;
    AVIOContext *hintbuf = NULL;
    AVPacket hint_pkt;
    int ret = 0, count;

    if (!rtp_ctx)
        return AVERROR(ENOENT);
    if (!rtp_ctx->pb)
        return AVERROR(ENOMEM);

    sample_queue_push(&trk->sample_queue, pkt, sample);

    /* Feed the packet to the RTP muxer */
    ff_write_chained(rtp_ctx, 0, pkt, s);

    /* Fetch the output from the RTP muxer, open a new output buffer
     * for next time. */
    size = url_close_dyn_buf(rtp_ctx->pb, &buf);
    if ((ret = url_open_dyn_packet_buf(&rtp_ctx->pb,
                                       RTP_MAX_PACKET_SIZE)) < 0)
        goto done;

    if (size <= 0)
        goto done;

    /* Open a buffer for writing the hint */
    if ((ret = url_open_dyn_buf(&hintbuf)) < 0)
        goto done;
    av_init_packet(&hint_pkt);
    count = write_hint_packets(hintbuf, buf, size, trk, &hint_pkt.dts);
    av_freep(&buf);

    /* Write the hint data into the hint track */
    hint_pkt.size = size = url_close_dyn_buf(hintbuf, &buf);
    hint_pkt.data = buf;
    hint_pkt.pts  = hint_pkt.dts;
    hint_pkt.stream_index = track_index;
    if (pkt->flags & AV_PKT_FLAG_KEY)
        hint_pkt.flags |= AV_PKT_FLAG_KEY;
    if (count > 0)
        ff_mov_write_packet(s, &hint_pkt);
done:
    av_free(buf);
    sample_queue_retain(&trk->sample_queue);
    return ret;
}
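The function repeats a drain-and-reopen idiom that recurs throughout these examples: closing the dynamic buffer takes ownership of everything the muxer has produced so far and invalidates the AVIOContext, so a fresh packet buffer is opened immediately for the next write. A minimal sketch of just that idiom, against the same old url_*_dyn_buf API:

/* Sketch: drain the muxer's output and re-arm the buffer. On success
 * the caller owns *data and must av_free() it; returns the byte count
 * or a negative error. */
static int drain_and_reopen(AVFormatContext *ctx, uint8_t **data)
{
    int len = url_close_dyn_buf(ctx->pb, data);
    if (url_open_dyn_packet_buf(&ctx->pb, RTP_MAX_PACKET_SIZE) < 0) {
        av_freep(data);
        return AVERROR(ENOMEM);
    }
    return len;
}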
Example #4
static int tcp_write_packet(AVFormatContext *s, RTSPStream *rtsp_st)
{
    RTSPState *rt = s->priv_data;
    AVFormatContext *rtpctx = rtsp_st->transport_priv;
    uint8_t *buf, *ptr;
    int size;
    uint8_t *interleave_header, *interleaved_packet;

    size = url_close_dyn_buf(rtpctx->pb, &buf);
    ptr = buf;
    while (size > 4) {
        uint32_t packet_len = AV_RB32(ptr);
        int id;
        /* The interleaving header is exactly 4 bytes, which happens to be
         * the same size as the packet length header from
         * url_open_dyn_packet_buf. So by writing the interleaving header
         * over these bytes, we get a consecutive interleaved packet
         * that can be written in one call. */
        interleaved_packet = interleave_header = ptr;
        ptr += 4;
        size -= 4;
        if (packet_len > size || packet_len < 2)
            break;
        if (ptr[1] >= RTCP_SR && ptr[1] <= RTCP_APP)
            id = rtsp_st->interleaved_max; /* RTCP */
        else
            id = rtsp_st->interleaved_min; /* RTP */
        interleave_header[0] = '$';
        interleave_header[1] = id;
        AV_WB16(interleave_header + 2, packet_len);
        url_write(rt->rtsp_hd_out, interleaved_packet, 4 + packet_len);
        ptr += packet_len;
        size -= packet_len;
    }
    av_free(buf);
    url_open_dyn_packet_buf(&rtpctx->pb, RTSP_TCP_MAX_PACKET_SIZE);
    return 0;
}
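Relative to Example #1, this revision exploits the observation its comment records: the 4-byte big-endian length prefix from url_open_dyn_packet_buf occupies exactly the space the interleave header needs, so overwriting it yields one contiguous buffer and halves the url_write calls. A self-contained sketch of the overwrite, with the byte layout spelled out:

#include <stdint.h>

/* Before: [len: 4 bytes, big-endian][payload ...]
 * After:  ['$'][channel][len: 2 bytes, big-endian][payload ...]
 * Valid only while the packet length fits in 16 bits, which the
 * RTSP_TCP_MAX_PACKET_SIZE cap guarantees upstream. */
static void rewrite_len_as_interleave_header(uint8_t *pkt, int channel)
{
    uint32_t len = ((uint32_t)pkt[0] << 24) | ((uint32_t)pkt[1] << 16) |
                   ((uint32_t)pkt[2] << 8)  |  (uint32_t)pkt[3]; /* AV_RB32 */
    pkt[0] = '$';
    pkt[1] = (uint8_t)channel;
    pkt[2] = (uint8_t)(len >> 8);                                /* AV_WB16 */
    pkt[3] = (uint8_t)(len & 0xff);
}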
Example #5
/* XXX: This class should really pass the picture parameters through a
 * separate API so that we can enforce, by contract, that the frame size
 * can't suddenly change on us. */
void Java_org_devtcg_rojocam_ffmpeg_RtpOutputContext_nativeWriteFrame(JNIEnv *env,
        jclass clazz, jint nativeInt, jbyteArray data, jlong frameTime,
        jint frameFormat, jint frameWidth, jint frameHeight,
        jint frameBitsPerPixel) {
    RtpOutputContext *rtpContext = (RtpOutputContext *)nativeInt;
    AVFormatContext *avContext;
    AVCodecContext *codec;
    AVStream *outputStream;
    AVPacket pkt;
    jbyte *data_c;
    int max_packet_size;
    uint8_t *rtp_data;
    int rtp_data_len;

    avContext = rtpContext->avContext;
    outputStream = avContext->streams[0];
    codec = outputStream->codec;

    if (rtpContext->tempFrame == NULL) {
        if (!first_frame_init(env, rtpContext, frameFormat,
                frameWidth, frameHeight)) {
            LOGE("Error initializing encoding buffers, cannot stream");
            return;
        }
    }

    data_c = (*env)->GetByteArrayElements(env, data, NULL);

    /* Convert the input arguments to an AVPacket, as though we had read
     * this frame through the ffmpeg libraries, even though it was handed
     * to us directly as a raw video frame. */
    int frameDuration = frameTime - rtpContext->lastFrameTime;
    bool frameEncoded = encode_video_frame(rtpContext,
            outputStream, rtpContext->tempFrame, rtpContext->imgConvert,
            rtpContext->tempEncodedBuf, sizeof(rtpContext->tempEncodedBuf),
            data_c, frameTime, frameDuration, frameFormat,
            frameWidth, frameHeight, frameBitsPerPixel, &pkt);
    rtpContext->lastFrameTime = frameTime;

    (*env)->ReleaseByteArrayElements(env, data, data_c, JNI_ABORT);

    if (frameEncoded) {
#if PROFILE_WRITE_FRAME
        struct timeval then;
        gettimeofday(&then, NULL);
#endif

        max_packet_size = url_get_max_packet_size(rtpContext->urlContext);
        url_open_dyn_packet_buf(&avContext->pb, max_packet_size);

        avContext->pb->seekable = 0;

        /* This organizes our encoded packet into RTP packet segments (but it
         * doesn't actually send anything over the network yet). */
        if (av_write_frame(avContext, &pkt) < 0) {
            jniThrowException(env, "java/io/IOException", "Error writing frame to output");
        }

        /* Actually deliver the packetized RTP data to the remote peer. */
        rtp_data_len = url_close_dyn_buf(avContext->pb, &rtp_data);
        exhaustive_send(rtpContext->urlContext, rtp_data, rtp_data_len);
        av_free(rtp_data);

        /* XXX: I dunno, ffserver.c does this... */
        outputStream->codec->frame_number++;

#if PROFILE_WRITE_FRAME
        store_elapsed(&rtpContext->write_time, &then);
#endif
    } else {
#if PROFILE_WRITE_FRAME
        rtpContext->write_time = 0;
#endif
    }

#if PROFILE_WRITE_FRAME
    /* LOGI("resample@%ld ms; encode@%ld ms; write@%ld ms",
     *      rtpContext->resampling_time, rtpContext->encoding_time,
     *      rtpContext->write_time); */
#endif
}
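exhaustive_send is not shown here. A hedged guess at its job, inferred from the buffer layout url_close_dyn_buf hands back (the same 32-bit big-endian length prefixes that Example #1 parses): walk the buffer and write each packet to the URLContext as its own datagram. The body below is illustrative, not the project's actual implementation:

static void exhaustive_send_sketch(URLContext *h, uint8_t *buf, int size)
{
    uint8_t *ptr = buf;
    while (size > 4) {
        uint32_t packet_len = AV_RB32(ptr); /* per-packet length prefix */
        ptr  += 4;
        size -= 4;
        if (packet_len > size)
            break;
        url_write(h, ptr, packet_len);      /* one datagram per packet */
        ptr  += packet_len;
        size -= packet_len;
    }
}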
Example #6
jint Java_org_devtcg_rojocam_ffmpeg_RtpOutputContext_nativeCreate(JNIEnv *env,
        jclass clazz, jint streamConfigNativeInt, jlong nowNanoTime,
        jstring hostAddress, jint rtpPort) {
    FFStreamConfig *config = (FFStreamConfig *)streamConfigNativeInt;
    RtpOutputContext *rtpContext = NULL;
    AVFormatContext *avContext = NULL;
    AVStream *st = NULL;
    uint8_t *dummy_buf;
    int max_packet_size;

    rtpContext = av_mallocz(sizeof(RtpOutputContext));
    if (rtpContext == NULL) {
        jniThrowOOM(env);
        goto cleanup;
    }

    rtpContext->lastFrameTime = nowNanoTime;
    rtpContext->config = config;

    avContext = avformat_alloc_context();
    if (avContext == NULL) {
        jniThrowOOM(env);
        goto cleanup;
    }
    avContext->oformat = av_guess_format("rtp", NULL, NULL);
    if (avContext->oformat == NULL) {
        jniThrowException(env, "java/lang/IllegalStateException",
                "rtp avformat is not available");
        goto cleanup;
    }

    rtpContext->avContext = avContext;

    st = av_mallocz(sizeof(AVStream));
    if (st == NULL) {
        jniThrowOOM(env);
        goto cleanup;
    }
    avContext->nb_streams = 1;
    avContext->streams = av_malloc(avContext->nb_streams * sizeof(*avContext->streams));
    if (avContext->streams == NULL) {
        av_free(st);
        jniThrowOOM(env);
        goto cleanup;
    }
    avContext->streams[0] = st;

    /* XXX: What would we be doing if we supported audio as well? */
    memcpy(st, config->streams[0], sizeof(AVStream));
    st->priv_data = NULL;

    const char *hostAddress_str = (*env)->GetStringUTFChars(env,
            hostAddress, NULL);
    snprintf(avContext->filename, sizeof(avContext->filename),
            "rtp://%s:%d?localrtpport=5000&localrtcpport=5001",
            hostAddress_str, rtpPort);
    (*env)->ReleaseStringUTFChars(env, hostAddress, hostAddress_str);

    if (url_open(&rtpContext->urlContext,
            avContext->filename, URL_WRONLY) < 0) {
        LOGE("Cannot open url context for filename=%s", avContext->filename);
        jniThrowException(env, "java/io/IOException", "Unable to open URL");
        goto cleanup;
    }

    max_packet_size = url_get_max_packet_size(rtpContext->urlContext);

    /* av_write_header needs an AVIOContext to write into; use a throwaway
     * dynamic buffer, as any header bytes the RTP muxer emits are discarded
     * below rather than sent on the wire. */
    url_open_dyn_packet_buf(&avContext->pb, max_packet_size);

    av_set_parameters(avContext, NULL);
    if (av_write_header(avContext) < 0) {
        jniThrowException(env, "java/io/IOException", "Unexpected error writing dummy RTP header");
        goto cleanup;
    }

    url_close_dyn_buf(avContext->pb, &dummy_buf);
    av_free(dummy_buf);

    return (jint)rtpContext;

cleanup:
    rtp_output_context_free(rtpContext);
    assert((*env)->ExceptionOccurred(env));

    return 0;
}
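rtp_output_context_free is referenced above but not shown. A sketch of what it plausibly has to release, mirroring the allocations nativeCreate makes; this is an assumption, not the project's actual cleanup code:

/* Hypothetical cleanup, matching nativeCreate's allocations; the real
 * implementation is not shown in these examples. */
static void rtp_output_context_free(RtpOutputContext *rtpContext)
{
    if (rtpContext == NULL)
        return;
    if (rtpContext->urlContext != NULL)
        url_close(rtpContext->urlContext);
    if (rtpContext->avContext != NULL) {
        AVFormatContext *avContext = rtpContext->avContext;
        if (avContext->streams != NULL) {
            av_free(avContext->streams[0]); /* the hand-copied AVStream */
            av_free(avContext->streams);
        }
        av_free(avContext);
    }
    av_free(rtpContext);
}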
Example #7
int ff_mov_init_hinting(AVFormatContext *s, int index, int src_index)
{
    MOVMuxContext *mov  = s->priv_data;
    MOVTrack *track     = &mov->tracks[index];
    MOVTrack *src_track = &mov->tracks[src_index];
    AVStream *src_st    = s->streams[src_index];
    int ret = AVERROR(ENOMEM);
    AVOutputFormat *rtp_format = av_guess_format("rtp", NULL, NULL);

    track->tag = MKTAG('r','t','p',' ');
    track->src_track = src_index;

    if (!rtp_format) {
        ret = AVERROR(ENOENT);
        goto fail;
    }

    track->enc = avcodec_alloc_context();
    if (!track->enc)
        goto fail;
    track->enc->codec_type = AVMEDIA_TYPE_DATA;
    track->enc->codec_tag  = track->tag;

    track->rtp_ctx = avformat_alloc_context();
    if (!track->rtp_ctx)
        goto fail;
    track->rtp_ctx->oformat = rtp_format;
    if (!av_new_stream(track->rtp_ctx, 0))
        goto fail;

    /* Copy stream parameters */
    track->rtp_ctx->streams[0]->sample_aspect_ratio =
                        src_st->sample_aspect_ratio;

    /* Remove the allocated codec context, link to the original one
     * instead, to give the rtp muxer access to codec parameters. */
    av_free(track->rtp_ctx->streams[0]->codec);
    track->rtp_ctx->streams[0]->codec = src_st->codec;

    if ((ret = url_open_dyn_packet_buf(&track->rtp_ctx->pb,
                                       RTP_MAX_PACKET_SIZE)) < 0)
        goto fail;
    ret = av_write_header(track->rtp_ctx);
    if (ret)
        goto fail;

    /* Copy the RTP AVStream timebase back to the hint AVStream */
    track->timescale = track->rtp_ctx->streams[0]->time_base.den;

    /* Mark the hinted track that packets written to it should be
     * sent to this track for hinting. */
    src_track->hint_track = index;
    return 0;
fail:
    av_log(s, AV_LOG_WARNING,
           "Unable to initialize hinting of stream %d\n", src_index);
    if (track->rtp_ctx && track->rtp_ctx->pb) {
        uint8_t *buf;
        url_close_dyn_buf(track->rtp_ctx->pb, &buf);
        av_free(buf);
    }
    if (track->rtp_ctx && track->rtp_ctx->streams[0]) {
        av_metadata_free(&track->rtp_ctx->streams[0]->metadata);
        av_free(track->rtp_ctx->streams[0]);
    }
    if (track->rtp_ctx) {
        av_metadata_free(&track->rtp_ctx->metadata);
        av_free(track->rtp_ctx->priv_data);
        av_freep(&track->rtp_ctx);
    }
    av_freep(&track->enc);
    /* Set a default timescale, to avoid crashes in dump_format */
    track->timescale = 90000;
    return ret;
}
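The src_track->hint_track link established above is what later routes packets into Example #3's ff_mov_add_hinted_packet. A hedged sketch of that write-path glue; the guard condition and the sample counter are illustrative, not verbatim from the mov muxer:

/* Illustrative: after a packet is written to its own track, mirror
 * it to the associated hint track set up by ff_mov_init_hinting. */
static int write_and_hint(AVFormatContext *s, AVPacket *pkt)
{
    MOVMuxContext *mov = s->priv_data;
    MOVTrack *trk = &mov->tracks[pkt->stream_index];
    int ret = ff_mov_write_packet(s, pkt);
    if (ret < 0)
        return ret;
    if (trk->hint_track > 0) /* assumed sentinel: 0 = no hinting */
        ret = ff_mov_add_hinted_packet(s, pkt, trk->hint_track,
                                       trk->entry - 1 /* illustrative sample index */);
    return ret;
}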