Code example #1
File: asfenc.c Project: arthurrauter/mconf-mobile
static int asf_write_header(AVFormatContext *s)
{
    ASFContext *asf = s->priv_data;

    s->packet_size  = PACKET_SIZE;
    asf->nb_packets = 0;

    asf->last_indexed_pts = 0;
    asf->index_ptr = av_malloc( sizeof(ASFIndex) * ASF_INDEX_BLOCK );
    asf->nb_index_memory_alloc = ASF_INDEX_BLOCK;
    asf->nb_index_count = 0;
    asf->maximum_packet = 0;

    /* the data chunk size has to be 50, which is data_size - asf->data_offset
     * at the moment this function completes; this is needed to use ASF as
     * a streamable format */
    if (asf_write_header1(s, 0, 50) < 0) {
        //av_free(asf);
        return -1;
    }

    put_flush_packet(s->pb);

    asf->packet_nb_payloads = 0;
    asf->packet_timestamp_start = -1;
    asf->packet_timestamp_end = -1;
    init_put_byte(&asf->pb, asf->packet_buf, s->packet_size, 1,
                  NULL, NULL, NULL, NULL);

    return 0;
}
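
Note: examples #1-#3 point a write-mode ByteIOContext at a fixed in-memory packet buffer. The sketch below isolates that pattern under the pre-0.7 avio API; the buffer size and the values written are illustrative, not taken from the muxer.

#include <libavformat/avformat.h>

/* minimal sketch of the in-memory write pattern used by asf_write_header() */
static void demo_mem_writer(void)
{
    uint8_t buf[3200];                 /* stand-in for asf->packet_buf */
    ByteIOContext pb;

    /* write_flag=1 with NULL callbacks: bytes accumulate in buf and are
     * never flushed to any underlying output */
    init_put_byte(&pb, buf, sizeof(buf), 1, NULL, NULL, NULL, NULL);

    put_le32(&pb, 0x3026b275);         /* illustrative payload bytes */
    put_byte(&pb, 0x02);

    /* url_ftell() reports how many bytes have been written so far */
    int64_t filled = url_ftell(&pb);
    (void)filled;
}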
Code example #2
static void flush_packet(AVFormatContext *s)
{
    ASFContext *asf = s->priv_data;
    int packet_hdr_size, packet_filled_size;

    if (asf->is_streamed) {
        put_chunk(s, 0x4424, asf->packet_size, 0);
    }

    packet_hdr_size = put_payload_parsing_info(
                            s,
                            asf->packet_timestamp_start,
                            asf->packet_timestamp_end - asf->packet_timestamp_start,
                            asf->packet_nb_payloads,
                            asf->packet_size_left
                        );

    packet_filled_size = PACKET_SIZE - asf->packet_size_left;
    assert(packet_hdr_size <= asf->packet_size_left);
    memset(asf->packet_buf + packet_filled_size, 0, asf->packet_size_left);

    put_buffer(&s->pb, asf->packet_buf, asf->packet_size - packet_hdr_size);

    put_flush_packet(&s->pb);
    asf->nb_packets++;
    asf->packet_nb_payloads = 0;
    asf->packet_timestamp_start = -1;
    asf->packet_timestamp_end = -1;
    init_put_byte(&asf->pb, asf->packet_buf, asf->packet_size, 1,
                  NULL, NULL, NULL, NULL);
}
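
Note: the fixed packet size in flush_packet() adds up as follows (a reading of the code above):

/* put_payload_parsing_info() writes packet_hdr_size bytes directly to the
 * real output via s; the put_buffer() call then appends
 * packet_size - packet_hdr_size bytes from asf->packet_buf
 * (packet_filled_size payload bytes plus zero padding), so every ASF data
 * packet occupies exactly packet_size bytes on the wire. */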
Code example #3
static int asf_write_header(AVFormatContext *s)
{
    ASFContext *asf = s->priv_data;

    asf->packet_size = PACKET_SIZE;
    asf->nb_packets = 0;

    asf->last_indexed_pts = 0;
    asf->index_ptr = (ASFIndex*)av_malloc( sizeof(ASFIndex) * ASF_INDEX_BLOCK );
    asf->nb_index_memory_alloc = ASF_INDEX_BLOCK;
    asf->nb_index_count = 0;
    asf->maximum_packet = 0;

    if (asf_write_header1(s, 0, 0) < 0) {
        //av_free(asf);
        return -1;
    }

    put_flush_packet(&s->pb);

    asf->packet_nb_payloads = 0;
    asf->packet_timestamp_start = -1;
    asf->packet_timestamp_end = -1;
    init_put_byte(&asf->pb, asf->packet_buf, asf->packet_size, 1,
                  NULL, NULL, NULL, NULL);

    return 0;
}
Code example #4
File: rtpdec_asf.c Project: RJVB/FFusion
static void init_packetizer(ByteIOContext *pb, uint8_t *buf, int len)
{
    init_put_byte(pb, buf, len, 0, NULL, packetizer_read, NULL, NULL);

    /* this "fills" the buffer with its current content */
    pb->pos     = len;
    pb->buf_end = buf + len;
}
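
Note: init_packetizer() must patch pos and buf_end by hand because it installs a real read callback (packetizer_read). In most pre-0.7 versions of aviobuf.c, init_put_byte() applies the same "buffer is already full" setup automatically when read_packet is NULL and write_flag is 0, which is why examples #6 and #8 can read from memory immediately. A sketch, with illustrative data:

/* sketch: pre-filled read buffers under the pre-0.7 avio API */
static void demo_prefilled_reader(uint8_t *data, int len)
{
    ByteIOContext pb;

    /* NULL read_packet: init_put_byte() treats the buffer as already
     * holding len valid bytes, so reads succeed immediately */
    init_put_byte(&pb, data, len, 0, NULL, NULL, NULL, NULL);
    (void)get_byte(&pb);

    /* with a non-NULL read callback the buffer starts out "empty" and
     * pos/buf_end must be patched as init_packetizer() does above */
}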
Code example #5
File: mmst.c Project: AirDev/linphone-android
static void mms_put_utf16(MMSContext *mms, uint8_t *src)
{
    ByteIOContext bic;
    int size = mms->write_out_ptr - mms->out_buffer;
    int len;
    init_put_byte(&bic, mms->write_out_ptr,
            sizeof(mms->out_buffer) - size, 1, NULL, NULL, NULL, NULL);

    len = ff_put_str16_nolen(&bic, src);
    mms->write_out_ptr += len;
}
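
Note: the same write-mode trick can append into the unused tail of a larger buffer, which is essentially what mms_put_utf16() does around ff_put_str16_nolen(). A generic sketch (put_buffer() stands in for the UTF-16 conversion; all names are illustrative):

/* append src_len raw bytes after the first `used` bytes of buf and
 * return the new fill level */
static int demo_append_bytes(uint8_t *buf, int buf_size, int used,
                             const uint8_t *src, int src_len)
{
    ByteIOContext bic;
    init_put_byte(&bic, buf + used, buf_size - used, 1,
                  NULL, NULL, NULL, NULL);
    put_buffer(&bic, src, src_len);
    return used + (int)url_ftell(&bic);   /* bytes written so far */
}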
Code example #6
File: rdt.c Project: FloridaStream/stream2android
static int
rdt_load_mdpr (PayloadContext *rdt, AVStream *st, int rule_nr)
{
    ByteIOContext pb;
    int size;
    uint32_t tag;

    /**
     * Layout of the MLTI chunk:
     * 4:MLTI
     * 2:<number of streams>
     * Then for each stream ([number_of_streams] times):
     *     2:<mdpr index>
     * 2:<number of mdpr chunks>
     * Then for each mdpr chunk ([number_of_mdpr_chunks] times):
     *     4:<size>
     *     [size]:<data>
     * we skip MDPR chunks until we reach the one of the stream
     * we're interested in, and forward that ([size]+[data]) to
     * the RM demuxer to parse the stream-specific header data.
     */
    if (!rdt->mlti_data)
        return -1;
    init_put_byte(&pb, rdt->mlti_data, rdt->mlti_data_size, 0,
                  NULL, NULL, NULL, NULL);
    tag = get_le32(&pb);
    if (tag == MKTAG('M', 'L', 'T', 'I')) {
        int num, chunk_nr;

        /* read index of MDPR chunk numbers */
        num = get_be16(&pb);
        if (rule_nr < 0 || rule_nr >= num)
            return -1;
        url_fskip(&pb, rule_nr * 2);
        chunk_nr = get_be16(&pb);
        url_fskip(&pb, (num - 1 - rule_nr) * 2);

        /* read MDPR chunks */
        num = get_be16(&pb);
        if (chunk_nr >= num)
            return -1;
        while (chunk_nr--)
            url_fskip(&pb, get_be32(&pb));
        size = get_be32(&pb);
    } else {
        size = rdt->mlti_data_size;
        url_fseek(&pb, 0, SEEK_SET);
    }
    if (ff_rm_read_mdpr_codecdata(rdt->rmctx, &pb, st, rdt->rmst[st->index], size) < 0)
        return -1;

    return 0;
}
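
Note: to make the MLTI layout concrete, the sketch below hand-builds a minimal two-stream chunk and walks it exactly as rdt_load_mdpr() does; all sizes and byte values are illustrative.

static void demo_mlti_walk(void)
{
    /* MLTI, two rule entries (mdpr indices 0 and 1), two 1-byte MDPR chunks */
    uint8_t blob[] = {
        'M','L','T','I',
        0x00,0x02,                      /* number of rule entries     */
        0x00,0x00, 0x00,0x01,           /* mdpr index per rule        */
        0x00,0x02,                      /* number of mdpr chunks      */
        0x00,0x00,0x00,0x01, 0xaa,      /* chunk 0: size=1, data      */
        0x00,0x00,0x00,0x01, 0xbb,      /* chunk 1: size=1, data      */
    };
    ByteIOContext pb;
    int rule_nr = 1, num, chunk_nr, size;

    init_put_byte(&pb, blob, sizeof(blob), 0, NULL, NULL, NULL, NULL);
    if (get_le32(&pb) != MKTAG('M', 'L', 'T', 'I'))
        return;
    num      = get_be16(&pb);           /* 2 rule entries             */
    url_fskip(&pb, rule_nr * 2);        /* skip rule 0                */
    chunk_nr = get_be16(&pb);           /* rule 1 maps to chunk 1     */
    url_fskip(&pb, (num - 1 - rule_nr) * 2);
    num      = get_be16(&pb);           /* 2 chunks                   */
    while (chunk_nr--)
        url_fskip(&pb, get_be32(&pb));  /* skip chunk 0 (1 byte)      */
    size = get_be32(&pb);               /* 1; the next byte is 0xbb   */
    (void)size;
}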
Code example #7
File: aviobuf.c Project: OESF-DLNA/upnp-extension
ByteIOContext *av_alloc_put_byte(
                  unsigned char *buffer,
                  int buffer_size,
                  int write_flag,
                  void *opaque,
                  int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
                  int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
                  offset_t (*seek)(void *opaque, offset_t offset, int whence)) {
    ByteIOContext *s = av_mallocz(sizeof(ByteIOContext));
    init_put_byte(s, buffer, buffer_size, write_flag, opaque,
                  read_packet, write_packet, seek);
    return s;
}
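
Note: a typical use of this wrapper, as in example #8's AAC cache path: allocate a read-mode context over a memory buffer, use it, then release the context (not the data) with av_freep(). Buffer and length are illustrative.

static void demo_alloc_put_byte(uint8_t *data, int len)
{
    ByteIOContext *pb = av_alloc_put_byte(data, len, 0,
                                          NULL, NULL, NULL, NULL);
    if (!pb)
        return;
    (void)get_byte(pb);   /* read as with any ByteIOContext */
    av_freep(&pb);        /* frees the context, not the data buffer */
}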
Code example #8
File: rdt.c Project: FloridaStream/stream2android
/**< return 0 on packet and no more left; 1 on packet with more packets pending; <0 on error or partial packet */
static int
rdt_parse_packet (AVFormatContext *ctx, PayloadContext *rdt, AVStream *st,
                  AVPacket *pkt, uint32_t *timestamp,
                  const uint8_t *buf, int len, int flags)
{
    int seq = 1, res;
    ByteIOContext pb;

    if (rdt->audio_pkt_cnt == 0) {
        int pos;

        init_put_byte(&pb, buf, len, 0, NULL, NULL, NULL, NULL);
        flags = (flags & RTP_FLAG_KEY) ? 2 : 0;
        res = ff_rm_parse_packet (rdt->rmctx, &pb, st, rdt->rmst[st->index], len, pkt,
                                  &seq, flags, *timestamp);
        pos = url_ftell(&pb);
        if (res < 0)
            return res;
        if (res > 0) {
            if (st->codec->codec_id == CODEC_ID_AAC) {
                memcpy (rdt->buffer, buf + pos, len - pos);
                rdt->rmctx->pb = av_alloc_put_byte (rdt->buffer, len - pos, 0,
                                                    NULL, NULL, NULL, NULL);
            }
            goto get_cache;
        }
    } else {
get_cache:
        rdt->audio_pkt_cnt =
            ff_rm_retrieve_cache (rdt->rmctx, rdt->rmctx->pb,
                                  st, rdt->rmst[st->index], pkt);
        if (rdt->audio_pkt_cnt == 0 &&
            st->codec->codec_id == CODEC_ID_AAC)
            av_freep(&rdt->rmctx->pb);
    }
    pkt->stream_index = st->index;
    pkt->pts = *timestamp;

    return rdt->audio_pkt_cnt > 0;
}
Code example #9
File: ffmpeg_decoder.cpp Project: sandsmark/akode
bool FFMPEGDecoder::openFile() {
    d->src->openRO();
    d->src->fadvise();

    // The following duplicates what av_open_input_file would normally do

    // url_fdopen
    init_put_byte(&d->stream, d->file_buffer, FILE_BUFFER_SIZE, 0, d->src, akode_read, akode_write, akode_seek);
    d->stream.is_streamed = !d->src->seekable();
    d->stream.max_packet_size = FILE_BUFFER_SIZE;

    {
        // 2048 is PROBE_BUF_SIZE from libavformat/utils.c
        AVProbeData pd;
        uint8_t buf[2048];
        pd.filename = d->src->filename;
        pd.buf = buf;
        pd.buf_size = 0;
        pd.buf_size = get_buffer(&d->stream, buf, 2048);
        d->fmt = av_probe_input_format(&pd, 1);
        // Seek back to 0
        // copied from url_fseek
        long offset1 = 0 - (d->stream.pos - (d->stream.buf_end - d->stream.buffer));
        if (offset1 >= 0 && offset1 <= (d->stream.buf_end - d->stream.buffer)) {
            /* can do the seek inside the buffer */
            d->stream.buf_ptr = d->stream.buffer + offset1;
        } else {
            if (!d->src->seek(0)) {
                d->src->close();
                return false;
            } else {
                d->stream.pos = 0;
                d->stream.buf_ptr = d->file_buffer;
                d->stream.buf_end = d->file_buffer;
            }
        }
    }
    if (!d->fmt) {
        std::cerr << "akode: FFMPEG: Format not found\n";
        closeFile();
        return false;
    }

    if (av_open_input_stream(&d->ic, &d->stream, d->src->filename, d->fmt, 0) != 0)
    {
        closeFile();
        return false;
    }

    av_find_stream_info( d->ic );

    // Find the first a/v streams
    d->audioStream = -1;
    d->videoStream = -1;
    for (int i = 0; i < d->ic->nb_streams; i++) {
        if (d->ic->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
            d->audioStream = i;
        else
        if (d->ic->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            d->videoStream = i;
    }
    if (d->audioStream == -1)
    {
        std::cerr << "akode: FFMPEG: Audio stream not found\n";
        // for now require an audio stream
        closeFile();
        return false;
    }

    // Set config
    if (!setAudioConfiguration(&d->config, d->ic->streams[d->audioStream]->codec))
    {
        closeFile();
        return false;
    }

    d->codec = avcodec_find_decoder(d->ic->streams[d->audioStream]->codec->codec_id);
    if (!d->codec) {
        std::cerr << "akode: FFMPEG: Codec not found\n";
        closeFile();
        return false;
    }
    avcodec_open( d->ic->streams[d->audioStream]->codec, d->codec );

    double ffpos = (double)d->ic->streams[d->audioStream]->start_time / (double)AV_TIME_BASE;
    d->position = (long)(ffpos * d->config.sample_rate);

    return true;
}
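
Note: the seek-back at the end of the probe block reimplements url_fseek()'s in-buffer fast path. Generalized to an arbitrary target offset it looks like the sketch below, assuming the pre-0.7 ByteIOContext layout where pb->pos is the underlying position corresponding to buf_end.

/* return 1 if the seek could be satisfied inside the buffer, 0 if the
 * caller must seek the underlying source and reset the buffer */
static int demo_seek_in_buffer(ByteIOContext *pb, int64_t target)
{
    int64_t buffered  = pb->buf_end - pb->buffer;  /* valid bytes buffered  */
    int64_t buf_start = pb->pos - buffered;        /* file pos of buffer[0] */
    int64_t offset1   = target - buf_start;        /* target inside buffer? */

    if (offset1 >= 0 && offset1 <= buffered) {
        pb->buf_ptr = pb->buffer + offset1;        /* no I/O needed */
        return 1;
    }
    return 0;
}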
Code example #10
File: ffmpeg_demux.c Project: claudiordgz/gpac
static GF_Err FFD_ConnectService(GF_InputService *plug, GF_ClientService *serv, const char *url)
{
	GF_Err e;
	s64 last_aud_pts;
	u32 i;
	s32 res;
	Bool is_local;
	const char *sOpt;
	char *ext, szName[1024];
	FFDemux *ffd = plug->priv;
	AVInputFormat *av_in = NULL;
	char szExt[20];

	if (ffd->ctx) return GF_SERVICE_ERROR;

	assert( url && strlen(url) < 1024);
	strcpy(szName, url);
	ext = strrchr(szName, '#');
	ffd->service_type = 0;
	e = GF_NOT_SUPPORTED;
	ffd->service = serv;

	if (ext) {
		if (!stricmp(&ext[1], "video")) ffd->service_type = 1;
		else if (!stricmp(&ext[1], "audio")) ffd->service_type = 2;
		ext[0] = 0;
	}

	/*some extensions not supported by ffmpeg, overload input format*/
	ext = strrchr(szName, '.');
	strcpy(szExt, ext ? ext+1 : "");
	strlwr(szExt);
	if (!strcmp(szExt, "cmp")) av_in = av_find_input_format("m4v");

	is_local = (strnicmp(url, "file://", 7) && strstr(url, "://")) ? 0 : 1;

	GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[FFMPEG] opening file %s - local %d - av_in %08x\n", url, is_local, av_in));

	if (!is_local) {
		AVProbeData   pd;

		/*setup wraper for FFMPEG I/O*/
		ffd->buffer_size = 8192;
		sOpt = gf_modules_get_option((GF_BaseInterface *)plug, "FFMPEG", "IOBufferSize");
		if (sOpt) ffd->buffer_size = atoi(sOpt);
		ffd->buffer = gf_malloc(sizeof(char)*ffd->buffer_size);
#ifdef FFMPEG_DUMP_REMOTE
		ffd->outdbg = gf_f64_open("ffdeb.raw", "wb");
#endif
#ifdef USE_PRE_0_7
		init_put_byte(&ffd->io, ffd->buffer, ffd->buffer_size, 0, ffd, ff_url_read, NULL, NULL);
		ffd->io.is_streamed = 1;
#else
		ffd->io.seekable = 1;
#endif

		ffd->dnload = gf_service_download_new(ffd->service, url, GF_NETIO_SESSION_NOT_THREADED  | GF_NETIO_SESSION_NOT_CACHED, NULL, ffd);
		if (!ffd->dnload) return GF_URL_ERROR;
		while (1) {
			u32 read;
			e = gf_dm_sess_fetch_data(ffd->dnload, ffd->buffer + ffd->buffer_used, ffd->buffer_size - ffd->buffer_used, &read);
			if (e==GF_EOS) break;
			/*we're sync!!*/
			if (e==GF_IP_NETWORK_EMPTY) continue;
			if (e) goto err_exit;
			ffd->buffer_used += read;
			if (ffd->buffer_used == ffd->buffer_size) break;
		}
		if (e==GF_EOS) {
			const char *cache_file = gf_dm_sess_get_cache_name(ffd->dnload);
			res = open_file(&ffd->ctx, cache_file, av_in);
		} else {
			pd.filename = szName;
			pd.buf_size = ffd->buffer_used;
			pd.buf = (u8 *) ffd->buffer;
			av_in = av_probe_input_format(&pd, 1);
			if (!av_in) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] error probing file %s - probe start with %c %c %c %c\n", url, ffd->buffer[0], ffd->buffer[1], ffd->buffer[2], ffd->buffer[3]));
				return GF_NOT_SUPPORTED;
			}
			/*setup downloader*/
			av_in->flags |= AVFMT_NOFILE;
#ifdef USE_AVFORMAT_OPEN_INPUT /*commit ffmpeg 603b8bc2a109978c8499b06d2556f1433306eca7*/
			res = avformat_open_input(&ffd->ctx, szName, av_in, NULL);
#else
			res = av_open_input_stream(&ffd->ctx, &ffd->io, szName, av_in, NULL);
#endif
		}
	} else {
		res = open_file(&ffd->ctx, szName, av_in);
	}

	switch (res) {
#ifndef _WIN32_WCE
	case 0:
		e = GF_OK;
		break;
	case AVERROR_IO:
		e = GF_URL_ERROR;
		goto err_exit;
	case AVERROR_INVALIDDATA:
		e = GF_NON_COMPLIANT_BITSTREAM;
		goto err_exit;
	case AVERROR_NOMEM:
		e = GF_OUT_OF_MEM;
		goto err_exit;
	case AVERROR_NOFMT:
		e = GF_NOT_SUPPORTED;
		goto err_exit;
#endif
	default:
		e = GF_SERVICE_ERROR;
		goto err_exit;
	}

	GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[FFMPEG] looking for streams in %s - %d streams - type %s\n", ffd->ctx->filename, ffd->ctx->nb_streams, ffd->ctx->iformat->name));

	res = av_find_stream_info(ffd->ctx);
	if (res <0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] cannot locate streams - error %d\n", res));
		e = GF_NOT_SUPPORTED;
		goto err_exit;
	}
	GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[FFMPEG] file %s opened - %d streams\n", url, ffd->ctx->nb_streams));

	/*figure out if we can use codecs or not*/
	ffd->audio_st = ffd->video_st = -1;
	for (i = 0; i < ffd->ctx->nb_streams; i++) {
		AVCodecContext *enc = ffd->ctx->streams[i]->codec;
		switch(enc->codec_type) {
		case AVMEDIA_TYPE_AUDIO:
			if ((ffd->audio_st<0) && (ffd->service_type!=1)) {
				ffd->audio_st = i;
				ffd->audio_tscale = ffd->ctx->streams[i]->time_base;
			}
			break;
		case AVMEDIA_TYPE_VIDEO:
			if ((ffd->video_st<0) && (ffd->service_type!=2)) {
				ffd->video_st = i;
				ffd->video_tscale = ffd->ctx->streams[i]->time_base;
			}
			break;
		default:
			break;
		}
	}
	if ((ffd->service_type==1) && (ffd->video_st<0)) goto err_exit;
	if ((ffd->service_type==2) && (ffd->audio_st<0)) goto err_exit;
	if ((ffd->video_st<0) && (ffd->audio_st<0)) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] No supported streams in file\n"));
		goto err_exit;
	}


	sOpt = gf_modules_get_option((GF_BaseInterface *)plug, "FFMPEG", "DataBufferMS");
	ffd->data_buffer_ms = 0;
	if (sOpt) ffd->data_buffer_ms = atoi(sOpt);
	if (!ffd->data_buffer_ms) ffd->data_buffer_ms = FFD_DATA_BUFFER;

	/*build seek*/
	if (is_local) {
		/*check we do have increasing pts. If not we can't rely on pts, we must skip SL
		we assume video pts is always present*/
		if (ffd->audio_st>=0) {
			last_aud_pts = 0;
			for (i=0; i<20; i++) {
				AVPacket pkt;
				pkt.stream_index = -1;
				if (av_read_frame(ffd->ctx, &pkt) <0) break;
				if (pkt.pts == AV_NOPTS_VALUE) pkt.pts = pkt.dts;
				if (pkt.stream_index==ffd->audio_st) last_aud_pts = pkt.pts;
			}
			if (last_aud_pts*ffd->audio_tscale.den<10*ffd->audio_tscale.num) ffd->unreliable_audio_timing = 1;
		}

		ffd->seekable = (av_seek_frame(ffd->ctx, -1, 0, AVSEEK_FLAG_BACKWARD)<0) ? 0 : 1;
		if (!ffd->seekable) {
#ifndef FF_API_CLOSE_INPUT_FILE
			av_close_input_file(ffd->ctx);
#else
			avformat_close_input(&ffd->ctx);
#endif
			ffd->ctx = NULL;
			open_file(&ffd->ctx, szName, av_in);
			av_find_stream_info(ffd->ctx);
		}
	}

	/*let's go*/
	gf_service_connect_ack(serv, NULL, GF_OK);
	/*if (!ffd->service_type)*/ FFD_SetupObjects(ffd);
	ffd->service_type = 0;
	return GF_OK;

err_exit:
	GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] Error opening file %s: %s\n", url, gf_error_to_string(e)));
#ifndef FF_API_CLOSE_INPUT_FILE
	if (ffd->ctx) av_close_input_file(ffd->ctx);
#else
	if (ffd->ctx) avformat_close_input(&ffd->ctx);
#endif
	ffd->ctx = NULL;
	gf_service_connect_ack(serv, NULL, e);
	return GF_OK;
}
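
Note: the remote branch above uses the standard probing idiom for data that is not (yet) a local file: read the first bytes into a buffer, describe them with AVProbeData, and let av_probe_input_format() pick a demuxer. Reduced to its core (buffer and size are illustrative; some versions also expect AVPROBE_PADDING_SIZE zero bytes after the data):

static AVInputFormat *demo_probe(const char *name, uint8_t *buf, int size)
{
    AVProbeData pd;
    pd.filename = name;  /* may contribute extension-based scoring */
    pd.buf      = buf;   /* first bytes of the stream */
    pd.buf_size = size;
    /* is_opened=1: probe demuxers that read the data themselves
     * (formats flagged AVFMT_NOFILE are skipped) */
    return av_probe_input_format(&pd, 1);
}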
Code example #11
File: sapdec.c Project: kaone3/vsmm
static int sap_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    struct SAPState *sap = s->priv_data;
    char host[1024], path[1024], url[1024];
    uint8_t recvbuf[1500];
    int port;
    int ret, i;
    AVInputFormat* infmt;

    if (!ff_network_init())
        return AVERROR(EIO);

    av_url_split(NULL, 0, NULL, 0, host, sizeof(host), &port,
                 path, sizeof(path), s->filename);
    if (port < 0)
        port = 9875;

    if (!host[0]) {
        /* Listen for announcements on sap.mcast.net if no host was specified */
        av_strlcpy(host, "224.2.127.254", sizeof(host));
    }

    ff_url_join(url, sizeof(url), "udp", NULL, host, port, "?localport=%d",
                port);
    ret = url_open(&sap->ann_fd, url, URL_RDONLY);
    if (ret)
        goto fail;

    while (1) {
        int addr_type, auth_len;
        int pos;

        ret = url_read(sap->ann_fd, recvbuf, sizeof(recvbuf) - 1);
        if (ret == AVERROR(EAGAIN))
            continue;
        if (ret < 0)
            goto fail;
        recvbuf[ret] = '\0'; /* Null terminate for easier parsing */
        if (ret < 8) {
            av_log(s, AV_LOG_WARNING, "Received too short packet\n");
            continue;
        }

        if ((recvbuf[0] & 0xe0) != 0x20) {
            av_log(s, AV_LOG_WARNING, "Unsupported SAP version packet "
                                      "received\n");
            continue;
        }

        if (recvbuf[0] & 0x04) {
            av_log(s, AV_LOG_WARNING, "Received stream deletion "
                                      "announcement\n");
            continue;
        }
        addr_type = recvbuf[0] & 0x10;
        auth_len  = recvbuf[1];
        sap->hash = AV_RB16(&recvbuf[2]);
        pos = 4;
        if (addr_type)
            pos += 16; /* IPv6 */
        else
            pos += 4; /* IPv4 */
        pos += auth_len * 4;
        if (pos + 4 >= ret) {
            av_log(s, AV_LOG_WARNING, "Received too short packet\n");
            continue;
        }
#define MIME "application/sdp"
        if (strcmp(&recvbuf[pos], MIME) == 0) {
            pos += strlen(MIME) + 1;
        } else if (strncmp(&recvbuf[pos], "v=0\r\n", 5) == 0) {
            // Direct SDP without a mime type
        } else {
            av_log(s, AV_LOG_WARNING, "Unsupported mime type %s\n",
                                      &recvbuf[pos]);
            continue;
        }

        sap->sdp = av_strdup(&recvbuf[pos]);
        break;
    }

    av_log(s, AV_LOG_VERBOSE, "SDP:\n%s\n", sap->sdp);
    init_put_byte(&sap->sdp_pb, sap->sdp, strlen(sap->sdp), 0, NULL, NULL,
                  NULL, NULL);

    infmt = av_find_input_format("sdp");
    if (!infmt)
        goto fail;
    sap->sdp_ctx = avformat_alloc_context();
    if (!sap->sdp_ctx) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    sap->sdp_ctx->max_delay = s->max_delay;
    ap->prealloced_context = 1;
    ret = av_open_input_stream(&sap->sdp_ctx, &sap->sdp_pb, "temp.sdp",
                               infmt, ap);
    if (ret < 0)
        goto fail;
    if (sap->sdp_ctx->ctx_flags & AVFMTCTX_NOHEADER)
        s->ctx_flags |= AVFMTCTX_NOHEADER;
    for (i = 0; i < sap->sdp_ctx->nb_streams; i++) {
        AVStream *st = av_new_stream(s, i);
        if (!st) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        avcodec_copy_context(st->codec, sap->sdp_ctx->streams[i]->codec);
        st->time_base = sap->sdp_ctx->streams[i]->time_base;
    }

    return 0;

fail:
    sap_read_close(s);
    return ret;
}
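
Note: this is the central "demux from memory" pattern of most examples on this page: a read-mode ByteIOContext over a buffer, handed to av_open_input_stream() together with an explicitly chosen AVInputFormat. A minimal sketch (format name and buffer are illustrative; pb must outlive the returned context, as sap->sdp_pb does here):

static int demo_open_from_memory(AVFormatContext **ic, ByteIOContext *pb,
                                 uint8_t *data, int size,
                                 const char *fmt_name)
{
    AVInputFormat *fmt = av_find_input_format(fmt_name);
    if (!fmt)
        return -1;
    /* NULL read callback + write_flag=0: the buffer itself is the input */
    init_put_byte(pb, data, size, 0, NULL, NULL, NULL, NULL);
    return av_open_input_stream(ic, pb, "memory", fmt, NULL);
}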
Code example #12
File: rtpdec_asf.c Project: RJVB/FFusion
/**
 * @return 0 when a packet was written into \p pkt, and no more data is left;
 *         1 when a packet was written into \p pkt, and more packets might be left;
 *        <0 when not enough data was provided to return a full packet, or on error.
 */
static int asfrtp_parse_packet(AVFormatContext *s, PayloadContext *asf,
                               AVStream *st, AVPacket *pkt,
                               uint32_t *timestamp,
                               const uint8_t *buf, int len, int flags)
{
    ByteIOContext *pb = &asf->pb;
    int res, mflags, len_off;
    RTSPState *rt = s->priv_data;

    if (!rt->asf_ctx)
        return -1;

    if (len > 0) {
        int off, out_len = 0;

        if (len < 4)
            return -1;

        av_freep(&asf->buf);

        init_put_byte(pb, buf, len, 0, NULL, NULL, NULL, NULL);

        while (url_ftell(pb) + 4 < len) {
            int start_off = url_ftell(pb);

            mflags = get_byte(pb);
            if (mflags & 0x80)
                flags |= RTP_FLAG_KEY;
            len_off = get_be24(pb);
            if (mflags & 0x20)   /**< relative timestamp */
                url_fskip(pb, 4);
            if (mflags & 0x10)   /**< has duration */
                url_fskip(pb, 4);
            if (mflags & 0x8)    /**< has location ID */
                url_fskip(pb, 4);
            off = url_ftell(pb);

            if (!(mflags & 0x40)) {
                /**
                 * If 0x40 is not set, the len_off field specifies an offset
                 * of this packet's payload data in the complete (reassembled)
                 * ASF packet. This is used to spread one ASF packet over
                 * multiple RTP packets.
                 */
                if (asf->pktbuf && len_off != url_ftell(asf->pktbuf)) {
                    uint8_t *p;
                    url_close_dyn_buf(asf->pktbuf, &p);
                    asf->pktbuf = NULL;
                    av_free(p);
                }
                if (!len_off && !asf->pktbuf &&
                    (res = url_open_dyn_buf(&asf->pktbuf)) < 0)
                    return res;
                if (!asf->pktbuf)
                    return AVERROR(EIO);

                put_buffer(asf->pktbuf, buf + off, len - off);
                url_fskip(pb, len - off);
                if (!(flags & RTP_FLAG_MARKER))
                    return -1;
                out_len     = url_close_dyn_buf(asf->pktbuf, &asf->buf);
                asf->pktbuf = NULL;
            } else {
                /**
                 * If 0x40 is set, the len_off field specifies the length of
                 * the next ASF packet that can be read from this payload
                 * data alone. This is commonly the same as the payload size,
                 * but could be less in case of packet splitting (i.e.
                 * multiple ASF packets in one RTP packet).
                 */

                int cur_len = start_off + len_off - off;
                int prev_len = out_len;
                out_len += cur_len;
                asf->buf = av_realloc(asf->buf, out_len);
                memcpy(asf->buf + prev_len, buf + off,
                       FFMIN(cur_len, len - off));
                url_fskip(pb, cur_len);
            }
        }

        init_packetizer(pb, asf->buf, out_len);
        pb->pos += rt->asf_pb_pos;
        pb->eof_reached = 0;
        rt->asf_ctx->pb = pb;
    }

    for (;;) {
        int i;

        res = av_read_packet(rt->asf_ctx, pkt);
        rt->asf_pb_pos = url_ftell(pb);
        if (res != 0)
            break;
        for (i = 0; i < s->nb_streams; i++) {
            if (s->streams[i]->id == rt->asf_ctx->streams[pkt->stream_index]->id) {
                pkt->stream_index = i;
                return 1; // FIXME: return 0 if last packet
            }
        }
        av_free_packet(pkt);
    }

    return res == 1 ? -1 : res;
}
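
Note: a worked example of the two len_off interpretations, with illustrative numbers (a 4-byte sub-header: one flag byte plus the 24-bit len_off, no optional fields):

/* mflags & 0x40 set:   start_off=0, off=4, len_off=500
 *                      -> cur_len = start_off + len_off - off = 496 bytes
 *                      of one complete ASF packet inside this RTP payload.
 * mflags & 0x40 clear: len_off=1000 means this fragment belongs at byte
 *                      offset 1000 of the ASF packet being reassembled in
 *                      asf->pktbuf; the packet is finished only when the
 *                      RTP marker flag arrives. */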
Code example #13
File: ffmpeg.cpp Project: LINGQ1991/av_cache
int CFfmpeg::Open(const char* pszUrl)
{
	unsigned int i;

	m_sUrl = pszUrl;
	m_sUrl.erase(0, strlen("ffmpeg://"));
	
	//+
	infmt_ctx = avformat_alloc_context();
	//ring_buffer_write(&cbuffer.ringbuffer, inputbuffer, sizeof(inputbuffer));
	//unsigned char* inputbuffer = NULL;
	//inputbuffer = (unsigned char*)malloc(MAIN_BUFFER_SIZE);
	init_put_byte(&inputpb, inputbuffer, MAIN_BUFFER_SIZE, 0, &cbuffer, i_read_data, NULL, i_seek_data );
	//inputpb.buf_end = inputpb.buf_ptr;
	infmt_ctx->pb = &inputpb;
	//av_read_frame(infmt_ctx, &pkt);
	//+
	avformat_open_input(&infmt_ctx, m_sUrl.c_str(), NULL, NULL);
	if(!infmt_ctx)
	{
		FFMPEG_ERROR("unknown url: %s", pszUrl);
		return -1;
	}
	
	av_find_stream_info(infmt_ctx);
	av_dump_format(infmt_ctx, 0, m_sUrl.c_str(), 0);
	
	filesize = avio_size(infmt_ctx->pb);
	printf("filesize = %d\n", filesize);
	
	check_transcode();

	if(!transcode)
	{
		if(infmt_ctx)
		{
			av_close_input_file(infmt_ctx);
			infmt_ctx = NULL;
		}
		m_pFp = fopen(m_sUrl.c_str(), "rb");
		if(!m_pFp)
		{
			//perror("fopen");
			FFMPEG_ERROR("error fopen: %s", strerror(errno));
			return -1;
		}
	}
	else
	{
		FFMPEG_DEBUG("transcode or remux");
		avformat_alloc_output_context2(&oc, NULL, "mpegts", NULL);

		unsigned int pid = 0x100;
		for(i=0; i<infmt_ctx->nb_streams; i++)
		{
			AVStream *stream = infmt_ctx->streams[i];
			if(stream->codec->codec_type==AVMEDIA_TYPE_VIDEO && video==-1)
			{
				video = i;
				FFMPEG_DEBUG("video index: %d, pid: 0x%x", i, pid++);
				vst = av_new_stream(oc, 0);
				avcodec_copy_context(vst->codec, infmt_ctx->streams[video]->codec); 
				//vst->codec->time_base = infmt_ctx->streams[video]->time_base;
				vst->codec->sample_aspect_ratio = vst->sample_aspect_ratio = infmt_ctx->streams[video]->codec->sample_aspect_ratio;
				vst->stream_copy = 1;
				vst->avg_frame_rate = infmt_ctx->streams[video]->avg_frame_rate;
				vst->discard = AVDISCARD_NONE;
				vst->disposition = infmt_ctx->streams[video]->disposition;
				vst->duration = infmt_ctx->streams[video]->duration;
				vst->first_dts = infmt_ctx->streams[video]->first_dts;
				vst->r_frame_rate = infmt_ctx->streams[video]->r_frame_rate;
				vst->time_base = infmt_ctx->streams[video]->time_base;
				vst->quality = infmt_ctx->streams[video]->quality;
				vst->start_time = infmt_ctx->streams[video]->start_time;
			}
			else if(stream->codec->codec_type==AVMEDIA_TYPE_AUDIO && audio1==-1)
			{
				audio1 = i;
				FFMPEG_DEBUG("audio1 index: %d, pid: 0x%x", i, pid++);
				ast1 = av_new_stream(oc, 0);
				if(stream->codec->codec_id == CODEC_ID_AC3
					|| stream->codec->codec_id == CODEC_ID_DTS
					|| stream->codec->codec_id == CODEC_ID_PCM_S16BE
					|| stream->codec->codec_id == CODEC_ID_PCM_S16LE)
				{
					acodec1 = stream->codec;
					AVCodec *inAcodec = avcodec_find_decoder(stream->codec->codec_id);     
					avcodec_open(stream->codec, inAcodec);     
					AVCodec *outAcodec = avcodec_find_encoder(CODEC_ID_MP2);
					//ast1->codec = avcodec_alloc_context3(outAcodec);
					ast1->codec->bit_rate = 128000;
					ast1->codec->sample_rate = stream->codec->sample_rate;
					if(stream->codec->channels > 2)
					{
						stream->codec->request_channels = 2;
					}
					ast1->codec->channels = 2;
					ast1->codec->sample_fmt = AV_SAMPLE_FMT_S16;
					avcodec_open(ast1->codec, outAcodec);
					ast1->codec->time_base = infmt_ctx->streams[audio1]->time_base;
					ring_buffer_init(&adecrbuffer1, 524288);
				}
				else
				{
					avcodec_copy_context(ast1->codec, infmt_ctx->streams[audio1]->codec);
					//ast1->codec->time_base = infmt_ctx->streams[audio1]->time_base;
					ast1->stream_copy = 1;
					ast1->first_dts = infmt_ctx->streams[audio1]->first_dts;
					ast1->r_frame_rate = infmt_ctx->streams[audio1]->r_frame_rate;
					ast1->time_base = infmt_ctx->streams[audio1]->time_base;
					ast1->quality = infmt_ctx->streams[audio1]->quality;
					ast1->start_time = infmt_ctx->streams[audio1]->start_time;
					ast1->duration = infmt_ctx->streams[audio1]->duration;
				}
			}
			else if(stream->codec->codec_type==AVMEDIA_TYPE_AUDIO && audio1!=i && audio2==-1)
			{
				audio2 = i;
				FFMPEG_DEBUG("audio2 index: %d, pid: 0x%x", i, pid++);
				ast2 = av_new_stream(oc, 0);
				if(stream->codec->codec_id == CODEC_ID_AC3
					|| stream->codec->codec_id == CODEC_ID_DTS
					|| stream->codec->codec_id == CODEC_ID_PCM_S16BE
					|| stream->codec->codec_id == CODEC_ID_PCM_S16LE)
				{
					acodec2 = stream->codec;
					AVCodec *inAcodec = avcodec_find_decoder(stream->codec->codec_id);     
					avcodec_open(stream->codec, inAcodec);     
					AVCodec *outAcodec = avcodec_find_encoder(CODEC_ID_MP2);
					//ast2->codec = avcodec_alloc_context3(outAcodec);
					ast2->codec->bit_rate = 128000;
					ast2->codec->sample_rate = stream->codec->sample_rate;
					if(stream->codec->channels > 2)
					{
						stream->codec->request_channels = 2;
					}
					ast2->codec->channels = 2;
					ast2->codec->sample_fmt = AV_SAMPLE_FMT_S16;
					avcodec_open(ast2->codec, outAcodec);
					ast2->codec->time_base = infmt_ctx->streams[audio2]->time_base;
					ring_buffer_init(&adecrbuffer2, 524288);
				}
				else
				{
					avcodec_copy_context(ast2->codec, infmt_ctx->streams[audio2]->codec);
					//ast2->codec->time_base = infmt_ctx->streams[audio2]->time_base;
					ast2->stream_copy = 1;
					ast2->first_dts = infmt_ctx->streams[audio2]->first_dts;
					ast2->r_frame_rate = infmt_ctx->streams[audio2]->r_frame_rate;
					ast2->time_base = infmt_ctx->streams[audio2]->time_base;
					ast2->quality = infmt_ctx->streams[audio2]->quality;
					ast2->start_time = infmt_ctx->streams[audio2]->start_time;
					ast2->duration = infmt_ctx->streams[audio2]->duration;
				}
			}
		}
		
		init_put_byte(&outputpb, outputbuffer, MAIN_BUFFER_SIZE, 1, &outputringbuffer, NULL, write_data, NULL );
		oc->pb = &outputpb;
		avformat_write_header(oc, NULL);
		//av_dump_format(oc, 0, "output.ts", 1);

		if(infmt_ctx->streams[video]->codec->codec_id == CODEC_ID_H264)
		{
			FFMPEG_DEBUG("open h264_mp4toannexb filter");
			bsfc = av_bitstream_filter_init("h264_mp4toannexb");
			if (!bsfc)
			{
				FFMPEG_ERROR("Cannot open the h264_mp4toannexb BSF!");
				return -1;
			}
		}
	}
	return 0;
}
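
Note: the output side above mirrors the input wrapper: a write-mode ByteIOContext whose write_packet callback receives every flushed buffer (here write_data() feeding outputringbuffer). A self-contained sketch with a FILE* as the illustrative sink:

#include <stdio.h>

static int demo_write_cb(void *opaque, uint8_t *buf, int buf_size)
{
    /* invoked whenever the io buffer fills up or is explicitly flushed */
    return (int)fwrite(buf, 1, buf_size, (FILE *)opaque);
}

static void demo_attach_output(AVFormatContext *oc, FILE *sink,
                               uint8_t *iobuf, int iobuf_size)
{
    static ByteIOContext out;   /* must outlive the muxing session */
    init_put_byte(&out, iobuf, iobuf_size, 1, sink,
                  NULL, demo_write_cb, NULL);
    oc->pb = &out;              /* set before avformat_write_header() */
}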