Code example #1
File: rtpdec_asf.c Project: RJVB/FFusion
int ff_wms_parse_sdp_a_line(AVFormatContext *s, const char *p)
{
    int ret = 0;
    if (av_strstart(p, "pgmpu:data:application/vnd.ms.wms-hdr.asfv1;base64,", &p)) {
        ByteIOContext pb;
        RTSPState *rt = s->priv_data;
        int len = strlen(p) * 6 / 8;
        char *buf = av_mallocz(len);
        av_base64_decode(buf, p, len);

        if (rtp_asf_fix_header(buf, len) < 0)
            av_log(s, AV_LOG_ERROR,
                   "Failed to fix invalid RTSP-MS/ASF min_pktsize\n");
        init_packetizer(&pb, buf, len);
        if (rt->asf_ctx) {
            av_close_input_stream(rt->asf_ctx);
            rt->asf_ctx = NULL;
        }
        ret = av_open_input_stream(&rt->asf_ctx, &pb, "", &asf_demuxer, NULL);
        if (ret < 0) {
            av_free(buf);  /* don't leak the decoded header buffer on failure */
            return ret;
        }
        av_metadata_copy(&s->metadata, rt->asf_ctx->metadata, 0);
        rt->asf_pb_pos = url_ftell(&pb);
        av_free(buf);
        rt->asf_ctx->pb = NULL;
    }
    return ret;
}
Code example #2
File: fa_libav.c Project: stallman/showtime
AVFormatContext *
fa_libav_open_format(AVIOContext *avio, const char *url, 
		     char *errbuf, size_t errlen)
{
  AVInputFormat *fmt = NULL;
  AVFormatContext *fctx;
  int err;

  avio_seek(avio, 0, SEEK_SET);

  if((err = av_probe_input_buffer(avio, &fmt, url, NULL, 0, 0)) != 0)
    return fa_libav_open_error(errbuf, errlen,
			       "Unable to probe file", err);

  if(fmt == NULL) {
    snprintf(errbuf, errlen, "Unknown file format");
    return NULL;
  }

  if((err = av_open_input_stream(&fctx, avio, url, fmt, NULL)) != 0)
    return fa_libav_open_error(errbuf, errlen,
			       "Unable to open file as input format", err);

  if(av_find_stream_info(fctx) < 0) {
    av_close_input_stream(fctx);
    return fa_libav_open_error(errbuf, errlen,
			       "Unable to handle file contents", err);
  }

  return fctx;
}
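For reference: on current FFmpeg the flow above maps onto avformat_open_input() and avformat_find_stream_info(), which replaced av_open_input_stream() and av_find_stream_info(); explicit probing becomes optional because avformat_open_input() probes an attached AVIOContext by itself. A minimal sketch under that assumption (the helper name open_from_avio is illustrative, not part of the showtime source, and error reporting is reduced to returning NULL):

/* Hypothetical sketch of the same flow on the post-av_open_input_stream API;
 * only the libavformat calls are real, the helper itself is illustrative. */
#include <libavformat/avformat.h>

static AVFormatContext *
open_from_avio(AVIOContext *avio, const char *url)
{
  AVFormatContext *fctx = avformat_alloc_context();
  if(fctx == NULL)
    return NULL;

  fctx->pb = avio;  /* hand the caller's I/O context to libavformat */

  /* Probes the format itself; on failure it frees the context and
     resets fctx to NULL. */
  if(avformat_open_input(&fctx, url, NULL, NULL) < 0)
    return NULL;

  if(avformat_find_stream_info(fctx, NULL) < 0) {
    avformat_close_input(&fctx);
    return NULL;
  }
  return fctx;
}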
Code example #3
static PayloadContext *
rdt_new_context (void)
{
	PayloadContext *rdt = av_mallocz(sizeof(PayloadContext));

	av_open_input_stream(&rdt->rmctx, NULL, "", &ff_rdt_demuxer, NULL);

	return rdt;
}
Code example #4
	int
decoder_load_info( Decoder *pDecoder ) {

	unsigned int i = 0;
	AVProbeData prbData;
	uint8_t *pBuffer, *pIoBuffer;
	AVInputFormat *pInputFmt;
	ByteIOContext *pIoCtx;

	prbData.filename = "DUMMY";
	prbData.buf_size = LARGE_BUFFER_SIZE;
	pBuffer = (uint8_t*) av_mallocz( LARGE_BUFFER_SIZE );
	memset( pBuffer + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE );

	pDecoder->fpRead( pDecoder->pOpaqueSource, pBuffer, LARGE_BUFFER_SIZE );
	prbData.buf = pBuffer;

	pInputFmt = av_probe_input_format( &prbData, 1 );

	pDecoder->fpSeek( pDecoder->pOpaqueSource, 0, SEEK_SET );

	pIoBuffer = (uint8_t*) av_mallocz( INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE );

	pIoCtx = av_alloc_put_byte( pIoBuffer, INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE,
		0, pDecoder->pOpaqueSource, pDecoder->fpRead, pDecoder->fpWrite, pDecoder->fpSeek );

	if( pIoCtx == NULL )
	{
		printf("Failed getting the byte io context \n");
		goto error;
	}

	if( av_open_input_stream( &pDecoder->pFmtCtx, pIoCtx, "DUMMY", pInputFmt, NULL ) < 0 )
	{
		error("Failed opening input stream");
		goto error;
	}

	if( __load_codec_ctx( pDecoder ) < 0 )
	{
		error("Failed loading the codec details");
		goto error;
	}

	av_free( pBuffer );
	pBuffer = NULL;
	
	return SUCCESS;

error:
	av_free( pBuffer );
	pBuffer = NULL;
	return ERROR;

}
Code example #5
File: ffmpeg_dev.c Project: silvansky/pjsip_mod
static pj_status_t ffmpeg_capture_open(AVFormatContext **ctx,
                                       AVInputFormat *ifmt,
                                       const char *dev_name,
                                       const pjmedia_vid_dev_param *param)
{
    AVFormatParameters fp;
    pjmedia_video_format_detail *vfd;
    int err;

    PJ_ASSERT_RETURN(ctx && ifmt && dev_name && param, PJ_EINVAL);
    PJ_ASSERT_RETURN(param->fmt.detail_type == PJMEDIA_FORMAT_DETAIL_VIDEO,
                     PJ_EINVAL);

    vfd = pjmedia_format_get_video_format_detail(&param->fmt, PJ_TRUE);

    /* Init ffmpeg format context */
    *ctx = avformat_alloc_context();

    /* Init ffmpeg format param */
    pj_bzero(&fp, sizeof(fp));
    fp.prealloced_context = 1;
    fp.width = vfd->size.w;
    fp.height = vfd->size.h;
    fp.pix_fmt = PIX_FMT_BGR24;
    fp.time_base.num = vfd->fps.denum;
    fp.time_base.den = vfd->fps.num;

    /* Open capture stream */
    err = av_open_input_stream(ctx, NULL, dev_name, ifmt, &fp);
    if (err < 0) {
        *ctx = NULL; /* ffmpeg freed its states on failure, so we must too */
        print_ffmpeg_err(err);
        return PJ_EUNKNOWN;
    }

    return PJ_SUCCESS;
}
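The AVFormatParameters fields set above (width, height, pix_fmt, time_base) were later removed from libavformat; newer FFmpeg passes such capture settings to avformat_open_input() as AVDictionary options, the same way Code example #14 below does. A minimal sketch under that assumption; the helper name and the fixed option values are illustrative, not part of the pjsip source:

/* Hypothetical sketch: dictionary-based equivalent of the open call above.
 * "video_size", "pixel_format" and "framerate" are libavdevice input options
 * (see also Code example #14); error handling is reduced to the essentials. */
#include <stdio.h>
#include <libavformat/avformat.h>

static int capture_open_with_dict(AVFormatContext **ctx, AVInputFormat *ifmt,
                                  const char *dev_name,
                                  unsigned w, unsigned h, unsigned fps)
{
    AVDictionary *opts = NULL;
    char video_size[32], framerate[16];
    int err;

    snprintf(video_size, sizeof(video_size), "%ux%u", w, h);
    snprintf(framerate, sizeof(framerate), "%u", fps);

    av_dict_set(&opts, "video_size",   video_size, 0);
    av_dict_set(&opts, "pixel_format", "bgr24",    0);
    av_dict_set(&opts, "framerate",    framerate,  0);

    *ctx = NULL;
    err = avformat_open_input(ctx, dev_name, ifmt, &opts);
    av_dict_free(&opts); /* options the demuxer did not consume remain here */
    return err;
}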
Code example #6
Omm::AvStream::Meta*
FFmpegTagger::tag(std::istream& istr)
{
    FFmpegMeta* pMeta = new FFmpegMeta;
    int error;
    pMeta->_pInputFormat = probeInputFormat(istr);

    pMeta->_pIoBuffer = new unsigned char[_IoBufferSize];
    pMeta->_pIoContext = initIo(istr, false, pMeta->_pIoBuffer);
//     pMeta->_pIoContext = initIo(istr, true, pMeta->_pIoBuffer);

    pMeta->_useAvOpenInputStream = true;

    AVFormatParameters avFormatParameters;
    memset(&avFormatParameters, 0, sizeof(avFormatParameters));
    avFormatParameters.prealloced_context = 1;
    pMeta->_pFormatContext->probesize = 20000;
    pMeta->_pFormatContext->max_analyze_duration = 5000000;
//     pMeta->_pFormatContext->flags |= AVFMT_FLAG_NONBLOCK;

    LOG(ffmpeg, trace, "ffmpeg::av_open_input_stream() ...");
    // FIXME: av_open_input_stream needs to read several megabytes of a TS.
    error = av_open_input_stream(&pMeta->_pFormatContext, pMeta->_pIoContext, "std::istream", pMeta->_pInputFormat, &avFormatParameters);
//     error = av_open_input_stream(&pMeta->_pFormatContext, pMeta->_pIoContext, "std::istream", pMeta->_pInputFormat, 0);
//     error = av_open_input_file(&pMeta->_pFormatContext, "std::istream", pMeta->_pInputFormat, 0, 0);
    if (error < 0) {
        LOGNS(Omm::AvStream, avstream, error, "av_open_input_stream() failed");
        return 0;
    }
    LOG(ffmpeg, trace, "probesize: " + Poco::NumberFormatter::format(pMeta->_pFormatContext->probesize) +\
    ", max_analyze_duration: " + Poco::NumberFormatter::format(pMeta->_pFormatContext->max_analyze_duration));

    AVMetadataTag* tag = 0;
    LOG(ffmpeg, trace, "ffmpeg::av_metadata_get() ...");
    while ((tag = av_metadata_get(pMeta->_pFormatContext->metadata, "", tag, AV_METADATA_IGNORE_SUFFIX))) {
        std::clog << tag->key << ", " << tag->value << std::endl;
        pMeta->setTag(tag->key, tag->value);
    }

    LOG(ffmpeg, trace, "ffmpeg::av_find_stream_info() ...");
    error = av_find_stream_info(pMeta->_pFormatContext);
    if (error < 0) {
        LOGNS(Omm::AvStream, avstream, error, "av_find_stream_info() failed, could not find codec parameters");
        return 0;
    }

//     pMeta->_pFormatContext->flags |= AVFMT_FLAG_NONBLOCK;

//     if (pMeta->_pFormatContext->flags & AVFMT_FLAG_NONBLOCK) {
//         std::clog << "frame reading is set to NONBLOCK" << std::endl;
//     }
//     else {
//         std::clog << "frame reading is set to BLOCK" << std::endl;
//     }

    for(int streamNr = 0; streamNr < pMeta->_pFormatContext->nb_streams; streamNr++) {
        FFmpegStreamInfo* pStreamInfo = new FFmpegStreamInfo;
        pStreamInfo->_pAvStream = pMeta->_pFormatContext->streams[streamNr];
        pMeta->addStream(pStreamInfo);
    }

    return pMeta;
}
Code example #7
File: sapdec.c Project: kaone3/vsmm
static int sap_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    struct SAPState *sap = s->priv_data;
    char host[1024], path[1024], url[1024];
    uint8_t recvbuf[1500];
    int port;
    int ret, i;
    AVInputFormat* infmt;

    if (!ff_network_init())
        return AVERROR(EIO);

    av_url_split(NULL, 0, NULL, 0, host, sizeof(host), &port,
                 path, sizeof(path), s->filename);
    if (port < 0)
        port = 9875;

    if (!host[0]) {
        /* Listen for announcements on sap.mcast.net if no host was specified */
        av_strlcpy(host, "224.2.127.254", sizeof(host));
    }

    ff_url_join(url, sizeof(url), "udp", NULL, host, port, "?localport=%d",
                port);
    ret = url_open(&sap->ann_fd, url, URL_RDONLY);
    if (ret)
        goto fail;

    while (1) {
        int addr_type, auth_len;
        int pos;

        ret = url_read(sap->ann_fd, recvbuf, sizeof(recvbuf) - 1);
        if (ret == AVERROR(EAGAIN))
            continue;
        if (ret < 0)
            goto fail;
        recvbuf[ret] = '\0'; /* Null terminate for easier parsing */
        if (ret < 8) {
            av_log(s, AV_LOG_WARNING, "Received too short packet\n");
            continue;
        }

        if ((recvbuf[0] & 0xe0) != 0x20) {
            av_log(s, AV_LOG_WARNING, "Unsupported SAP version packet "
                                      "received\n");
            continue;
        }

        if (recvbuf[0] & 0x04) {
            av_log(s, AV_LOG_WARNING, "Received stream deletion "
                                      "announcement\n");
            continue;
        }
        addr_type = recvbuf[0] & 0x10;
        auth_len  = recvbuf[1];
        sap->hash = AV_RB16(&recvbuf[2]);
        pos = 4;
        if (addr_type)
            pos += 16; /* IPv6 */
        else
            pos += 4; /* IPv4 */
        pos += auth_len * 4;
        if (pos + 4 >= ret) {
            av_log(s, AV_LOG_WARNING, "Received too short packet\n");
            continue;
        }
#define MIME "application/sdp"
        if (strcmp(&recvbuf[pos], MIME) == 0) {
            pos += strlen(MIME) + 1;
        } else if (strncmp(&recvbuf[pos], "v=0\r\n", 5) == 0) {
            // Direct SDP without a mime type
        } else {
            av_log(s, AV_LOG_WARNING, "Unsupported mime type %s\n",
                                      &recvbuf[pos]);
            continue;
        }

        sap->sdp = av_strdup(&recvbuf[pos]);
        break;
    }

    av_log(s, AV_LOG_VERBOSE, "SDP:\n%s\n", sap->sdp);
    init_put_byte(&sap->sdp_pb, sap->sdp, strlen(sap->sdp), 0, NULL, NULL,
                  NULL, NULL);

    infmt = av_find_input_format("sdp");
    if (!infmt)
        goto fail;
    sap->sdp_ctx = avformat_alloc_context();
    if (!sap->sdp_ctx) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    sap->sdp_ctx->max_delay = s->max_delay;
    ap->prealloced_context = 1;
    ret = av_open_input_stream(&sap->sdp_ctx, &sap->sdp_pb, "temp.sdp",
                               infmt, ap);
    if (ret < 0)
        goto fail;
    if (sap->sdp_ctx->ctx_flags & AVFMTCTX_NOHEADER)
        s->ctx_flags |= AVFMTCTX_NOHEADER;
    for (i = 0; i < sap->sdp_ctx->nb_streams; i++) {
        AVStream *st = av_new_stream(s, i);
        if (!st) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        avcodec_copy_context(st->codec, sap->sdp_ctx->streams[i]->codec);
        st->time_base = sap->sdp_ctx->streams[i]->time_base;
    }

    return 0;

fail:
    sap_read_close(s);
    return ret;
}
Code example #8
static demuxer_t* demux_open_lavf(demuxer_t *demuxer){
    AVFormatContext *avfc;
    AVFormatParameters ap;
    const AVOption *opt;
    AVMetadataTag *t = NULL;
    lavf_priv_t *priv= demuxer->priv;
    int i;
    //start_time = 0.0;
    char mp_filename[256]="mp:";
    memset(&ap, 0, sizeof(AVFormatParameters));

    //demux_lavf_find_geodata(demuxer);

    stream_seek(demuxer->stream, 0);

    int filepos=stream_tell(demuxer->stream);
    
    avfc = avformat_alloc_context();

    if (opt_cryptokey)
        parse_cryptokey(avfc, opt_cryptokey);
    if (user_correct_pts != 0)
        avfc->flags |= AVFMT_FLAG_GENPTS;
    /* if (index_mode == 0) */
    /*     avfc->flags |= AVFMT_FLAG_IGNIDX; */

    ap.prealloced_context = 1;
#if 0
    if(opt_probesize) {
        opt = av_set_int(avfc, "probesize", opt_probesize);
        if(!opt) mp_msg(MSGT_HEADER,MSGL_ERR, "demux_lavf, couldn't set option probesize to %u\n", opt_probesize);
    }
    if(opt_analyzeduration) {
        opt = av_set_int(avfc, "analyzeduration", opt_analyzeduration * AV_TIME_BASE);
        if(!opt) mp_msg(MSGT_HEADER,MSGL_ERR, "demux_lavf, couldn't set option analyzeduration to %u\n", opt_analyzeduration);
    }

    if(opt_avopt){
        if(parse_avopts(avfc, opt_avopt) < 0){
            mp_msg(MSGT_HEADER,MSGL_ERR, "Your options /%s/ look like gibberish to me pal\n", opt_avopt);
            return NULL;
        }
    }
#endif
    if(demuxer->stream->url) {
        if (!strncmp(demuxer->stream->url, "ffmpeg://rtsp:", 14))
            av_strlcpy(mp_filename, demuxer->stream->url + 9, sizeof(mp_filename));
        else
            av_strlcat(mp_filename, demuxer->stream->url, sizeof(mp_filename));
    } else
        av_strlcat(mp_filename, "foobar.dummy", sizeof(mp_filename));

    priv->pb = av_alloc_put_byte(priv->buffer, BIO_BUFFER_SIZE, 0,
                                 demuxer, mp_read, NULL, mp_seek);
    priv->pb->read_seek = mp_read_seek;
    priv->pb->is_streamed = !demuxer->stream->end_pos || (demuxer->stream->flags & MP_STREAM_SEEK) != MP_STREAM_SEEK;
    filepos=stream_tell(demuxer->stream);
    if(av_open_input_stream(&avfc, priv->pb, mp_filename, priv->avif, &ap)<0){
        mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF_header: av_open_input_stream() failed\n");
        return NULL;
    }
    filepos=stream_tell(demuxer->stream);
    priv->avfc= avfc;
    if(av_find_stream_info(avfc) < 0){
        mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF_header: av_find_stream_info() failed\n");
        return NULL;
    }
    filepos=stream_tell(demuxer->stream);
    if(!strncmp(avfc->iformat->name,"aac",4))
      get_aac_duration(demuxer,avfc);

    if(!strncmp(avfc->iformat->name,"mp3",4))
       priv->avfc->duration = get_mp3_duration(demuxer) * AV_TIME_BASE;

    /* Add metadata. */
    av_metadata_conv(avfc, NULL, avfc->iformat->metadata_conv);
    while((t = av_metadata_get(avfc->metadata, "", t, AV_METADATA_IGNORE_SUFFIX))){
        demux_info_add(demuxer, t->key, t->value);
    }

    for(i=0; i < avfc->nb_chapters; i++) {
        AVChapter *c = avfc->chapters[i];
        uint64_t start = av_rescale_q(c->start, c->time_base, (AVRational){1,1000});
        uint64_t end   = av_rescale_q(c->end, c->time_base, (AVRational){1,1000});
        t = av_metadata_get(c->metadata, "title", NULL, 0);
        demuxer_add_chapter(demuxer, t ? t->value : NULL, start, end);
    }
    demuxer->ttaflag = 0;
    for(i=0; i<avfc->nb_streams; i++)
        handle_stream(demuxer, avfc, i);
    if(demuxer->matroflag && !demuxer->ttaflag)
      return NULL;
    if(avfc->nb_programs) {
        int p;
        for (p = 0; p < avfc->nb_programs; p++) {
            AVProgram *program = avfc->programs[p];
            t = av_metadata_get(program->metadata, "title", NULL, 0);
            mp_msg(MSGT_HEADER,MSGL_INFO,"LAVF: Program %d %s\n", program->id, t ? t->value : "");
        }
    }

    mp_msg(MSGT_HEADER,MSGL_V,"LAVF: %d audio and %d video streams found\n",priv->audio_streams,priv->video_streams);
    mp_msg(MSGT_HEADER,MSGL_V,"LAVF: build %d\n", LIBAVFORMAT_BUILD);
    if(!priv->audio_streams) demuxer->audio->id=-2;  // nosound
//    else if(best_audio > 0 && demuxer->audio->id == -1) demuxer->audio->id=best_audio;
    if(!priv->video_streams){
        if(!priv->audio_streams){
	    mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF: no audio or video headers found - broken file?\n");
            return NULL;
        }
        demuxer->video->id=-2; // audio-only
    } //else if (best_video > 0 && demuxer->video->id == -1) demuxer->video->id = best_video;

    filepos=stream_tell(demuxer->stream);

    int j=0;
    AVCodecContext *codec;
    AVCodec *avc;
    demuxer->audio_info = malloc(100*avfc->nb_streams);
    for(i=0; i<avfc->nb_streams; i++){
      if(demuxer->a_streams[i]){
	char *info = malloc(100);
	codec= ((AVStream *)avfc->streams[i])->codec;
	avc = avcodec_find_decoder(codec->codec_id);
	char *cn = avc ? avc->name : "unknown";
	char *codec_name = malloc(100);
	strncpy(codec_name,cn,100);
	
	if((codec_name[0]=='d')&&(codec_name[1]=='c')&&(codec_name[2]=='a'))
	  strncpy(codec_name,"dts",100);
	int a;
	for(a=0;codec_name[a];a++) 
	  if(codec_name[a]>='a'&&codec_name[a]<='z')
	    codec_name[a]-=32; 
	sprintf(info, "%s(%dHz %dCh)--%d", codec_name,codec->sample_rate, codec->channels, i);
	free(codec_name);
	memcpy(demuxer->audio_info+j*100,info, 100);
	j++;
	free(info);
	info = NULL;
      }
    }

    j = 0;
    demuxer->sub_info = malloc(100*avfc->nb_streams);
    for(i=0; i<avfc->nb_streams; i++){
      if(demuxer->s_streams[i]){
	char *info = malloc(100);
	AVStream *st= avfc->streams[i];
	AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0);
	codec= st->codec;
        if (lang && lang->value)
	  snprintf(info, 100, "%s--%d", lang->value, i);
	else
	  snprintf(info, 100, "unknown--%d", i);
	memcpy(demuxer->sub_info+j*100,info, 100);
	j++;
	free(info);
	info = NULL;
      }
    }

    if(AV_NOPTS_VALUE == priv->avfc->start_time){
        LOGW("priv->avfc->start_time = AV_NOPTS_VALUE");
        demuxer->start_time = 0;
    }else{
        demuxer->start_time = priv->avfc->start_time;
    }

    return demuxer;
}
Code example #9
File: ff_MovieImport.c Project: eirnym/perian
ComponentResult FFAvi_MovieImportDataRef(ff_global_ptr storage, Handle dataRef, OSType dataRefType, Movie theMovie, Track targetTrack,
										 Track *usedTrack, TimeValue atTime, TimeValue *addedDuration, long inFlags, long *outFlags)
{
	ComponentResult result = noErr;
	ByteIOContext *byteContext;
	AVFormatContext *ic = NULL;
	AVFormatParameters params;
	OSType mediaType;
	Media media;
	int count, hadIndex, j;
		
	/* make sure that in case of error, the flag movieImportResultComplete is not set */
	*outFlags = 0;
	
	/* probe the format first */
	UInt8 valid = 0;
	FFAvi_MovieImportValidateDataRef(storage, dataRef, dataRefType, &valid);
	if(valid != 255)
		goto bail;
			
	/* Prepare the iocontext structure */
	result = url_open_dataref(&byteContext, dataRef, dataRefType, &storage->dataHandler, &storage->dataHandlerSupportsWideOffsets, &storage->dataSize);
	storage->isStreamed = dataRefType == URLDataHandlerSubType;
	require_noerr(result, bail);
	
	/* Open the Format Context */
	memset(&params, 0, sizeof(params));
	result = av_open_input_stream(&ic, byteContext, "", storage->format, &params);
	require_noerr(result,bail);
	storage->format_context = ic;
	
	// AVIs without an index currently add a few entries to the index so it can
	// determine codec parameters.  Check for index existence here before it
	// reads any packets.
	hadIndex = 1;
	for (j = 0; j < ic->nb_streams; j++) {
		if (ic->streams[j]->nb_index_entries <= 1)
		{
			hadIndex = 0;
			break;
		}
	}
	
	/* Get the Stream Infos if not already read */
	result = av_find_stream_info(ic);
	
	// -1 means it couldn't understand at least one stream
	// which might just mean we don't have its video decoder enabled
	if(result < 0 && result != -1)
		goto bail;
	
	// we couldn't find any streams, bail with an error.
	if(ic->nb_streams == 0) {
		result = -1; //is there a more appropriate error code?
		goto bail;
	}
	
	//determine a header offset (needed by index-based import).
	result = determine_header_offset(storage);
	if(result < 0)
		goto bail;
	
	/* Initialize the Movie */
	storage->movie = theMovie;
	if(inFlags & movieImportMustUseTrack) {
		storage->map_count = 1;
		prepare_track(storage, targetTrack, dataRef, dataRefType);
	} else {
		storage->map_count = ic->nb_streams;
		result = prepare_movie(storage, theMovie, dataRef, dataRefType);
		if (result != 0)
			goto bail;
	}
	
	/* replace the SampleDescription if user called MovieImportSetSampleDescription() */
	if(storage->imgHdl) {
		for(j = 0; j < storage->map_count; j++) {
			NCStream ncstream = storage->stream_map[j];
			GetMediaHandlerDescription(ncstream.media, &mediaType, NULL, NULL);
			if(mediaType == VideoMediaType && ncstream.sampleHdl) {
				DisposeHandle((Handle)ncstream.sampleHdl);
				ncstream.sampleHdl = (SampleDescriptionHandle)storage->imgHdl;
			}
		}
	}
	if(storage->sndHdl) {
		for(j = 0; j < storage->map_count; j++) {
			NCStream ncstream = storage->stream_map[j];
			GetMediaHandlerDescription(ncstream.media, &mediaType, NULL, NULL);
			if(mediaType == SoundMediaType && ncstream.sampleHdl) {
				DisposeHandle((Handle)ncstream.sampleHdl);
				ncstream.sampleHdl = (SampleDescriptionHandle)storage->sndHdl;
			}
		}
	}
	
	count = 0; media = NULL;
	for(j = 0; j < storage->map_count; j++) {
		media = storage->stream_map[j].media;
		if(media)
			count++;
	}
	
	if(count > 1)
		*outFlags |= movieImportResultUsedMultipleTracks;
	
	/* The usedTrack parameter. Count the number of Tracks and set usedTrack if we operated
		* on a single track. Note that this requires the media to be set by track counting above*/
	if(usedTrack && count == 1 && media)
		*usedTrack = GetMediaTrack(media);
	
	result = noErr;

	*addedDuration = 0;
	
	//attempt to import using indexes.
	result = import_using_index(storage, &hadIndex, addedDuration);
	require_noerr(result, bail);
	
	if(hadIndex) {
		//file had an index and was imported; we are done.
		*outFlags |= movieImportResultComplete;
		
	} else if(inFlags & movieImportWithIdle) {
		if(addedDuration && ic->duration > 0) {
			TimeScale movieTimeScale = GetMovieTimeScale(theMovie);
			*addedDuration = movieTimeScale * ic->duration / AV_TIME_BASE;
			
			//create a placeholder track so that progress displays correctly.
			create_placeholder_track(storage->movie, &storage->placeholderTrack, *addedDuration, dataRef, dataRefType);
			
			//give the data handler a hint as to how fast we need the data.
			//suggest a speed that's faster than the bare minimum.
			//if there's an error, the data handler probably doesn't support
			//this, so we can just ignore.
			DataHPlaybackHints(storage->dataHandler, 0, 0, -1, (storage->dataSize * 1.15) / ((double)ic->duration / AV_TIME_BASE));
		}
			
		//import with idle. Decode a little bit of data now.
		import_with_idle(storage, inFlags, outFlags, 10, 300, true);
	} else {
		//QuickTime didn't request import with idle, so do it all now.
		import_with_idle(storage, inFlags, outFlags, 0, 0, true);			
	}
	
	LoadExternalSubtitlesFromFileDataRef(dataRef, dataRefType, theMovie);

bail:
	if(result == noErr)
		storage->movieLoadState = kMovieLoadStateLoaded;
	else
		storage->movieLoadState = kMovieLoadStateError;
		
	if (result == -1)
		result = invalidMovie; // a bit better error message
	
	return result;
} /* FFAvi_MovieImportDataRef */
Code example #10
static demuxer_t* demux_open_lavf(demuxer_t *demuxer){
    AVFormatContext *avfc;
    AVFormatParameters ap;
    AVOption *opt;
    lavf_priv_t *priv= demuxer->priv;
    int i,g;
    char mp_filename[256]="mp:";

    memset(&ap, 0, sizeof(AVFormatParameters));

    stream_seek(demuxer->stream, 0);

    register_protocol(&mp_protocol);

    avfc = av_alloc_format_context();

    if (correct_pts)
        avfc->flags |= AVFMT_FLAG_GENPTS;
    if (index_mode == 0)
        avfc->flags |= AVFMT_FLAG_IGNIDX;

    ap.prealloced_context = 1;
    if(opt_probesize) {
        double d = (double) opt_probesize;
        opt = av_set_double(avfc, "probesize", opt_probesize);
        if(!opt) mp_msg(MSGT_HEADER,MSGL_ERR, "demux_lavf, couldn't set option probesize to %.3f\r\n", d);
    }

    if(demuxer->stream->url)
        strncpy(mp_filename + 3, demuxer->stream->url, sizeof(mp_filename)-3);
    else
        strncpy(mp_filename + 3, "foobar.dummy", sizeof(mp_filename)-3);
    
    url_fopen(&priv->pb, mp_filename, URL_RDONLY);
    
    ((URLContext*)(priv->pb.opaque))->priv_data= demuxer->stream;
        
    if(av_open_input_stream(&avfc, &priv->pb, mp_filename, priv->avif, &ap)<0){
        mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF_header: av_open_input_stream() failed\n");
        return NULL;
    }

    priv->avfc= avfc;

    if(av_find_stream_info(avfc) < 0){
        mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF_header: av_find_stream_info() failed\n");
        return NULL;
    }

    if(avfc->title    [0]) demux_info_add(demuxer, "name"     , avfc->title    );
    if(avfc->author   [0]) demux_info_add(demuxer, "author"   , avfc->author   );
    if(avfc->copyright[0]) demux_info_add(demuxer, "copyright", avfc->copyright);
    if(avfc->comment  [0]) demux_info_add(demuxer, "comments" , avfc->comment  );
    if(avfc->album    [0]) demux_info_add(demuxer, "album"    , avfc->album    );
//    if(avfc->year        ) demux_info_add(demuxer, "year"     , avfc->year     );
//    if(avfc->track       ) demux_info_add(demuxer, "track"    , avfc->track    );
    if(avfc->genre    [0]) demux_info_add(demuxer, "genre"    , avfc->genre    );

    for(i=0; i<avfc->nb_streams; i++){
        AVStream *st= avfc->streams[i];
        AVCodecContext *codec= st->codec;

        switch(codec->codec_type){
        case CODEC_TYPE_AUDIO:{
            WAVEFORMATEX *wf= calloc(sizeof(WAVEFORMATEX) + codec->extradata_size, 1);
            sh_audio_t* sh_audio;
            if(priv->audio_streams >= MAX_A_STREAMS)
                break;
            sh_audio=new_sh_audio(demuxer, i);
            if(!sh_audio)
                break;
            priv->astreams[priv->audio_streams] = i;
            priv->audio_streams++;
            if(!codec->codec_tag)
                codec->codec_tag= av_codec_get_tag(mp_wav_taglists, codec->codec_id);
            wf->wFormatTag= codec->codec_tag;
            wf->nChannels= codec->channels;
            wf->nSamplesPerSec= codec->sample_rate;
            wf->nAvgBytesPerSec= codec->bit_rate/8;
            wf->nBlockAlign= codec->block_align ? codec->block_align : 1;
            wf->wBitsPerSample= codec->bits_per_sample;
            wf->cbSize= codec->extradata_size;
            if(codec->extradata_size){
                memcpy(
                    wf + 1, 
                    codec->extradata,
                    codec->extradata_size);
            }
            sh_audio->wf= wf;
            sh_audio->audio.dwSampleSize= codec->block_align;
            if(codec->frame_size && codec->sample_rate){
                sh_audio->audio.dwScale=codec->frame_size;
                sh_audio->audio.dwRate= codec->sample_rate;
            }else{
                sh_audio->audio.dwScale= codec->block_align ? codec->block_align*8 : 8;
                sh_audio->audio.dwRate = codec->bit_rate;
            }
            g= ff_gcd(sh_audio->audio.dwScale, sh_audio->audio.dwRate);
            sh_audio->audio.dwScale /= g;
            sh_audio->audio.dwRate  /= g;
//            printf("sca:%d rat:%d fs:%d sr:%d ba:%d\n", sh_audio->audio.dwScale, sh_audio->audio.dwRate, codec->frame_size, codec->sample_rate, codec->block_align);
            sh_audio->ds= demuxer->audio;
            sh_audio->format= codec->codec_tag;
            sh_audio->channels= codec->channels;
            sh_audio->samplerate= codec->sample_rate;
            sh_audio->i_bps= codec->bit_rate/8;
            switch (codec->codec_id) {
              case CODEC_ID_PCM_S8:
              case CODEC_ID_PCM_U8:
                sh_audio->samplesize = 1;
                break;
              case CODEC_ID_PCM_S16LE:
              case CODEC_ID_PCM_S16BE:
              case CODEC_ID_PCM_U16LE:
              case CODEC_ID_PCM_U16BE:
                sh_audio->samplesize = 2;
                break;
              case CODEC_ID_PCM_ALAW:
                sh_audio->format = 0x6;
                break;
              case CODEC_ID_PCM_MULAW:
                sh_audio->format = 0x7;
                break;
            }
            if( mp_msg_test(MSGT_HEADER,MSGL_V) ) print_wave_header(sh_audio->wf, MSGL_V);
	    if((audio_lang && st->language[0] && !strncmp(audio_lang, st->language, 3))
	        || (demuxer->audio->id == i || demuxer->audio->id == -1)
	    ) {
	        demuxer->audio->id = i;
                demuxer->audio->sh= demuxer->a_streams[i];
	    }
            else
                st->discard= AVDISCARD_ALL;
            break;}
        case CODEC_TYPE_VIDEO:{
            sh_video_t* sh_video;
            BITMAPINFOHEADER *bih;
            if(priv->video_streams >= MAX_V_STREAMS)
                break;
            sh_video=new_sh_video(demuxer, i);
            if(!sh_video) break;
            priv->vstreams[priv->video_streams] = i;
            priv->video_streams++;
            bih=calloc(sizeof(BITMAPINFOHEADER) + codec->extradata_size,1);

            if(!codec->codec_tag)
                codec->codec_tag= av_codec_get_tag(mp_bmp_taglists, codec->codec_id);
            bih->biSize= sizeof(BITMAPINFOHEADER) + codec->extradata_size;
            bih->biWidth= codec->width;
            bih->biHeight= codec->height;
            bih->biBitCount= codec->bits_per_sample;
            bih->biSizeImage = bih->biWidth * bih->biHeight * bih->biBitCount/8;
            bih->biCompression= codec->codec_tag;
            sh_video->bih= bih;
            sh_video->disp_w= codec->width;
            sh_video->disp_h= codec->height;
            if (st->time_base.den) { /* if container has time_base, use that */
                sh_video->video.dwRate= st->time_base.den;
                sh_video->video.dwScale= st->time_base.num;
            } else {
            sh_video->video.dwRate= codec->time_base.den;
            sh_video->video.dwScale= codec->time_base.num;
            }
            sh_video->fps=av_q2d(st->r_frame_rate);
            sh_video->frametime=1/av_q2d(st->r_frame_rate);
            sh_video->format = bih->biCompression;
            sh_video->aspect=   codec->width * codec->sample_aspect_ratio.num 
                              / (float)(codec->height * codec->sample_aspect_ratio.den);
            sh_video->i_bps= codec->bit_rate/8;
            mp_msg(MSGT_DEMUX,MSGL_DBG2,"aspect= %d*%d/(%d*%d)\n", 
                codec->width, codec->sample_aspect_ratio.num,
                codec->height, codec->sample_aspect_ratio.den);

            sh_video->ds= demuxer->video;
            if(codec->extradata_size)
                memcpy(sh_video->bih + 1, codec->extradata, codec->extradata_size);
            if( mp_msg_test(MSGT_HEADER,MSGL_V) ) print_video_header(sh_video->bih, MSGL_V);
/*    short 	biPlanes;
    int  	biXPelsPerMeter;
    int  	biYPelsPerMeter;
    int 	biClrUsed;
    int 	biClrImportant;*/
            if(demuxer->video->id != i && demuxer->video->id != -1)
                st->discard= AVDISCARD_ALL;
            else{
                demuxer->video->id = i;
                demuxer->video->sh= demuxer->v_streams[i];
            }
            break;}
        default:
            st->discard= AVDISCARD_ALL;
        }
    }
    
    mp_msg(MSGT_HEADER,MSGL_V,"LAVF: %d audio and %d video streams found\n",priv->audio_streams,priv->video_streams);
    mp_msg(MSGT_HEADER,MSGL_V,"LAVF: build %d\n", LIBAVFORMAT_BUILD);
    if(!priv->audio_streams) demuxer->audio->id=-2;  // nosound
//    else if(best_audio > 0 && demuxer->audio->id == -1) demuxer->audio->id=best_audio;
    if(!priv->video_streams){
        if(!priv->audio_streams){
	    mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF: no audio or video headers found - broken file?\n");
            return NULL; 
        }
        demuxer->video->id=-2; // audio-only
    } //else if (best_video > 0 && demuxer->video->id == -1) demuxer->video->id = best_video;

    return demuxer;
}
Code example #11
static long aacd_ffwma_start( AACDCommonInfo *cinfo, void *ext, unsigned char *buffer, unsigned long buffer_size)
{
    AACD_TRACE( "start() start" );

    AACDFFmpegInfo *ff = (AACDFFmpegInfo*) ext;
    ff->cinfo = cinfo;

    // take control over the input reading:
    cinfo->input_ctrl = 1;

    ByteIOContext *pb = aacd_ff_create_byteioctx( ff );
    if (!pb) return -1;

    AACD_TRACE( "start() opening stream" );
    ff->bytesconsumed = 0;

    int err = av_open_input_stream( &ff->avfctx, pb, "filename.asf", ff->avifmt, NULL );
    AVFormatContext *ic = ff->avfctx;

    if (err)
    {
        char s[80];
        av_strerror( err, s, 80);
        AACD_ERROR("start() cannot open demuxer - [%d] - %s", err, s );

        // we must dealloc what we allocated locally:
        aacd_ff_destroy_byteioctx( pb );

        return -1;
    }

    AACD_TRACE( "start() stream opened" );
    //err = av_find_stream_info(ic)
    AACD_DEBUG( "start() streams=%d", ic->nb_streams);
    dump_format(ic, 0, "", 0);

    ff->audio_stream_index = aacd_ff_find_stream( ic, AVMEDIA_TYPE_AUDIO );

    if (ff->audio_stream_index < 0) 
    {
        AACD_ERROR( "start() cannot find audio stream" );

        return -1;
    }

    AVStream *st = ic->streams[ff->audio_stream_index];
    st->discard = AVDISCARD_DEFAULT;

    AVCodecContext *avctx = st->codec;
    AACD_DEBUG( "start() samplerate=%d channels=%d codec=%x", avctx->sample_rate, avctx->channels, avctx->codec_id);

    AVCodec *codec = aacd_ffwma_find_codec( avctx->codec_id );

    if (!codec)
    {
        AACD_ERROR("start() audio - not a WMA codec - %x", avctx->codec_id);

        return -1;
    }

    if (avcodec_open( avctx, codec ))
    {
        AACD_ERROR("start() audio cannot open audio codec - %x", avctx->codec_id);

        return -1;
    }

    cinfo->samplerate = avctx->sample_rate;
    cinfo->channels = avctx->channels;

    AACD_TRACE( "start() stop - ic->format=%x, ff->avfctx->format=%x", ic->iformat, ff->avfctx->iformat );

    // we return more than we consumed:
    return ff->bytesconsumed;
}
Code example #12
static demuxer_t* demux_open_lavf(demuxer_t *demuxer){
    AVFormatContext *avfc;
    AVFormatParameters ap;
    const AVOption *opt;
    lavf_priv_t *priv= demuxer->priv;
    int i;
    char mp_filename[256]="mp:";

    memset(&ap, 0, sizeof(AVFormatParameters));

    stream_seek(demuxer->stream, 0);

    register_protocol(&mp_protocol);

    avfc = av_alloc_format_context();

    if (opt_cryptokey)
        parse_cryptokey(avfc, opt_cryptokey);
    if (correct_pts)
        avfc->flags |= AVFMT_FLAG_GENPTS;
    if (index_mode == 0)
        avfc->flags |= AVFMT_FLAG_IGNIDX;

    ap.prealloced_context = 1;
    if(opt_probesize) {
        opt = av_set_int(avfc, "probesize", opt_probesize);
        if(!opt) mp_msg(MSGT_HEADER,MSGL_ERR, "demux_lavf, couldn't set option probesize to %u\n", opt_probesize);
    }
    if(opt_analyzeduration) {
        opt = av_set_int(avfc, "analyzeduration", opt_analyzeduration * AV_TIME_BASE);
        if(!opt) mp_msg(MSGT_HEADER,MSGL_ERR, "demux_lavf, couldn't set option analyzeduration to %u\n", opt_analyzeduration);
    }

    if(demuxer->stream->url)
        strncpy(mp_filename + 3, demuxer->stream->url, sizeof(mp_filename)-3);
    else
        strncpy(mp_filename + 3, "foobar.dummy", sizeof(mp_filename)-3);
    
    url_fopen(&priv->pb, mp_filename, URL_RDONLY);
    
    ((URLContext*)(priv->pb->opaque))->priv_data= demuxer->stream;
        
    if(av_open_input_stream(&avfc, priv->pb, mp_filename, priv->avif, &ap)<0){
        mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF_header: av_open_input_stream() failed\n");
        return NULL;
    }

    priv->avfc= avfc;

    if(av_find_stream_info(avfc) < 0){
        mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF_header: av_find_stream_info() failed\n");
        return NULL;
    }

    if(avfc->title    [0]) demux_info_add(demuxer, "name"     , avfc->title    );
    if(avfc->author   [0]) demux_info_add(demuxer, "author"   , avfc->author   );
    if(avfc->copyright[0]) demux_info_add(demuxer, "copyright", avfc->copyright);
    if(avfc->comment  [0]) demux_info_add(demuxer, "comments" , avfc->comment  );
    if(avfc->album    [0]) demux_info_add(demuxer, "album"    , avfc->album    );
//    if(avfc->year        ) demux_info_add(demuxer, "year"     , avfc->year     );
//    if(avfc->track       ) demux_info_add(demuxer, "track"    , avfc->track    );
    if(avfc->genre    [0]) demux_info_add(demuxer, "genre"    , avfc->genre    );

    if(avfc->nb_programs) {
        int p, start=0, found=0;

        if(ts_prog) {
            for(p=0; p<avfc->nb_programs; p++) {
                if(avfc->programs[p]->id == ts_prog) {
                    start = p;
                    found = 1;
                    break;
                }
            }
            if(!found) {
                mp_msg(MSGT_HEADER,MSGL_ERR,"DEMUX_LAVF: program %d doesn't seem to be present\n", ts_prog);
                return NULL;
            }
        }
        p = start;
        do {
            AVProgram *program = avfc->programs[p];
            mp_msg(MSGT_HEADER,MSGL_INFO,"LAVF: Program %d %s\n", program->id, (program->name ? program->name : ""));
            for(i=0; i<program->nb_stream_indexes; i++)
                handle_stream(demuxer, avfc, program->stream_index[i]);
            if(!priv->cur_program && (demuxer->video->sh || demuxer->audio->sh))
                priv->cur_program = program->id;
            p = (p + 1) % avfc->nb_programs;
        } while(p!=start);
    } else
        for(i=0; i<avfc->nb_streams; i++)
            handle_stream(demuxer, avfc, i);
    
    mp_msg(MSGT_HEADER,MSGL_V,"LAVF: %d audio and %d video streams found\n",priv->audio_streams,priv->video_streams);
    mp_msg(MSGT_HEADER,MSGL_V,"LAVF: build %d\n", LIBAVFORMAT_BUILD);
    if(!priv->audio_streams) demuxer->audio->id=-2;  // nosound
//    else if(best_audio > 0 && demuxer->audio->id == -1) demuxer->audio->id=best_audio;
    if(!priv->video_streams){
        if(!priv->audio_streams){
	    mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF: no audio or video headers found - broken file?\n");
            return NULL; 
        }
        demuxer->video->id=-2; // audio-only
    } //else if (best_video > 0 && demuxer->video->id == -1) demuxer->video->id = best_video;

    return demuxer;
}
Code example #13
File: FFmpeg.cpp Project: tuanmasterit/audacity
// Detect type of input file and open it if recognized. Routine
// based on the av_open_input_file() libavformat function.
int ufile_fopen_input(AVFormatContext **ic_ptr, wxString & name)
{
   wxFileName f(name);
   wxCharBuffer fname;
   const char *filename;
   AVProbeData pd;
   AVIOContext *pb = NULL;
   AVInputFormat *fmt = NULL;
   AVInputFormat *fmt1;
   int probe_size;
   int err;

   // Create a dummy file name using the extension from the original
   f.SetName(wxT(UFILE_PROTOCOL));
   fname = f.GetFullName().mb_str();
   filename = (const char *) fname;

   // Initialize probe data...go ahead and preallocate the maximum buffer size.
   pd.filename = filename;
   pd.buf_size = 0;
   pd.buf = (unsigned char *) av_malloc(PROBE_BUF_MAX + AVPROBE_PADDING_SIZE);
   if (pd.buf == NULL) {
      err = AVERROR(ENOMEM);
      goto fail;
   }

   // Open the file to prepare for probing
   if ((err = ufile_fopen(&pb, name, URL_RDONLY)) < 0) {
      goto fail;
   }

   for (probe_size = PROBE_BUF_MIN; probe_size <= PROBE_BUF_MAX && !fmt; probe_size <<= 1) {
      int score_max = probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX / 4 : 0;

      // Read up to a "probe_size" worth of data
      pd.buf_size = avio_read(pb, pd.buf, probe_size);

      // AWD: with zero-length input files buf_size can come back negative;
      // this causes problems so we might as well just fail
      if (pd.buf_size < 0) {
         err = AVERROR_INVALIDDATA;
         goto fail;
      }

      // Clear up to a "AVPROBE_PADDING_SIZE" worth of unused buffer
      memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);

      // Reposition file for succeeding scan
      if (avio_seek(pb, 0, SEEK_SET) < 0) {
         err = AVERROR(EIO);
         goto fail;
      }

      // Scan all input formats
      fmt = NULL;
      for (fmt1 = av_iformat_next(NULL); fmt1 != NULL; fmt1 = av_iformat_next(fmt1)) {
         int score = 0;

         // Ignore the ones that are not file based
         if (fmt1->flags & AVFMT_NOFILE) {
            continue;
         }

         // If the format can probe the file then try that first
         if (fmt1->read_probe) {
            score = fmt1->read_probe(&pd);
         }
         // Otherwise, resort to extension matching if available
         else if (fmt1->extensions) {
            if (av_match_ext(filename, fmt1->extensions)) {
               score = 50;
            }
         }

         // Remember this format if it scored higher than a previous match
         if (score > score_max) {
            score_max = score;
            fmt = fmt1;
         }
         else if (score == score_max) {
            fmt = NULL;
         }
      }
   }

   // Didn't find a suitable format, so bail
   if (!fmt) {
      err = AVERROR(EILSEQ);
      goto fail;
   }

   // And finally, attempt to associate an input stream with the file
   err = av_open_input_stream(ic_ptr, pb, filename, fmt, NULL);
   if (err) {
      goto fail;
   }

   // Done with the probe buffer
   av_freep(&pd.buf);

   return 0;

fail:
   if (pd.buf) {
      av_freep(&pd.buf);
   }

   if (pb) {
      avio_close(pb);
   }

   *ic_ptr = NULL;

   return err;
}
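The probing loop above is essentially a hand-rolled version of what libavformat's av_probe_input_buffer() already does (Code examples #2 and #15 call it directly). A minimal sketch of the condensed variant, keeping the same av_open_input_stream() call; the helper name is illustrative and the AVIOContext is assumed to come from ufile_fopen() as in the routine above:

// Hypothetical condensed variant: delegate probing to av_probe_input_buffer()
// instead of scanning the registered input formats by hand. Error handling is
// reduced to the essentials.
static int ufile_open_input_probed(AVFormatContext **ic_ptr,
                                   AVIOContext *pb, const char *filename)
{
   AVInputFormat *fmt = NULL;
   int err;

   // 0/0 request the default probe offset and maximum probe size
   err = av_probe_input_buffer(pb, &fmt, filename, NULL, 0, 0);
   if (err < 0) {
      *ic_ptr = NULL;
      return err;
   }

   err = av_open_input_stream(ic_ptr, pb, filename, fmt, NULL);
   if (err < 0) {
      *ic_ptr = NULL;
   }
   return err;
}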
Code example #14
/**
* @brief 
*
* @return 
*/
int LocalVideoInput::run()
{
    AVInputFormat *inputFormat = av_find_input_format( "video4linux2" );
    if ( inputFormat == NULL)
        Fatal( "Can't load input format" );

#if 0
    AVProbeData probeData;
    probeData.filename = mSource.c_str();
    probeData.buf = new unsigned char[1024];
    probeData.buf_size = 1024;
    inputFormat = av_probe_input_format( &probeData, 0 );
    if ( inputFormat == NULL)
        Fatal( "Can't probe input format" );

    AVFormatParameters formatParameters ;
    memset( &formatParameters, 0, sizeof(formatParameters) );
    formatParameters.channels = 1;
    formatParameters.channel = 0;
    formatParameters.standard = "PAL";
    formatParameters.pix_fmt = PIX_FMT_RGB24;
    //formatParameters.time_base.num = 1;
    //formatParameters.time_base.den = 10;
    formatParameters.width = 352;
    formatParameters.height = 288;
    //formatParameters.prealloced_context = 1;
#endif

    /* New API */
    AVDictionary *opts = NULL;
    av_dict_set( &opts, "standard", "PAL", 0 );
    av_dict_set( &opts, "video_size", "320x240", 0 );
    av_dict_set( &opts, "channel", "0", 0 );
    av_dict_set( &opts, "pixel_format", "rgb24", 0 );
    //av_dict_set( &opts, "framerate", "10", 0 );
    avDumpDict( opts );

    int avError = 0;
    AVFormatContext *formatContext = NULL;
    //if ( av_open_input_file( &formatContext, mSource.c_str(), inputFormat, 0, &formatParameters ) !=0 )
    if ( (avError = avformat_open_input( &formatContext, mSource.c_str(), inputFormat, &opts )) < 0 )
        Fatal( "Unable to open input %s due to: %s", mSource.c_str(), avStrError(avError) );

    avDumpDict( opts );
#if 0
    if ( av_open_input_stream( &formatContext, 0, mSource.c_str(), inputFormat, &formatParameters ) !=0 )
        Fatal( "Unable to open input %s due to: %s", mSource.c_str(), strerror(errno) );
#endif

    // Locate stream info from input
    if ( (avError = avformat_find_stream_info( formatContext, &opts )) < 0 )
        Fatal( "Unable to find stream info from %s due to: %s", mSource.c_str(), avStrError(avError) );
    
    if ( dbgLevel > DBG_INF )
        av_dump_format( formatContext, 0, mSource.c_str(), 0 );

    // Find first video stream present
    int videoStreamId = -1;
    for ( int i=0; i < formatContext->nb_streams; i++ )
    {
        if ( formatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
        {
            videoStreamId = i;
            //set_context_opts( formatContext->streams[i]->codec, avcodec_opts[CODEC_TYPE_VIDEO], AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
            break;
        }
    }
    if ( videoStreamId == -1 )
        Fatal( "Unable to locate video stream in %s", mSource.c_str() );
    mStream = formatContext->streams[videoStreamId];
    mCodecContext = mStream->codec;

    // Try and get the codec from the codec context
    AVCodec *codec = NULL;
    if ( (codec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
        Fatal( "Can't find codec for video stream from %s", mSource.c_str() );

    // Open the codec
    if ( avcodec_open2( mCodecContext, codec, &opts ) < 0 )
        Fatal( "Unable to open codec for video stream from %s", mSource.c_str() );

    //AVFrame *savedFrame = avcodec_alloc_frame();

    // Allocate space for the native video frame
    AVFrame *frame = avcodec_alloc_frame();

    // Determine required buffer size and allocate buffer
    int pictureSize = avpicture_get_size( mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height );
    
    ByteBuffer frameBuffer( pictureSize );
    
    //avpicture_fill( (AVPicture *)savedFrame, mLastFrame.mBuffer.data(), mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height);

    AVPacket packet;
    while( !mStop )
    {
        int frameComplete = false;
        while ( !frameComplete && (av_read_frame( formatContext, &packet ) >= 0) )
        {
            Debug( 5, "Got packet from stream %d", packet.stream_index );
            if ( packet.stream_index == videoStreamId )
            {
                frameComplete = false;
                if ( avcodec_decode_video2( mCodecContext, frame, &frameComplete, &packet ) < 0 )
                    Fatal( "Unable to decode frame at frame %ju", mFrameCount );

                Debug( 3, "Decoded video packet at frame %ju, pts %jd", mFrameCount, packet.pts );

                if ( frameComplete )
                {
                    Debug( 3, "Got frame %d, pts %jd (%.3f)", mCodecContext->frame_number, frame->pkt_pts, (((double)(packet.pts-mStream->start_time)*mStream->time_base.num)/mStream->time_base.den) );

                    avpicture_layout( (AVPicture *)frame, mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height, frameBuffer.data(), frameBuffer.capacity() );

                    uint64_t timestamp = packet.pts;
                    //Debug( 3, "%d: TS: %lld, TS1: %lld, TS2: %lld, TS3: %.3f", time( 0 ), timestamp, packet.pts, ((1000000LL*packet.pts*mStream->time_base.num)/mStream->time_base.den), (((double)packet.pts*mStream->time_base.num)/mStream->time_base.den) );
                    //Info( "%ld:TS: %lld, TS1: %lld, TS2: %lld, TS3: %.3f", time( 0 ), timestamp, packet.pts, ((1000000LL*packet.pts*mStream->time_base.num)/mStream->time_base.den), (((double)packet.pts*mStream->time_base.num)/mStream->time_base.den) );

                    VideoFrame *videoFrame = new VideoFrame( this, mCodecContext->frame_number, timestamp, frameBuffer );
                    distributeFrame( FramePtr( videoFrame ) );
                }
            }
            av_free_packet( &packet );
        }
        usleep( INTERFRAME_TIMEOUT );
    }
    cleanup();

    av_freep( &frame );
    if ( mCodecContext )
    {
       avcodec_close( mCodecContext );
       mCodecContext = NULL; // Freed by avformat_close_input
    }
    if ( formatContext )
    {
        avformat_close_input( &formatContext );
        formatContext = NULL;
        //av_free( formatContext );
    }
    return( !ended() );
}
Code example #15
File: applehttp.c Project: MarkAufdencamp/ffmpeg4ios
static int applehttp_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    AppleHTTPContext *c = s->priv_data;
    int ret = 0, i, j, stream_offset = 0;

    if ((ret = parse_playlist(c, s->filename, NULL, s->pb)) < 0)
        goto fail;

    if (c->n_variants == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }
    /* If the playlist only contained variants, parse each individual
     * variant playlist. */
    if (c->n_variants > 1 || c->variants[0]->n_segments == 0) {
        for (i = 0; i < c->n_variants; i++) {
            struct variant *v = c->variants[i];
            if ((ret = parse_playlist(c, v->url, v, NULL)) < 0)
                goto fail;
        }
    }

    if (c->variants[0]->n_segments == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }

    /* If this isn't a live stream, calculate the total duration of the
     * stream. */
    if (c->variants[0]->finished) {
        int64_t duration = 0;
        for (i = 0; i < c->variants[0]->n_segments; i++)
            duration += c->variants[0]->segments[i]->duration;
        s->duration = duration * AV_TIME_BASE;
    }

    /* Open the demuxer for each variant */
    for (i = 0; i < c->n_variants; i++) {
        struct variant *v = c->variants[i];
        AVInputFormat *in_fmt = NULL;
        char bitrate_str[20];
        if (v->n_segments == 0)
            continue;

        v->index  = i;
        v->needed = 1;
        v->parent = s;

        /* If this is a live stream with more than 3 segments, start at the
         * third last segment. */
        v->cur_seq_no = v->start_seq_no;
        if (!v->finished && v->n_segments > 3)
            v->cur_seq_no = v->start_seq_no + v->n_segments - 3;

        v->read_buffer = av_malloc(INITIAL_BUFFER_SIZE);
        ffio_init_context(&v->pb, v->read_buffer, INITIAL_BUFFER_SIZE, 0, v,
                          read_data, NULL, NULL);
        v->pb.seekable = 0;
        ret = av_probe_input_buffer(&v->pb, &in_fmt, v->segments[0]->url,
                                    NULL, 0, 0);
        if (ret < 0)
            goto fail;
        ret = av_open_input_stream(&v->ctx, &v->pb, v->segments[0]->url,
                                   in_fmt, NULL);
        if (ret < 0)
            goto fail;
        v->stream_offset = stream_offset;
        snprintf(bitrate_str, sizeof(bitrate_str), "%d", v->bandwidth);
        /* Create new AVStreams for each stream in this variant */
        for (j = 0; j < v->ctx->nb_streams; j++) {
            AVStream *st = av_new_stream(s, i);
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            avcodec_copy_context(st->codec, v->ctx->streams[j]->codec);
            av_metadata_set2(&st->metadata, "variant_bitrate", bitrate_str, 0);
        }
        stream_offset += v->ctx->nb_streams;
    }

    c->first_packet = 1;

    return 0;
fail:
    free_variant_list(c);
    return ret;
}
Code example #16
ULONG ASFDemuxer::Init()
{
    unsigned ret = RET_OK;

    av_register_all();

    m_urlProtocol.url_open  = OpenASF;
    m_urlProtocol.url_read  = ReadASF;
    m_urlProtocol.url_close = CloseASF;

    m_urlCtx.flags = URL_RDONLY;
    m_urlCtx.is_streamed = 1;
    m_urlCtx.prot = &m_urlProtocol;
    m_urlCtx.max_packet_size = 0;
    m_urlCtx.priv_data = this;

    memset(&m_byteCtx, 0, sizeof(m_byteCtx));

    url_fdopen(&m_byteCtx, &m_urlCtx);

    BYTE *pBufTmp = new BYTE[m_byteCtx.buffer_size];

    if (pBufTmp)
    {
        get_buffer(&m_byteCtx, pBufTmp, 2048);
        delete [] pBufTmp;

        m_byteCtx.buf_ptr = m_byteCtx.buffer;

        AVFormatParameters fmtParam;
        memset(&fmtParam, 0, sizeof(AVFormatParameters));

        fmtParam.initial_pause = 1; /* we force a pause when starting an RTSP stream */
        fmtParam.width  = 0;
        fmtParam.height = 0;
        fmtParam.time_base.num = 1;
        fmtParam.time_base.den = 25;
        fmtParam.pix_fmt = PIX_FMT_NONE;

        AVInputFormat* pInputFmt = av_find_input_format("asf");

        if (pInputFmt)
        {
            if (av_open_input_stream(&m_pFmtCtx, &m_byteCtx, "", pInputFmt, &fmtParam) == 0)
            {
                m_bInit = true;
            }
            else
            {
                ret = RET_ERROR;
            }
        }
        else
        { 
            ret = RET_ERROR;
        }
    }
    else
    {
        ret = RET_LOW_MEMORY;
    }

    return ret;
}
Code example #17
File: ffmpeg_demux.c Project: claudiordgz/gpac
static GF_Err FFD_ConnectService(GF_InputService *plug, GF_ClientService *serv, const char *url)
{
	GF_Err e;
	s64 last_aud_pts;
	u32 i;
	s32 res;
	Bool is_local;
	const char *sOpt;
	char *ext, szName[1024];
	FFDemux *ffd = plug->priv;
	AVInputFormat *av_in = NULL;
	char szExt[20];

	if (ffd->ctx) return GF_SERVICE_ERROR;

	assert( url && strlen(url) < 1024);
	strcpy(szName, url);
	ext = strrchr(szName, '#');
	ffd->service_type = 0;
	e = GF_NOT_SUPPORTED;
	ffd->service = serv;

	if (ext) {
		if (!stricmp(&ext[1], "video")) ffd->service_type = 1;
		else if (!stricmp(&ext[1], "audio")) ffd->service_type = 2;
		ext[0] = 0;
	}

	/*some extensions not supported by ffmpeg, overload input format*/
	ext = strrchr(szName, '.');
	strcpy(szExt, ext ? ext+1 : "");
	strlwr(szExt);
	if (!strcmp(szExt, "cmp")) av_in = av_find_input_format("m4v");

	is_local = (strnicmp(url, "file://", 7) && strstr(url, "://")) ? 0 : 1;

	GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[FFMPEG] opening file %s - local %d - av_in %08x\n", url, is_local, av_in));

	if (!is_local) {
		AVProbeData   pd;

		/*setup wrapper for FFMPEG I/O*/
		ffd->buffer_size = 8192;
		sOpt = gf_modules_get_option((GF_BaseInterface *)plug, "FFMPEG", "IOBufferSize");
		if (sOpt) ffd->buffer_size = atoi(sOpt);
		ffd->buffer = gf_malloc(sizeof(char)*ffd->buffer_size);
#ifdef FFMPEG_DUMP_REMOTE
		ffd->outdbg = gf_f64_open("ffdeb.raw", "wb");
#endif
#ifdef USE_PRE_0_7
		init_put_byte(&ffd->io, ffd->buffer, ffd->buffer_size, 0, ffd, ff_url_read, NULL, NULL);
		ffd->io.is_streamed = 1;
#else
		ffd->io.seekable = 1;
#endif

		ffd->dnload = gf_service_download_new(ffd->service, url, GF_NETIO_SESSION_NOT_THREADED  | GF_NETIO_SESSION_NOT_CACHED, NULL, ffd);
		if (!ffd->dnload) return GF_URL_ERROR;
		while (1) {
			u32 read;
			e = gf_dm_sess_fetch_data(ffd->dnload, ffd->buffer + ffd->buffer_used, ffd->buffer_size - ffd->buffer_used, &read);
			if (e==GF_EOS) break;
			/*we're sync!!*/
			if (e==GF_IP_NETWORK_EMPTY) continue;
			if (e) goto err_exit;
			ffd->buffer_used += read;
			if (ffd->buffer_used == ffd->buffer_size) break;
		}
		if (e==GF_EOS) {
			const char *cache_file = gf_dm_sess_get_cache_name(ffd->dnload);
			res = open_file(&ffd->ctx, cache_file, av_in);
		} else {
			pd.filename = szName;
			pd.buf_size = ffd->buffer_used;
			pd.buf = (u8 *) ffd->buffer;
			av_in = av_probe_input_format(&pd, 1);
			if (!av_in) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] error probing file %s - probe start with %c %c %c %c\n", url, ffd->buffer[0], ffd->buffer[1], ffd->buffer[2], ffd->buffer[3]));
				return GF_NOT_SUPPORTED;
			}
			/*setup downloader*/
			av_in->flags |= AVFMT_NOFILE;
#ifdef USE_AVFORMAT_OPEN_INPUT /*commit ffmpeg 603b8bc2a109978c8499b06d2556f1433306eca7*/
			res = avformat_open_input(&ffd->ctx, szName, av_in, NULL);
#else
			res = av_open_input_stream(&ffd->ctx, &ffd->io, szName, av_in, NULL);
#endif
		}
	} else {
		res = open_file(&ffd->ctx, szName, av_in);
	}

	switch (res) {
#ifndef _WIN32_WCE
	case 0:
		e = GF_OK;
		break;
	case AVERROR_IO:
		e = GF_URL_ERROR;
		goto err_exit;
	case AVERROR_INVALIDDATA:
		e = GF_NON_COMPLIANT_BITSTREAM;
		goto err_exit;
	case AVERROR_NOMEM:
		e = GF_OUT_OF_MEM;
		goto err_exit;
	case AVERROR_NOFMT:
		e = GF_NOT_SUPPORTED;
		goto err_exit;
#endif
	default:
		e = GF_SERVICE_ERROR;
		goto err_exit;
	}

	GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[FFMPEG] looking for streams in %s - %d streams - type %s\n", ffd->ctx->filename, ffd->ctx->nb_streams, ffd->ctx->iformat->name));

	res = av_find_stream_info(ffd->ctx);
	if (res <0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] cannot locate streams - error %d\n", res));
		e = GF_NOT_SUPPORTED;
		goto err_exit;
	}
	GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[FFMPEG] file %s opened - %d streams\n", url, ffd->ctx->nb_streams));

	/*figure out if we can use codecs or not*/
	ffd->audio_st = ffd->video_st = -1;
	for (i = 0; i < ffd->ctx->nb_streams; i++) {
		AVCodecContext *enc = ffd->ctx->streams[i]->codec;
		switch(enc->codec_type) {
		case AVMEDIA_TYPE_AUDIO:
			if ((ffd->audio_st<0) && (ffd->service_type!=1)) {
				ffd->audio_st = i;
				ffd->audio_tscale = ffd->ctx->streams[i]->time_base;
			}
			break;
		case AVMEDIA_TYPE_VIDEO:
			if ((ffd->video_st<0) && (ffd->service_type!=2)) {
				ffd->video_st = i;
				ffd->video_tscale = ffd->ctx->streams[i]->time_base;
			}
			break;
		default:
			break;
		}
	}
	if ((ffd->service_type==1) && (ffd->video_st<0)) goto err_exit;
	if ((ffd->service_type==2) && (ffd->audio_st<0)) goto err_exit;
	if ((ffd->video_st<0) && (ffd->audio_st<0)) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] No supported streams in file\n"));
		goto err_exit;
	}


	sOpt = gf_modules_get_option((GF_BaseInterface *)plug, "FFMPEG", "DataBufferMS");
	ffd->data_buffer_ms = 0;
	if (sOpt) ffd->data_buffer_ms = atoi(sOpt);
	if (!ffd->data_buffer_ms) ffd->data_buffer_ms = FFD_DATA_BUFFER;

	/*build seek*/
	if (is_local) {
		/*check we do have increasing pts. If not we can't rely on pts, we must skip SL
		we assume video pts is always present*/
		if (ffd->audio_st>=0) {
			last_aud_pts = 0;
			for (i=0; i<20; i++) {
				AVPacket pkt;
				pkt.stream_index = -1;
				if (av_read_frame(ffd->ctx, &pkt) <0) break;
				if (pkt.pts == AV_NOPTS_VALUE) pkt.pts = pkt.dts;
				if (pkt.stream_index==ffd->audio_st) last_aud_pts = pkt.pts;
			}
			if (last_aud_pts*ffd->audio_tscale.den<10*ffd->audio_tscale.num) ffd->unreliable_audio_timing = 1;
		}

		ffd->seekable = (av_seek_frame(ffd->ctx, -1, 0, AVSEEK_FLAG_BACKWARD)<0) ? 0 : 1;
		if (!ffd->seekable) {
#ifndef FF_API_CLOSE_INPUT_FILE
			av_close_input_file(ffd->ctx);
#else
			avformat_close_input(&ffd->ctx);
#endif
			ffd->ctx = NULL;
			open_file(&ffd->ctx, szName, av_in);
			av_find_stream_info(ffd->ctx);
		}
	}

	/*let's go*/
	gf_service_connect_ack(serv, NULL, GF_OK);
	/*if (!ffd->service_type)*/ FFD_SetupObjects(ffd);
	ffd->service_type = 0;
	return GF_OK;

err_exit:
	GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[FFMPEG] Error opening file %s: %s\n", url, gf_error_to_string(e)));
#ifndef FF_API_CLOSE_INPUT_FILE
	if (ffd->ctx) av_close_input_file(ffd->ctx);
#else
	if (ffd->ctx) avformat_close_input(&ffd->ctx);
#endif
	ffd->ctx = NULL;
	gf_service_connect_ack(serv, NULL, e);
	return GF_OK;
}
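A minimal sketch of what the USE_AVFORMAT_OPEN_INPUT branch above points at: under the newer API, avio_alloc_context() takes over from av_alloc_put_byte()/init_put_byte and avformat_open_input() replaces av_open_input_stream(). The callback name, the 4 KiB buffer size and the error handling below are placeholders, not part of the module above.

#include <libavformat/avformat.h>

static AVFormatContext *open_with_custom_io(void *opaque,
        int (*read_cb)(void *opaque, uint8_t *buf, int buf_size))
{
    unsigned char *io_buf = av_malloc(4096);
    AVIOContext *pb = io_buf ? avio_alloc_context(io_buf, 4096, 0 /*read-only*/,
                                                  opaque, read_cb, NULL, NULL) : NULL;
    AVFormatContext *ctx = avformat_alloc_context();

    if (!pb || !ctx)
        return NULL;            /* a real module would free whatever was allocated */

    ctx->pb = pb;               /* hand our I/O context to the demuxer */
    if (avformat_open_input(&ctx, "", NULL, NULL) < 0)
        return NULL;            /* avformat_open_input() frees ctx on failure */
    return ctx;
}

Setting ctx->pb before avformat_open_input() keeps libavformat from trying to open the URL itself, which is the same role the AVFMT_NOFILE flag plays in the old-API branch of the example.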
Code example #18
static demuxer_t* demux_open_lavf(demuxer_t *demuxer){
    AVFormatContext *avfc;
    AVFormatParameters ap;
    const AVOption *opt;
    AVMetadataTag *t = NULL;
    lavf_priv_t *priv= demuxer->priv;
    int i;
    char mp_filename[256]="mp:";

    memset(&ap, 0, sizeof(AVFormatParameters));

    stream_seek(demuxer->stream, 0);

    avfc = avformat_alloc_context();

    if (opt_cryptokey)
        parse_cryptokey(avfc, opt_cryptokey);
    if (user_correct_pts != 0)
        avfc->flags |= AVFMT_FLAG_GENPTS;
    if (index_mode == 0)
        avfc->flags |= AVFMT_FLAG_IGNIDX;

    ap.prealloced_context = 1;
    if(opt_probesize) {
        opt = av_set_int(avfc, "probesize", opt_probesize);
        if(!opt) mp_msg(MSGT_HEADER,MSGL_ERR, "demux_lavf, couldn't set option probesize to %u\n", opt_probesize);
    }
    if(opt_analyzeduration) {
        opt = av_set_int(avfc, "analyzeduration", opt_analyzeduration * AV_TIME_BASE);
        if(!opt) mp_msg(MSGT_HEADER,MSGL_ERR, "demux_lavf, couldn't set option analyzeduration to %u\n", opt_analyzeduration);
    }

    if(opt_avopt){
        if(parse_avopts(avfc, opt_avopt) < 0){
            mp_msg(MSGT_HEADER,MSGL_ERR, "Your options /%s/ look like gibberish to me pal\n", opt_avopt);
            return NULL;
        }
    }

    if(demuxer->stream->url) {
        if (!strncmp(demuxer->stream->url, "ffmpeg://rtsp:", 14))
            av_strlcpy(mp_filename, demuxer->stream->url + 9, sizeof(mp_filename));
        else
            av_strlcat(mp_filename, demuxer->stream->url, sizeof(mp_filename));
    } else
        av_strlcat(mp_filename, "foobar.dummy", sizeof(mp_filename));

    if (!(priv->avif->flags & AVFMT_NOFILE)) {
        priv->pb = av_alloc_put_byte(priv->buffer, BIO_BUFFER_SIZE, 0,
                                     demuxer, mp_read, NULL, mp_seek);
        priv->pb->read_seek = mp_read_seek;
        priv->pb->is_streamed = !demuxer->stream->end_pos || (demuxer->stream->flags & MP_STREAM_SEEK) != MP_STREAM_SEEK;
    }

    if(av_open_input_stream(&avfc, priv->pb, mp_filename, priv->avif, &ap)<0){
        mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF_header: av_open_input_stream() failed\n");
        return NULL;
    }

    priv->avfc= avfc;

    if(av_find_stream_info(avfc) < 0){
        mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF_header: av_find_stream_info() failed\n");
        return NULL;
    }

    /* Add metadata. */
    while((t = av_metadata_get(avfc->metadata, "", t, AV_METADATA_IGNORE_SUFFIX)))
        demux_info_add(demuxer, t->key, t->value);

    for(i=0; i < avfc->nb_chapters; i++) {
        AVChapter *c = avfc->chapters[i];
        uint64_t start = av_rescale_q(c->start, c->time_base, (AVRational){1,1000});
        uint64_t end   = av_rescale_q(c->end, c->time_base, (AVRational){1,1000});
        t = av_metadata_get(c->metadata, "title", NULL, 0);
        demuxer_add_chapter(demuxer, t ? t->value : NULL, start, end);
    }

    for(i=0; i<avfc->nb_streams; i++)
        handle_stream(demuxer, avfc, i);
    priv->nb_streams_last = avfc->nb_streams;

    if(avfc->nb_programs) {
        int p;
        for (p = 0; p < avfc->nb_programs; p++) {
            AVProgram *program = avfc->programs[p];
            t = av_metadata_get(program->metadata, "title", NULL, 0);
            mp_msg(MSGT_HEADER,MSGL_INFO,"LAVF: Program %d %s\n", program->id, t ? t->value : "");
            mp_msg(MSGT_IDENTIFY, MSGL_V, "PROGRAM_ID=%d\n", program->id);
        }
    }

    mp_msg(MSGT_HEADER,MSGL_V,"LAVF: %d audio and %d video streams found\n",priv->audio_streams,priv->video_streams);
    mp_msg(MSGT_HEADER,MSGL_V,"LAVF: build %d\n", LIBAVFORMAT_BUILD);
    if(!priv->audio_streams) demuxer->audio->id=-2;  // nosound
//    else if(best_audio > 0 && demuxer->audio->id == -1) demuxer->audio->id=best_audio;
    if(!priv->video_streams){
        if(!priv->audio_streams){
	    mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF: no audio or video headers found - broken file?\n");
            return NULL;
        }
        demuxer->video->id=-2; // audio-only
    } //else if (best_video > 0 && demuxer->video->id == -1) demuxer->video->id = best_video;

    return demuxer;
}
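For reference, a sketch of the callback shapes that av_alloc_put_byte() and av_open_input_stream() expect in this pre-avio_alloc_context API. The mp_read/mp_seek passed above wrap the demuxer's own stream layer rather than a FILE*, so the stdio-backed pair below is purely illustrative.

#include <stdio.h>
#include <stdint.h>
#include <libavformat/avio.h>   /* AVSEEK_SIZE */

static int file_read(void *opaque, uint8_t *buf, int buf_size)
{
    FILE *f = opaque;
    return (int)fread(buf, 1, buf_size, f);   /* returning 0 signals EOF in this API era */
}

static int64_t file_seek(void *opaque, int64_t offset, int whence)
{
    FILE *f = opaque;
    if (whence == AVSEEK_SIZE)                /* libavformat probing the stream size */
        return -1;                            /* unknown: let it fall back to seeking */
    if (fseeko(f, offset, whence) < 0)
        return -1;
    return ftello(f);
}

Passing NULL for the write callback, as the example above does, is fine for a demux-only context.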
Code example #19
File: ffmpeg_decoder.cpp  Project: sandsmark/akode
bool FFMPEGDecoder::openFile() {
    d->src->openRO();
    d->src->fadvise();

    // The following duplicates what av_open_input_file would normally do

    // url_fdopen
    init_put_byte(&d->stream, d->file_buffer, FILE_BUFFER_SIZE, 0, d->src, akode_read, akode_write, akode_seek);
    d->stream.is_streamed = !d->src->seekable();
    d->stream.max_packet_size = FILE_BUFFER_SIZE;

    {
        // 2048 is PROBE_BUF_SIZE from libavformat/utils.c
        AVProbeData pd;
        uint8_t buf[2048];
        pd.filename = d->src->filename;
        pd.buf = buf;
        pd.buf_size = 0;
        pd.buf_size = get_buffer(&d->stream, buf, 2048);
        d->fmt = av_probe_input_format(&pd, 1);
        // Seek back to 0
        // copied from url_fseek
        long offset1 = 0 - (d->stream.pos - (d->stream.buf_end - d->stream.buffer));
        if (offset1 >= 0 && offset1 <= (d->stream.buf_end - d->stream.buffer)) {
            /* can do the seek inside the buffer */
            d->stream.buf_ptr = d->stream.buffer + offset1;
        } else {
            if (!d->src->seek(0)) {
                d->src->close();
                return false;
            } else {
                d->stream.pos = 0;
                d->stream.buf_ptr = d->file_buffer;
                d->stream.buf_end = d->file_buffer;
            }
        }
    }
    if (!d->fmt) {
        std::cerr << "akode: FFMPEG: Format not found\n";
        closeFile();
        return false;
    }

    if (av_open_input_stream(&d->ic, &d->stream, d->src->filename, d->fmt, 0) != 0)
    {
        closeFile();
        return false;
    }

    av_find_stream_info( d->ic );

    // Find the first a/v streams
    d->audioStream = -1;
    d->videoStream = -1;
    for (int i = 0; i < d->ic->nb_streams; i++) {
        if (d->ic->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
            d->audioStream = i;
        else
        if (d->ic->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            d->videoStream = i;
    }
    if (d->audioStream == -1)
    {
        std::cerr << "akode: FFMPEG: Audio stream not found\n";
        // for now require an audio stream
        closeFile();
        return false;
    }

    // Set config
    if (!setAudioConfiguration(&d->config, d->ic->streams[d->audioStream]->codec))
    {
        closeFile();
        return false;
    }

    d->codec = avcodec_find_decoder(d->ic->streams[d->audioStream]->codec->codec_id);
    if (!d->codec) {
        std::cerr << "akode: FFMPEG: Codec not found\n";
        closeFile();
        return false;
    }
    avcodec_open( d->ic->streams[d->audioStream]->codec, d->codec );

    double ffpos = (double)d->ic->streams[d->audioStream]->start_time / (double)AV_TIME_BASE;
    d->position = (long)(ffpos * d->config.sample_rate);

    return true;
}
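The comment at the top of openFile() notes that the init_put_byte/probe/open sequence duplicates av_open_input_file(). As a point of comparison, when a plain file path is available and no custom Source is involved, it collapses to a single call in this API generation (the file name below is a placeholder):

AVFormatContext *ic = NULL;
if (av_open_input_file(&ic, "track.ogg", NULL, 0, NULL) == 0) {
    av_find_stream_info(ic);   /* then pick the audio stream exactly as above */
    /* ... decode ... */
    av_close_input_file(ic);
}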
Code example #20
File: demuxer.c  Project: groovybits/ngtc
int setupDemuxerFormat(DemuxerSettings *ds, char *file, struct hqv_data *hqv) {
	AVProbeData probe_data, *pd = &probe_data;

	ds->pFormatCtx = avformat_alloc_context();
	ds->pFormatCtx->flags |= AVFMT_FLAG_NONBLOCK;
	ds->pFormatCtx->flags |= AVFMT_FLAG_GENPTS;
#ifdef AVFMT_FLAG_IGNDTS
	ds->pFormatCtx->flags |= AVFMT_FLAG_IGNDTS;
#endif
	if (ds->streaming)
		ds->pFormatCtx->flags |= AVFMT_NOFILE|AVFMT_FLAG_IGNIDX;

        // Setup input format for raw A/V
        if (ds->input_format != NULL) {
                ds->fmt = av_iformat_next(NULL);
                if (ds->fmt != NULL && ds->fmt->name != NULL && strcmp(ds->fmt->name, ds->input_format) != 0) {
                        do {
                                ds->fmt = av_iformat_next(ds->fmt);
                                if (ds->fmt == NULL || ds->fmt->name == NULL)
                                        break;
                        } while (strcmp(ds->fmt->name, ds->input_format) != 0);
                }
                if (ds->fmt)
                        av_log(NULL, AV_LOG_INFO, "Input format: %s\n", ds->fmt->long_name);
                else
                        av_log(NULL, AV_LOG_WARNING, "Failed finding input format: %s\n", ds->input_format);
        }

	if (ds->streaming)
		ds->stream_buffer = (uint8_t*)av_malloc(STREAM_BUFFER_SIZE); 

        // Try to figure out input format
        if (ds->streaming && ds->fmt == NULL) {

                pd->filename = "";
                if (file)
                        pd->filename = file;
                pd->buf = ds->stream_buffer;
                pd->buf_size = STREAM_BUFFER_SIZE;

                // Wait till have enough input data
                while(av_fifo_size(hqv->fifo) < STREAM_BUFFER_SIZE && continueDecoding)
                        usleep(33000);
                if (!continueDecoding)
                        exit(0);
                // Copy some fifo data 
                memcpy(ds->stream_buffer, hqv->fifo->buffer, STREAM_BUFFER_SIZE);

                // Probe input format
                ds->fmt = av_probe_input_format(pd, 1);

                if (!ds->fmt) {
                        av_log(NULL, AV_LOG_FATAL, "Failed probing input file: %s\n", file);
                        exit(1);
                } else
                        av_log(NULL, AV_LOG_INFO, "Input format: %s\n", ds->fmt->long_name);
        }

        // Open Input File
        if (ds->streaming) {
                // Streaming input
                ds->pb = av_alloc_put_byte(ds->stream_buffer, STREAM_BUFFER_SIZE, 0,
                        hqv, fifo_read, NULL, NULL);
                ds->pb->is_streamed = 1;

                // Open video device stream
                if (av_open_input_stream(&ds->pFormatCtx, ds->pb, file, ds->fmt, ds->ap) != 0) {
                        av_log(NULL, AV_LOG_FATAL, "%s: could not open device stream\n", file);
                        return -1;        // Couldn't open file
                }
        } else {
                // Open file
                if (av_open_input_file(&ds->pFormatCtx, file, ds->fmt, 0, ds->ap) != 0) {
                        av_log(NULL, AV_LOG_FATAL, "%s: could not open file\n", file);
                        return -1;        // Couldn't open file
                }
        }

        // Retrieve stream information
        if (av_find_stream_info(ds->pFormatCtx) < 0) {
                av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", file);
                return -1;
        }
	return 0;
}
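The streaming branch hands av_alloc_put_byte() a fifo_read callback that is not shown here. A sketch of what a blocking FIFO-backed reader could look like follows; this is an assumption about its shape, not the project's actual code, and the argument order of av_fifo_generic_read() shown matches later libavutil releases (some older ones ordered the arguments differently).

#include <stdint.h>
#include <unistd.h>
#include <libavutil/fifo.h>

static int fifo_read_cb(void *opaque, uint8_t *buf, int buf_size)
{
    struct hqv_data *hqv = opaque;             /* same capture context used above */

    while (av_fifo_size(hqv->fifo) < buf_size) /* wait for the capture thread to queue data */
        usleep(1000);                          /* real code would also honour a stop flag */

    av_fifo_generic_read(hqv->fifo, buf, buf_size, NULL);
    return buf_size;
}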