Example #1
int	 av_vid_init(char *file)
{
	int i;

	if (av_open_input_file(&pFormatCtx, file, NULL, 0, NULL)!=0)
		return -1;

	if (av_find_stream_info(pFormatCtx)<0)
		return -1;

	dump_format(pFormatCtx, 0, file, 0);
	videoStream=-1;
	for (i=0; i<pFormatCtx->nb_streams; i++) {
		if (pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
			videoStream=i;
			break;
		}
	}
	if (videoStream==-1)
		return -1;

	pCodecCtx=pFormatCtx->streams[videoStream]->codec;

	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec==NULL)
		return -1;

	if (avcodec_open(pCodecCtx, pCodec)<0)
		return -1;

	pFrame=avcodec_alloc_frame();

	return 0;
}
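Most of the examples on this page use the old libavformat/libavcodec entry points (av_open_input_file(), av_find_stream_info(), dump_format(), avcodec_open(), avcodec_alloc_frame()), which were later deprecated in favour of avformat_open_input(), avformat_find_stream_info(), av_dump_format(), avcodec_open2() and av_frame_alloc(). As a rough sketch only (the function name vid_init_modern and the globals mirror the example above and are not part of any project quoted here), the same initialization on the newer API looks roughly like this:

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

/* Sketch only: the flow of av_vid_init() above on the post-deprecation API.
 * The globals are assumed to exist just as in the original example. */
static AVFormatContext *fmt_ctx;
static AVCodecContext  *dec_ctx;
static AVFrame         *frame;
static int              video_stream;

int vid_init_modern(const char *file)
{
	const AVCodec *dec;
	int i;

	if (avformat_open_input(&fmt_ctx, file, NULL, NULL) != 0)	/* was av_open_input_file() */
		return -1;
	if (avformat_find_stream_info(fmt_ctx, NULL) < 0)		/* was av_find_stream_info() */
		return -1;

	av_dump_format(fmt_ctx, 0, file, 0);				/* was dump_format() */

	video_stream = -1;
	for (i = 0; i < (int)fmt_ctx->nb_streams; i++) {
		if (fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
			video_stream = i;
			break;
		}
	}
	if (video_stream == -1)
		return -1;

	/* the codec context is no longer embedded in the AVStream */
	dec = avcodec_find_decoder(fmt_ctx->streams[video_stream]->codecpar->codec_id);
	if (!dec)
		return -1;
	dec_ctx = avcodec_alloc_context3(dec);
	if (!dec_ctx ||
	    avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[video_stream]->codecpar) < 0 ||
	    avcodec_open2(dec_ctx, dec, NULL) < 0)			/* was avcodec_open() */
		return -1;

	frame = av_frame_alloc();					/* was avcodec_alloc_frame() */
	return frame ? 0 : -1;
}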
Example #2
bool K3bFFMpegFile::open()
{
  close();

  // open the file
  int err = av_open_input_file( &d->formatContext, m_filename.local8Bit(), 0, 0, 0 );
  if( err < 0 ) {
    kdDebug() << "(K3bFFMpegFile) unable to open " << m_filename << " with error " << err << endl;
    return false;
  }

  // analyze the streams
  av_find_stream_info( d->formatContext );

  // we only handle files containing one audio stream
  if( d->formatContext->nb_streams != 1 ) {
    kdDebug() << "(K3bFFMpegFile) more than one stream in " << m_filename << endl;
    return false;
  }

  // urgh... ugly
#ifdef FFMPEG_BUILD_PRE_4629
  AVCodecContext* codecContext =  &d->formatContext->streams[0]->codec;
#else
  AVCodecContext* codecContext =  d->formatContext->streams[0]->codec;
#endif
  if( codecContext->codec_type != CODEC_TYPE_AUDIO ) {
    kdDebug() << "(K3bFFMpegFile) not a simple audio stream: " << m_filename << endl;
    return false;
  }

  // get the codec
  d->codec = avcodec_find_decoder(codecContext->codec_id);
  if( !d->codec ) {
    kdDebug() << "(K3bFFMpegFile) no codec found for " << m_filename << endl;
    return false;
  }

  // open the codec on our context
  kdDebug() << "(K3bFFMpegFile) found codec for " << m_filename << endl;
  if( avcodec_open( codecContext, d->codec ) < 0 ) {
    kdDebug() << "(K3bFFMpegDecoderFactory) could not open codec." << endl;
    return false;
  }

  // determine the length of the stream
  d->length = K3b::Msf::fromSeconds( (double)d->formatContext->duration / (double)AV_TIME_BASE );

  if( d->length == 0 ) {
    kdDebug() << "(K3bFFMpegDecoderFactory) invalid length." << endl;
    return false;
  }

  // dump some debugging info
  dump_format( d->formatContext, 0, m_filename.local8Bit(), 0 );

  return true;
}
Example #3
int Encoder::dumpStreamInformation(void) {
  if (! avFormatContext || ! videoFileName) {
    systemLog->sysLog(ERROR, "avcodec context format or videoFileName not initialized. call openVideoFile first !");
    return -1;
  }
  dump_format(avFormatContext, 0, videoFileName, false);

  return 0;
}
Example #4
//return value
//true : success
//false : fail
bool VideoIO::openInputCodec(void)
{
	//open video file
	///TODO: 20 bytes lost
	if(av_open_input_file(&pFormatCtx, inputFilename, NULL, 0, NULL) != 0)
	{
		fprintf(stderr, "couldn't open file : %s\n", inputFilename);
		return false;	//couldn't open file
	}

	//retrieve stream information
	if(av_find_stream_info(pFormatCtx) < 0)
	{
		fprintf(stderr, "couldn't find stream information\n");
		return false;
	}

	//dump information about file onto standard error
	dump_format(pFormatCtx, 0, inputFilename, 0);

	//find the first video stream
	videoStream = -1;
	for(unsigned int i = 0 ; i < pFormatCtx->nb_streams ; i++)
	{
		if(pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO)
		{
			videoStream = i;
			break;
		}
	}
	if(videoStream == -1)
	{
		fprintf(stderr, "didn't find a video stream");
		return false;
	}

	//get a pointer to the reading codec context for the video stream
	pInputCodecCtx = pFormatCtx->streams[videoStream]->codec;

	//find the decoder for the video stream (reading video)
	pInputCodec = avcodec_find_decoder(pInputCodecCtx->codec_id);
	if(pInputCodec == NULL)
	{
		fprintf(stderr, "Unsupported coded!\n");
		return false;	//codec not found
	}

	//open codec
	if(avcodec_open(pInputCodecCtx, pInputCodec) < 0)
	{
		fprintf(stderr, "could not open codec\n");
		return false;
	}

	//success
	return true;
}
Example #5
void
FFmpegMeta::print(bool isOutFormat)
{
    LOG(ffmpeg, trace, "ffmpeg::dump_format() ...");
    if (isOutFormat) {
        dump_format(_pFormatContext, 0, _pFormatContext->filename, 1);
    }
    else {
        dump_format(_pFormatContext, 0, _pFormatContext->filename, 0);

        AVMetadataTag* tag = 0;
        LOG(ffmpeg, trace, "ffmpeg::av_metadata_get() ...");
        while ((tag = av_metadata_get(_pFormatContext->metadata, "", tag, AV_METADATA_IGNORE_SUFFIX))) {
            std::clog << tag->key << ", " << tag->value << std::endl;
//             addTag(tag->key, tag->value);
        }
    }
}
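On later FFmpeg releases the metadata API used above was renamed: AVMetadataTag became AVDictionaryEntry and av_metadata_get() became av_dict_get(). A minimal sketch of the same loop against the renamed API (the helper name print_metadata is illustrative):

#include <stdio.h>
#include <libavformat/avformat.h>

/* Sketch: same metadata walk as FFmpegMeta::print() above, using the
 * renamed dictionary API (av_dict_get / AVDictionaryEntry). */
static void print_metadata(const AVFormatContext *fmt_ctx)
{
    const AVDictionaryEntry *tag = NULL;
    while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
        printf("%s, %s\n", tag->key, tag->value);
}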
Example #6
int get_file_info(FileState *file)
{
    if(av_open_input_file(&file->pFormatCtx, file->fileName, NULL, 0, NULL) != 0)
	return -1;
    if(av_find_stream_info(file->pFormatCtx) < 0)
	return -1;
    dump_format(file->pFormatCtx, 0, file->fileName, 0);
    return 0;
}
Example #7
status_t
AVFormatReader::Sniff(int32* _streamCount)
{
	TRACE("AVFormatReader::Sniff\n");

	BPositionIO* source = dynamic_cast<BPositionIO*>(Source());
	if (source == NULL) {
		TRACE("  not a BPositionIO, but we need it to be one.\n");
		return B_NOT_SUPPORTED;
	}

	Stream* stream = new(std::nothrow) Stream(source,
		&fSourceLock);
	if (stream == NULL) {
		ERROR("AVFormatReader::Sniff() - failed to allocate Stream\n");
		return B_NO_MEMORY;
	}

	ObjectDeleter<Stream> streamDeleter(stream);

	status_t ret = stream->Open();
	if (ret != B_OK) {
		TRACE("  failed to detect stream: %s\n", strerror(ret));
		return ret;
	}

	delete[] fStreams;
	fStreams = NULL;

	int32 streamCount = stream->CountStreams();
	if (streamCount == 0) {
		TRACE("  failed to detect any streams: %s\n", strerror(ret));
		return B_ERROR;
	}

	fStreams = new(std::nothrow) Stream*[streamCount];
	if (fStreams == NULL) {
		ERROR("AVFormatReader::Sniff() - failed to allocate streams\n");
		return B_NO_MEMORY;
	}

	memset(fStreams, 0, sizeof(Stream*) * streamCount);
	fStreams[0] = stream;
	streamDeleter.Detach();

	#ifdef TRACE_AVFORMAT_READER
	dump_format(const_cast<AVFormatContext*>(stream->Context()), 0, "", 0);
	#endif

	if (_streamCount != NULL)
		*_streamCount = streamCount;

	return B_OK;
}
Example #8
int main(int argc, char *argv[])
{
  av_register_all();  
  AVFormatContext *pFormatCtx;
  AVCodecContext *pCodecCtx;
  AVCodec *pCodec;
  int audioStream = -1;
  
  // Open file
  if (av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL) != 0)
    return -1; // Couldn't open file
  printf("opened %s\n", argv[1]);
  
  // Get stream information
  if (av_find_stream_info(pFormatCtx) < 0)
    return -1; // No stream information found
  
  // Debugging function
  dump_format(pFormatCtx, 0, argv[1], 0);
  
  // Find the first audio stream
  for (int i = 0; i < pFormatCtx->nb_streams; i++)
  {
    if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO)
    {
      audioStream = i;
      break;
    }
  }
  if (audioStream == -1)
    return -1; // No audio stream found
  
  // Get a pointer to the codec context for the audio stream
  pCodecCtx = pFormatCtx->streams[audioStream]->codec;
  
  // Find the correct decoder
  pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
  if (pCodec == NULL)
  {
    printf("Unsupported codec!\n");
    return -1; // Codec not found
  }
  printf("found codec %d\n", pCodecCtx->codec_id);
  
  // Open correct codec
  if (avcodec_open(pCodecCtx, pCodec) < 0)
    return -1; // Couldn't open codec
  printf("opened codec %d\n", pCodecCtx->codec_id);
  
}
Example #9
int FFMpegEncoder::init(FFMpegEncodeProfile *myProfile)
{
	av_register_all();
	profile = *myProfile;
	
	videoEncodeBufSize = 1024000;
	videoEncodeBuf = (unsigned char*)malloc(videoEncodeBufSize);
	if (videoEncodeBuf == NULL)
		return ERR_ALLOC_VIDEO_BUF;

	audioEncodeBufSize = 4*128*1024;
	audioEncodeBuf = (unsigned char*)malloc(audioEncodeBufSize);
	if (audioEncodeBuf == NULL)
		return ERR_ALLOC_AUDIO_BUF;
	    
	pFormatCtx = avformat_alloc_context();
	
	int ret = -1;
	ret = configOutput();
	if (ret) 
	{
		printf("error configuring output!\n");
		return ret;
	}
	
	videoStream = NULL;
	ret = configVideoStream();
	if (ret) 
	{
		printf("error configuring video!\n");
		return ret;
	}
	
	audioStream = NULL;
	ret = configAudioStream();
	if (ret) 
	{
		printf("error configuring audio!\n");
		return ret;
	}

	av_set_parameters(pFormatCtx, NULL);
	dump_format(pFormatCtx, 0, (char*)profile.outputFilename, 1);
    
	av_write_header(pFormatCtx);
	audioClock = 0;
	videoClock = 0;
	return 0;
	//
}
Example #10
bool CFFMPEGLoader::CreateMovie(const char *filename, const AVOutputFormat *format, const AVCodecContext *VideoCon, const AVCodecContext *AudioCon) {
    if(!filename)
        return false;

    AVOutputFormat *fmt;
    //*fmt=*format;
    fmt = guess_format(NULL, filename, NULL);

    pFormatCon = av_alloc_format_context();
    if(!pFormatCon) {
        cout<<"Error while allocating format context\n";
        return false;
    }
    bOutput=true;
    strcpy(pFormatCon->filename,filename);

    pFormatCon->oformat=fmt;
    pAudioStream=pVideoStream=NULL;

    if (fmt->video_codec != CODEC_ID_NONE) {
        pVideoStream = add_video_stream(pFormatCon, fmt->video_codec,VideoCon);
    }
    if (fmt->audio_codec != CODEC_ID_NONE) {
        pAudioStream = add_audio_stream(pFormatCon, fmt->audio_codec,AudioCon);
    }

    if (av_set_parameters(pFormatCon, NULL) < 0) {
        cout<<"Invalid output format parameters\n";
        return false;
    }

    if (pVideoStream)
        open_stream(pFormatCon, pVideoStream);
    if (pAudioStream)
        open_stream(pFormatCon, pAudioStream);

    dump_format(pFormatCon, 0, filename, 1);

    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (url_fopen(&pFormatCon->pb, filename, URL_WRONLY) < 0) {
            cout<<"Could not open '%s'"<<filename<<endl;
            return false;
        }
    }

    /* write the stream header, if any */
    av_write_header(pFormatCon);
    return true;
}
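The muxing-side helpers used above (guess_format(), av_alloc_format_context(), av_set_parameters(), url_fopen()) were also replaced in later releases. A rough sketch of the equivalent output setup on the newer API follows; the helper name is illustrative and the per-stream codec parameters are deliberately elided:

#include <libavformat/avformat.h>

/* Sketch: modern equivalent of the output setup in CreateMovie() above.
 * Filling in codec parameters for each stream is elided ("..."). */
static int create_movie_modern(const char *filename)
{
    AVFormatContext *oc = NULL;

    /* replaces guess_format() + av_alloc_format_context() */
    if (avformat_alloc_output_context2(&oc, NULL, NULL, filename) < 0)
        return -1;

    /* replaces the add_video_stream()/add_audio_stream() helpers */
    AVStream *video_st = avformat_new_stream(oc, NULL);
    AVStream *audio_st = avformat_new_stream(oc, NULL);
    if (!video_st || !audio_st)
        return -1;
    /* ... fill video_st->codecpar / audio_st->codecpar here ... */

    av_dump_format(oc, 0, filename, 1);                 /* replaces dump_format() */

    /* replaces url_fopen(&oc->pb, filename, URL_WRONLY) */
    if (!(oc->oformat->flags & AVFMT_NOFILE) &&
        avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0)
        return -1;

    /* replaces av_set_parameters() + av_write_header() */
    return avformat_write_header(oc, NULL) < 0 ? -1 : 0;
}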
Example #11
static AVFormatContext *
open_file(const char *filename)
{
    AVFormatContext *afc;
    int err = av_open_input_file(&afc, filename, NULL, 0, NULL);

    if (!err)
        err = av_find_stream_info(afc);

    if (err < 0) {
        fprintf(stderr, "%s: lavf error %d\n", filename, err);
        exit(1);
    }

    dump_format(afc, 0, filename, 0);

    return afc;
}
Example #12
status_t 
ProducerNode::PrepareToConnect(
				const media_source & what,
				const media_destination & where,
				media_format * format,
				media_source * out_source,
				char * out_name)
{

	out("ProducerNode::PrepareToConnect\n");

	if (mOutput.source != what)
		return B_MEDIA_BAD_SOURCE;
	
	if (mOutput.destination != media_destination::null)
		return B_MEDIA_ALREADY_CONNECTED;

	if (format == NULL || out_source == NULL || out_name == NULL)
		return B_BAD_VALUE;

#if 0		
	ASSERT(mOutputEnabled == false);

	trace("old format:\n");
	dump_format(format);

	status_t status;
	
	status = specialize_format_to_inputformat(format);
	if (status != B_OK)
		return status;

#endif


	*out_source = mOutput.source;
	strcpy(out_name,mOutput.name);
	//mOutput.destination = where; //really now? fixme
	return B_OK;
}
Example #13
void
ProducerNode::Connect(
				status_t error, 
				const media_source & source,
				const media_destination & destination,
				const media_format & format,
				char * io_name)
{
	out("ProducerNode::Connect\n");

	if (error != B_OK) {
		InitializeOutput();
		return;
	}
/*
	if (mOutput.destination != destination) { //if connected in PrepareToConnect fixme?
		trace("error mOutput.destination != destination\n");
		return;
	}
*/	
	mOutput.destination = destination;

	if (mOutput.source != source) {
		out("error mOutput.source != source\n");
		return;
	}	
		
	strcpy(io_name,mOutput.name);

#if 0
	trace("format (final and approved):\n");
	dump_format(&format);
#endif

	mOutputEnabled = true;

	return;
}
Example #14
void SWDecoder::decodeStream()
{
	int res = 0;
	if(!res) 
	{
		std::cout << "opening virtual file" << std::endl;
		AVInputFormat *pFmt = av_find_input_format("mpegts");
		res = av_open_input_file(&mState.pFormatCtx, boost::str(boost::format("dvrdecode://%p") % this).c_str(), pFmt, 0, 0);
		if(res)
		{
			std::cerr << boost::format("Error opening pseudo decoder file:%s") % strerror(errno) << std::endl;
		} else
		{
			std::cout << "finding virtual stream info" << std::endl;

			res = av_find_stream_info(mState.pFormatCtx);
			if(res)
			{
				std::cerr << boost::format("Error finding stream info:%s") % strerror(errno) << std::endl;
			}
			else
			{
				std::cout << "trying to dump format" << std::endl;
				dump_format(mState.pFormatCtx, 0, boost::str(boost::format("dvrdecode://%p") % this).c_str(), false);
				std::cout << "recognized format" << std::endl;
			}
			if(!res)
			{
				decodeFrames();
			}
		}

		if(mState.pFormatCtx)
		{
			av_close_input_file(mState.pFormatCtx);
		}
	}	
}
Example #15
static int open_input_file(AVFormatContext **fmt_ctx_ptr, const char *filename)
{
    int err, i;
    AVFormatContext *fmt_ctx;

    fmt_ctx = avformat_alloc_context();
    set_context_opts(fmt_ctx, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);

    if ((err = av_open_input_file(&fmt_ctx, filename, iformat, 0, NULL)) < 0) {
        print_error(filename, err);
        return err;
    }

    /* fill the streams in the format context */
    if ((err = av_find_stream_info(fmt_ctx)) < 0) {
        print_error(filename, err);
        return err;
    }

    dump_format(fmt_ctx, 0, filename, 0);

    /* bind a decoder to each input stream */
    for (i = 0; i < fmt_ctx->nb_streams; i++) {
        AVStream *stream = fmt_ctx->streams[i];
        AVCodec *codec;

        if (!(codec = avcodec_find_decoder(stream->codec->codec_id))) {
            fprintf(stderr, "Unsupported codec (id=%d) for input stream %d\n",
                    stream->codec->codec_id, stream->index);
        } else if (avcodec_open(stream->codec, codec) < 0) {
            fprintf(stderr, "Error while opening codec for input stream %d\n",
                    stream->index);
        }
    }

    *fmt_ctx_ptr = fmt_ctx;
    return 0;
}
Example #16
int
scan_metadata_ffmpeg(char *file, struct media_file_info *mfi)
{
  AVFormatContext *ctx;
  const struct metadata_map *extra_md_map;
  enum CodecID codec_id;
  enum CodecID video_codec_id;
  enum CodecID audio_codec_id;
  AVStream *video_stream;
  AVStream *audio_stream;
  int mdcount;
  int i;
  int ret;

  ctx = NULL;

#if LIBAVFORMAT_VERSION_MAJOR >= 53 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVCODEC_VERSION_MINOR >= 3)
  ret = avformat_open_input(&ctx, file, NULL, NULL);
#else
  ret = av_open_input_file(&ctx, file, NULL, 0, NULL);
#endif
  if (ret != 0)
    {
      DPRINTF(E_WARN, L_SCAN, "Cannot open media file '%s': %s\n", file, strerror(AVUNERROR(ret)));

      return -1;
    }

  ret = av_find_stream_info(ctx);
  if (ret < 0)
    {
      DPRINTF(E_WARN, L_SCAN, "Cannot get stream info: %s\n", strerror(AVUNERROR(ret)));

      av_close_input_file(ctx);
      return -1;
    }

#if 0
  /* Dump input format as determined by ffmpeg */
# if LIBAVFORMAT_VERSION_MAJOR >= 52 || (LIBAVFORMAT_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR >= 101)
  av_dump_format(ctx, 0, file, 0);
# else
  dump_format(ctx, 0, file, FALSE);
# endif
#endif

  DPRINTF(E_DBG, L_SCAN, "File has %d streams\n", ctx->nb_streams);

  /* Extract codec IDs, check for video */
  video_codec_id = CODEC_ID_NONE;
  video_stream = NULL;

  audio_codec_id = CODEC_ID_NONE;
  audio_stream = NULL;

  for (i = 0; i < ctx->nb_streams; i++)
    {
      switch (ctx->streams[i]->codec->codec_type)
	{
#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR >= 64)
	  case AVMEDIA_TYPE_VIDEO:
#else
	  case CODEC_TYPE_VIDEO:
#endif
	    if (!video_stream)
	      {
		DPRINTF(E_DBG, L_SCAN, "File has video (stream %d)\n", i);

		mfi->has_video = 1;
		video_stream = ctx->streams[i];
		video_codec_id = video_stream->codec->codec_id;
	      }
	    break;

#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR >= 64)
	  case AVMEDIA_TYPE_AUDIO:
#else
	  case CODEC_TYPE_AUDIO:
#endif
	    if (!audio_stream)
	      {
		audio_stream = ctx->streams[i];
		audio_codec_id = audio_stream->codec->codec_id;
	      } 
	    break;

	  default:
	    break;
	}
    }

  if (audio_codec_id == CODEC_ID_NONE)
    {
      DPRINTF(E_DBG, L_SCAN, "File has no audio streams, discarding\n");

      av_close_input_file(ctx);
      return -1;
    }

  /* Common media information */
  if (ctx->duration > 0)
    mfi->song_length = ctx->duration / (AV_TIME_BASE / 1000); /* ms */

  if (ctx->bit_rate > 0)
    mfi->bitrate = ctx->bit_rate / 1000;
  else if (ctx->duration > AV_TIME_BASE) /* guesstimate */
    mfi->bitrate = ((mfi->file_size * 8) / (ctx->duration / AV_TIME_BASE)) / 1000;

  DPRINTF(E_DBG, L_SCAN, "Duration %d ms, bitrate %d kbps\n", mfi->song_length, mfi->bitrate);

  /* Get some more information on the audio stream */
  if (audio_stream)
    {
      if (audio_stream->codec->sample_rate != 0)
	mfi->samplerate = audio_stream->codec->sample_rate;

      /* Try sample format first */
#if LIBAVUTIL_VERSION_MAJOR >= 51 || (LIBAVUTIL_VERSION_MAJOR == 51 && LIBAVUTIL_VERSION_MINOR >= 4)
      mfi->bits_per_sample = 8 * av_get_bytes_per_sample(audio_stream->codec->sample_fmt);
#elif LIBAVCODEC_VERSION_MAJOR >= 53
      mfi->bits_per_sample = av_get_bits_per_sample_fmt(audio_stream->codec->sample_fmt);
#else
      mfi->bits_per_sample = av_get_bits_per_sample_format(audio_stream->codec->sample_fmt);
#endif
      if (mfi->bits_per_sample == 0)
	{
	  /* Try codec */
	  mfi->bits_per_sample = av_get_bits_per_sample(audio_codec_id);
	}

      DPRINTF(E_DBG, L_SCAN, "samplerate %d, bps %d\n", mfi->samplerate, mfi->bits_per_sample);
    }

  /* Check codec */
  extra_md_map = NULL;
  codec_id = (mfi->has_video) ? video_codec_id : audio_codec_id;
  switch (codec_id)
    {
      case CODEC_ID_AAC:
	DPRINTF(E_DBG, L_SCAN, "AAC\n");
	mfi->type = strdup("m4a");
	mfi->codectype = strdup("mp4a");
	mfi->description = strdup("AAC audio file");
	break;

      case CODEC_ID_ALAC:
	DPRINTF(E_DBG, L_SCAN, "ALAC\n");
	mfi->type = strdup("m4a");
	mfi->codectype = strdup("alac");
	mfi->description = strdup("AAC audio file");
	break;

      case CODEC_ID_FLAC:
	DPRINTF(E_DBG, L_SCAN, "FLAC\n");
	mfi->type = strdup("flac");
	mfi->codectype = strdup("flac");
	mfi->description = strdup("FLAC audio file");

	extra_md_map = md_map_vorbis;
	break;

      case CODEC_ID_MUSEPACK7:
      case CODEC_ID_MUSEPACK8:
	DPRINTF(E_DBG, L_SCAN, "Musepack\n");
	mfi->type = strdup("mpc");
	mfi->codectype = strdup("mpc");
	mfi->description = strdup("Musepack audio file");
	break;

      case CODEC_ID_MPEG4: /* Video */
      case CODEC_ID_H264:
	DPRINTF(E_DBG, L_SCAN, "MPEG4 video\n");
	mfi->type = strdup("m4v");
	mfi->codectype = strdup("mp4v");
	mfi->description = strdup("MPEG-4 video file");

	extra_md_map = md_map_tv;
	break;

      case CODEC_ID_MP3:
	DPRINTF(E_DBG, L_SCAN, "MP3\n");
	mfi->type = strdup("mp3");
	mfi->codectype = strdup("mpeg");
	mfi->description = strdup("MPEG audio file");

	extra_md_map = md_map_id3;
	break;

      case CODEC_ID_VORBIS:
	DPRINTF(E_DBG, L_SCAN, "VORBIS\n");
	mfi->type = strdup("ogg");
	mfi->codectype = strdup("ogg");
	mfi->description = strdup("Ogg Vorbis audio file");

	extra_md_map = md_map_vorbis;
	break;

      case CODEC_ID_WMAVOICE:
	DPRINTF(E_DBG, L_SCAN, "WMA Voice\n");
	mfi->type = strdup("wma");
	mfi->codectype = strdup("wmav");
	mfi->description = strdup("WMA audio file");
	break;

      case CODEC_ID_WMAPRO:
	DPRINTF(E_DBG, L_SCAN, "WMA Pro\n");
	mfi->type = strdup("wmap");
	mfi->codectype = strdup("wma");
	mfi->description = strdup("WMA audio file");
	break;

      case CODEC_ID_WMALOSSLESS:
	DPRINTF(E_DBG, L_SCAN, "WMA Lossless\n");
	mfi->type = strdup("wma");
	mfi->codectype = strdup("wmal");
	mfi->description = strdup("WMA audio file");
	break;

      case CODEC_ID_WMAV1:
      case CODEC_ID_WMAV2:
	DPRINTF(E_DBG, L_SCAN, "WMA V1/V2\n");
	mfi->type = strdup("wma");
	mfi->codectype = strdup("wma");
	mfi->description = strdup("WMA audio file");
	break;

      case CODEC_ID_PCM_S16LE ... CODEC_ID_PCM_F64LE:
	if (strcmp(ctx->iformat->name, "aiff") == 0)
	  {
	    DPRINTF(E_DBG, L_SCAN, "AIFF\n");
	    mfi->type = strdup("aif");
	    mfi->codectype = strdup("aif");
	    mfi->description = strdup("AIFF audio file");
	    break;
	  }
	else if (strcmp(ctx->iformat->name, "wav") == 0)
	  {
	    DPRINTF(E_DBG, L_SCAN, "WAV\n");
	    mfi->type = strdup("wav");
	    mfi->codectype = strdup("wav");
	    mfi->description = strdup("WAV audio file");
	    break;
	  }
	/* WARNING: will fallthrough to default case, don't move */
	/* FALLTHROUGH */

      default:
	DPRINTF(E_DBG, L_SCAN, "Unknown codec 0x%x (video: %s), format %s (%s)\n",
		codec_id, (mfi->has_video) ? "yes" : "no", ctx->iformat->name, ctx->iformat->long_name);
	mfi->type = strdup("unkn");
	mfi->codectype = strdup("unkn");
	if (mfi->has_video)
	  {
	    mfi->description = strdup("Unknown video file format");
	    extra_md_map = md_map_tv;
	  }
	else
	  mfi->description = strdup("Unknown audio file format");
	break;
    }

  mdcount = 0;

  if ((!ctx->metadata) && (!audio_stream->metadata)
      && (video_stream && !video_stream->metadata))
    {
      DPRINTF(E_WARN, L_SCAN, "ffmpeg reports no metadata\n");

      goto skip_extract;
    }

  if (extra_md_map)
    {
      ret = extract_metadata(mfi, ctx, audio_stream, video_stream, extra_md_map);
      mdcount += ret;

      DPRINTF(E_DBG, L_SCAN, "Picked up %d tags with extra md_map\n", ret);
    }

#if LIBAVFORMAT_VERSION_MAJOR < 53
  av_metadata_conv(ctx, NULL, ctx->iformat->metadata_conv);
#endif

  ret = extract_metadata(mfi, ctx, audio_stream, video_stream, md_map_generic);
  mdcount += ret;

  DPRINTF(E_DBG, L_SCAN, "Picked up %d tags with generic md_map, %d tags total\n", ret, mdcount);

  /* fix up TV metadata */
  if (mfi->media_kind == 10)
    {
      /* I have no idea why this is, but iTunes reports a media kind of 64 for stik==10 (?!) */
      mfi->media_kind = 64;
    }
  /* Unspecified video files are "Movies", media_kind 2 */
  else if (mfi->has_video == 1)
    {
      mfi->media_kind = 2;
    }

 skip_extract:
  if (mdcount == 0)
    {
      /* ffmpeg doesn't support FLAC nor Musepack metadata,
       * and is buggy for some WMA variants, so fall back to the
       * legacy format-specific parsers until it gets fixed */
      if ((codec_id == CODEC_ID_WMAPRO)
	  || (codec_id == CODEC_ID_WMAVOICE)
	  || (codec_id == CODEC_ID_WMALOSSLESS))
	{
	  DPRINTF(E_WARN, L_SCAN, "Falling back to legacy WMA scanner\n");

	  av_close_input_file(ctx);
	  return (scan_get_wmainfo(file, mfi) ? 0 : -1);
	}
#ifdef FLAC
      else if (codec_id == CODEC_ID_FLAC)
	{
	  DPRINTF(E_WARN, L_SCAN, "Falling back to legacy FLAC scanner\n");

	  av_close_input_file(ctx);
	  return (scan_get_flacinfo(file, mfi) ? 0 : -1);
	}
#endif /* FLAC */
#ifdef MUSEPACK
      else if ((codec_id == CODEC_ID_MUSEPACK7)
	       || (codec_id == CODEC_ID_MUSEPACK8))
	{
	  DPRINTF(E_WARN, L_SCAN, "Falling back to legacy Musepack scanner\n");

	  av_close_input_file(ctx);
	  return (scan_get_mpcinfo(file, mfi) ? 0 : -1);
	}
#endif /* MUSEPACK */
      else
	DPRINTF(E_WARN, L_SCAN, "Could not extract any metadata\n");
    }

  /* Just in case there's no title set ... */
  if (mfi->title == NULL)
    mfi->title = strdup(mfi->fname);

  /* All done */
  av_close_input_file(ctx);

  return 0;
}
Example #17
bool FFmpegDecoder::open(const std::string & filename, FFmpegParameters* parameters)
{
    try
    {
        // Open video file
        AVFormatContext * p_format_context = 0;

        if (filename.compare(0, 5, "/dev/")==0)
        {
            avdevice_register_all();
        
            OSG_NOTICE<<"Attempting to stream "<<filename<<std::endl;

            AVFormatParameters formatParams;
            memset(&formatParams, 0, sizeof(AVFormatParameters));
            AVInputFormat *iformat;

            formatParams.channel = 0;
            formatParams.standard = 0;
#if 1
            formatParams.width = 320;
            formatParams.height = 240;
#else
            formatParams.width = 640;
            formatParams.height = 480;
#endif            
            formatParams.time_base.num = 1;
            formatParams.time_base.den = 30;

            std::string format = "video4linux2";
            iformat = av_find_input_format(format.c_str());
            
            if (iformat)
            {
                OSG_NOTICE<<"Found input format: "<<format<<std::endl;
            }
            else
            {
                OSG_NOTICE<<"Failed to find input format: "<<format<<std::endl;
            }

            int error = av_open_input_file(&p_format_context, filename.c_str(), iformat, 0, &formatParams);
            if (error != 0)
            {
                std::string error_str;
                switch (error)
                {
                    //case AVERROR_UNKNOWN: error_str = "AVERROR_UNKNOWN"; break;   // same value as AVERROR_INVALIDDATA
                    case AVERROR_IO: error_str = "AVERROR_IO"; break;
                    case AVERROR_NUMEXPECTED: error_str = "AVERROR_NUMEXPECTED"; break;
                    case AVERROR_INVALIDDATA: error_str = "AVERROR_INVALIDDATA"; break;
                    case AVERROR_NOMEM: error_str = "AVERROR_NOMEM"; break;
                    case AVERROR_NOFMT: error_str = "AVERROR_NOFMT"; break;
                    case AVERROR_NOTSUPP: error_str = "AVERROR_NOTSUPP"; break;
                    case AVERROR_NOENT: error_str = "AVERROR_NOENT"; break;
                    case AVERROR_PATCHWELCOME: error_str = "AVERROR_PATCHWELCOME"; break;
                    default: error_str = "Unknown error"; break;
                }

                throw std::runtime_error("av_open_input_file() failed : " + error_str);
            }
        }
        else
        {
            AVInputFormat* av_format = (parameters ? parameters->getFormat() : 0);
            AVFormatParameters* av_params = (parameters ? parameters->getFormatParameter() : 0);
            if (av_open_input_file(&p_format_context, filename.c_str(), av_format, 0, av_params) !=0 )
                throw std::runtime_error("av_open_input_file() failed");
        }
        
        m_format_context.reset(p_format_context);

        // Retrieve stream info
        if (av_find_stream_info(p_format_context) < 0)
            throw std::runtime_error("av_find_stream_info() failed");

        m_duration = double(m_format_context->duration) / AV_TIME_BASE;
        m_start = double(m_format_context->start_time) / AV_TIME_BASE;

        // TODO move this elsewhere
        m_clocks.reset(m_start);

        // Dump info to stderr
        dump_format(p_format_context, 0, filename.c_str(), false);

        // Find and open the first video and audio streams (note that audio stream is optional and only opened if possible)

        findVideoStream();
        findAudioStream();

        m_video_decoder.open(m_video_stream);

        try
        {
            m_audio_decoder.open(m_audio_stream);
        }

        catch (const std::runtime_error & error)
        {
            OSG_WARN << "FFmpegImageStream::open audio failed, audio stream will be disabled: " << error.what() << std::endl;
        }
    }

    catch (const std::runtime_error & error)
    {
        OSG_WARN << "FFmpegImageStream::open : " << error.what() << std::endl;
        return false;
    }
    
    return true;
}
Example #18
bool DataSource::open() {
    if (av_open_input_file(&m_formatCtx,
                           qPrintable(m_filename),
                           NULL,
                           0,
                           NULL) != 0)
    {
        DPRINT("can not open file.");
        return false;
    }

    if(av_find_stream_info(m_formatCtx) < 0) {
        DPRINT("can not find stream info.");
        return false;
    }

    dump_format(m_formatCtx, 0, 0, 0);

    AVCodecContext *codecCtx = NULL;

    for (uint i = 0; i < m_formatCtx->nb_streams; i++) {
        codecCtx = m_formatCtx->streams[i]->codec;

        if (codecCtx->codec_type == CODEC_TYPE_VIDEO) {
            m_videoStream = i;
            //DPRINT("video stream index: %d - %dx%d", m_videoStream, codecCtx->width, codecCtx->height);

            if (!this->openCodec(codecCtx, &m_videoCodec)) {
                closeInputFile();
                DPRINT("can not open video codec.");
                return false;
            }

            this->m_swsCtx = sws_getContext(codecCtx->width,
                                            codecCtx->height,
                                            codecCtx->pix_fmt,
                                            codecCtx->width,
                                            codecCtx->height,
                                            PIX_FMT_RGB24,
                                            SWS_BICUBIC,
                                            NULL,
                                            NULL,
                                            NULL);

            if (this->m_swsCtx == NULL) {
                closeInputFile();
                DPRINT("can not get swscale context");
                return false;
            }

            m_timeBase = av_q2d(m_formatCtx->streams[m_videoStream]->time_base);
            m_rawFrame = avcodec_alloc_frame();
        }
        else if (codecCtx->codec_type == CODEC_TYPE_AUDIO) {
            m_audioStream = i;

            if (!this->openCodec(codecCtx, &m_audioCodec)) {
                closeInputFile();
                DPRINT("can not open audio codec.");
                return false;
            }
        }
    }

    return true;
}
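With the scaler configured as in DataSource::open() above, converting a decoded frame to RGB24 is a single sws_scale() call. A minimal sketch (the helper name is illustrative, and the newer av_frame_alloc()/av_frame_get_buffer() calls are used in place of avcodec_alloc_frame()):

#include <libavutil/frame.h>
#include <libswscale/swscale.h>

/* Sketch: convert one decoded frame to RGB24 with a scaler set up like
 * m_swsCtx above (same source and destination dimensions). */
static AVFrame *frame_to_rgb24(struct SwsContext *sws, const AVFrame *src)
{
    AVFrame *rgb = av_frame_alloc();
    if (!rgb)
        return NULL;
    rgb->format = AV_PIX_FMT_RGB24;
    rgb->width  = src->width;
    rgb->height = src->height;
    if (av_frame_get_buffer(rgb, 0) < 0) {
        av_frame_free(&rgb);
        return NULL;
    }
    sws_scale(sws, (const uint8_t * const *)src->data, src->linesize,
              0, src->height, rgb->data, rgb->linesize);
    return rgb;
}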
Example #19
int decode_thread(void *arg) {

  VideoState *is = (VideoState *)arg;
  AVFormatContext *pFormatCtx;
  AVPacket pkt1, *packet = &pkt1;

  int video_index = -1;
  int audio_index = -1;
  int i;

  is->videoStream=-1;
  is->audioStream=-1;

  global_video_state = is;
  // will interrupt blocking functions if we quit!
  url_set_interrupt_cb(decode_interrupt_cb);

  // Open video file
  if(av_open_input_file(&pFormatCtx, is->filename, NULL, 0, NULL)!=0)
    return -1; // Couldn't open file

  is->pFormatCtx = pFormatCtx;
  
  // Retrieve stream information
  if(av_find_stream_info(pFormatCtx)<0)
    return -1; // Couldn't find stream information
  
  // Dump information about file onto standard error
  dump_format(pFormatCtx, 0, is->filename, 0);
  
  // Find the first video stream
  for(i=0; i<pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO &&
       video_index < 0) {
      video_index=i;
    }
    if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO &&
       audio_index < 0) {
      audio_index=i;
    }
  }
  if(audio_index >= 0) {
    stream_component_open(is, audio_index);
  }
  if(video_index >= 0) {
    stream_component_open(is, video_index);
  }   

  if(is->videoStream < 0 || is->audioStream < 0) {
    fprintf(stderr, "%s: could not open codecs\n", is->filename);
    goto fail;
  }

  // main decode loop

  for(;;) {
    if(is->quit) {
      break;
    }
    // seek stuff goes here
    if(is->seek_req) {
      int stream_index= -1;
      int64_t seek_target = is->seek_pos;

      if     (is->videoStream >= 0) stream_index = is->videoStream;
      else if(is->audioStream >= 0) stream_index = is->audioStream;

      if(stream_index>=0){
	seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, pFormatCtx->streams[stream_index]->time_base);
      }
      if(av_seek_frame(is->pFormatCtx, stream_index, seek_target, is->seek_flags) < 0) {
	fprintf(stderr, "%s: error while seeking\n", is->pFormatCtx->filename);
      } else {
	if(is->audioStream >= 0) {
	  packet_queue_flush(&is->audioq);
	  packet_queue_put(&is->audioq, &flush_pkt);
	}
	if(is->videoStream >= 0) {
	  packet_queue_flush(&is->videoq);
	  packet_queue_put(&is->videoq, &flush_pkt);
	}
      }
      is->seek_req = 0;
    }

    if(is->audioq.size > MAX_AUDIOQ_SIZE ||
       is->videoq.size > MAX_VIDEOQ_SIZE) {
      SDL_Delay(10);
      continue;
    }
    if(av_read_frame(is->pFormatCtx, packet) < 0) {
      if(url_ferror(&pFormatCtx->pb) == 0) {
	SDL_Delay(100); /* no error; wait for user input */
	continue;
      } else {
	break;
      }
    }
    // Is this a packet from the video stream?
    if(packet->stream_index == is->videoStream) {
      packet_queue_put(&is->videoq, packet);
    } else if(packet->stream_index == is->audioStream) {
      packet_queue_put(&is->audioq, packet);
    } else {
      av_free_packet(packet);
    }
  }
  /* all done - wait for it */
  while(!is->quit) {
    SDL_Delay(100);
  }
 fail:
  {
    SDL_Event event;
    event.type = FF_QUIT_EVENT;
    event.user.data1 = is;
    SDL_PushEvent(&event);
  }
  return 0;
}
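The seek path in decode_thread() rescales a position expressed in AV_TIME_BASE units into the selected stream's own time base before calling av_seek_frame(), which returns a negative error code on failure. Wrapped up as a small standalone helper (the name and the seconds-based interface are illustrative):

#include <libavformat/avformat.h>
#include <libavutil/mathematics.h>

/* Sketch: seek to an absolute position given in seconds, rescaling the
 * AV_TIME_BASE target into the stream's time base as decode_thread() does. */
static int seek_to_seconds(AVFormatContext *fmt_ctx, int stream_index, double seconds)
{
  int64_t target = (int64_t)(seconds * AV_TIME_BASE);

  if (stream_index >= 0)
    target = av_rescale_q(target, AV_TIME_BASE_Q,
                          fmt_ctx->streams[stream_index]->time_base);

  /* av_seek_frame() returns a negative AVERROR code on failure */
  return av_seek_frame(fmt_ctx, stream_index, target, AVSEEK_FLAG_BACKWARD);
}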
Example #20
GF_AbstractTSMuxer * ts_amux_new(GF_AVRedirect * avr, u32 videoBitrateInBitsPerSec, u32 width, u32 height, u32 audioBitRateInBitsPerSec) {
	GF_AbstractTSMuxer * ts = gf_malloc( sizeof(GF_AbstractTSMuxer));
	memset( ts, 0, sizeof( GF_AbstractTSMuxer));
	ts->oc = avformat_alloc_context();
	ts->destination = avr->destination;
	av_register_all();
	ts->oc->oformat = GUESS_FORMAT(NULL, avr->destination, NULL);
	if (!ts->oc->oformat)
		ts->oc->oformat = GUESS_FORMAT("mpegts", NULL, NULL);
	assert( ts->oc->oformat);
#if REDIRECT_AV_AUDIO_ENABLED
	ts->audio_st = av_new_stream(ts->oc, avr->audioCodec->id);
	{
		AVCodecContext * c = ts->audio_st->codec;
		c->codec_id = avr->audioCodec->id;
		c->codec_type = AVMEDIA_TYPE_AUDIO;
		/* put sample parameters */
		c->sample_fmt = SAMPLE_FMT_S16;
		c->bit_rate = audioBitRateInBitsPerSec;
		c->sample_rate = avr->audioSampleRate;
		c->channels = 2;
		c->time_base.num = 1;
		c->time_base.den = 1000;
		// some formats want stream headers to be separate
		if (ts->oc->oformat->flags & AVFMT_GLOBALHEADER)
			c->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}
#endif

	ts->video_st = av_new_stream(ts->oc, avr->videoCodec->id);
	{
		AVCodecContext * c = ts->video_st->codec;
		c->codec_id = avr->videoCodec->id;
		c->codec_type = AVMEDIA_TYPE_VIDEO;

		/* put sample parameters */
		c->bit_rate = videoBitrateInBitsPerSec;
		/* resolution must be a multiple of two */
		c->width = width;
		c->height = height;
		/* time base: this is the fundamental unit of time (in seconds) in terms
		   of which frame timestamps are represented. for fixed-fps content,
		   timebase should be 1/framerate and timestamp increments should be
		   identically 1. */
		c->time_base.den = STREAM_FRAME_RATE;
		c->time_base.num = 1;
		c->gop_size = 12; /* emit one intra frame every twelve frames at most */
		c->pix_fmt = STREAM_PIX_FMT;
		if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
			/* just for testing, we also add B frames */
			c->max_b_frames = 2;
		}
		if (c->codec_id == CODEC_ID_MPEG1VIDEO) {
			/* Needed to avoid using macroblocks in which some coeffs overflow.
			   This does not happen with normal video, it just happens here as
			   the motion of the chroma plane does not match the luma plane. */
			c->mb_decision=2;
		}
		// some formats want stream headers to be separate
		if (ts->oc->oformat->flags & AVFMT_GLOBALHEADER)
			c->flags |= CODEC_FLAG_GLOBAL_HEADER;

	}
	//av_set_pts_info(ts->audio_st, 33, 1, audioBitRateInBitsPerSec);

#ifndef AVIO_FLAG_WRITE
	/* set the output parameters (must be done even if no
	   parameters). */
	if (av_set_parameters(ts->oc, NULL) < 0) {
		fprintf(stderr, "Invalid output format parameters\n");
		return NULL;
	}
#endif

	dump_format(ts->oc, 0, avr->destination, 1);
	GF_LOG(GF_LOG_INFO, GF_LOG_MODULE, ("[AVRedirect] DUMPING to %s...\n", ts->destination));

#if (LIBAVCODEC_VERSION_MAJOR<55)
	if (avcodec_open(ts->video_st->codec, avr->videoCodec) < 0) {
#else
	if (avcodec_open2(ts->video_st->codec, avr->videoCodec, NULL) < 0) {
#endif
		GF_LOG(GF_LOG_ERROR, GF_LOG_MODULE, ("[AVRedirect] failed to open video codec\n"));
		return NULL;
	}
#if REDIRECT_AV_AUDIO_ENABLED
#if (LIBAVCODEC_VERSION_MAJOR<55)
	if (avcodec_open(ts->audio_st->codec, avr->audioCodec) < 0) {
#else
	if (avcodec_open2(ts->audio_st->codec, avr->audioCodec, NULL) < 0) {
#endif
		GF_LOG(GF_LOG_ERROR, GF_LOG_MODULE, ("[AVRedirect] failed to open audio codec\n"));
		return NULL;
	}
	ts->audioMx = gf_mx_new("TS_AudioMx");
#endif
	ts->videoMx = gf_mx_new("TS_VideoMx");
	ts->tsEncodingThread = gf_th_new("ts_interleave_thread_run");
	ts->encode = 1;
	ts->audioPackets = NULL;
	ts->videoPackets = NULL;
	gf_th_run(ts->tsEncodingThread, ts_interleave_thread_run, ts);
	return ts;
}

void ts_amux_del(GF_AbstractTSMuxer * muxerToDelete) {
	if (!muxerToDelete)
		return;
	muxerToDelete->encode = 0;
	gf_sleep(100);
	gf_th_stop(muxerToDelete->tsEncodingThread);
	muxerToDelete->tsEncodingThread = NULL;
#if REDIRECT_AV_AUDIO_ENABLED
	gf_mx_del(muxerToDelete->audioMx);
	muxerToDelete->audioMx = NULL;
#endif
	gf_mx_del(muxerToDelete->videoMx);
	muxerToDelete->videoMx = NULL;
	if (muxerToDelete->video_st) {
		avcodec_close(muxerToDelete->video_st->codec);
		muxerToDelete->video_st = NULL;
	}
#if REDIRECT_AV_AUDIO_ENABLED
	if (muxerToDelete->audio_st) {
		avcodec_close(muxerToDelete->audio_st->codec);
		muxerToDelete->audio_st = NULL;
	}
#endif
	/* write the trailer, if any.  the trailer must be written
	 * before you close the CodecContexts open when you wrote the
	 * header; otherwise write_trailer may try to use memory that
	 * was freed on av_codec_close() */
	if (muxerToDelete->oc) {
		u32 i;
		/* free the streams */
		for (i = 0; i < muxerToDelete->oc->nb_streams; i++) {
			av_freep(&muxerToDelete->oc->streams[i]->codec);
			av_freep(&muxerToDelete->oc->streams[i]);
		}

		/* free the stream */
		av_free(muxerToDelete->oc);
		muxerToDelete->oc = NULL;
	}
}

Bool ts_encode_audio_frame(GF_AbstractTSMuxer * ts, uint8_t * data, int encoded, u64 pts) {
	AVPacketList *pl;
	AVPacket * pkt;
	if (!ts->encode)
		return 1;
	pl = gf_malloc(sizeof(AVPacketList));
	pl->next = NULL;
	pkt = &(pl->pkt);
	av_init_packet(pkt);
	assert( ts->audio_st);
	assert( ts->audio_st->codec);
	pkt->flags = 0;
	if (ts->audio_st->codec->coded_frame) {
		if (ts->audio_st->codec->coded_frame->key_frame)
			pkt->flags = AV_PKT_FLAG_KEY;
		if (ts->audio_st->codec->coded_frame->pts != AV_NOPTS_VALUE) {
			pkt->pts = av_rescale_q(ts->audio_st->codec->coded_frame->pts, ts->audio_st->codec->time_base, ts->audio_st->time_base);
		} else {
			if (pts == AV_NOPTS_VALUE)
				pkt->pts = AV_NOPTS_VALUE;
			else {
				pkt->pts = av_rescale_q(pts, ts->audio_st->codec->time_base, ts->audio_st->time_base);
			}
		}
	} else {
		if (pts == AV_NOPTS_VALUE)
			pkt->pts = AV_NOPTS_VALUE;
		else
			pkt->pts = av_rescale_q(pts, ts->audio_st->codec->time_base, ts->audio_st->time_base);
	}
	pkt->stream_index= ts->audio_st->index;
	pkt->data = data;
	pkt->size = encoded;
	//fprintf(stderr, "AUDIO PTS="LLU" was: "LLU" (%p)\n", pkt->pts, pts, pl);
	gf_mx_p(ts->audioMx);
	if (!ts->audioPackets)
		ts->audioPackets = pl;
	else {
		AVPacketList * px = ts->audioPackets;
		while (px->next)
			px = px->next;
		px->next = pl;
	}
	gf_mx_v(ts->audioMx);
	return 0;
}

Bool ts_encode_video_frame(GF_AbstractTSMuxer* ts, uint8_t* data, int encoded) {
	AVPacketList *pl;
	AVPacket * pkt;
	if (!ts->encode)
		return 1;
	pl = gf_malloc(sizeof(AVPacketList));
	pl->next = NULL;
	pkt = &(pl->pkt);

	av_init_packet(pkt);

	if (ts->video_st->codec->coded_frame->pts != AV_NOPTS_VALUE) {
		//pkt->pts= av_rescale_q(ts->video_st->codec->coded_frame->pts, ts->video_st->codec->time_base, ts->video_st->time_base);
		pkt->pts = ts->video_st->codec->coded_frame->pts * ts->video_st->time_base.den / ts->video_st->time_base.num / 1000;
		//pkt->pts = ts->video_st->codec->coded_frame->pts;
	}
	if (ts->video_st->codec->coded_frame->key_frame)
		pkt->flags |= AV_PKT_FLAG_KEY;
	pkt->stream_index= ts->video_st->index;
	pkt->data= data;
	pkt->size= encoded;
	//fprintf(stderr, "VIDEO PTS="LLU" was: "LLU" (%p)\n", pkt->pts, ts->video_st->codec->coded_frame->pts, pl);
	gf_mx_p(ts->videoMx);
	if (!ts->videoPackets)
		ts->videoPackets = pl;
	else {
		AVPacketList * px = ts->videoPackets;
		while (px->next)
			px = px->next;
		px->next = pl;
	}
	gf_mx_v(ts->videoMx);
	return 0;
}
Example #21
uint8_t lavMuxer::open(const char *filename,uint32_t inbitrate, ADM_MUXER_TYPE type, aviInfo *info,
              uint32_t videoExtraDataSize, uint8_t *videoExtraData, WAVHeader *audioheader,
              uint32_t audioextraSize,uint8_t *audioextraData)
{
 AVCodecContext *c;
 	_type=type;
	_fps1000=info->fps1000;
	switch(_type)
	{
	case MUXER_TS:
		fmt=guess_format("mpegts", NULL, NULL);
		break;
	case MUXER_DVD:
		fmt = guess_format("dvd", NULL, NULL);
		break;
	case MUXER_VCD:
		fmt = guess_format("vcd", NULL, NULL);
		break;
	case MUXER_SVCD:
		fmt = guess_format("svcd", NULL, NULL);
		break;
	case MUXER_MP4:
		fmt = guess_format("mp4", NULL, NULL);
		break;
	case MUXER_PSP:
		fmt = guess_format("psp", NULL, NULL);
		break;
	case MUXER_FLV:
		fmt = guess_format("flv", NULL, NULL);
		break;          
	case MUXER_MATROSKA:
		fmt = guess_format("matroska", NULL, NULL);
		break;          

	default:
		fmt=NULL;
	}
	if (!fmt) 
	{
        	printf("Lav:Cannot guess format\n");
                ADM_assert(0);
		return 0;
	}
	oc = av_alloc_format_context();
	if (!oc) 
	{
       		printf("Lav:Cannot allocate context\n");
		return 0;
	}
	oc->oformat = fmt;
	snprintf(oc->filename,1000,"file://%s",filename);
	// Video
	//________
	
	video_st = av_new_stream(oc, 0);
	if (!video_st) 
	{
		printf("Lav: new stream failed\n");
		return 0;
	}	
	
	c = video_st->codec;
	switch(_type)
	{
				case MUXER_FLV:
					 c->codec=new AVCodec;
					 memset(c->codec,0,sizeof(AVCodec));
					 if(fourCC::check(info->fcc,(uint8_t *)"FLV1"))
					 {
						 c->codec_id=CODEC_ID_FLV1;
					 	 c->codec->name=ADM_strdup("FLV1");
					 }else
					 {
						 if(fourCC::check(info->fcc,(uint8_t *)"VP6F"))
						 			{
							 		 c->codec_id=CODEC_ID_VP6F;
					 				 c->codec->name=ADM_strdup("VP6F");
						 			}
						 else
							 ADM_assert(0);
					 
					 }
					 
					 break;
                case MUXER_MATROSKA:
                        strcpy(oc->title,"Avidemux");
                        strcpy(oc->author,"Avidemux");
                        c->sample_aspect_ratio.num=1;
                        c->sample_aspect_ratio.den=1;
                        if(isMpeg4Compatible(info->fcc))
                        {
                                c->codec_id = CODEC_ID_MPEG4;
                                c->has_b_frames=1; // in doubt...
                        }else
                        {
                                if(isH264Compatible(info->fcc))
                                {
                                        c->has_b_frames=1; // in doubt...
                                        c->codec_id = CODEC_ID_H264;
                                        c->codec=new AVCodec;
                                        memset(c->codec,0,sizeof(AVCodec));
                                        c->codec->name=ADM_strdup("H264");
                                }
                                else
                                {
                                   if(!ADM_4cc_to_lavcodec((const char *)&(info->fcc),&(c->codec_id)))
                                   {
                                      printf("[lavFormat] Cannot map  this\n");
                                      return 0;
                                   }
                                  
                                }
                        }
                        if(videoExtraDataSize)
                        {
                                c->extradata=videoExtraData;
                                c->extradata_size= videoExtraDataSize;
                        }
                        break;
                case MUXER_MP4:
                case MUXER_PSP:
                {
                        // probably a memory leak here
                        char *foo=ADM_strdup(filename);
                        
                        strcpy(oc->title,ADM_GetFileName(foo));
                        strcpy(oc->author,"Avidemux");
                        c->sample_aspect_ratio.num=1;
                        c->sample_aspect_ratio.den=1;
                        if(isMpeg4Compatible(info->fcc))
                        {
                                c->codec_id = CODEC_ID_MPEG4;
                                c->has_b_frames=1; // in doubt...
                        }else
                        {
                                if(isH264Compatible(info->fcc))
                                {
                                        c->has_b_frames=1; // in doubt...
                                        c->codec_id = CODEC_ID_H264;
                                        c->codec=new AVCodec;
                                        memset(c->codec,0,sizeof(AVCodec));
                                        c->codec->name=ADM_strdup("H264");
                                }
                                else
                                {
                                        if(isDVCompatible(info->fcc))
                                        {
                                          c->codec_id = CODEC_ID_DVVIDEO;
                                        }else
                                        {
                                          if(fourCC::check(info->fcc,(uint8_t *)"H263"))
                                          {
                                                    c->codec_id=CODEC_ID_H263;
                                            }else{
                                                    c->codec_id = CODEC_ID_MPEG4; // Default value
                                                    printf("Ooops, cant mux that...\n");
                                                    printf("Ooops, cant mux that...\n");
                                                    printf("Ooops, cant mux that...\n");
                                                }
                                        }
                                }
                        }
                        if(videoExtraDataSize)
                        {
                                c->extradata=videoExtraData;
                                c->extradata_size= videoExtraDataSize;
                        }
                        if(MUXER_PSP==_type)
                        {
                            c->rc_buffer_size=0; //8*1024*224;
                            c->rc_max_rate=0; //768*1000;
                            c->rc_min_rate=0;
                            c->bit_rate=768*1000;
                        }
                        else
                        {
                            c->rc_buffer_size=8*1024*224;
                            c->rc_max_rate=9500*1000;
                            c->rc_min_rate=0;
                            if(!inbitrate)
                                    c->bit_rate=9000*1000;
                            else
                                    c->bit_rate=inbitrate;
                        }
                }
                        break;
                case MUXER_TS:
                        c->codec_id = CODEC_ID_MPEG2VIDEO;
                        c->rc_buffer_size=8*1024*224;
                        c->rc_max_rate=9500*1000;
                        c->rc_min_rate=0;
                        if(!inbitrate)
                                c->bit_rate=9000*1000;
                        else
                                c->bit_rate=inbitrate;
        
                        break;
		case MUXER_DVD:
			c->codec_id = CODEC_ID_MPEG2VIDEO;
			c->rc_buffer_size=8*1024*224;
			c->rc_max_rate=9500*1000;
			c->rc_min_rate=0;
			if(!inbitrate)
				c->bit_rate=9000*1000;
			else
				c->bit_rate=inbitrate;
	
			break;
		case MUXER_VCD:
			c->codec_id = CODEC_ID_MPEG1VIDEO;

			c->rc_buffer_size=8*1024*40;
			c->rc_max_rate=1152*1000;
			c->rc_min_rate=1152*1000;
			
			c->bit_rate=1152*1000;
			

			break;
		case MUXER_SVCD:
			c->codec_id = CODEC_ID_MPEG2VIDEO;

			c->rc_buffer_size=8*1024*112;
			c->rc_max_rate=2500*1000;
			c->rc_min_rate=0*1000;
			if(!inbitrate)
				c->bit_rate=2040*1000;
			else
				c->bit_rate=inbitrate;

			break;
		default:
			ADM_assert(0);
	}
	
	c->codec_type = CODEC_TYPE_VIDEO;
	c->flags=CODEC_FLAG_QSCALE;   
	c->width = info->width;  
	c->height = info->height; 

       AVRational fps25=(AVRational){1001,25025};
       AVRational fps24=(AVRational){1001,24000};
       AVRational fps30= (AVRational){1001,30000};
       AVRational fpsfree= (AVRational){1000,_fps1000};

        
    	switch(_fps1000)
	{
		case 25000:
                {
			 c->time_base= fps25; 
			 break;
                }
		case 23976:
                        if(_type==MUXER_MP4 || _type==MUXER_PSP || _type==MUXER_FLV || _type==MUXER_MATROSKA)
                        {
                                 c->time_base= fps24; //(AVRational){1001,24000};
                                break;
                        }
		case  29970:
			 c->time_base=fps30;
			break;
		default:
                      {
                            if(_type==MUXER_MP4 || _type==MUXER_PSP || _type==MUXER_FLV || _type==MUXER_MATROSKA)
                            {
                                    c->time_base=fpsfree;// (AVRational){1000,_fps1000};
                                    break;
                            }
                            else
                            {
                                GUI_Error_HIG(QT_TR_NOOP("Incompatible frame rate"), NULL);
                                return 0;
                            }
                            }
                        break;
	}

			
	c->gop_size=15;
	c->max_b_frames=2;
	c->has_b_frames=1;

	
	// Audio
	//________
        if(audioheader)
        {
          audio_st = av_new_stream(oc, 1);
          if (!audio_st) 
          {
                  printf("Lav: new stream failed\n");
                  return 0;
          }
  
                  
          c = audio_st->codec;
          c->frame_size=1024; // For AAC mainly, samples per frame
          printf("[LavFormat] Bitrate %u\n",(audioheader->byterate*8)/1000);
          _audioFq=c->sample_rate = audioheader->frequency;
#if 0
           if(_type== MUXER_PSP && audioheader->encoding==WAV_AAC)
            {
                    _audioFq=c->sample_rate = audioheader->frequency/2;                 //_audioFq*=2; // SBR
             }
#endif
          
          switch(audioheader->encoding)
          {
                  case WAV_AC3: c->codec_id = CODEC_ID_AC3;break;
                  case WAV_MP2: c->codec_id = CODEC_ID_MP2;break;
                  case WAV_MP3:
  #warning FIXME : Probe deeper
                              c->frame_size=1152;
                              c->codec_id = CODEC_ID_MP3;
                              break;
                  case WAV_PCM: 
                                  // One chunk is 10 ms (1/100 of fq)
                                  c->frame_size=4;
                                  c->codec_id = CODEC_ID_PCM_S16LE;break;
                  case WAV_AAC: 
                                  c->extradata=audioextraData;
                                  c->extradata_size= audioextraSize;
                                  c->codec_id = CODEC_ID_AAC;
                                  break;
                  default:
                          if(_type==MUXER_MATROSKA)
                          {
                           if(ADM_WaveTag_to_lavcodec(audioheader->encoding, &(c->codec_id)))
                           {
                             if(audioextraData)
                             {
                                  c->extradata=audioextraData;
                                  c->extradata_size= audioextraSize;
                             }
                             // Put a dummy time increment
                              c->time_base= fps25;
                             break;
                           }
                          }
                            
                          printf("Cant mux that ! audio\n"); 
                          printf("Cant mux that ! audio\n");
                          c->codec_id = CODEC_ID_MP2;
                          return 0;
                          break;
          }
          c->codec_type = CODEC_TYPE_AUDIO;
          
          c->bit_rate = audioheader->byterate*8;
          c->rc_buffer_size=(c->bit_rate/(2*8)); // 500 ms worth
          
          c->channels = audioheader->channels;
          _audioByterate=audioheader->byterate;
          
        }
        // /audio
	
	
//----------------------
	switch(_type)
	{
				case MUXER_FLV:
                case MUXER_PSP:
                case MUXER_MP4:
                case MUXER_MATROSKA:
                        oc->mux_rate=10080*1000; // Needed ?
                        break;

                case MUXER_TS:
                        oc->mux_rate=10080*1000;
                        break;
		case MUXER_DVD:
			oc->packet_size=2048;
			oc->mux_rate=10080*1000;
			break;
		case MUXER_VCD:
			oc->packet_size=2324;
			oc->mux_rate=2352 * 75 * 8;
			
			break;
		case MUXER_SVCD:
			
			oc->packet_size=2324;
			oc->mux_rate=2*2352 * 75 * 8; // ?
			
			break;
		default:
			ADM_assert(0);
	}
	oc->preload=AV_TIME_BASE/10; // 100 ms preloading
	oc->max_delay=200*1000; // 200 ms
	
	if (av_set_parameters(oc, NULL) < 0) 
	{
		printf("Lav: set param failed \n");
		return 0;
	}
	 if (url_fopen(&(oc->pb), filename, URL_WRONLY) < 0) 
	 {
	 	printf("Lav: Failed to open file :%s\n",filename);
		return 0;
        }

	ADM_assert(av_write_header(oc)>=0);
	dump_format(oc, 0, filename, 1);


	printf("lavformat mpeg muxer initialized\n");
	
	_running=1;

	one=(1000*1000*1000)/_fps1000; 
	_curDTS=one;

	return 1;
}
Example #22
//TODO: handle error
SoundSourceFFmpeg::SoundSourceFFmpeg(QString qFilename) : SoundSource(qFilename)
{
    AVFormatParameters param;
    int i;
    QByteArray fname;

    packet.data = NULL;
    bufferOffset = 0;
    bufferSize = 0;
    memset(buffer, 0, AVCODEC_MAX_AUDIO_FRAME_SIZE);
    fname = qFilename.toLatin1();
    FFmpegInit();

    qDebug() << "New SoundSourceFFmpeg :" << fname;

    /* initialize param to something so av_open_input_file works for raw */
    memset(&param, 0, sizeof(AVFormatParameters));
    param.channels = 2;
    param.sample_rate = 44100;

    iformat = av_find_input_format(fname.constData());
    // Open audio file
    if(av_open_input_file(&pFormatCtx, fname.constData(), iformat, 0, &param)!=0) {
        qDebug() << "av_open_input_file: cannot open" << fname;
        return;
    }

    // Retrieve stream information
    if(av_find_stream_info(pFormatCtx)<0) {
        qDebug() << "av_find_stream_info: cannot open" << fname;
        return;
    }
    //debug only
    dump_format(pFormatCtx, 0, fname.constData(), false);

    qDebug() << "ffmpeg: using the first audio stream available";
    // Find the first audio stream
    audioStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO) {
            audioStream=i;
            break;
        }
    if(audioStream==-1) {
        qDebug() << "cannot find an audio stream: cannot open" << fname;
        return;
    }

    // Get a pointer to the codec context for the audio stream
    pCodecCtx=pFormatCtx->streams[audioStream]->codec;

    // Find the decoder for the audio stream
    if(!(pCodec=avcodec_find_decoder(pCodecCtx->codec_id))) {
        qDebug() << "cannot find a decoder for" << fname;
        return;
    }

    qDebug() << "ffmpeg: opening the audio codec";
    //avcodec_open is not thread safe
    lock();
    if(avcodec_open(pCodecCtx, pCodec)<0) {
        qDebug() << "avcodec: cannot open" << fname;
        return;
    }
    unlock();

    pFrame=avcodec_alloc_frame();
    channels = pCodecCtx->channels;
    SRATE = pCodecCtx->sample_rate;

    qDebug() << "Samplerate: " << SRATE << ", Channels: " << channels << "\n";
    if(channels > 2){
        qDebug() << "ffmpeg: No support for more than 2 channels!";
        return;
    }
    filelength = (long int) ((double)pFormatCtx->duration * 2 / AV_TIME_BASE * SRATE);

    qDebug() << "ffmpeg: filelength: " << filelength << "d -|- duration: " << pFormatCtx->duration << "ld -- starttime: " << pFormatCtx->streams[audioStream]->start_time << "ld -- " << AV_TIME_BASE << " " << pFormatCtx->streams[audioStream]->codec_info_duration << "ld";
}
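The constructor above walks pFormatCtx->streams by hand to find the first audio stream and then looks up the decoder separately. As a side note, not part of the listing above, newer libavformat releases provide av_find_best_stream(), which does both in one call. A minimal sketch, assuming such a release is available:

#include <libavformat/avformat.h>

// Sketch only (not in the listing above): pick the best audio stream and resolve its
// decoder in one call. Requires a libavformat recent enough to offer av_find_best_stream().
static int findAudioStream(AVFormatContext *fmt, AVCodec **decoderOut)
{
    int idx = av_find_best_stream(fmt, AVMEDIA_TYPE_AUDIO, -1, -1, decoderOut, 0);
    return (idx >= 0) ? idx : -1;   // -1 mirrors the "not found" convention used above
}

On success the returned decoder pointer can replace the separate avcodec_find_decoder() lookup.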
Ejemplo n.º 23
0
bool QVideoDecoder::openFile(QString filename)
{
   // Close last video..
   close();

   LastLastFrameTime=INT_MIN;       // Last last must be small to handle the seek well
   LastFrameTime=0;
   LastLastFrameNumber=INT_MIN;
   LastFrameNumber=0;
   DesiredFrameTime=DesiredFrameNumber=0;
   LastFrameOk=false;


   // Open video file
   if(av_open_input_file(&pFormatCtx, filename.toStdString().c_str(), NULL, 0, NULL)!=0)
       return false; // Couldn't open file

   // Retrieve stream information
   if(av_find_stream_info(pFormatCtx)<0)
       return false; // Couldn't find stream information

   // Dump information about file onto standard error
   dump_format(pFormatCtx, 0, filename.toStdString().c_str(), false);

   // Find the first video stream
   videoStream=-1;
   for(unsigned i=0; i<pFormatCtx->nb_streams; i++)
       if(pFormatCtx->streams[i]->codec->codec_type==ffmpeg::AVMEDIA_TYPE_VIDEO)
       {
           videoStream=i;
           break;
       }
   if(videoStream==-1)
       return false; // Didn't find a video stream

   // Get a pointer to the codec context for the video stream
   pCodecCtx=pFormatCtx->streams[videoStream]->codec;

   // Find the decoder for the video stream
   pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
   if(pCodec==NULL)
       return false; // Codec not found

   // Open codec
   if(avcodec_open(pCodecCtx, pCodec)<0)
       return false; // Could not open codec

   // Hack to correct wrong frame rates that seem to be generated by some
   // codecs
  if(pCodecCtx->time_base.num>1000 && pCodecCtx->time_base.den==1)
     pCodecCtx->time_base.den=1000;

   // Allocate video frame
   pFrame=ffmpeg::avcodec_alloc_frame();

   // Allocate an AVFrame structure
   pFrameRGB=ffmpeg::avcodec_alloc_frame();
   if(pFrameRGB==NULL)
       return false;

   // Determine required buffer size and allocate buffer
   numBytes=ffmpeg::avpicture_get_size(ffmpeg::PIX_FMT_RGB24, pCodecCtx->width,pCodecCtx->height);
   buffer=new uint8_t[numBytes];

   // Assign appropriate parts of buffer to image planes in pFrameRGB
   avpicture_fill((ffmpeg::AVPicture *)pFrameRGB, buffer, ffmpeg::PIX_FMT_RGB24,
       pCodecCtx->width, pCodecCtx->height);

   ok=true;
   return true;
}
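Most of the examples on this page use the pre-0.7 libavformat entry points (av_open_input_file, av_find_stream_info, dump_format, av_close_input_file), which were later deprecated and removed. For reference only, and assuming a reasonably current FFmpeg rather than the versions these listings target, the same open-inspect-dump sequence looks like this today:

#include <libavformat/avformat.h>

// Sketch: modern equivalents of av_open_input_file / av_find_stream_info /
// dump_format / av_close_input_file used throughout these examples.
static int open_and_dump(const char *filename)
{
    AVFormatContext *fmt = NULL;          // avformat_open_input allocates it for us

    if (avformat_open_input(&fmt, filename, NULL, NULL) != 0)
        return -1;                        // could not open file

    if (avformat_find_stream_info(fmt, NULL) < 0) {
        avformat_close_input(&fmt);
        return -1;                        // could not read stream information
    }

    av_dump_format(fmt, 0, filename, 0);  // same role as dump_format() above

    avformat_close_input(&fmt);           // also frees the context
    return 0;
}

On older releases av_register_all() is still required before this; very recent FFmpeg versions register formats automatically.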
Ejemplo n.º 24
0
int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFrame         *pFrame;
    AVPacket        packet;
    int             frameFinished;
    float           aspect_ratio;
    struct SwsContext *img_convert_ctx = NULL;

    SDL_Overlay     *bmp;
    SDL_Surface     *screen;
    SDL_Rect        rect;
    SDL_Event       event;

    if(argc < 2) {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }
    // Register all formats and codecs
    av_register_all();

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Open video file
    if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if(av_find_stream_info(pFormatCtx)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
            videoStream=i;
            break;
        }
    if(videoStream==-1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx=pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }

    // Open codec
    if(avcodec_open(pCodecCtx, pCodec)<0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame=avcodec_alloc_frame();

    // Make a screen to put our video
#ifndef __DARWIN__
    screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
    screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif
    if(!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }

    // Allocate a place to put our YUV image on that screen
    bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
                               pCodecCtx->height,
                               SDL_YV12_OVERLAY,
                               screen);


    // Read frames and display them with SDL
    i=0;
    while(av_read_frame(pFormatCtx, &packet)>=0) {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
                                  &packet);

            // Did we get a video frame?
            if(frameFinished) {
                SDL_LockYUVOverlay(bmp);

                AVPicture pict;
                // SDL's YV12 overlay stores its planes as Y, V, U while FFmpeg's
                // YUV420P frame is Y, U, V, so planes 1 and 2 are swapped here.
                pict.data[0] = bmp->pixels[0];
                pict.data[1] = bmp->pixels[2];
                pict.data[2] = bmp->pixels[1];

                pict.linesize[0] = bmp->pitches[0];
                pict.linesize[1] = bmp->pitches[2];
                pict.linesize[2] = bmp->pitches[1];

                // Convert the image into YUV format that SDL uses
                /*
                	img_convert(&pict, PIX_FMT_YUV420P,
                                    (AVPicture *)pFrame, pCodecCtx->pix_fmt,
                		    pCodecCtx->width, pCodecCtx->height);
                */
                enum PixelFormat dstFmt = PIX_FMT_YUV420P;

                // Reuse one scaler context instead of leaking a new one on every frame
                img_convert_ctx = sws_getCachedContext(img_convert_ctx,
                                                 pCodecCtx->width, pCodecCtx->height,
                                                 pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
                                                 dstFmt, SWS_BICUBIC, NULL, NULL, NULL);

                sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize,
                          0, pCodecCtx->height, pict.data, pict.linesize);

                /*
                	printf("*(pict.data[0]: %d\n", *(pict.data[0]));
                	printf("*(pict.data[1]: %d\n", *(pict.data[1]));
                	printf("*(pict.data[2]: %d\n", *(pict.data[2]));
                	printf("*(pict.data[3]: %d\n", *(pict.data[3]));
                	printf("linesize[0]: %d\n", pict.linesize[0]);
                	printf("linesize[1]: %d\n", pict.linesize[1]);
                	printf("linesize[2]: %d\n", pict.linesize[2]);
                	printf("linesize[3]: %d\n", pict.linesize[3]);
                	printf("width: %d\n", pCodecCtx->width);
                	printf("height: %d\n", pCodecCtx->height);
                */
                ++i;
                if(i>50)
                    if(i<=51) {
                        printf("frame 51\n");
                        if( *(pict.data[0]) == 20)
                            printf("frame 51, line 0, x=1, 20\n");
                    }

                SDL_UnlockYUVOverlay(bmp);

                rect.x = 0;
                rect.y = 0;
                rect.w = pCodecCtx->width;
                rect.h = pCodecCtx->height;
                SDL_DisplayYUVOverlay(bmp, &rect);
//SDL_Delay(1000);
//return 0;
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
        SDL_PollEvent(&event);
        switch(event.type) {
        case SDL_QUIT:
            SDL_Quit();
            exit(0);
            break;
        default:
            break;
        }

    }

    // Free the scaler context and the YUV frame
    sws_freeContext(img_convert_ctx);
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    av_close_input_file(pFormatCtx);

    return 0;
}
Ejemplo n.º 25
0
int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFrame         *pFrame;
    AVFrame         *pFrameRGB;
    AVPacket        packet;
    int             frameFinished;
    int             numBytes;
    uint8_t         *buffer;
    struct SwsContext *img_convert_ctx;

    if(argc < 2) {
        printf("Please provide a movie file\n");
        return -1;
    }
    // Register all formats and codecs
    av_register_all();

    // Open video file
    if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if(av_find_stream_info(pFormatCtx)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) {
            videoStream=i;
            break;
        }
    if(videoStream==-1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx=pFormatCtx->streams[videoStream]->codec;
    
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                     pCodecCtx->width, pCodecCtx->height, PIX_FMT_RGB24,
                                     SWS_BICUBIC, NULL, NULL, NULL);

    // Find the decoder for the video stream
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    // Open codec
    if(avcodec_open(pCodecCtx, pCodec)<0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame=avcodec_alloc_frame();

    // Allocate an AVFrame structure
    pFrameRGB=avcodec_alloc_frame();
    if(pFrameRGB==NULL)
        return -1;

    // Determine required buffer size and allocate buffer
    numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
                                pCodecCtx->height);
    buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
                   pCodecCtx->width, pCodecCtx->height);

    // Read frames and save first five frames to disk
    i=0;
    while(av_read_frame(pFormatCtx, &packet)>=0) {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream) {
            // Decode video frame
            avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
                                 packet.data, packet.size);

            // Did we get a video frame?
            if(frameFinished) {
                // Convert the image from its native format to RGB
                //img_convert((AVPicture *)pFrameRGB, PIX_FMT_RGB24, (AVPicture*)pFrame, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
                // Convert the image from its native format to RGB
                sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize,0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);

                // Save the frame to disk
                if(++i<=5)
                    SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height,
                              i);
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }

    // Free the Sw context
    sws_freeContext(img_convert_ctx);

    // Free the RGB image
    av_free(buffer);
    av_free(pFrameRGB);

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    av_close_input_file(pFormatCtx);

    return 0;
}
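The example above calls SaveFrame() without defining it. In the tutorial code this style of listing comes from, SaveFrame() is a small PPM writer; the following is a sketch of what it presumably looks like, not a copy of the original:

#include <stdio.h>
#include <libavcodec/avcodec.h>

// Sketch of the SaveFrame() helper the example relies on: dump one RGB24 frame
// to a binary PPM file named frameN.ppm.
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame)
{
    char szFilename[32];
    FILE *pFile;
    int y;

    sprintf(szFilename, "frame%d.ppm", iFrame);
    pFile = fopen(szFilename, "wb");
    if (pFile == NULL)
        return;

    // PPM header: magic, dimensions, maximum colour value
    fprintf(pFile, "P6\n%d %d\n255\n", width, height);

    // Write the pixel rows; linesize[0] may be larger than width*3 because of padding
    for (y = 0; y < height; y++)
        fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1, width * 3, pFile);

    fclose(pFile);
}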
Ejemplo n.º 26
0
__declspec(dllexport) char* createsub(char* filename, int cameratype, char token) {

	AVFormatContext *pFormatCtx;
  int strm;
  int videoStream;
  AVCodecContext *pCodecCtx;
  AVCodec *pCodec;
  AVFrame *pFrame;
  int frameFinished=1;
  AVPacket packet;
  uint8_t * ptr;
  int j;
  int tag, num_tags, i;
  int year, month, day, hour, minute, second, tz; 	// ..., timezone
  int latH, latD, latM, latS; 				// latitude hemi, deg, min, sec
  int lonE, lonD, lonM, lonS;               		// longitude "east", deg, min, sec
  int altS, altL, altD;                               	// altitude below/above sea, level, divisor
  int speed, speD, speU;				// speed, speed divisor, speed unit
  unsigned long duration = 0;
  char* bufferptr;   //this is the pointer to the actual results string

 // adding previous globals here

	int srt=-1;			// srt counter
	char srtT[256], srtTn[256];	// srt text: current and new
	int srtTi=0, srtTni=0;		// index in srtT and srtTn arrays for next addition
	time_t secsince1970, srtTsec=0;	    // Tricky: if new sub is one sec before current, ignore.
	struct tm t;			// to calculate srtTsec
	time_t start_time, end_time;	    // to measure the duration per file
	char SL;			// will be + for above and - for below sea level
	long int srtTimer;		// start time (in milliseconds) for current srtT
	int srtH, srtM, srtS, srtmS;	// Hour, Minute, Seconds, milli
	int foundgeo;
	int frm;			// frame counter
	int fps_den, fps_num;		// for the frame rate
	float fps;
	char fileout[1024] = "";	// output file name (never set in this library build)
	int alignct = 0; // added - used to keep track of the output string position

	//done with previous globals here


  fprintf(stderr, "Input file name to dll is %s\n", filename);

  av_register_all();


  /* allocate the media context */
  pFormatCtx = avformat_alloc_context();
  if (!pFormatCtx) {
      fprintf(stderr, "Memory error\n");
	  return ("memory_error");
  }

  // Open video file
  if(av_open_input_file(&pFormatCtx, filename, NULL, 0, NULL)!=0) {
    fprintf(stderr, "Cannot open file %s.\n", filename);
	return ("file_open_error");
  }

  //if (set_output_file(argv[1])) return -1;   //comment out for use in library

  // Retrieve stream information
  if(av_find_stream_info(pFormatCtx)<0) {
    fprintf(stderr, "Could not find stream information\n");
	return ("no_stream_info");
  }
  duration = pFormatCtx->duration;     //length of the clip in AV_TIME_BASE units (microseconds)
  duration = duration / 1000000 + 10;  //convert to seconds and add 10, just to be safe

  //bufferptr = (char*) malloc (duration * 64);  //allocate memory to store subtitle string based on duration. 64 bytes is longer than any subtitle section, but check on long movies!
  bufferptr = (char*) malloc (duration * 100);   //100 bytes per subtitle section should be sufficient.

  fprintf(stderr, "the duration var is %d\n",duration);
  // Dump information about file onto standard error:
  dump_format(pFormatCtx, 0, filename, 0);

  start_time = time (NULL);

  // Find the first video stream
  videoStream=-1;
  for(strm=0; strm<pFormatCtx->nb_streams; strm++)
    if(pFormatCtx->streams[strm]->codec->codec_type==CODEC_TYPE_VIDEO) {
      videoStream=strm;
	  //select the frame-rate source depending on the camera type
	  if (cameratype == 0) {
	  fps_num=pFormatCtx->streams[strm]->r_frame_rate.num;   //version 5
	  fps_den=pFormatCtx->streams[strm]->r_frame_rate.den;   //version 5
	  fps = (float) fps_num / fps_den;  //version 5
	  }
	  else if (cameratype == 1) {
		fps_num=pFormatCtx->streams[strm]->codec->time_base.num; //version 4 //seems to work
		fps_den=pFormatCtx->streams[strm]->codec->time_base.den; //version 4 //seems to work	  
		//fps = (float) fps_den / fps_num / 2.0;   //copied from version 4  //works but 2.0 wrong
		fps = (float) fps_den / fps_num;   //copied from version 4 but edited  //this seems to work!
	  }
	  else {
		  return ("camera_not_supported_error");
	  }
	 
      fprintf(stderr, "  Frame rate: %2.2f (%d/%d)\n", fps, fps_num, fps_den);
      break;
    }

  fprintf(stderr, "  Output file name: \'%s\'\n", fileout);

  if(videoStream==-1) {
    fprintf(stderr, "Did not find a video stream\n");
    //return -1; // Didn't find a video stream  //commented out for use in library
	return ("videostream_not_found_error");
  }

  // Get a pointer to the codec context for the video stream
  pCodecCtx=pFormatCtx->streams[videoStream]->codec;

  // Find the decoder for the video stream
  pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
  if(pCodec==NULL) {
    fprintf(stderr, "Unsupported codec!\n");
    //return -1; // Codec not found  //commented out for use in library
	return ("codec_not_found_error");
  }

  // Open codec
  if(avcodec_open(pCodecCtx, pCodec)<0) {
    fprintf(stderr, "Could not open codec\n");
    //return -1; // Could not open codec  //commented out for use in library
	return ("open_codec_error");
  }

  // Allocate video frame
  pFrame=avcodec_alloc_frame();

  frm=0;
  while(av_read_frame(pFormatCtx, &packet)>=0) {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {
   	  // Decode video frame

//      fprintf(stderr, "Packet size before decode: %0d\n", packet.size);    //used in version 4
//      avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,              //used in version 4
//                           packet.data, packet.size);
//      fprintf(stderr, "Packet size after decode: %0d\n", packet.size);    
//      Can we do this quicker? All we need is a correct value of frameFinished.
        

// For Sony, the meta data appears in the key frame, so we looked for it
// only in those: if ((packet.flags & PKT_FLAG_KEY) != 0) {...
// But for Panasonic this did not work. So now we just always do it:

      if (1) { 

/*          if ((packet.flags & PKT_FLAG_KEY) != 0) 
             fprintf(stderr, "Found a key frame, frame number %0d (finished: %d)\n", frm+1, frameFinished);
          else
             fprintf(stderr, "Found a non-key frame, frame number %0d (finished: %d)\n", frm+1, frameFinished); 
          av_pkt_dump(stderr, &packet, 1);
        av_hex_dump(stderr, packet.data, 48);
*/

// start looking for the message - assuming it's within the first 256 bytes of the keyframe

        year = -1; month = -1; day = -1; hour = -1; minute = -1; second = -1; tz = -1;
        latH = -1; latD = -1; latM = -1; latS = -1; 
        lonE = -1; lonD = -1; lonM = -1; lonS = -1;
        altS = -1; altL = -1; altD = -1;
        speed = -1; speD = -1; speU = -1;

        ptr = packet.data;
        j=0;
        while ((j<256) && (memcmp(ptr+j, avchd_mdpm, 20))) j++;
        if (j<256) {
//          fprintf(stderr, "Found the message at bytes %0d\n", j);
//          av_hex_dump(stderr, ptr+j, 80);

          /* Skip GUID + MDPM */
          ptr += j+20;

          num_tags = *ptr; ptr++;
          
          for(i = 0; i < num_tags; i++)
            {
            tag = *ptr; ptr++;

if (tag==0) {	// hack - hope it's right
  tag = *ptr; ptr++;
}

//            fprintf(stdout, "Tag: 0x%02x, Data: %02x %02x %02x %02x\n",
//                    tag, ptr[0], ptr[1], ptr[2], ptr[3]);

// For Panasonic, some tags appear twice in the message, screwing it up.
// Am probably not reading the message properly. Never happened for Sony.
// So now we only take the first, hence the additional checks on ==-1 below.
// It may also be needed for Geo info - need to have that tested.

            switch(tag)
              {
              case 0x18:
                if (year==-1) { tz=ptr[0];
                                year  = BCD_2_INT(ptr[1])*100 + BCD_2_INT(ptr[2]);
                                month = BCD_2_INT(ptr[3]);}
                break;
              case 0x19:
                if (day==-1) { day    = BCD_2_INT(ptr[0]); hour   = BCD_2_INT(ptr[1]);
                               minute = BCD_2_INT(ptr[2]); second = BCD_2_INT(ptr[3]);}
                break;
	      case 0xb1:
		latH   = (ptr[0]);
		break;
	      case 0xb2:
		latD   = (ptr[0])*256 + (ptr[1]);
                break;
              case 0xb3:
                latM   = (ptr[0])*256 + (ptr[1]);
                break;
              case 0xb4:
                latS   = (ptr[0])*256 + (ptr[1]);
                break;
	      case 0xb5:
		lonE   = (ptr[0]);
		break;
	      case 0xb6:
		lonD   = (ptr[0])*256 + (ptr[1]);
                break;
              case 0xb7:
                lonM   = (ptr[0])*256 + (ptr[1]);
                break;
              case 0xb8:
                lonS   = (ptr[0])*256 + (ptr[1]);
                break;
              case 0xb9:
                altS   = (ptr[0]);
                break;
              case 0xba:
                altL   = (ptr[0])*256 + (ptr[1]);
                altD   = (ptr[3]);
                break;
              case 0xc1:
                speU   = (ptr[0]);
                break;
              case 0xc2:
                speed  = (ptr[0])*256 + (ptr[1]);
                speD   = (ptr[3]);
                break;
              }
            ptr += 4;
            }
          
          srtTni=0;

          if((year >= 0) && (month >= 0) && (day >= 0) &&
             (hour >= 0) && (minute >= 0) && (second >= 0))
            {
              t.tm_year = year-1900;  t.tm_mon = month-1; t.tm_mday = day;
              t.tm_hour = hour;       t.tm_min = minute;  t.tm_sec = second;
              t.tm_isdst = 0;
              secsince1970 = mktime(&t);
			  

			  //here we edit the output format
              srtTni += snprintf(&(srtTn[srtTni]), 40, "%s %02d-%s-%04d %02d:%02d:%02d", //original code
                                weekday[t.tm_wday],                                     //original code
                                day, monthname[month-1], year, hour, minute, second);   //original code

			  //srtTni += snprintf(&(srtTn[srtTni]), 40, "%s %02d-%s-%04d", //edited code - stops short for some reason
              //                   weekday[t.tm_wday],                                     //edited code
              //                   day, monthname[month-1], year);   //edited code


              //if (tz<64) { // valid time zone value             //appears as time zone data - edited out
              //  if (tz<32) SL='+'; else SL='-';                 //edited out
              //  tz=tz%32;                                       //edited out
              //  srtTni += snprintf(&(srtTn[srtTni]), 30, " (%c%02d:%02d)",   //edited out and still works
              //                     SL, tz/2, 30*(tz%2));                     //edited out and still works
              //}                                                              //edited out and still works
              srtTni += snprintf(&(srtTn[srtTni]), 3, "\n");
            }
          foundgeo=0;
          if((latH >= 0) && (latD >= 0) && (latM >= 0) && (latS >= 0) && 
             (lonE >= 0) && (lonD >= 0) && (lonM >= 0) && (lonS >= 0))
            {
            foundgeo=1;
            srtTni += snprintf(&(srtTn[srtTni]), 60, "GPS: %0d %0d %0.2f %c %0d %0d %0.2f %c",
                               latD, latM, ((float)latS)/1000.0, latH,
                               lonD, lonM, ((float)lonS)/1000.0, lonE);
            }
          if ((altS >= 0) && (altL >= 0))
            {
            foundgeo=1;
            if (altS==0) SL='+'; else SL='-';          // Above + or Below - Sea level
            if (altD==0) altD = 10;			// stub - just assume, not sure
            if (altD==1) 
               srtTni += snprintf(&(srtTn[srtTni]), 30, " %c%0d m", SL, altL);
            else
               srtTni += snprintf(&(srtTn[srtTni]), 30, " %c%0.1f m", SL, ((float)altL)/(float)altD);
            }
          if ((speed>=0) && (speD>0) && (speU>0))
            {
            foundgeo=1;
            if (speD==1) 
               srtTni += snprintf(&(srtTn[srtTni]), 30, " %0d", speed);
            else
               srtTni += snprintf(&(srtTn[srtTni]), 30, " %0.1f", ((float)speed)/(float)speD);
            if (speU=='K')
               srtTni += snprintf(&(srtTn[srtTni]), 30, " %s", "km/h");
            else if (speU=='M')
               srtTni += snprintf(&(srtTn[srtTni]), 30, " %s", "mph");
            else if (speU=='N')
               srtTni += snprintf(&(srtTn[srtTni]), 30, " %s", "knots");
            }
          if (foundgeo) {
            srtTni += snprintf(&(srtTn[srtTni]), 30, "\n"); 
          }

          if ( (strcmp (srtT,srtTn) != 0) && ((srtTsec-secsince1970) != 1) ) {
            // The new subtitle differs from the current one and is not one second
            // older than it; in the latter case we assume B-frames and silently
            // ignore it for now.
            //if (srtTi) print_one_srt_entry(bufferptr, token);		// so print & clear the current one if it exists
			//going to try to add the print code in here to avoid use of globals - line above will be commented out
			if (srtTi) {

				srtH = (int) (srtTimer/3600000L); srtTimer -= 3600000L*(long int) srtH;
				srtM = (int) (srtTimer/60000L); srtTimer -= 60000L*(long int) srtM;
				srtS = (int) (srtTimer/1000L); srtmS = (int) (srtTimer-1000L*(long int)srtS);
				alignct += snprintf (&(bufferptr[alignct]), 256, "%0d\n", srt+1);   //added - print to storage buffer
				alignct += snprintf (&(bufferptr[alignct]), 256, "%02d:%02d:%02d,%03d --> ", srtH, srtM, srtS, srtmS);  //added - print to storage buffer

				srtTimer = (long int) (((long int)(frm)*1000L)/fps);
				srtH = (int) (srtTimer/3600000L); srtTimer -= 3600000L*(long int) srtH;
				srtM = (int) (srtTimer/60000L); srtTimer -= 60000L*(long int) srtM;
				srtS = (int) (srtTimer/1000L); srtmS = (int) (srtTimer-1000L*(long int)srtS);
				alignct += snprintf (&(bufferptr[alignct]), 256, "%02d:%02d:%02d,%03d\n", srtH, srtM, srtS, srtmS);   //added - print to storage buffer
				//fprintf(filesrt, "%s\n", srtT);  //comment out for use in library
				//alignct += snprintf (&(bufferptr[alignct]), 256, "%s\n", srtT);  //added - print to storage buffer - does not add separator character
				alignct += snprintf (&(bufferptr[alignct]), 256, "%s%c", srtT,token);  //added - print to storage buffer - adds the separator character after each subtitle
				srtT[0] = '\0'; srtTi = 0;	// reset the current subtitle text

			}
			//done adding print code



            if (srtTni) {				// and if there is a new one, make current
              srt++;
              srtTimer = (long int) (((long int)(frm)*1000L)/fps);
              srtTsec = secsince1970;
              srtTi = snprintf(srtT, 256, "%s", srtTn);
            }
          } // else if new subtitle is the same as previous, do nothing
        } else {
//          fprintf(stderr, "did not find the message\n");
        }
      }
      // Did we get a video frame?
      if(frameFinished) {
        frm++;
      }
    }
    
    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);
  }

  //if (srtTi>0) 
  //print_one_srt_entry(bufferptr, token);
  //going to try to add the print code in here to avoid use of globals - line above will be commented out

	if (srtTi) {

		srtH = (int) (srtTimer/3600000L); srtTimer -= 3600000L*(long int) srtH;
		srtM = (int) (srtTimer/60000L); srtTimer -= 60000L*(long int) srtM;
		srtS = (int) (srtTimer/1000L); srtmS = (int) (srtTimer-1000L*(long int)srtS);
		alignct += snprintf (&(bufferptr[alignct]), 256, "%0d\n", srt+1);   //added - print to storage buffer
		alignct += snprintf (&(bufferptr[alignct]), 256, "%02d:%02d:%02d,%03d --> ", srtH, srtM, srtS, srtmS);  //added - print to storage buffer
		srtTimer = (long int) (((long int)(frm)*1000L)/fps);
		srtH = (int) (srtTimer/3600000L); srtTimer -= 3600000L*(long int) srtH;
		srtM = (int) (srtTimer/60000L); srtTimer -= 60000L*(long int) srtM;
		srtS = (int) (srtTimer/1000L); srtmS = (int) (srtTimer-1000L*(long int)srtS);
		alignct += snprintf (&(bufferptr[alignct]), 256, "%02d:%02d:%02d,%03d\n", srtH, srtM, srtS, srtmS);   //added - print to storage buffer
		//fprintf(filesrt, "%s\n", srtT);  //comment out for use in library
		//alignct += snprintf (&(bufferptr[alignct]), 256, "%s\n", srtT);  //added - print to storage buffer - does not add separator character
		alignct += snprintf (&(bufferptr[alignct]), 256, "%s%c", srtT,token);  //added - print to storage buffer - adds the separator character after each subtitle
		srtT[0] = '\0'; srtTi = 0;	// reset the current subtitle text

	}
  //done adding

  fprintf(stderr, "  Read %d frames\n", frm);

  end_time = time (NULL);

  fprintf(stderr, "Processed in %0d seconds\n", (int)difftime(end_time, start_time));

  // Close the SRT output file
  //fclose(filesrt);   //comment out for use in library

  // Free the YUV frame
  av_free(pFrame);

  // Close the codec
  avcodec_close(pCodecCtx);

  // Close the video file
  av_close_input_file(pFormatCtx);

  return (bufferptr);

}
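createsub() relies on several helpers defined elsewhere in its project and not shown here: avchd_mdpm (the 20-byte GUID-plus-"MDPM" marker scanned for in every packet), BCD_2_INT, weekday and monthname. Typical definitions for the last three are sketched below as assumptions for illustration; the exact bytes of avchd_mdpm belong to the original project and are deliberately not guessed at here:

// Assumed helper definitions (not shown in the listing above):

// Convert one packed-BCD byte (e.g. 0x23) to its decimal value (23),
// matching the way the date/time tags are decoded in createsub().
#define BCD_2_INT(b)  ((((b) >> 4) & 0x0f) * 10 + ((b) & 0x0f))

// Name tables used when formatting the subtitle timestamp line
// (tm_wday is 0 for Sunday, month is indexed as month-1).
static const char *weekday[7] =
    { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" };

static const char *monthname[12] =
    { "Jan", "Feb", "Mar", "Apr", "May", "Jun",
      "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" };

// avchd_mdpm is the 20-byte MDPM marker (16-byte GUID followed by "MDPM");
// its exact byte values come from the original project and are not reproduced here.
extern const unsigned char avchd_mdpm[20];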
Ejemplo n.º 27
0
uint8_t lavMuxer::open(const char *filename,uint32_t inbitrate, ADM_MUXER_TYPE type, aviInfo *info,uint32_t videoExtraDataSize,
                        uint8_t *videoExtraData, WAVHeader *audioheader,uint32_t audioextraSize,uint8_t *audioextraData)
{
 AVCodecContext *c;
 	_type=type;
	_fps1000=info->fps1000;
	switch(_type)
	{
                case MUXER_TS:
                        fmt=guess_format("mpegts", NULL, NULL);
                        break;
		case MUXER_DVD:
			fmt = guess_format("dvd", NULL, NULL);
			break;
		case MUXER_VCD:
			fmt = guess_format("vcd", NULL, NULL);
			break;
		case MUXER_SVCD:
			fmt = guess_format("svcd", NULL, NULL);
			break;
                case MUXER_MP4:
                        fmt = guess_format("mp4", NULL, NULL);
                        break;
                case MUXER_PSP:
                        fmt = guess_format("psp", NULL, NULL);
                        break;                        
		default:
			fmt=NULL;
	}
	if (!fmt) 
	{
        	printf("Lav:Cannot guess format\n");
		return 0;
	}
	oc = av_alloc_format_context();
	if (!oc) 
	{
       		printf("Lav:Cannot allocate context\n");
		return 0;
	}
	oc->oformat = fmt;
	snprintf(oc->filename,1000,"file://%s",filename);
	// Video
	//________
	
	video_st = av_new_stream(oc, 0);
	if (!video_st) 
	{
		printf("Lav: new stream failed\n");
		return 0;
	}	
	
	c = video_st->codec;
	switch(_type)
	{
                case MUXER_MP4:
                        if(isMpeg4Compatible(info->fcc))
                        {
                                c->codec_id = CODEC_ID_MPEG4;
                                c->has_b_frames=1; // in doubt...
                        }else
                        {
                                if(isH264Compatible(info->fcc))
                                {
                                        c->has_b_frames=1; // in doubt...
                                        c->codec_id = CODEC_ID_H264;
                                        c->codec=new AVCodec;
                                        memset(c->codec,0,sizeof(AVCodec));
                                        c->codec->name=ADM_strdup("H264");
                                }
                                else
                                {
                                         c->codec_id = CODEC_ID_MPEG4; // Default value
                                        printf("Oops, can't mux that video codec...\n");
                                        //return 0;
                                }
                        }
                        if(videoExtraDataSize)
                        {
                                c->extradata=videoExtraData;
                                c->extradata_size= videoExtraDataSize;
                        }
                        c->rc_buffer_size=8*1024*224;
                        c->rc_max_rate=9500*1000;
                        c->rc_min_rate=0;
                        if(!inbitrate)
                                c->bit_rate=9000*1000;
                        else
                                c->bit_rate=inbitrate;
        
                        break;
                case MUXER_TS:
                        c->codec_id = CODEC_ID_MPEG2VIDEO;
                        c->rc_buffer_size=8*1024*224;
                        c->rc_max_rate=9500*1000;
                        c->rc_min_rate=0;
                        if(!inbitrate)
                                c->bit_rate=9000*1000;
                        else
                                c->bit_rate=inbitrate;
        
                        break;
		case MUXER_DVD:
			c->codec_id = CODEC_ID_MPEG2VIDEO;
			c->rc_buffer_size=8*1024*224;
			c->rc_max_rate=9500*1000;
			c->rc_min_rate=0;
			if(!inbitrate)
				c->bit_rate=9000*1000;
			else
				c->bit_rate=inbitrate;
	
			break;
		case MUXER_VCD:
			c->codec_id = CODEC_ID_MPEG1VIDEO;

			c->rc_buffer_size=8*1024*40;
			c->rc_max_rate=1152*1000;
			c->rc_min_rate=1152*1000;
			
			c->bit_rate=1152*1000;
			

			break;
		case MUXER_SVCD:
			c->codec_id = CODEC_ID_MPEG2VIDEO;

			c->rc_buffer_size=8*1024*112;
			c->rc_max_rate=2500*1000;
			c->rc_min_rate=0*1000;
			if(!inbitrate)
				c->bit_rate=2040*1000;
			else
				c->bit_rate=inbitrate;

			break;
		default:
			ADM_assert(0);
	}
	
	c->codec_type = CODEC_TYPE_VIDEO;
	c->flags=CODEC_FLAG_QSCALE;   
	c->width = info->width;  
	c->height = info->height; 
	
    	switch(_fps1000)
	{
		case 25000:
			 c->time_base= (AVRational){1001,25025};
			//c->frame_rate = 25025;  
			//c->frame_rate_base = 1001;	
			break;
		case 23976:
/*
			c->frame_rate = 24000;  
			c->frame_rate_base = 1001;	
			break;
*/
                        if(_type==MUXER_MP4)
                        {
                                 c->time_base= (AVRational){1001,24000};
                                break;
                        }
                        // no break: for non-MP4 muxers, 23.976 fps falls through to the 29.97 time base
		case  29970:
			 c->time_base= (AVRational){1001,30000};
			//c->frame_rate = 30000;  
			//c->frame_rate_base = 1001;	
			break;
		default:
                        if(_type==MUXER_MP4)
                        {
                                c->time_base= (AVRational){1000,_fps1000};
                                break;
                        }
                        else
                        {
                          GUI_Error_HIG(_("Incompatible frame rate"), NULL);
                            return 0;
                        }
	}

			
	c->gop_size=15;
	c->max_b_frames=2;
	c->has_b_frames=1;

	
	// Audio
	//________
        if(audioheader)
        {
	audio_st = av_new_stream(oc, 1);
	if (!audio_st) 
	{
		printf("Lav: new stream failed\n");
		return 0;
	}

		
	c = audio_st->codec;
        c->frame_size=1024; //For AAC mainly, sample per frame
        switch(audioheader->encoding)
        {
                case WAV_AC3: c->codec_id = CODEC_ID_AC3;break;
                case WAV_MP2: c->codec_id = CODEC_ID_MP2;break;
                case WAV_MP3:
#warning FIXME : Probe deeper
                            c->frame_size=1152;
                            c->codec_id = CODEC_ID_MP3;
                            break;
                case WAV_PCM: 
                                // One chunk is 10 ms (1/100 of fq)
                                c->frame_size=4;
                                c->codec_id = CODEC_ID_PCM_S16LE;break;
                case WAV_AAC: 
                                c->extradata=audioextraData;
                                c->extradata_size= audioextraSize;
                                c->codec_id = CODEC_ID_AAC;
                                break;
                default:
                        printf("Cant mux that ! audio\n"); 
                        printf("Cant mux that ! audio\n");
                        c->codec_id = CODEC_ID_MP2;
                        return 0;
                        break;
        }
	c->codec_type = CODEC_TYPE_AUDIO;
	
	c->bit_rate = audioheader->byterate*8;
        c->rc_buffer_size=(c->bit_rate/(2*8)); // 500 ms worth
	_audioFq=c->sample_rate = audioheader->frequency;
	c->channels = audioheader->channels;
        _audioByterate=audioheader->byterate;
        }
        // /audio
	
	
//----------------------
	switch(_type)
	{
                case MUXER_MP4:
                        oc->mux_rate=10080*1000; // Needed ?
                        break;

                case MUXER_TS:
                        oc->mux_rate=10080*1000;
                        break;
		case MUXER_DVD:
			oc->packet_size=2048;
			oc->mux_rate=10080*1000;
			break;
		case MUXER_VCD:
			oc->packet_size=2324;
			oc->mux_rate=2352 * 75 * 8;
			
			break;
		case MUXER_SVCD:
			
			oc->packet_size=2324;
			oc->mux_rate=2*2352 * 75 * 8; // ?
			
			break;
		default:
			ADM_assert(0);
	}
	oc->preload=AV_TIME_BASE/10; // 100 ms preloading
	oc->max_delay=200*1000; // 200 ms
	
	if (av_set_parameters(oc, NULL) < 0) 
	{
		printf("Lav: set param failed \n");
		return 0;
	}
	if (url_fopen(&(oc->pb), filename, URL_WRONLY) < 0)
	{
		printf("Lav: Failed to open file :%s\n",filename);
		return 0;
	}

	if (av_write_header(oc) < 0)
	{
		printf("Lav: av_write_header failed\n");
		return 0;
	}
	dump_format(oc, 0, filename, 1);


	printf("lavformat mpeg muxer initialized\n");
	
	_running=1;

	one=(1000*1000*1000)/_fps1000; // frame duration in AV_TIME_BASE units: 1e9/(fps*1000) = 1e6/fps microseconds
	_curDTS=one;

	return 1;
}
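On the muxing side, this example (like the MPEG muxer earlier on the page) sits on the old guess_format / av_alloc_format_context / av_set_parameters / url_fopen / av_write_header calls. A sketch of the equivalent setup against the current libavformat API follows; it assumes a recent FFmpeg and is not part of the listing above:

#include <libavformat/avformat.h>

// Sketch: modern counterparts of the muxer setup above.
static int open_muxer(const char *filename, AVFormatContext **out)
{
    AVFormatContext *oc = NULL;

    // Pick the container from the file name (e.g. "out.mpg", "out.mp4")
    if (avformat_alloc_output_context2(&oc, NULL, NULL, filename) < 0 || !oc)
        return -1;

    // ... create streams with avformat_new_stream() and fill in their codec
    //     parameters here, as the example does with av_new_stream() ...

    // Open the output file unless the format writes nowhere (AVFMT_NOFILE)
    if (!(oc->oformat->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            avformat_free_context(oc);
            return -1;
        }
    }

    // Replaces av_set_parameters() + av_write_header()
    if (avformat_write_header(oc, NULL) < 0) {
        if (!(oc->oformat->flags & AVFMT_NOFILE))
            avio_closep(&oc->pb);
        avformat_free_context(oc);
        return -1;
    }

    av_dump_format(oc, 0, filename, 1);   // same role as dump_format() above
    *out = oc;
    return 0;
}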
Ejemplo n.º 28
0
__declspec(dllexport)  char * getmetadata(char* filename, int cameratype, int numberoftags, int metaframelength, int numberofframes, metaframe* allmetadata)     //unsigned char* metadataarray)
	{


		//int cameratype = 1;
		AVFormatContext *pFormatCtx;
		int strm;
		int videoStream;
		AVCodecContext *pCodecCtx;
		AVCodec *pCodec;
		AVFrame *pFrame;
		int frameFinished=1;
		AVPacket packet;
		uint8_t * ptr;
		int j;
		int tag, num_tags, i;
		int year, month, day, hour, minute, second, tz; 	// ..., timezone
		int latH, latD, latM, latS; 				// latitude hemi, deg, min, sec
		int lonE, lonD, lonM, lonS;               		// longitude "east", deg, min, sec
		int altS, altL, altD;                               	// altitude below/above sea, level, divisor
		int speed, speD, speU;				// speed, speed divisor, speed unit
		unsigned long duration = 0;
		char* bufferptr;   //this is the pointer to the actual results string

		int srt=-1;			// srt counter
		char srtT[256], srtTn[256];	// srt text: current and new
		int srtTi=0, srtTni=0;		// index in srtT and srtTn arrays for next addition
		time_t secsince1970, srtTsec=0;	    // Tricky: if new sub is one sec before current, ignore.
		struct tm t;			// to calculate srtTsec
		time_t start_time, end_time;	    // to measure the duration per file
		char SL;			// will be + for above and - for below sea level
		long int srtTimer;		// start time (in milliseconds) for current srtT
		int srtH, srtM, srtS, srtmS;	// Hour, Minute, Seconds, milli
		int foundgeo;
		int frm;			// frame counter
		int fps_den, fps_num;		// for the frame rate
		float fps;
		char fileout[1024] = "";	// output file name (never set in this library build)
		int alignct = 0; // added - used to keep track of the output string position
		int exceededtags = 0;
		int exceededframes = 0;
		int p_flags = 0;
		long p_pts = 0;
		long p_dts = 0;
		long p_pos = 0;
		int p_dur = 0;

		fprintf(stderr, "Input file name to dll is %s\n", filename);

		av_register_all();


		/* allocate the media context */
		pFormatCtx = avformat_alloc_context();
		if (!pFormatCtx) {
			fprintf(stderr, "Memory error\n");
			return ("memory_error");
		}

		// Open video file
		if(av_open_input_file(&pFormatCtx, filename, NULL, 0, NULL)!=0) {
			fprintf(stderr, "Cannot open file %s.\n", filename);
			return ("file_open_error");
		}

	  //if (set_output_file(argv[1])) return -1;   //comment out for use in library

		// Retrieve stream information
		if(av_find_stream_info(pFormatCtx)<0) {
			fprintf(stderr, "Could not find stream information\n");
			return ("no_stream_info");
		}
		duration = pFormatCtx->duration;     //length of the clip in AV_TIME_BASE units (microseconds)
		duration = duration / 1000000 + 10;  //convert to seconds and add 10, just to be safe

		//bufferptr = (char*) malloc (duration * 64);  //allocate memory to store subtitle string based on duration. 64 bytes is longer than any subtitle section, but check on long movies!
		//bufferptr = (char*) malloc (duration * 100);   //100 bytes per subtitle section should be sufficient.

		fprintf(stderr, "the duration var is %d\n",duration);
		// Dump information about file onto standard error:
		dump_format(pFormatCtx, 0, filename, 0);

		start_time = time (NULL);

		// Find the first video stream
		videoStream=-1;
		for(strm=0; strm<pFormatCtx->nb_streams; strm++)
			if(pFormatCtx->streams[strm]->codec->codec_type==CODEC_TYPE_VIDEO) {
			videoStream=strm;
			//select the frame-rate source depending on the camera type
			if (cameratype == 0) {
			fps_num=pFormatCtx->streams[strm]->r_frame_rate.num;   //version 5
			fps_den=pFormatCtx->streams[strm]->r_frame_rate.den;   //version 5
			fps = (float) fps_num / fps_den;  //version 5
			}
			else if (cameratype == 1) {
				fps_num=pFormatCtx->streams[strm]->codec->time_base.num; //version 4 //seems to work
				fps_den=pFormatCtx->streams[strm]->codec->time_base.den; //version 4 //seems to work	  
				//fps = (float) fps_den / fps_num / 2.0;   //copied from version 4  //works but 2.0 wrong
				fps = (float) fps_den / fps_num;   //copied from version 4 but edited  //this seems to work!
			}
			else {
				return ("camera_not_supported_error");
			}
	 
			fprintf(stderr, "  Frame rate: %2.2f (%d/%d)\n", fps, fps_num, fps_den);
			break;
			}

		fprintf(stderr, "  Output file name: \'%s\'\n", fileout);

		if(videoStream==-1) {
			fprintf(stderr, "Did not find a video stream\n");
			//return -1; // Didn't find a video stream  //commented out for use in library
			return ("videostream_not_found_error");
		}

		// Get a pointer to the codec context for the video stream
		pCodecCtx=pFormatCtx->streams[videoStream]->codec;

		// Find the decoder for the video stream
		pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
		if(pCodec==NULL) {
			fprintf(stderr, "Unsupported codec!\n");
			//return -1; // Codec not found  //commented out for use in library
			return ("codec_not_found_error");
		}

		// Open codec
		if(avcodec_open(pCodecCtx, pCodec)<0) {
			fprintf(stderr, "Could not open codec\n");
			//return -1; // Could not open codec  //commented out for use in library
			return ("open_codec_error");
		}

		// Allocate video frame
			pFrame=avcodec_alloc_frame();
		
		//missing tons of stuff between here:
		//start by grabbing metadata from first frame
		frm=0;
		while (av_read_frame(pFormatCtx, &packet)>=0)  {

			//frm++;
			if (frm > numberofframes) {
				exceededframes = 1;
				break;
				//return ("file_exceeded_frames_allocated");
			}

				//Is this a packet from the video stream?
			if(packet.stream_index==videoStream) {
				
				ptr = packet.data;
				p_flags = packet.flags;
				p_pts = packet.pts;
				p_dts = packet.dts;
				p_pos = packet.pos;
				p_dur = packet.duration;

				j=0;
				while ((j<256) && (memcmp(ptr+j, avchd_mdpm, 20))) j++;
				if ((j<256)&&(p_dur > 0)) {
					//fprintf(stderr, "Found the message at bytes %0d\n", j);
					//av_hex_dump(stderr, ptr+j, 80);

					/* Skip GUID + MDPM */
					frm++;
					ptr += j+20;

					num_tags = *ptr; ptr++;
					if (num_tags > numberoftags) {
						exceededtags = 1;
					}
					//*numberoftags = num_tags;
					//*metaframelength = num_tags * 5;
						//need to allocate storage for frame here using number of tags plus tag length
						//*framearray = (unsigned char *) malloc (num_tags*5+1);
						/*for(i = 0; i < num_tags; i++)
							{
								tag = *ptr; ptr++;

								if (tag==0) {	// hack - hope it's right
									tag = *ptr; ptr++;
								}

							}*/

					//ptr is now positioned at the proper place on the metadata, ready to add read code.
					//memcpy (metadataarray,ptr,(num_tags*5));
					//return ("before use");
					allmetadata[frm-1].frameno = frm;
					allmetadata[frm-1].p_flags = p_flags;
					allmetadata[frm-1].p_pts = p_pts;
					allmetadata[frm-1].p_dts = p_dts;
					allmetadata[frm-1].p_pos = p_pos;
					allmetadata[frm-1].p_dur = p_dur;
					//return ("after set frameno"); //gets to here, so what gives?
					//allmetadata[frm-1].framedata = (unsigned char *)malloc (metaframelength);
					memcpy (allmetadata[frm-1].framedata,ptr,(metaframelength));   //all the trouble comes down to this. The frameno works ok.
					//memcpy (allmetadata[frm-1].framedata,ptr,2);
					//return ("after first copy");
				}
					
			}
		av_free_packet(&packet);
		}
		//av_free_packet(&packet);
		av_free(pFrame);
		avcodec_close(pCodecCtx);
		av_close_input_file (pFormatCtx);
		if (exceededtags == 1)
		{
			return ("exceeded_tags_metadata_copied");
		}
		if (exceededframes == 1)
		{
			return ("exceeded_framenumber_metadata_copied");
		}
		return ("metadata_copied");
	}
Ejemplo n.º 29
0
int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx;
    int             i, videoStream, audioStream;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFrame         *pFrame;
    AVPacket        packet;
    int             frameFinished;
    float           aspect_ratio;
    struct SwsContext *img_convert_ctx;

    AVCodecContext  *aCodecCtx;
    AVCodec         *aCodec;

    SDL_Overlay     *bmp;
    SDL_Surface     *screen;
    SDL_Rect        rect;
    SDL_Event       event;
    SDL_AudioSpec   wanted_spec, spec;

    if(argc < 2) {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }
    // Register all formats and codecs
    av_register_all();

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Open video file
    if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if(av_find_stream_info(pFormatCtx)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream=-1;
    audioStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++) {
        if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO &&
                videoStream < 0) {
            videoStream=i;
        }
        if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO &&
                audioStream < 0) {
            audioStream=i;
        }
    }
    if(videoStream==-1)
        return -1; // Didn't find a video stream
    if(audioStream==-1)
        return -1;

    aCodecCtx=pFormatCtx->streams[audioStream]->codec;
    // Set audio settings from codec info
    wanted_spec.freq = aCodecCtx->sample_rate;
    wanted_spec.format = AUDIO_S16SYS;
    wanted_spec.channels = aCodecCtx->channels;
    wanted_spec.silence = 0;
    wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
    wanted_spec.callback = audio_callback;
    wanted_spec.userdata = aCodecCtx;

    if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
        fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
        return -1;
    }
    aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
    if(!aCodec) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1;
    }
    
    if (avcodec_open(aCodecCtx, aCodec) < 0) {
		fprintf(stderr, "Cannot open audio codec!\n");
		return -1;
	}

    // audio_st = pFormatCtx->streams[index]
    packet_queue_init(&audioq);
    SDL_PauseAudio(0);

    // Get a pointer to the codec context for the video stream
    pCodecCtx=pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    // Open codec
    if(avcodec_open(pCodecCtx, pCodec)<0) {
		fprintf(stderr, "Cannot open video codec!\n");
        return -1; // Could not open codec
	}
	
	// construct the scaler context, converting to PIX_FMT_YUV420P
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                     pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P,
                                     SWS_BICUBIC, NULL, NULL, NULL);
	if (img_convert_ctx == NULL) {
		fprintf(stderr, "Cannot initialize the conversion context!\n");
		return -1;
	}

    // Allocate video frame
    pFrame=avcodec_alloc_frame();

    // Make a screen to put our video

#ifndef __DARWIN__
    screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
    screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif
    if(!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }

    // Allocate a place to put our YUV image on that screen
    bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
                               pCodecCtx->height,
                               SDL_YV12_OVERLAY,
                               screen);


    // Read frames: queue audio packets and display decoded video frames
    i=0;
    while(av_read_frame(pFormatCtx, &packet)>=0) {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream) {
            // Decode video frame
            avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
                                 packet.data, packet.size);

            // Did we get a video frame?
            if(frameFinished) {
                SDL_LockYUVOverlay(bmp);

                AVPicture pict;
                pict.data[0] = bmp->pixels[0];
                pict.data[1] = bmp->pixels[2];
                pict.data[2] = bmp->pixels[1];

                pict.linesize[0] = bmp->pitches[0];
                pict.linesize[1] = bmp->pitches[2];
                pict.linesize[2] = bmp->pitches[1];

                // Convert the image into YUV format that SDL uses
                /*
                img_convert(&pict, PIX_FMT_YUV420P,
                            (AVPicture *)pFrame, pCodecCtx->pix_fmt,
                            pCodecCtx->width, pCodecCtx->height);
				*/
				sws_scale(img_convert_ctx, (const uint8_t * const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pict.data, pict.linesize);
				
                SDL_UnlockYUVOverlay(bmp);

                rect.x = 0;
                rect.y = 0;
                rect.w = pCodecCtx->width;
                rect.h = pCodecCtx->height;
                SDL_DisplayYUVOverlay(bmp, &rect);
            }
            // Free the video packet whether or not a complete frame was decoded
            av_free_packet(&packet);
        } else if(packet.stream_index==audioStream) {
            packet_queue_put(&audioq, &packet);
        } else {
            av_free_packet(&packet);
        }
        // Poll for SDL events so the window stays responsive
        SDL_PollEvent(&event);
        switch(event.type) {
        case SDL_QUIT:
            quit = 1;
            SDL_Quit();
            exit(0);
            break;
        default:
            break;
        }

    }
    
    sws_freeContext(img_convert_ctx);

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    av_close_input_file(pFormatCtx);

    return 0;
}
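The player above depends on audio_callback(), packet_queue_init(), packet_queue_put() and the globals audioq and quit, none of which appear in the listing. One common shape for that packet queue, sketched here as an assumption rather than the original code, is a linked list of AVPacketList nodes guarded by an SDL mutex and condition variable:

#include <string.h>
#include <libavformat/avformat.h>
#include <SDL.h>
#include <SDL_thread.h>

// Assumed PacketQueue used by the example above.
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;                 // total payload bytes currently queued
    SDL_mutex *mutex;
    SDL_cond  *cond;
} PacketQueue;

void packet_queue_init(PacketQueue *q) {
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond  = SDL_CreateCond();
}

int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
    AVPacketList *node;

    if (av_dup_packet(pkt) < 0)        // make sure the packet owns its own buffer
        return -1;
    node = av_malloc(sizeof(AVPacketList));
    if (!node)
        return -1;
    node->pkt = *pkt;                  // the queue takes ownership of the packet
    node->next = NULL;

    SDL_LockMutex(q->mutex);
    if (!q->last_pkt)
        q->first_pkt = node;
    else
        q->last_pkt->next = node;
    q->last_pkt = node;
    q->nb_packets++;
    q->size += node->pkt.size;
    SDL_CondSignal(q->cond);           // wake a reader blocked in packet_queue_get()
    SDL_UnlockMutex(q->mutex);
    return 0;
}

The audio callback would drain the queue with a matching packet_queue_get() that waits on the same condition variable and checks the quit flag before blocking.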
Ejemplo n.º 30
0
void create_video_file(const char*filename,int width,int height)
{
/* auto detect the output format from the name. default is
       mpeg. */
    //fmt = av_guess_format(NULL, filename, NULL);

#if (LIBAVFORMAT_VERSION_INT>=AV_VERSION_INT(52,81,0))
	#define libavformat_guess_format av_guess_format
#else
	#define libavformat_guess_format guess_format
#endif

	fmt = libavformat_guess_format(NULL, filename, NULL);

	if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        //fmt = av_guess_format("mpeg", NULL, NULL);
        fmt = libavformat_guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        exit(1);
    }

    /* allocate the output media context */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        exit(1);
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* add the audio and video streams using the default format codecs
       and initialize the codecs */
    video_st = NULL;

    if (fmt->video_codec != CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt->video_codec,width,height);
    }

    /* set the output parameters (must be done even if no
       parameters). */
    if (av_set_parameters(oc, NULL) < 0) {
        fprintf(stderr, "Invalid output format parameters\n");
        exit(1);
    }

    dump_format(oc, 0, filename, 1);

    /* now that all the parameters are set, we can open the audio and
       video codecs and allocate the necessary encode buffers */
    if (video_st)
        open_video(oc, video_st);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            exit(1);
        }
    }

    /* write the stream header, if any */
    av_write_header(oc);
 }
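Finally, create_video_file() references add_video_stream() and open_video(), which are also not shown. Against the same old API the function already uses, add_video_stream() is usually a short routine of the following shape (a sketch modelled on the stock libavformat output example; the bit rate, frame rate and GOP size are placeholders, not values from the original):

// Sketch of the add_video_stream() helper referenced above (old libavformat API,
// matching the av_set_parameters()/av_write_header() calls in create_video_file()).
static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id,
                                  int width, int height)
{
    AVStream *st = av_new_stream(oc, 0);
    AVCodecContext *c;

    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = st->codec;
    c->codec_id   = codec_id;
    c->codec_type = CODEC_TYPE_VIDEO;

    c->bit_rate = 400000;            // placeholder bit rate
    c->width    = width;
    c->height   = height;
    c->time_base.num = 1;            // placeholder frame rate: 25 fps
    c->time_base.den = 25;
    c->gop_size = 12;                // at most one intra frame every 12 frames
    c->pix_fmt  = PIX_FMT_YUV420P;

    // Some formats want stream headers to be separate
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}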