Beispiel #1
0
// Open a media file for audio playback.
//
// Tears down any previously opened file, demuxes `file` with the legacy
// av_open_input_file() API, picks the first audio stream, opens a decoder
// for it and allocates the decode frame plus the PCM output buffer.
//
// Returns true on success.  On every failure path a debug message names
// the problem and close() releases whatever was partially acquired.
bool FFMpegDecoder::open(const char *file)
{
  close();

  m_position = 0;

  if (av_open_input_file(&m_format, file, 0, 0, 0) != 0) {
    debug("couldn't open media file for playback: %s\n", file);
    m_format = 0;
    return false;
  }

  debug("opened %s (%s)\n", file, m_format->iformat->long_name);  

  m_file_size = File::size(file);

  // Use the first audio stream found; any additional audio streams are
  // ignored.
  for (int i=0; i < m_format->nb_streams; i++) {
    if (m_format->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO) {
      m_audio_stream = m_format->streams[i];
      break;
    }
  }
  if (!m_audio_stream) {
    debug("no audio streams found in %s\n", file);
    close();
    return false;
  }

  m_codec_ctx = m_audio_stream->codec;
  m_codec = avcodec_find_decoder(m_codec_ctx->codec_id);
  if (!m_codec) {
    debug("couldn't find a decoder for %s\n", file); 
    close();
    return false;
  }

  if (avcodec_open(m_codec_ctx, m_codec) < 0) {
    debug("couldn't open codec: %s\n", m_codec->name);
    close();
    return false;
  }

  // Only override the member defaults when the codec actually reports a
  // rate/channel count (some codecs leave these at 0 until decoding starts).
  if (m_codec_ctx->sample_rate) m_rate = m_codec_ctx->sample_rate;
  if (m_codec_ctx->channels) m_channels = m_codec_ctx->channels;

  m_frame = avcodec_alloc_frame();
  if (!m_frame) {
    debug("couldn't allocate frame while opening %s\n", file);
    close();
    return false;
  }

  // Worst-case buffer for one decoded audio frame, as required by the old
  // avcodec_decode_audio* API.
  m_buffer = (unsigned char *)av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
  if (!m_buffer) {
    debug("couldn't allocate playback buffer while opening %s\n", file);
    m_buffer_size = 0;
    close();
    return false;
  }
  m_buffer_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;

  return true;
}
Beispiel #2
0
/* Encode one picture as JPEG and write it to the byte stream.
 *
 * Only the JPEG-range planar YUV formats are accepted; any other input
 * format fails.  Returns 0 on success, -1 on any error.  All resources
 * (codec context, frame, output buffer) are released on every path via
 * the goto cleanup chain, so partial failures do not leak.
 */
static int jpeg_write(ByteIOContext *pb, AVImageInfo *info)
{
    AVCodecContext *c;
    uint8_t *outbuf = NULL;
    int outbuf_size, ret, size, i;
    AVFrame *picture;

    ret = -1;
    c = avcodec_alloc_context();
    if (!c)
        return -1;
    picture = avcodec_alloc_frame();
    if (!picture)
        goto fail2;
    c->width = info->width;
    c->height = info->height;
    /* XXX: currently move that to the codec ? */
    switch(info->pix_fmt) {
    case PIX_FMT_YUVJ420P:
        c->pix_fmt = PIX_FMT_YUV420P;
        break;
    case PIX_FMT_YUVJ422P:
        c->pix_fmt = PIX_FMT_YUV422P;
        break;
    case PIX_FMT_YUVJ444P:
        c->pix_fmt = PIX_FMT_YUV444P;
        break;
    default:
        goto fail1;
    }
    /* The frame only references the caller's planes; nothing is copied. */
    for(i=0;i<3;i++) {
        picture->data[i] = info->pict.data[i];
        picture->linesize[i] = info->pict.linesize[i];
    }
    /* set the quality */
    picture->quality = 3; /* XXX: a parameter should be used */
    c->flags |= CODEC_FLAG_QSCALE;

    if (avcodec_open(c, &mjpeg_encoder) < 0)
        goto fail1;

    /* XXX: needs to sort out that size problem */
    outbuf_size = 1000000;
    outbuf = av_malloc(outbuf_size);
    if (!outbuf)
        goto fail;  /* was unchecked: encoding into a NULL buffer crashed */

    size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
    if (size < 0)
        goto fail;
    put_buffer(pb, outbuf, size);
    put_flush_packet(pb);
    ret = 0;

 fail:
    avcodec_close(c);
    av_free(outbuf);    /* av_free(NULL) is a no-op */
 fail1:
    av_free(picture);
 fail2:
    av_free(c);
    return ret;
}
Beispiel #3
0
/*****************************************************************************
 * OpenEncoder: probe and configure an ffmpeg-based encoder for VLC.
 *
 * Maps the requested fourcc to a libavcodec codec id (falling back to raw
 * video when the fourcc is a known chroma), allocates the encoder state
 * (encoder_sys_t + AVCodecContext), reads all module options into that
 * state, applies the video/audio specific context settings, and finally
 * opens the codec.  Returns VLC_SUCCESS or VLC_EGENERIC.
 *****************************************************************************/
int E_(OpenEncoder)( vlc_object_t *p_this )
{
    encoder_t *p_enc = (encoder_t *)p_this;
    encoder_sys_t *p_sys = p_enc->p_sys;
    AVCodecContext *p_context;
    AVCodec *p_codec;
    int i_codec_id, i_cat;
    char *psz_namecodec;
    vlc_value_t val;

    if( !E_(GetFfmpegCodec)( p_enc->fmt_out.i_codec, &i_cat, &i_codec_id,
                             &psz_namecodec ) )
    {
        if( E_(GetFfmpegChroma)( p_enc->fmt_out.i_codec ) < 0 )
        {
            /* handed chroma output */
            return VLC_EGENERIC;
        }
        /* Output fourcc is a raw chroma: encode as raw video. */
        i_cat      = VIDEO_ES;
        i_codec_id = CODEC_ID_RAWVIDEO;
        psz_namecodec = "Raw video";
    }


    /* Reject category mismatches between the requested output and the codec. */
    if( p_enc->fmt_out.i_cat == VIDEO_ES && i_cat != VIDEO_ES )
    {
        msg_Err( p_enc, "\"%s\" is not a video encoder", psz_namecodec );
        return VLC_EGENERIC;
    }

    if( p_enc->fmt_out.i_cat == AUDIO_ES && i_cat != AUDIO_ES )
    {
        msg_Err( p_enc, "\"%s\" is not an audio encoder", psz_namecodec );
        return VLC_EGENERIC;
    }

    /* Initialization must be done before avcodec_find_decoder() */
    E_(InitLibavcodec)(p_this);

    p_codec = avcodec_find_encoder( i_codec_id );
    if( !p_codec )
    {
        msg_Err( p_enc, "cannot find encoder %s", psz_namecodec );
        return VLC_EGENERIC;
    }

    /* Allocate the memory needed to store the decoder's structure */
    if( ( p_sys = (encoder_sys_t *)malloc(sizeof(encoder_sys_t)) ) == NULL )
    {
        msg_Err( p_enc, "out of memory" );
        return VLC_EGENERIC;
    }
    memset( p_sys, 0, sizeof(encoder_sys_t) );
    p_enc->p_sys = p_sys;
    p_sys->p_codec = p_codec;

    p_enc->pf_encode_video = EncodeVideo;
    p_enc->pf_encode_audio = EncodeAudio;

    p_sys->p_buffer_out = NULL;
    p_sys->p_buffer = NULL;

    /* NOTE(review): avcodec_alloc_context() result is not checked; a NULL
     * return would crash on the first dereference below — confirm whether
     * this can happen in practice and add a check if so. */
    p_sys->p_context = p_context = avcodec_alloc_context();

    /* Set CPU capabilities */
    /* Bits set in dsp_mask appear to disable the corresponding
     * optimisation when the CPU lacks the capability — TODO confirm
     * against the libavcodec dsp_mask semantics of this build. */
    p_context->dsp_mask = 0;
    if( !(p_enc->p_libvlc->i_cpu & CPU_CAPABILITY_MMX) )
    {
        p_context->dsp_mask |= FF_MM_MMX;
    }
    if( !(p_enc->p_libvlc->i_cpu & CPU_CAPABILITY_MMXEXT) )
    {
        p_context->dsp_mask |= FF_MM_MMXEXT;
    }
    if( !(p_enc->p_libvlc->i_cpu & CPU_CAPABILITY_3DNOW) )
    {
        p_context->dsp_mask |= FF_MM_3DNOW;
    }
    if( !(p_enc->p_libvlc->i_cpu & CPU_CAPABILITY_SSE) )
    {
        p_context->dsp_mask |= FF_MM_SSE;
        p_context->dsp_mask |= FF_MM_SSE2;
    }

    /* Pull every module option into p_sys. */
    sout_CfgParse( p_enc, ENC_CFG_PREFIX, ppsz_enc_options, p_enc->p_cfg );

    var_Get( p_enc, ENC_CFG_PREFIX "keyint", &val );
    p_sys->i_key_int = val.i_int;

    var_Get( p_enc, ENC_CFG_PREFIX "bframes", &val );
    p_sys->i_b_frames = val.i_int;

    var_Get( p_enc, ENC_CFG_PREFIX "vt", &val );
    p_sys->i_vtolerance = val.i_int;

    var_Get( p_enc, ENC_CFG_PREFIX "interlace", &val );
    p_sys->b_interlace = val.b_bool;

    var_Get( p_enc, ENC_CFG_PREFIX "pre-me", &val );
    p_sys->b_pre_me = val.b_bool;

    var_Get( p_enc, ENC_CFG_PREFIX "hurry-up", &val );
    p_sys->b_hurry_up = val.b_bool;
    if( p_sys->b_hurry_up )
    {
        /* hurry up mode needs noise reduction, even small */
        p_sys->i_noise_reduction = 1;
    }

    var_Get( p_enc, ENC_CFG_PREFIX "strict-rc", &val );
    p_sys->b_strict_rc = val.b_bool;
    var_Get( p_enc, ENC_CFG_PREFIX "rc-buffer-size", &val );
    p_sys->i_rc_buffer_size = val.i_int;
    var_Get( p_enc, ENC_CFG_PREFIX "rc-buffer-aggressivity", &val );
    p_sys->f_rc_buffer_aggressivity = val.f_float;

    var_Get( p_enc, ENC_CFG_PREFIX "i-quant-factor", &val );
    p_sys->f_i_quant_factor = val.f_float;

    /* NOTE: may overwrite the value forced by hurry-up above. */
    var_Get( p_enc, ENC_CFG_PREFIX "noise-reduction", &val );
    p_sys->i_noise_reduction = val.i_int;

    var_Get( p_enc, ENC_CFG_PREFIX "mpeg4-matrix", &val );
    p_sys->b_mpeg4_matrix = val.b_bool;

    /* qscale outside (0.01, 255] means "no fixed quality". */
    var_Get( p_enc, ENC_CFG_PREFIX "qscale", &val );
    if( val.f_float < 0.01 || val.f_float > 255.0 ) val.f_float = 0;
    p_sys->i_quality = (int)(FF_QP2LAMBDA * val.f_float + 0.5);

    var_Get( p_enc, ENC_CFG_PREFIX "hq", &val );
    if( val.psz_string && *val.psz_string )
    {
        if( !strcmp( val.psz_string, "rd" ) )
            p_sys->i_hq = FF_MB_DECISION_RD;
        else if( !strcmp( val.psz_string, "bits" ) )
            p_sys->i_hq = FF_MB_DECISION_BITS;
        else if( !strcmp( val.psz_string, "simple" ) )
            p_sys->i_hq = FF_MB_DECISION_SIMPLE;
        else
            p_sys->i_hq = FF_MB_DECISION_RD;
    }
    if( val.psz_string ) free( val.psz_string );

    var_Get( p_enc, ENC_CFG_PREFIX "qmin", &val );
    p_sys->i_qmin = val.i_int;
    var_Get( p_enc, ENC_CFG_PREFIX "qmax", &val );
    p_sys->i_qmax = val.i_int;
    var_Get( p_enc, ENC_CFG_PREFIX "trellis", &val );
    p_sys->b_trellis = val.b_bool;

    var_Get( p_enc, ENC_CFG_PREFIX "strict", &val );
    if( val.i_int < - 1 || val.i_int > 1 ) val.i_int = 0;
    p_context->strict_std_compliance = val.i_int;

    if( p_enc->fmt_in.i_cat == VIDEO_ES )
    {
        int i_aspect_num, i_aspect_den;

        if( !p_enc->fmt_in.video.i_width || !p_enc->fmt_in.video.i_height )
        {
            msg_Warn( p_enc, "invalid size %ix%i", p_enc->fmt_in.video.i_width,
                      p_enc->fmt_in.video.i_height );
            /* NOTE(review): frees p_sys but leaks p_context (and leaves
             * p_enc->p_sys dangling) — confirm and fix in a follow-up. */
            free( p_sys );
            return VLC_EGENERIC;
        }

        p_context->width = p_enc->fmt_in.video.i_width;
        p_context->height = p_enc->fmt_in.video.i_height;

        p_context->frame_rate = p_enc->fmt_in.video.i_frame_rate;
        p_context->frame_rate_base= p_enc->fmt_in.video.i_frame_rate_base;

        /* Defaults from ffmpeg.c */
        p_context->qblur = 0.5;
        p_context->qcompress = 0.5;
        p_context->b_quant_offset = 1.25;
        p_context->b_quant_factor = 1.25;
        p_context->i_quant_offset = 0.0;
        p_context->i_quant_factor = -0.8;

        if( p_sys->i_key_int > 0 )
            p_context->gop_size = p_sys->i_key_int;
        p_context->max_b_frames =
            __MAX( __MIN( p_sys->i_b_frames, FF_MAX_B_FRAMES ), 0 );
        p_context->b_frame_strategy = 0;

#if LIBAVCODEC_BUILD >= 4687
        /* Convert the display aspect into a reduced sample aspect ratio. */
        av_reduce( &i_aspect_num, &i_aspect_den,
                   p_enc->fmt_in.video.i_aspect,
                   VOUT_ASPECT_FACTOR, 1 << 30 /* something big */ );
        av_reduce( &p_context->sample_aspect_ratio.num,
                   &p_context->sample_aspect_ratio.den,
                   i_aspect_num * (int64_t)p_context->height,
                   i_aspect_den * (int64_t)p_context->width, 1 << 30 );
#else
        p_context->aspect_ratio = ((float)p_enc->fmt_in.video.i_aspect) /
            VOUT_ASPECT_FACTOR;
#endif

        /* NOTE(review): malloc result unchecked — TODO confirm policy. */
        p_sys->p_buffer_out = malloc( AVCODEC_MAX_VIDEO_FRAME_SIZE );

        p_enc->fmt_in.i_codec = VLC_FOURCC('I','4','2','0');

        if ( p_sys->b_strict_rc )
        {
            p_context->rc_max_rate = p_enc->fmt_out.i_bitrate;
            p_context->rc_buffer_size = p_sys->i_rc_buffer_size;
            p_context->rc_buffer_aggressivity = p_sys->f_rc_buffer_aggressivity;
        }

        if ( p_sys->f_i_quant_factor != 0.0 )
            p_context->i_quant_factor = p_sys->f_i_quant_factor;

#if LIBAVCODEC_BUILD >= 4690
        p_context->noise_reduction = p_sys->i_noise_reduction;
#endif

        if ( p_sys->b_mpeg4_matrix )
        {
            p_context->intra_matrix = ff_mpeg4_default_intra_matrix;
            p_context->inter_matrix = ff_mpeg4_default_non_intra_matrix;
        }

        if ( p_sys->b_pre_me )
        {
            p_context->pre_me = 1;
            p_context->me_pre_cmp = FF_CMP_CHROMA;
        }

        if ( p_sys->b_interlace )
        {
            p_context->flags |= CODEC_FLAG_INTERLACED_DCT;
#if LIBAVCODEC_BUILD >= 4698
            p_context->flags |= CODEC_FLAG_INTERLACED_ME;
#endif
        }

        if ( p_sys->b_trellis )
            p_context->flags |= CODEC_FLAG_TRELLIS_QUANT;

#if LIBAVCODEC_BUILD >= 4702
        if ( p_enc->i_threads >= 1 )
            p_context->thread_count = p_enc->i_threads;
#endif

        if( p_sys->i_vtolerance > 0 )
            p_context->bit_rate_tolerance = p_sys->i_vtolerance;

        if( p_sys->i_qmin > 0 )
            p_context->mb_qmin = p_context->qmin = p_sys->i_qmin;
        if( p_sys->i_qmax > 0 )
            p_context->mb_qmax = p_context->qmax = p_sys->i_qmax;
        p_context->max_qdiff = 3;

        p_context->mb_decision = p_sys->i_hq;

        if( p_sys->i_quality )
        {
            p_context->flags |= CODEC_FLAG_QSCALE;
#if LIBAVCODEC_BUILD >= 4668
            p_context->global_quality = p_sys->i_quality;
#endif
        }
    }
    else if( p_enc->fmt_in.i_cat == AUDIO_ES )
    {
        p_enc->fmt_in.i_codec  = AOUT_FMT_S16_NE;
        p_context->sample_rate = p_enc->fmt_in.audio.i_rate;
        p_context->channels    = p_enc->fmt_in.audio.i_channels;
    }

    /* Misc parameters */
    p_context->bit_rate = p_enc->fmt_out.i_bitrate;

    if( i_codec_id == CODEC_ID_RAWVIDEO )
    {
        /* XXX: hack: Force same codec (will be handled by transcode) */
        p_enc->fmt_in.i_codec = p_enc->fmt_out.i_codec;
        p_context->pix_fmt = E_(GetFfmpegChroma)( p_enc->fmt_in.i_codec );
    }

    /* Make sure we get extradata filled by the encoder */
    p_context->extradata_size = 0;
    p_context->extradata = NULL;
    p_context->flags |= CODEC_FLAG_GLOBAL_HEADER;

    if( avcodec_open( p_context, p_codec ) )
    {
        /* Retry once with stereo: some audio codecs cannot encode more
         * than two channels. */
        if( p_enc->fmt_in.i_cat == AUDIO_ES && p_context->channels > 2 )
        {
            p_context->channels = 2;
            p_enc->fmt_in.audio.i_channels = 2; // FIXME
            if( avcodec_open( p_context, p_codec ) )
            {
                msg_Err( p_enc, "cannot open encoder" );
                /* NOTE(review): leaks p_context / p_buffer_out as above. */
                free( p_sys );
                return VLC_EGENERIC;
            }
            msg_Warn( p_enc, "stereo mode selected (codec limitation)" );
        }
        else
        {
            msg_Err( p_enc, "cannot open encoder" );
            free( p_sys );
            return VLC_EGENERIC;
        }
    }

    /* Export the encoder's extradata, then stop asking for global headers. */
    p_enc->fmt_out.i_extra = p_context->extradata_size;
    p_enc->fmt_out.p_extra = p_context->extradata;
    p_context->flags &= ~CODEC_FLAG_GLOBAL_HEADER;

    if( p_enc->fmt_in.i_cat == AUDIO_ES )
    {
        /* i_frame_size is in bytes: frame_size samples * 2 bytes * channels */
        p_sys->p_buffer_out = malloc( 2 * AVCODEC_MAX_AUDIO_FRAME_SIZE );
        p_sys->i_frame_size = p_context->frame_size * 2 * p_context->channels;
        p_sys->p_buffer = malloc( p_sys->i_frame_size );
    }

    msg_Dbg( p_enc, "found encoder %s", psz_namecodec );

    return VLC_SUCCESS;
}
Beispiel #4
0
// Open a video file and prepare it for playback.
//
// Demuxes `aviname`, selects the first video stream, opens its decoder,
// allocates the decode frame, initialises the grayscale palette and the
// playback mutex.  `channel` is currently unused here.
//
// Returns 1 on success, 0 on failure (C-style boolean).  On failure,
// resources acquired so far are NOT released here — presumably the
// destructor/close path handles that; TODO confirm.
int AVIFile::open(char* aviname,int initFlags,int channel)
{
	int i;
	filename = aviname;
	av_register_all();
	// Open video file
	pFormatCtx = NULL;
	
	if(avformat_open_input(&pFormatCtx, aviname, NULL, NULL) != 0) {
		std::cout<<"Couldn't open video file:"<<aviname<<std::endl;
		return 0; // Couldn't open file
	}
	// Retrieve stream information
	if(avformat_find_stream_info(pFormatCtx, NULL) < 0) {
		std::cout<<"Couldn't find stream information file:"<<aviname<<std::endl;
		return 0; // Couldn't find stream information
	}
	// Find the first video stream
	videoStream=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++)
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
			videoStream=i;
			break;
		}
	if(videoStream==-1) {
		std::cout<<"Didn't find a video stream file:"<<aviname<<std::endl;
		return 0; // Didn't find a video stream
	}

	// Get a pointer to the codec context for the video stream
	pCodecCtx=pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL) {
		std::cout<<"Unsupported codec! file:"<<aviname<<std::endl;
		return 0; // Codec not found
	}
	// Open codec
	// Pick the right open call for the libavcodec version in use.
#if LIBAVCODEC_VERSION_MAJOR < 53 && LIBAVCODEC_VERSION_MINOR < 35
	if(avcodec_open(pCodecCtx, pCodec)<0) {
#else
	if(avcodec_open2(pCodecCtx, pCodec, NULL)<0) {
#endif
		std::cout<<"Could not open codec file:"<<aviname<<std::endl;
		return 0; // Could not open codec
	}

	// Allocate video frame
	// NOTE(review): allocation result is not checked — a NULL pFrame
	// would crash later in draw(); confirm and add a check.
	pFrame=avcodec_alloc_frame();

	width = pCodecCtx->width;
	height = pCodecCtx->height;
	flags = initFlags;
	// NOTE(review): bails out as failure AFTER the codec was opened;
	// resources stay allocated until close — confirm this is intended.
	if (flags & AVI_INTERPOLATE)
		return 0; // XXX: add support for palettes

	// Default palette: 256-entry grayscale ramp, 6-bit components
	// (values shifted down by 2).
	for (i = 0; i < 256; i++){
		//gray
		palette[i*3] = i >> 2;
		palette[i*3 + 1] = i >> 2;
		palette[i*3 + 2] = i >> 2;
	}

	released = redraw = x = y = 0;

	// Optionally decode/draw the first frame immediately.
	if(flags & AVI_DRAW){
		draw();
	}
	avCriticalSection = SDL_CreateMutex();
	pause = 1;
	return 1;
}

// Decode the next video frame (and update the palette from it).
//
// No-op while paused or after release.  Reads packets until a complete
// video frame is decoded.  When the stream is exhausted and AVI_LOOPING
// is set, the file is closed, reopened and draw() recurses once to fetch
// the first frame again; otherwise the palette is refreshed from the
// decoded frame.
void AVIFile::draw(void) {
	if(pause) 
		return;
	int i, frameFinished;
	if(!released) {
		redraw = 1;
		int frame=-1;
		while((frame=av_read_frame(pFormatCtx, &packet))>=0) {
			// Is this a packet from the video stream?
			if(packet.stream_index==videoStream) {
				// Decode video frame
#if LIBAVCODEC_VERSION_MAJOR < 53 && LIBAVCODEC_VERSION_MINOR < 35
				avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, packet.data, packet.size);
#else
				avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
#endif
				 // Did we get a video frame?
				if(frameFinished) {
					av_free_packet(&packet);
					break;
				}
			}
		av_free_packet(&packet);
		}
		// End of stream: restart the file if looping was requested.
		if(frame<0 && (flags & AVI_LOOPING)) {
				// Close the codec
				avcodec_close(pCodecCtx);
				// Close the video file
				//av_close_input_file(pFormatCtx);
				avformat_close_input(&pFormatCtx);
				// Open video file
				if(avformat_open_input(&pFormatCtx, filename.c_str(), NULL, NULL)!=0) {
					std::cout<<"Couldn't open video file"<<std::endl;
					return; // Couldn't open file
				}
				// Get a pointer to the codec context for the video stream
				// NOTE(review): reuses the old videoStream index without
				// re-running stream detection — assumes the stream layout
				// is unchanged on reopen; confirm.
				pCodecCtx=pFormatCtx->streams[videoStream]->codec;
				// Find the decoder for the video stream
				pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
				if(pCodec==NULL) {
					std::cout<<"Unsupported codec!"<<std::endl;
					return; // Codec not found
				}
				// Open codec
#if LIBAVCODEC_VERSION_MAJOR < 53 && LIBAVCODEC_VERSION_MINOR < 35
				if(avcodec_open(pCodecCtx, pCodec)<0) {
#else
				if(avcodec_open2(pCodecCtx, pCodec, NULL)<0) {
#endif
					std::cout<<"Could not open codec"<<std::endl;
					return; // Could not open codec
				}
				// Single-level recursion to decode the first frame of the
				// reopened file.
				draw();
			}else{
			// Refresh the palette from the decoded frame's second plane.
			// NOTE(review): assumes a paletted format where data[1] holds
			// 256 BGRA entries — confirm against the decoder's pix_fmt.
			for (i = 0; i < 256; i++) {
				palette[i*3] = pFrame->data[1][i*4+2] >> 2;
				palette[i*3+1] = pFrame->data[1][i*4+1] >> 2;
				palette[i*3+2] = pFrame->data[1][i*4] >> 2;
				}
			}
	}
}
Beispiel #5
0
/* Save one picture to `filename` as a BMP file.
 *
 * The input is converted to BGR24 first when necessary.  Returns 0 on
 * success, a negative value on failure.
 *
 * Fixes over the previous version: the fopen() result is checked (it was
 * dereferenced unchecked), the SwsContext is created once and freed, the
 * codec context is closed and freed on every path, a negative encoder
 * result is no longer passed to fwrite(), and the caller's `picture` is
 * no longer av_free()d when no conversion was needed.
 */
static int save_image(const AVPicture *picture, enum PixelFormat pix_fmt,
        int width, int height, const char *filename)
{
    AVCodec *codec;
    AVCodecContext *avctx;
    AVFrame *tmp_picture = NULL;
    uint8_t *outbuf = NULL;
    int outbuf_size;
    int size;
    FILE *f = NULL;
    int ret = -1;

    avctx = avcodec_alloc_context();
    if (!avctx)
        return -1;

    avctx->codec_id = CODEC_ID_BMP;
    avctx->codec_type = AVMEDIA_TYPE_VIDEO;
    avctx->width = width;
    avctx->height = height;
    avctx->pix_fmt = PIX_FMT_BGR24;
    avctx->time_base = (AVRational) {1, 1};

    codec = avcodec_find_encoder(avctx->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        av_free(avctx);
        return -1;
    }

    /* open the codec */
    if (avcodec_open(avctx, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        av_free(avctx);
        return -1;
    }

    if (pix_fmt != PIX_FMT_BGR24) {
        struct SwsContext *sctx;

        tmp_picture = alloc_picture(PIX_FMT_BGR24, avctx->width, avctx->height);
        if (!tmp_picture)
            goto fail;

        sctx = sws_getContext(avctx->width, avctx->height, pix_fmt,
                avctx->width, avctx->height, PIX_FMT_BGR24,
                SWS_POINT, NULL, NULL, NULL);
        if (!sctx)
            goto fail;

        sws_scale(sctx, (const uint8_t * const *) picture->data, picture->linesize,
                0, avctx->height, tmp_picture->data, tmp_picture->linesize);
        sws_freeContext(sctx);
    } else {
        /* already BGR24: encode the caller's picture directly */
        tmp_picture = (AVFrame *)picture;
    }

    outbuf_size = 6*1024*1024;
    outbuf = av_malloc(outbuf_size);
    if (!outbuf) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    f = fopen(filename, "wb");
    if (!f)
        goto fail;
    size = avcodec_encode_video(avctx, outbuf, outbuf_size, tmp_picture);
    if (size < 0)
        goto fail;
    fwrite(outbuf, sizeof(uint8_t), size, f);

    ret = 0;

fail:
    if (f)
        fclose(f);
    /* Only release the temporary frame we allocated ourselves. */
    if (pix_fmt != PIX_FMT_BGR24 && tmp_picture) {
        CMEM_free(tmp_picture->data[0], &alloc_params);
        av_free(tmp_picture);
    }
    av_free(outbuf);
    avcodec_close(avctx);
    av_free(avctx);

    return ret;
}
int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFrame         *pFrame;
    AVPacket        packet;
    int             frameFinished;
    float           aspect_ratio;
    struct SwsContext *img_convert_ctx;

    SDL_Overlay     *bmp;
    SDL_Surface     *screen;
    SDL_Rect        rect;
    SDL_Event       event;

    if(argc < 2) {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }
    // Register all formats and codecs
    av_register_all();

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Open video file
    if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if(av_find_stream_info(pFormatCtx)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
            videoStream=i;
            break;
        }
    if(videoStream==-1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx=pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }

    // Open codec
    if(avcodec_open(pCodecCtx, pCodec)<0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame=avcodec_alloc_frame();

    // Make a screen to put our video
#ifndef __DARWIN__
    screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
    screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif
    if(!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }

    // Allocate a place to put our YUV image on that screen
    bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
                               pCodecCtx->height,
                               SDL_YV12_OVERLAY,
                               screen);


    // Read frames and save first five frames to disk
    i=0;
    while(av_read_frame(pFormatCtx, &packet)>=0) {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
                                  &packet);

            // Did we get a video frame?
            if(frameFinished) {
                SDL_LockYUVOverlay(bmp);

                AVPicture pict;
                pict.data[0] = bmp->pixels[0];
                pict.data[1] = bmp->pixels[2];
                pict.data[2] = bmp->pixels[1];

                pict.linesize[0] = bmp->pitches[0];
                pict.linesize[1] = bmp->pitches[2];
                pict.linesize[2] = bmp->pitches[1];

                // Convert the image into YUV format that SDL uses
                /*
                	img_convert(&pict, PIX_FMT_YUV420P,
                                    (AVPicture *)pFrame, pCodecCtx->pix_fmt,
                		    pCodecCtx->width, pCodecCtx->height);
                */
                int dstFmt;
                dstFmt = PIX_FMT_YUV420P;

                img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                                                 pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
                                                 dstFmt, SWS_BICUBIC, NULL, NULL, NULL);

                sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize,
                          0, pCodecCtx->height, pict.data, pict.linesize);

                /*
                	printf("*(pict.data[0]: %d\n", *(pict.data[0]));
                	printf("*(pict.data[1]: %d\n", *(pict.data[1]));
                	printf("*(pict.data[2]: %d\n", *(pict.data[2]));
                	printf("*(pict.data[3]: %d\n", *(pict.data[3]));
                	printf("linesize[0]: %d\n", pict.linesize[0]);
                	printf("linesize[1]: %d\n", pict.linesize[1]);
                	printf("linesize[2]: %d\n", pict.linesize[2]);
                	printf("linesize[3]: %d\n", pict.linesize[3]);
                	printf("width: %d\n", pCodecCtx->width);
                	printf("height: %d\n", pCodecCtx->height);
                */
                ++i;
                if(i>50)
                    if(i<=51) {
                        printf("frame 51\n");
                        if( *(pict.data[0]) == 20)
                            printf("frame 51, line 0, x=1, 20\n");
                    }

                SDL_UnlockYUVOverlay(bmp);

                rect.x = 0;
                rect.y = 0;
                rect.w = pCodecCtx->width;
                rect.h = pCodecCtx->height;
                SDL_DisplayYUVOverlay(bmp, &rect);
//SDL_Delay(1000);
//return 0;
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
        SDL_PollEvent(&event);
        switch(event.type) {
        case SDL_QUIT:
            SDL_Quit();
            exit(0);
            break;
        default:
            break;
        }

    }

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    av_close_input_file(pFormatCtx);

    return 0;
}
// Construct an audio provider backed by libavcodec.
//
// Opens `_filename`, finds the first audio stream, opens its decoder and
// sets up mono resampling.  Throws a wxChar* message on any failure; the
// catch-all calls Destroy() so partially-initialised members (lavcfile,
// codecContext, rsct, buffer — all NULL-initialised above) are released
// before rethrowing.
LAVCAudioProvider::LAVCAudioProvider(wxString _filename, VideoProvider *vpro)
	: lavcfile(NULL), codecContext(NULL), rsct(NULL), buffer(NULL)
{
	try {
#if 0
	/* since seeking currently is likely to be horribly broken with two
	 * providers accessing the same stream, this is disabled for now.
	 */
	LAVCVideoProvider *vpro_lavc = dynamic_cast<LAVCVideoProvider *>(vpro);
	if (vpro_lavc) {
		lavcfile = vpro->lavcfile->AddRef();
		filename = vpro_lavc->GetFilename();
	} else {
#endif
		lavcfile = LAVCFile::Create(_filename);
		filename = _filename;
#if 0
	}
#endif
	// Pick the first audio stream; codecContext tracks the last stream
	// inspected and ends up pointing at the chosen one on break.
	audStream = -1;
	for (int i = 0; i < lavcfile->fctx->nb_streams; i++) {
		codecContext = lavcfile->fctx->streams[i]->codec;
		if (codecContext->codec_type == CODEC_TYPE_AUDIO) {
			stream = lavcfile->fctx->streams[i];
			audStream = i;
			break;
		}
	}
	if (audStream == -1)
		throw _T("Could not find an audio stream");
	AVCodec *codec = avcodec_find_decoder(codecContext->codec_id);
	if (!codec)
		throw _T("Could not find a suitable audio decoder");
	if (avcodec_open(codecContext, codec) < 0)
		throw _T("Failed to open audio decoder");

	/* aegisub currently supports mono only, so always resample */

	// 0 in the options means "use the source rate".
	sample_rate = Options.AsInt(_T("Audio Sample Rate"));
	if (!sample_rate)
		sample_rate = codecContext->sample_rate;

	channels = 1;
	bytes_per_sample = 2;

	// Downmix to mono and convert to the requested rate.
	rsct = audio_resample_init(1, codecContext->channels, sample_rate, codecContext->sample_rate);
	if (!rsct)
		throw _T("Failed to initialize resampling");

	resample_ratio = (float)sample_rate / (float)codecContext->sample_rate;

	// Total sample count derived from the stream duration in seconds.
	double length = (double)stream->duration * av_q2d(stream->time_base);
	num_samples = (__int64)(length * sample_rate);

	buffer = (int16_t *)malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
	if (!buffer)
		throw _T("Out of memory");

	} catch (...) {
		Destroy();
		throw;
	}
}
int main(int argc, char *argv[]) {
  AVFormatContext *pFormatCtx;
  int             i, videoStream, audioStream;
  AVCodecContext  *pCodecCtx;
  AVCodec         *pCodec;
  AVFrame         *pFrame; 
  AVPacket        packet;
  int             frameFinished;
  float           aspect_ratio;
  static struct SwsContext *img_convert_ctx;

  AVCodecContext  *aCodecCtx;
  AVCodec         *aCodec;

  SDL_Overlay     *bmp;
  SDL_Surface     *screen;
  SDL_Rect        rect;
  SDL_Event       event;
  SDL_AudioSpec   wanted_spec, spec;

  if(argc < 2) {
    fprintf(stderr, "Usage: test <file>\n");
    exit(1);
  }
  // Register all formats and codecs
  av_register_all();
  
  if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
    fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
    exit(1);
  }

  // Open video file
  if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0)
    return -1; // Couldn't open file
  
  // Retrieve stream information
  if(av_find_stream_info(pFormatCtx)<0)
    return -1; // Couldn't find stream information
  
  // Dump information about file onto standard error
  dump_format(pFormatCtx, 0, argv[1], 0);
  
  // Find the first video stream
  videoStream=-1;
  audioStream=-1;
  for(i=0; i<pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
       videoStream < 0) {
      videoStream=i;
    }
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
       audioStream < 0) {
      audioStream=i;
    }
  }
  if(videoStream==-1)
    return -1; // Didn't find a video stream
  if(audioStream==-1)
    return -1;
   
  aCodecCtx=pFormatCtx->streams[audioStream]->codec;
  // Set audio settings from codec info
  wanted_spec.freq = aCodecCtx->sample_rate;
  wanted_spec.format = AUDIO_S16SYS;
  wanted_spec.channels = aCodecCtx->channels;
  wanted_spec.silence = 0;
  wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
  wanted_spec.callback = audio_callback;
  wanted_spec.userdata = aCodecCtx;
  
  if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
    fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
    return -1;
  }
  aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
  if(!aCodec) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1;
  }
  avcodec_open(aCodecCtx, aCodec);

  // audio_st = pFormatCtx->streams[index]
  packet_queue_init(&audioq);
  SDL_PauseAudio(0);

  // Get a pointer to the codec context for the video stream
  pCodecCtx=pFormatCtx->streams[videoStream]->codec;
  
  // Find the decoder for the video stream
  pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
  if(pCodec==NULL) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1; // Codec not found
  }
  // Open codec
  if(avcodec_open(pCodecCtx, pCodec)<0)
    return -1; // Could not open codec
  
  // Allocate video frame
  pFrame=avcodec_alloc_frame();

  // Make a screen to put our video

#ifndef __DARWIN__
        screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
        screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif
  if(!screen) {
    fprintf(stderr, "SDL: could not set video mode - exiting\n");
    exit(1);
  }
  
  // Allocate a place to put our YUV image on that screen
  bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
				 pCodecCtx->height,
				 SDL_YV12_OVERLAY,
				 screen);


  // Read frames and save first five frames to disk
  i=0;
  while(av_read_frame(pFormatCtx, &packet)>=0) {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {
      // Decode video frame
      avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, 
			   &packet);
      
      // Did we get a video frame?
      if(frameFinished) {
	SDL_LockYUVOverlay(bmp);

	AVPicture pict;
	pict.data[0] = bmp->pixels[0];
	pict.data[1] = bmp->pixels[2];
	pict.data[2] = bmp->pixels[1];

	pict.linesize[0] = bmp->pitches[0];
	pict.linesize[1] = bmp->pitches[2];
	pict.linesize[2] = bmp->pitches[1];

	// Convert the image into YUV format that SDL uses
/*
	img_convert(&pict, PIX_FMT_YUV420P,
                    (AVPicture *)pFrame, pCodecCtx->pix_fmt, 
		    pCodecCtx->width, pCodecCtx->height);
*/

	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
 pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
 PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

	sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize,
 0, pCodecCtx->height, pict.data, pict.linesize);
	
	SDL_UnlockYUVOverlay(bmp);
	
	rect.x = 0;
	rect.y = 0;
	rect.w = pCodecCtx->width;
	rect.h = pCodecCtx->height;
	SDL_DisplayYUVOverlay(bmp, &rect);
	av_free_packet(&packet);
      }
    } else if(packet.stream_index==audioStream) {
      packet_queue_put(&audioq, &packet);
    } else {
      av_free_packet(&packet);
    }
    // Free the packet that was allocated by av_read_frame
    SDL_PollEvent(&event);
    switch(event.type) {
    case SDL_QUIT:
      quit = 1;
      SDL_Quit();
      exit(0);
      break;
    default:
      break;
    }

  }

  // Free the YUV frame
  av_free(pFrame);
  
  // Close the codec
  avcodec_close(pCodecCtx);
  
  // Close the video file
  av_close_input_file(pFormatCtx);
  
  return 0;
}
Beispiel #9
0
/**
 * Create a VDPAU hardware decoder for the given codec id and attach it
 * to the media codec.
 *
 * Returns 0 on success, 1 when VDPAU cannot (or should not) handle this
 * stream — the caller is expected to fall back to software decoding —
 * and -1 on a hard error.
 */
int
vdpau_codec_create(media_codec_t *mc, enum CodecID id,
		   AVCodecContext *ctx, media_codec_params_t *mcp,
		   media_pipe_t *mp)
{
  VdpDecoderProfile profile;
  vdpau_dev_t *vd = mp->mp_vdpau_dev;
  VdpStatus r;
  int refframes;

  /* No VDPAU device bound to this pipe */
  if(vd == NULL)
    return 1;

  /* The VDPAU decoder needs fixed dimensions up front */
  if(mcp->width == 0 || mcp->height == 0)
    return 1;

  /* Map codec id to a VDPAU profile, the matching ffmpeg hwaccel
   * decoder and the number of reference frames to allocate for */
  switch(id) {

  case CODEC_ID_MPEG1VIDEO:
    profile = VDP_DECODER_PROFILE_MPEG1;
    mc->codec = avcodec_find_decoder_by_name("mpegvideo_vdpau");
    refframes = 2;
    break;

  case CODEC_ID_MPEG2VIDEO:
    profile = VDP_DECODER_PROFILE_MPEG2_MAIN;
    mc->codec = avcodec_find_decoder_by_name("mpegvideo_vdpau");
    refframes = 2;
    break;

  case CODEC_ID_H264:
    profile = VDP_DECODER_PROFILE_H264_HIGH;
    mc->codec = avcodec_find_decoder_by_name("h264_vdpau");
    refframes = 16;
    break;
#if 0 // Seems broken
  case CODEC_ID_VC1:
    profile = VDP_DECODER_PROFILE_VC1_ADVANCED;
    mc->codec = avcodec_find_decoder_by_name("vc1_vdpau");
    refframes = 16;
    break;

  case CODEC_ID_WMV3:
    profile = VDP_DECODER_PROFILE_VC1_MAIN;
    mc->codec = avcodec_find_decoder_by_name("wmv3_vdpau");
    refframes = 16;
    break;
#endif
  default:
    return 1;
  }

  if(mc->codec == NULL)
    return -1;

  vdpau_codec_t *vc = calloc(1, sizeof(vdpau_codec_t));
  TAILQ_INIT(&vc->vc_vvs_alloc);
  TAILQ_INIT(&vc->vc_vvs_free);
  vc->vc_vd = vd;
  vc->vc_width = mcp->width;

  /* 1088 is the coded height of 1080p content; decode to the display
   * height instead */
  if(mcp->height == 1088)
    vc->vc_height = 1080;
  else
    vc->vc_height = mcp->height;

  vc->vc_profile = profile;
  vc->vc_refframes = refframes;

  r = vd->vdp_decoder_create(vd->vd_dev, vc->vc_profile,
			     vc->vc_width, vc->vc_height,
			     vc->vc_refframes, &vc->vc_decoder);

  if(r != VDP_STATUS_OK) {
    TRACE(TRACE_INFO, "VDPAU", "Unable to create decoder: %s",
	  vdpau_errstr(vd, r));
    vc_destroy(vc);
    return -1;
  }

  /* refframes + 5 gives headroom for frames in flight */
  r = vdpau_create_buffers(vc, vc->vc_width, vc->vc_height,
			   vc->vc_refframes + 5);

  if(r != VDP_STATUS_OK) {
    TRACE(TRACE_INFO, "VDPAU", "Unable to allocate decoding buffers");
    vc_destroy(vc);
    return -1;
  }

  TRACE(TRACE_DEBUG, "VDPAU", "Decoder initialized");

  mc->codec_ctx = ctx ?: avcodec_alloc_context();
  mc->codec_ctx->codec_id   = mc->codec->id;
  mc->codec_ctx->codec_type = mc->codec->type;

  if(avcodec_open(mc->codec_ctx, mc->codec) < 0) {
    if(ctx == NULL) {
      /* Fix: avcodec_alloc_context() allocates with av_malloc(), so the
       * context must be released with av_free(), not free(); also clear
       * the pointer so no dangling reference remains */
      av_free(mc->codec_ctx);
      mc->codec_ctx = NULL;
    }
    mc->codec = NULL;
    vc_destroy(vc);
    return -1;
  }

  /* Route buffer management and slice data through our VDPAU hooks */
  mc->codec_ctx->get_buffer      = vdpau_get_buffer;
  mc->codec_ctx->release_buffer  = vdpau_release_buffer;
  mc->codec_ctx->draw_horiz_band = vdpau_draw_horiz_band;
  mc->codec_ctx->get_format      = vdpau_get_pixfmt;

  mc->codec_ctx->slice_flags = SLICE_FLAG_CODED_ORDER | SLICE_FLAG_ALLOW_FIELD;

  mc->codec_ctx->opaque = mc;
  mc->opaque = vc;
  mc->decode = vdpau_decode;
  mc->close  = vdpau_codec_close;
  mc->reinit = vdpau_codec_reinit;
  return 0;
}
Beispiel #10
0
void AUD_FFMPEGReader::init()
{
	m_position = 0;
	m_pkgbuf_left = 0;

	if(av_find_stream_info(m_formatCtx)<0)
		AUD_THROW(AUD_ERROR_FFMPEG, streaminfo_error);

	// find audio stream and codec
	m_stream = -1;

	for(unsigned int i = 0; i < m_formatCtx->nb_streams; i++)
	{
		if((m_formatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
			&& (m_stream < 0))
		{
			m_stream=i;
			break;
		}
	}

	if(m_stream == -1)
		AUD_THROW(AUD_ERROR_FFMPEG, noaudio_error);

	m_codecCtx = m_formatCtx->streams[m_stream]->codec;

	// get a decoder and open it
	AVCodec *aCodec = avcodec_find_decoder(m_codecCtx->codec_id);
	if(!aCodec)
		AUD_THROW(AUD_ERROR_FFMPEG, nodecoder_error);

	if(avcodec_open(m_codecCtx, aCodec)<0)
		AUD_THROW(AUD_ERROR_FFMPEG, codecopen_error);

	// XXX this prints file information to stdout:
	//dump_format(m_formatCtx, 0, NULL, 0);

	m_specs.channels = (AUD_Channels) m_codecCtx->channels;

	switch(m_codecCtx->sample_fmt)
	{
	case SAMPLE_FMT_U8:
		m_convert = AUD_convert_u8_float;
		m_specs.format = AUD_FORMAT_U8;
		break;
	case SAMPLE_FMT_S16:
		m_convert = AUD_convert_s16_float;
		m_specs.format = AUD_FORMAT_S16;
		break;
	case SAMPLE_FMT_S32:
		m_convert = AUD_convert_s32_float;
		m_specs.format = AUD_FORMAT_S32;
		break;
	case SAMPLE_FMT_FLT:
		m_convert = AUD_convert_copy<float>;
		m_specs.format = AUD_FORMAT_FLOAT32;
		break;
	case SAMPLE_FMT_DBL:
		m_convert = AUD_convert_double_float;
		m_specs.format = AUD_FORMAT_FLOAT64;
		break;
	default:
		AUD_THROW(AUD_ERROR_FFMPEG, format_error);
	}

	m_specs.rate = (AUD_SampleRate) m_codecCtx->sample_rate;
}
 /**
  * Open an audio file through the VSFileSystem-backed "vsfile:" protocol,
  * locate the streamIdx-th audio stream, open its decoder and fill `fmt`
  * with the stream's sample parameters.
  *
  * Throws FileOpenException / CodecNotFoundException on failure.
  */
 FFData(const std::string &path, VSFileSystem::VSFileType type, Format &fmt, int streamIdx) throw(Exception) :
     pFormatCtx(0),
     pCodecCtx(0),
     pCodec(0),
     pStream(0),
     packetBuffer(0),
     packetBufferSize(0),
     sampleBufferBase(0),
     filepath(path),
     filetype(type),
     audioStreamIndex(streamIdx)
 {
     packet.data = 0;
     
     // Render the file type as a decimal suffix for the vsfile URL.
     // Fix: the old buffer was sized (sizeof(type)+1)/2+1 — only 3 bytes
     // for a 4-byte enum — which sprintf overflows for values >= 100.
     // Size it for any int and use snprintf so it can never overrun.
     char buf[3 * sizeof(int) + 2];
     snprintf(buf, sizeof(buf), "%d", (int)type);
     
     // Initialize libavcodec/libavformat if necessary
     FFMpeg::initLibraries();
     
     // Open file
     std::string npath = std::string("vsfile:") + path + "|" + buf;
     std::string errbase = std::string("Cannot open URL \"") + npath + "\"";
     
     if (  (0 != av_open_input_file(&pFormatCtx, npath.c_str(), NULL, BUFFER_SIZE, NULL))
         ||(0 >  av_find_stream_info(pFormatCtx))  )
         throw FileOpenException(errbase + " (wrong format or file not found)"); 
     
     // Dump format info in case we want to know...
     #ifdef VS_DEBUG
     dump_format(pFormatCtx, 0, npath.c_str(), false);
     #endif
     
     // Find audio stream: streamIdx counts down so the N-th audio stream
     // in the container is selected
     pCodecCtx = 0;
     streamIndex = -1;
     for (unsigned int i=0; (pCodecCtx==0) && (i < pFormatCtx->nb_streams); ++i)
         if ((pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO) && (streamIdx-- == 0))
             pCodecCtx = (pStream = pFormatCtx->streams[streamIndex = i])->codec;
     if (pCodecCtx == 0)
         throw FileOpenException(errbase + " (wrong or no audio stream)");
     
     // Find codec for the audio stream and open it
     pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
     if(pCodec == 0)
         throw CodecNotFoundException(errbase + " (unsupported codec)");
     
     if(avcodec_open(pCodecCtx, pCodec) < 0)
         throw CodecNotFoundException(errbase + " (unsupported codec)");
     
     // Get some info
     fmt.sampleFrequency = pCodecCtx->sample_rate;
     fmt.channels = pCodecCtx->channels;
     fmt.nativeOrder = 1; // always so for ffmpeg
     switch (pCodecCtx->sample_fmt) {
     case SAMPLE_FMT_U8:  fmt.bitsPerSample = 8;
                          fmt.signedSamples = 0;
                          break;
     case SAMPLE_FMT_S16: fmt.bitsPerSample = 16;
                          fmt.signedSamples = 1;
                          break;
     #ifdef SAMPLE_FMT_S24
     case SAMPLE_FMT_S24: fmt.bitsPerSample = 24;
                          fmt.signedSamples = 1;
                          break;
     #endif
     #ifdef SAMPLE_FMT_S32
     case SAMPLE_FMT_S32: fmt.bitsPerSample = 32;
                          fmt.signedSamples = 1;
                          break;
     #endif
     default:             throw CodecNotFoundException(errbase + " (unsupported audio format)");
     }
     sampleSize = (fmt.bitsPerSample + 7) / 8 * fmt.channels;
     assert(sampleSize > 0);
     
     // Initialize timebase counter
     sampleBufferStart = 0;
     streamSize = 0;
 
     // Initialize sample buffer, over-allocated so it can be aligned to
     // BUFFER_ALIGNMENT below
     sampleBufferBase = malloc(sampleSize * BUFFER_SIZE + BUFFER_ALIGNMENT);
     ptrdiff_t offs = ((reinterpret_cast<ptrdiff_t>(sampleBufferBase)) & (BUFFER_ALIGNMENT-1));
     sampleBufferAligned = ((char*)sampleBufferBase) + BUFFER_ALIGNMENT - offs;
     sampleBufferAlloc = sampleSize * BUFFER_SIZE;
     sampleBuffer = 0;
     sampleBufferSize = 0;
 }
/***********************
* DecodePacket 
*	Decodes one RTP payload packet
************************/
/**
 * Decode one RTP payload chunk of an H.263 (RFC 2190) stream.
 *
 * @param in   payload bytes; modified in place while stripping header bits
 * @param len  payload length in bytes
 * @param lost nonzero if packets were lost before this one (currently unused)
 * @param last nonzero if this is the last chunk of the frame
 * @return result of Decode() for the last chunk, 1 for intermediate
 *         chunks, 0 on error
 */
int H263Decoder1996::DecodePacket(BYTE *in,DWORD len,int lost,int last)
{
	int ret = 1;

	// Need at least the smallest RFC 2190 header
	if(len<4)
		return Error("Recived short packed [%d]\n",len);

	// Parse the payload header (mode selected by the first byte)
	H263HeadersBasic* headers = H263HeadersBasic::CreateHeaders(in[0]);

	DWORD parsed = headers->Parse(in,len);

	if (!parsed)
	{
		// Fix: release the headers before bailing out (they leaked here)
		delete(headers);
		return Error("Error parsing H263 RFC 2190 packet\n");
	}

	// Defensive: len is unsigned, so consuming more bytes than the packet
	// holds would wrap the subtraction below and index out of bounds
	if (parsed > len)
	{
		delete(headers);
		return Error("Error parsing H263 RFC 2190 packet\n");
	}

	// Skip the payload header
	in+=parsed;
	len-=parsed;

	// Fix: guard the whole bit-fixup block so a header-only chunk
	// (len == 0) does not read/write one byte past the payload
	if (len>0)
	{
		// Blank the leading bits that belong to the previous chunk
		in[0] &= 0xff >> headers->sbits;

		// ...and the trailing bits that belong to the next chunk
		in[len-1] &= 0xff << headers->ebits;

		// If the first byte is shared with the previous chunk's last byte
		if(headers->sbits!=0 && bufLen>0)
		{
			// Merge the shared byte into the reassembly buffer
			buffer[bufLen-1] |= in[0];

			// ...and advance past it
			in++;
			len--;
		}
	}

	// Free headers
	delete(headers);

	// Make sure the chunk fits. Fix: the old `len<0` test could never
	// fire because len is an unsigned DWORD.
	if (bufLen+len+FF_INPUT_BUFFER_PADDING_SIZE>bufSize)
		return Error("Wrong size of packet [%d,%d]\n",bufLen,len);

	// Append the chunk to the reassembly buffer
	memcpy(buffer+bufLen,in,len);
	bufLen+=len;

	// Last chunk of the frame: decode and reset the buffer
	if(last)
	{
		// Zero the padding the decoder requires after the bitstream
		memset(buffer+bufLen,0,FF_INPUT_BUFFER_PADDING_SIZE);

		ret = Decode(buffer,bufLen);
		bufLen=0;
	}
	return ret;
}
/***********************
* OpenCodec
*	Opens the H.263 encoder
************************/
int H263Encoder1996::OpenCodec()
{
	Log("-OpenCodec H263 [%dbps,%dfps]\n",bitrate,fps);

	// A codec must have been located before opening
	if (codec==NULL)
		return Error("No codec\n");

	// Refuse to open twice
	if (opened)
		return Error("Already opened\n");

	//If we already got a frame buffer
	if (frame)
		//Free it
		delete(frame);

	//Size the output buffer for one frame's worth of bits at the target bitrate
	DWORD bufSize = (int)bitrate/(8*fps);

	//Check size against the encoder's minimum
	if (bufSize<FF_MIN_BUFFER_SIZE)
		//Set minimum (doubled for headroom)
		bufSize = FF_MIN_BUFFER_SIZE*2;

	//Allocate the output frame buffer
	frame = new VideoFrame(type,bufSize);

#if 0
	//Set rtp info: let the encoder slice output into RTP-sized chunks
	//and deliver them via RTPCallback
	ctx->rtp_payload_size	= 400;
	ctx->rtp_callback	= RTPCallback;
	ctx->opaque		= this;
#else
	// NOTE(review): payload size 1 presumably forces a slice per GOB for
	// external packetization — confirm against the packetizer
	ctx->rtp_payload_size	= 1;
#endif
	
	// Bitrate,fps
	ctx->bit_rate 		= bitrate;
	ctx->bit_rate_tolerance = bitrate/fps+1;
	ctx->time_base          = (AVRational){1,fps};
	ctx->gop_size		= intraPeriod;

	// Encoder quality: pin min/max rate to the target for CBR-like output,
	// with a one-frame rate-control buffer
	ctx->rc_min_rate 	= bitrate;
	ctx->rc_max_rate	= bitrate;
	ctx->rc_buffer_size	= bitrate/fps+1;
	ctx->rc_buffer_aggressivity	 = 1;
	ctx->rc_initial_buffer_occupancy = 0;
	ctx->rc_qsquish 	= 1;
	ctx->max_b_frames	= 0;
	ctx->dia_size		= 1024;
	ctx->mb_decision	= FF_MB_DECISION_RD;
	
	// Open codec
	if (avcodec_open(ctx, codec)<0)
		return Error("Unable to open H263 codec\n");

	// We are opened
	opened=true;

	// Exit (1 == success in this codebase's convention)
	return 1;
}
Beispiel #14
0
int FFmpegEncoder::open()
{
	LOGI("FFmpegEncoder::open, begin!");
	if (this->opened) {
		LOGW("FFmpegEncoder::open, try to reopen!");
		return -1;
	}

	if (this->videoParam.videoCodecName.empty() && 
		this->audioParam.audioCodecName.empty()) {
		LOGE("FFmpegEncoder::open, no output or codec name");
		return -1;
	}

	// allocate the output media context
	this->outputContext = avformat_alloc_context();
	if (!this->outputContext) {
		LOGE("FFmpegEncoder::open, failed to alloc context!");
		return -1;
	}

	// video related initialization if necessary
	if (this->encodeVideo) {
		// validate the video codec
		if (this->videoParam.videoCodecName.empty()) {
			LOGE("FFmpegEncoder::open, no video codec name!");
			return -1;
		}

		// find the video encoder
		AVCodec *videoCodec = NULL;

		// use the codec name preferentially if it is specified in the input param
		videoCodec = avcodec_find_encoder_by_name(this->videoParam.videoCodecName.c_str());

		if (!videoCodec) {
			LOGE("FFmpegEncoder::open, find no video codec!");
			return -1;
		}

		// add the video stream with stream id 0
		this->videoStream = av_new_stream(this->outputContext, 0);
		if (!this->videoStream)	{
			LOGE("FFmpegEncoder::open, failed to new video stream!");
			return -1;
		}

		// set the parameters for video codec context
		AVCodecContext *videoCodecContext = this->videoStream->codec;
		videoCodecContext->codec_id       = videoCodec->id;
		videoCodecContext->codec_type     = CODEC_TYPE_VIDEO;
		videoCodecContext->bit_rate       = this->videoParam.bitRate;
		videoCodecContext->width          = this->videoParam.width;
		videoCodecContext->height         = this->videoParam.height;
		videoCodecContext->time_base.den  = this->videoParam.frameRate;
		videoCodecContext->time_base.num  = 1;

		// tune for video encoding
		videoCodecContext->gop_size = 24;
		videoCodecContext->qmin = 3;
		videoCodecContext->qmax = 33;
		videoCodecContext->max_qdiff = 4;
		videoCodecContext->qcompress = 0.6f;
	
		videoCodecContext->me_method = ME_FULL;
		videoCodecContext->me_range = 32;
		videoCodecContext->partitions = X264_PART_I4X4 | X264_PART_I8X8 | X264_PART_P8X8 | X264_PART_P4X4 | X264_PART_B8X8;
		videoCodecContext->coder_type = FF_CODER_TYPE_AC;
		videoCodecContext->max_b_frames = 1;

		// set the PixelFormat of the target encoded video
		if (videoCodec->pix_fmts) {
			// try to find the PixelFormat required by the input param,
			// use the default PixelFormat directly if required format not found
			const enum PixelFormat *p= videoCodec->pix_fmts;
			for ( ; *p != PIX_FMT_NONE; p ++) {
				if (*p == this->videoParam.pixelFormat)
					break;
			}
			
			if (*p == PIX_FMT_NONE)
				videoCodecContext->pix_fmt = videoCodec->pix_fmts[0];
			else
				videoCodecContext->pix_fmt = *p;
		}

		// open the video codec
		if (avcodec_open(videoCodecContext, videoCodec) < 0) {
			LOGE("FFmpegEncoder.open, find but failed to open video codec!");
			return -1;
		}

		// allocate the output buffer
		// the maximum possible buffer size could be the raw bmp format with R/G/B/A
		this->videoBufferSize = 4 * this->videoParam.width * this->videoParam.height;
		this->videoBuffer     = (uint8_t*)(av_malloc(this->videoBufferSize));
	}

	// audio related initialization if necessary
	if (this->encodeAudio)
	{
		// validate the audio codec
		if (this->audioParam.audioCodecName.empty())
		{
			LOGE("FFmpegEncoder.open, no outputformat or no audio codec name!");
			return -1;
		}

		// find the audio encoder
		AVCodec *audioCodec = NULL;

		// use the codec name preferentially if it is specified in the input param
		audioCodec = avcodec_find_encoder_by_name(this->audioParam.audioCodecName.c_str());

		if (!audioCodec)
		{
			LOGE("FFmpegEncoder.open, invalid audio codec!");
			return -1;
		}

		// add the audio stream with stream id 1
		this->audioStream = av_new_stream(this->outputContext, 1);
		if (!this->audioStream)
		{
			LOGE("FFmpegEncoder.open, failed to new audio stream!");
			return -1;
		}

		// set the parameters for audio codec context
		AVCodecContext *audioCodecContext = this->audioStream->codec;
		audioCodecContext->codec_id       = audioCodec->id;
		audioCodecContext->codec_type     = CODEC_TYPE_AUDIO;
		audioCodecContext->bit_rate       = this->audioParam.bitRate;
		audioCodecContext->sample_rate    = this->audioParam.sampleRate;
		audioCodecContext->channels       = this->audioParam.channels;

		// open the audio codec
		if (avcodec_open(audioCodecContext, audioCodec) < 0)
		{
			LOGE("FFmpegEncoder.open, failed to open audio codec!");
			return -1;
		}

		// TODO: how to determine the buffer size?
		// allocate the output buffer
		this->audioBufferSize = 4 * MAX_AUDIO_PACKET_SIZE;
		this->audioBuffer     = (uint8_t*)(av_malloc(this->audioBufferSize));
	}

	this->opened = true;
	LOGI("FFmpegEncoder.open, end!");

	return 0;
}
Beispiel #15
0
/**
 * Starts the extraction process of a video using ffmpeg.
 *
 * - filename must be a c-style string of the location of the
 * videofile. It is relative to the current working directory.
 * - video_stream must be a pointer to a video_stream memory
 * location. You must allocate this using malloc prior.
 *
 * Returns 0 on success, nonzero otherwise.
 */
int extract_video(char *filename, struct video_stream *output)
{
    AVFormatContext* format_context = NULL; /* allocated by av_open_input_file */
    AVCodecContext* codec_context = NULL;
    AVCodec* codec;
    AVFrame* frame_reg = NULL;
    AVFrame* frame_rgb = NULL;
    uint8_t* buffer = NULL;
    int video_stream = -1;
    int num_bytes;
    int codec_opened = 0;   /* so the failure path knows to avcodec_close */
    unsigned int i;
    int rc;

    av_register_all();

    if (av_open_input_file(&format_context, filename, 0, 0, 0) != 0)
    {
        return 1;
    }
    if (av_find_stream_info(format_context) < 0)
    {
        /* Fix: the original leaked format_context on this and every
         * later error path */
        rc = 1;
        goto fail;
    }

    /* determine video stream */
    for (i = 0; i < format_context->nb_streams; i++)
    {
        if (format_context->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            video_stream = i;
            break;
        }
    }
    if (video_stream == -1)
    {
        rc = 2;
        goto fail;
    }
    /* video stream exists in video_stream */

    codec_context = format_context->streams[video_stream]->codec;
    codec = avcodec_find_decoder(codec_context->codec_id);
    if (codec == 0)
    {
        rc = 3;
        goto fail;
    }

    if (avcodec_open(codec_context, codec) < 0)
    {
        rc = 4;
        goto fail;
    }
    codec_opened = 1;

    /* hack to correct wrong frame rates generated by codecs */
    if (codec_context->time_base.num > 1000 && codec_context->time_base.den == 1)
    {
        codec_context->time_base.den = 1000;
    }

    frame_reg = avcodec_alloc_frame();
    frame_rgb = avcodec_alloc_frame();
    if (frame_reg == 0 || frame_rgb == 0)   /* fix: frame_reg was unchecked */
    {
        rc = 5;
        goto fail;
    }

    num_bytes = avpicture_get_size(PIX_FMT_RGB24, codec_context->width, codec_context->height);

    buffer = malloc(num_bytes);
    if (buffer == 0)                        /* fix: malloc was unchecked */
    {
        rc = 5;
        goto fail;
    }

    avpicture_fill((AVPicture*) frame_rgb, buffer, PIX_FMT_RGB24, codec_context->width, codec_context->height);

    /* hand everything over to the caller; ownership of all resources
     * transfers to *output on success */
    output->format_context = (void*) format_context;
    output->codec_context = (void*) codec_context;
    output->frame_reg = (void*) frame_reg;
    output->frame_rgb = (void*) frame_rgb;
    output->buffer = buffer;
    output->video_stream = video_stream;
    output->width = codec_context->width;
    output->height = codec_context->height;

    return 0;

fail:
    free(buffer);
    if (frame_rgb)
        av_free(frame_rgb);
    if (frame_reg)
        av_free(frame_reg);
    if (codec_opened)
        avcodec_close(codec_context);
    av_close_input_file(format_context);
    return rc;
}
Beispiel #16
0
/*
 * player_av_load: open the media file referenced by `entry`, set up the
 * audio and video decoders and prepare an XVideo (Xv) output path for
 * the video stream. Silently returns on failure, leaving priv partially
 * initialized.
 */
static void
player_av_load (Player *self, Entry *entry)
{
    gint i;
    PlayerAVPrivate *priv = PLAYER_AV (self)->priv;

    /* tear down any previously loaded entry first */
    player_av_close (self);

    if (av_open_input_file (&priv->fctx, entry_get_location (entry), NULL, 0, NULL) != 0)
        return;

    if (av_find_stream_info (priv->fctx) < 0)
        return;

    /* NOTE(review): assignment (not |=) clobbers any flags set by
     * av_open_input_file — presumably intentional, confirm */
    priv->fctx->flags = AVFMT_FLAG_GENPTS;

    dump_format(priv->fctx, 0, entry_get_location (entry), 0);

    /* pick the last video stream and last audio stream seen, stopping
     * early once one of each is found */
    priv->astream = priv->vstream = -1;
    for (i = 0; i < priv->fctx->nb_streams; i++) {
        if (priv->fctx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
            priv->vstream = i;
        }

        if (priv->fctx->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO) {
            priv->astream = i;
        }

        if (priv->vstream != -1 && priv->astream != -1)
            break;
    }

    // Setup Audio Stream
    if (priv->astream != -1) {
        priv->actx = priv->fctx->streams[priv->astream]->codec;
        AVCodec *acodec = avcodec_find_decoder (priv->actx->codec_id);
        if (acodec && avcodec_open (priv->actx, acodec) < 0) {
            g_print ("Error opening audio stream\n");
            return;
        }
    } else {
        priv->actx = NULL;
    }

    // Setup Video Stream
    if (priv->vstream != -1) {
        priv->vctx = priv->fctx->streams[priv->vstream]->codec;
        AVCodec *vcodec = avcodec_find_decoder (priv->vctx->codec_id);
        if(vcodec && avcodec_open (priv->vctx, vcodec) < 0) {
            g_print ("Error opening video stream\n");
            return;
        }
    } else {
        priv->vctx = NULL;
    }

    if (priv->vctx) {
        /* route frame buffer management through our own callbacks */
        priv->vctx->get_buffer = player_av_av_get_buffer;
        priv->vctx->release_buffer = player_av_av_release_buffer;

        priv->display = gdk_x11_display_get_xdisplay (gdk_display_get_default ());
        priv->root = DefaultRootWindow (priv->display);

        /* render directly into the embedded drawing area's X window */
        priv->win = GDK_WINDOW_XID (priv->em_da->window);
        XSetWindowBackgroundPixmap (priv->display, priv->win, None);

        /* NOTE(review): return value of XvQueryAdaptors is ignored;
         * adaptors may be uninitialized if the query fails — confirm */
        int nb_adaptors;
        XvAdaptorInfo *adaptors;
        XvQueryAdaptors (priv->display, priv->root, &nb_adaptors, &adaptors);
        int adaptor_no = 0, j, res;

        /* grab the first free port on any Xv adaptor */
        priv->xv_port_id = 0;
        for (i = 0; i < nb_adaptors && !priv->xv_port_id; i++) {
            adaptor_no = i;
            for (j = 0; j < adaptors[adaptor_no].num_ports && !priv->xv_port_id; j++) {
                res = XvGrabPort (priv->display, adaptors[adaptor_no].base_id + j, 0);
                if (Success == res) {
                    priv->xv_port_id = adaptors[adaptor_no].base_id + j;
                }
            }
        }

        XvFreeAdaptorInfo (adaptors);

        int nb_formats;
        XvImageFormatValues *formats = XvListImageFormats (priv->display,
            priv->xv_port_id, &nb_formats);

        /* prefer an Xv image format matching the decoder's pixel format */
        unsigned int vfmt = avcodec_pix_fmt_to_codec_tag (priv->vctx->pix_fmt);
        for (i = 0; i < nb_formats; i++) {
            if (vfmt == formats[i].id) {
                break;
            }
        }

        enum PixelFormat ofmt = PIX_FMT_NONE;

        priv->vframe = avcodec_alloc_frame ();
        priv->vframe_xv = avcodec_alloc_frame();

        if (i < nb_formats) {
            /* direct match: no pixel format conversion needed */
            ofmt = priv->vctx->pix_fmt;
        } else {
            /* otherwise take the first Xv format we can convert to */
            for (i = 0; i < nb_formats; i++) {
                ofmt = avcodec_codec_tag_to_pix_fmt (formats[i].id);
                if (ofmt != PIX_FMT_NONE) {
                    break;
                }
            }
        }

        /* NOTE(review): width padded by width % 4 — presumably to meet an
         * Xv alignment requirement, confirm (width + 3 & ~3 would be the
         * usual round-up) */
        int num_bytes = avpicture_get_size (ofmt,
            priv->vctx->width + priv->vctx->width % 4, priv->vctx->height);
        priv->vbuffer_xv = (uint8_t*) av_malloc (num_bytes * sizeof (uint8_t));

        avpicture_fill ((AVPicture*) priv->vframe_xv,
            priv->vbuffer_xv, ofmt,
            priv->vctx->width + priv->vctx->width % 4, priv->vctx->height);

        /* converter from the decoder's format to the Xv output format */
        priv->sws_ctx = sws_getContext (
            priv->vctx->width, priv->vctx->height, priv->vctx->pix_fmt,
            priv->vctx->width, priv->vctx->height, ofmt,
            SWS_POINT, NULL, NULL, NULL);

        priv->xvimage = XvCreateImage (
            priv->display, priv->xv_port_id,
            formats[i].id, priv->vbuffer_xv,
            priv->vctx->width, priv->vctx->height);

        XFree (formats);

        priv->xv_gc = XCreateGC (priv->display, priv->win, 0, &priv->values);
    }

    priv->entry = entry;
    g_object_ref (entry);

    priv->vpos = 0;

    priv->start_time = -1;
    priv->stop_time = -1;
}
int FfmpegCamera::OpenFfmpeg() {

    Debug ( 2, "OpenFfmpeg called." );

    mOpenStart = time(NULL);
    mIsOpening = true;

    // Open the input, not necessarily a file
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    Debug ( 1, "Calling av_open_input_file" );
    if ( av_open_input_file( &mFormatContext, mPath.c_str(), NULL, 0, NULL ) !=0 )
#else
    // Handle options
    AVDictionary *opts = 0;
    StringVector opVect = split(Options(), ",");
    
    // Set transport method as specified by method field, rtpUni is default
    if ( Method() == "rtpMulti" )
    	opVect.push_back("rtsp_transport=udp_multicast");
    else if ( Method() == "rtpRtsp" )
        opVect.push_back("rtsp_transport=tcp");
    else if ( Method() == "rtpRtspHttp" )
        opVect.push_back("rtsp_transport=http");
    
  	Debug(2, "Number of Options: %d",opVect.size());
    for (size_t i=0; i<opVect.size(); i++)
    {
    	StringVector parts = split(opVect[i],"=");
    	if (parts.size() > 1) {
    		parts[0] = trimSpaces(parts[0]);
    		parts[1] = trimSpaces(parts[1]);
    	    if ( av_dict_set(&opts, parts[0].c_str(), parts[1].c_str(), 0) == 0 ) {
    	        Debug(2, "set option %d '%s' to '%s'", i,  parts[0].c_str(), parts[1].c_str());
    	    }
    	    else
    	    {
    	        Warning( "Error trying to set option %d '%s' to '%s'", i, parts[0].c_str(), parts[1].c_str() );
    	    }
    		  
    	}
    }    
	Debug ( 1, "Calling avformat_open_input" );

    mFormatContext = avformat_alloc_context( );
    mFormatContext->interrupt_callback.callback = FfmpegInterruptCallback;
    mFormatContext->interrupt_callback.opaque = this;

    if ( avformat_open_input( &mFormatContext, mPath.c_str(), NULL, &opts ) !=0 )
#endif
    {
        mIsOpening = false;
        Error( "Unable to open input %s due to: %s", mPath.c_str(), strerror(errno) );
        return -1;
    }

    mIsOpening = false;
    Debug ( 1, "Opened input" );

    // Locate stream info from avformat_open_input
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    Debug ( 1, "Calling av_find_stream_info" );
    if ( av_find_stream_info( mFormatContext ) < 0 )
#else
    Debug ( 1, "Calling avformat_find_stream_info" );
    if ( avformat_find_stream_info( mFormatContext, 0 ) < 0 )
#endif
        Fatal( "Unable to find stream info from %s due to: %s", mPath.c_str(), strerror(errno) );
    
    Debug ( 1, "Got stream info" );

    // Find first video stream present
    mVideoStreamId = -1;
    for (unsigned int i=0; i < mFormatContext->nb_streams; i++ )
    {
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
        if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
#else
        if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
#endif
        {
            mVideoStreamId = i;
            break;
        }
    }
    if ( mVideoStreamId == -1 )
        Fatal( "Unable to locate video stream in %s", mPath.c_str() );

    Debug ( 1, "Found video stream" );

    mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;

    // Try and get the codec from the codec context
    if ( (mCodec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
        Fatal( "Can't find codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Found decoder" );

    // Open the codec
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 7, 0)
    Debug ( 1, "Calling avcodec_open" );
    if ( avcodec_open( mCodecContext, mCodec ) < 0 )
#else
    Debug ( 1, "Calling avcodec_open2" );
    if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
#endif
        Fatal( "Unable to open codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Opened codec" );

    // Allocate space for the native video frame
    mRawFrame = avcodec_alloc_frame();

    // Allocate space for the converted video frame
    mFrame = avcodec_alloc_frame();
    
    if(mRawFrame == NULL || mFrame == NULL)
        Fatal( "Unable to allocate frame for %s", mPath.c_str() );

    Debug ( 1, "Allocated frames" );
    
    int pSize = avpicture_get_size( imagePixFormat, width, height );
    if( (unsigned int)pSize != imagesize) {
        Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize);
    }

    Debug ( 1, "Validated imagesize" );
    
#if HAVE_LIBSWSCALE
    Debug ( 1, "Calling sws_isSupportedInput" );
    if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
        Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff));
    }
Beispiel #18
0
int main(int argc ,char **argv)
{
	av_register_all();
	AVFormatContext *pFormatCtx = NULL;
	AVInputFormat *file_iformat = NULL;
	
	//avio_set_interrupt_cb(decode_interrupt_cb);	
	//Open video file
	printf("open video file:%s\n", argv[1]);
	if(avformat_open_input(&pFormatCtx, argv[1], file_iformat, NULL) < 0)
	{
		printf("canot open input file: %s\n", argv[1]);
		return -1; //Cannot open file
	}
	printf("open input file: %s OK\n", argv[1]);
	//Retrieve stream information
	if(av_find_stream_info(pFormatCtx) < 0)
		return -1;//cannot find stream infomation
	//Dump information about file no to standard error
	av_dump_format(pFormatCtx, 0, argv[1], 0);

	int i;
	int videoStream;
	int audioStream;
	videoStream = -1;
	audioStream = -1;
	AVCodecContext *vCodecCtx;
	AVCodecContext *aCodecCtx;
	//Find the first video stream
	for(i = 0; i < pFormatCtx->nb_streams; i++)
	{
		if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && videoStream < 0) 
		{
			videoStream = i;
		}
		if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audioStream < 0)
		{
			audioStream = i;	
		}
	}
	if(videoStream == -1)
	{
		printf("no video stream\n");
		return -1;//Did not find a video stream
	}
	if(audioStream == -1)
	{
		printf("no audio stream\n");
		return -1;//Did not find a audio stream
	}
	printf("find video strean: %d\n", videoStream);
	printf("find audio strean: %d\n", audioStream);

	//Get a pointer to the codec context for the video stream
	vCodecCtx = pFormatCtx->streams[videoStream]->codec;
	AVCodec *vCodec;
	vCodec = avcodec_find_decoder(vCodecCtx->codec_id);
	if(vCodec == NULL)
	{
		fprintf(stderr, "Unsupported video codec\n");
		return -1;//codec not find
	}
	//Open video codec
	if(avcodec_open(vCodecCtx, vCodec) < 0)
	{
		fprintf(stderr, "open video codec error\n");
		return -1;//Could not open codec
	}
	//Get a pointer to the codec context for the audio stream
	aCodecCtx = pFormatCtx->streams[audioStream]->codec;
	static SDL_AudioSpec wanted_spec, spec;
	wanted_spec.freq = aCodecCtx->sample_rate;
	wanted_spec.format = AUDIO_S16SYS;
	wanted_spec.channels = aCodecCtx->channels;
	wanted_spec.silence = 0;
	wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
	wanted_spec.callback = audio_callback;
	wanted_spec.userdata = aCodecCtx;
	if(SDL_OpenAudio(&wanted_spec, &spec) < 0)
	{	
		fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
		return -1;
	}
	AVCodec *aCodec;
	aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
	if(aCodec == NULL)
	{
		fprintf(stderr, "Unsupport audio codec\n");
		return -1;//codec not found
	}
	if(avcodec_open(aCodecCtx, aCodec) < 0)
	{
		fprintf(stderr, "open avcodec error\n");
		return -1;
	}
	packet_queue_init(&audioq);
	SDL_PauseAudio(0);

	AVFrame *pFrame;
	//Allocate video frame
	pFrame = avcodec_alloc_frame();
	AVFrame *pFrameRGB;
	//Allocate an AVFrame structure
	pFrameRGB = avcodec_alloc_frame();
	if(pFrameRGB == NULL)
		return -1;
	uint8_t *buffer;
	int numBytes;
	//Detemine required buffer size and allocate buffer
	numBytes = avpicture_get_size(PIX_FMT_RGB24, vCodecCtx->width, vCodecCtx->height);
	buffer = (uint8_t*)av_malloc(numBytes * sizeof(uint8_t));
	//Assign appropriate parts of buffer to image planes in pFrameRGB
	//Note that pFrameRGB is an AVFrame, but AVFrame is a superset
	//of AVPicture
	avpicture_fill((AVPicture*)pFrameRGB, buffer, PIX_FMT_RGB24, vCodecCtx->width, vCodecCtx->height);
	
	if((SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)))
	{
		fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
		exit(1);
	}
	SDL_Surface *screen;
	screen = SDL_SetVideoMode(vCodecCtx->width, vCodecCtx->height, 0, 0);
	if(!screen)
	{
		fprintf(stderr, "SDL: could not set video mode\n");
		exit(1);
	}
	SDL_Overlay *bmp;
	bmp = SDL_CreateYUVOverlay(vCodecCtx->width, vCodecCtx->height, SDL_YV12_OVERLAY, screen);

	int frameFinished;
	AVPacket packet;
	SDL_Rect rect;
	i = 0;
	while(av_read_frame(pFormatCtx, &packet) >=0)
	{
		//is this a packet from video stream?
		if(packet.stream_index == videoStream)
		{
			//Decoder video frame
			avcodec_decode_video2(vCodecCtx, pFrame, &frameFinished, &packet);
			//Did we got a video frame?
			if(frameFinished)
			{
				usleep(40 * 1000);
				SDL_LockYUVOverlay(bmp);
				AVPicture pict;
				pict.data[0] = bmp->pixels[0];
				pict.data[1] = bmp->pixels[2];
				pict.data[2] = bmp->pixels[1];
				pict.linesize[0] = bmp->pitches[0];
				pict.linesize[1] = bmp->pitches[2];
				pict.linesize[2] = bmp->pitches[1];
				//Convert the image into YUV format that SDL uses
				static struct SwsContext *img_convert_ctx;
				img_convert_ctx = sws_getCachedContext(img_convert_ctx,
		                   vCodecCtx->width, vCodecCtx->height, vCodecCtx->pix_fmt,
			               vCodecCtx->width, vCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
		        sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize,
		                   0, pFrame->height, pict.data, pict.linesize);
				SDL_UnlockYUVOverlay(bmp); 
				rect.x = 0;
				rect.y = 0;
				rect.w = vCodecCtx->width;
				rect.h = vCodecCtx->height;
				SDL_DisplayYUVOverlay(bmp, &rect);
			}
			//Free the packet that was allocated by av_read_frame
			av_free_packet(&packet);	
			SDL_Event event;
			SDL_PollEvent(&event);
			switch(event.type)
			{
				case SDL_QUIT:
					quit = 1;
					SDL_Quit();
					exit(0);
					break;
				defalut:
					break;
			}
		}
		else if(packet.stream_index == audioStream)
		{
			packet_queue_put(&audioq, &packet);
		}
		else
		{
			av_free_packet(&packet);
		}
	}
	//Free the RGB image
	av_free(buffer);
	av_free(pFrameRGB);
	//Free the YUV freame
	av_free(pFrame);
	//Close the codec
	avcodec_close(vCodecCtx);
	//Close the video file
	avformat_close_input(&pFormatCtx);
}
Beispiel #19
0
/*
 * Build a fully initialised libav-based video encoder writing to `path`.
 *
 * The output container is guessed from the file name, a single video
 * stream is created, the codec context is configured (codec id, pixel
 * format, bit rate, size, time base, GOP), the codec and the output
 * file are opened, the container header is written, and a swscale
 * context (RGB32 -> encoder pixel format) is prepared.
 *
 * Returns a heap-allocated libav_encoder on success, or 0 on failure.
 * On failure the partially-built `impl` is deleted; whatever it already
 * acquired is presumably released by impl's destructor -- TODO confirm,
 * otherwise the early `break`s leak codec/file handles.
 *
 * NOTE(review): the `bit_rate` parameter is accepted but never used --
 * the bit rate is hard-coded to 8 Mbit/s below. Confirm whether that is
 * intentional.
 */
libav_encoder* libav_encoder::create( const char *path, unsigned width, unsigned height,
                                      const std::pair<int, int>& fps, int bit_rate )
{
    impl *m = new impl;
    /* `for (;;)` + `break` is used as a single-exit error-handling
     * construct: any failure breaks out to the common cleanup below. */
    for (;;) {
        /* Guess the container format (avi, mp4, ...) from the path. */
        m->format = av_guess_format(NULL, path, NULL);
        if (!m->format)
            break;

        /* Must be a file-backed container with a default video codec. */
        if ((m->format->flags & AVFMT_NOFILE) || (m->format->video_codec == CODEC_ID_NONE))
            break;

        m->formatCtx = avformat_alloc_context();
        if (!m->formatCtx)
            break;
        m->formatCtx->oformat = m->format;

        {
            /* API change in libavformat 54: av_new_stream() was
             * replaced by avformat_new_stream(). */
            #if LIBAVFORMAT_VERSION_MAJOR >= 54
                m->stream = avformat_new_stream(m->formatCtx, 0);
            #else
                m->stream = av_new_stream(m->formatCtx, 0);
            #endif
            if (!m->stream) {
                fprintf(stderr, "Could not alloc stream\n");
                break;
            }
            m->codecCtx = m->stream->codec;

            m->codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
            /* AVI output is forced to raw video; other containers use
             * the container's default video codec. */
            if (strcmp(m->format->name, "avi") == 0) {
                m->codecCtx->codec_id = CODEC_ID_RAWVIDEO;
            } else {
                m->codecCtx->codec_id = m->format->video_codec;
            }

            AVCodec *codec = avcodec_find_encoder(m->codecCtx->codec_id);
            if (!codec)
                break;

            /* Pick a pixel format: BGR24 for raw AVI, otherwise the
             * encoder's first supported format, YUV420P as fallback. */
            if (strcmp(m->format->name, "avi") == 0) {
                m->codecCtx->pix_fmt = PIX_FMT_BGR24;
            } else {
                if (codec->pix_fmts)
                    m->codecCtx->pix_fmt = codec->pix_fmts[0];
                else
                    m->codecCtx->pix_fmt = PIX_FMT_YUV420P;
            }

            m->codecCtx->bit_rate = 8000000; /* NOTE(review): ignores the bit_rate argument */
            m->codecCtx->width = width;   /* resolution must be a multiple of two */
            m->codecCtx->height = height;
            /* Codec time base is the inverse of the frame rate. */
            m->codecCtx->time_base.num = fps.second;
            m->codecCtx->time_base.den = fps.first;
            m->codecCtx->gop_size = 12; /* emit one intra frame every twelve frames at most */

            /* Some containers require codec extradata in the global
             * stream header rather than in each keyframe. */
            if(m->formatCtx->oformat->flags & AVFMT_GLOBALHEADER)
                m->codecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;

                #if LIBAVFORMAT_VERSION_MAJOR >= 54
                    if (avcodec_open2(m->codecCtx, codec, NULL) < 0) break;
                #else
                    if (avcodec_open(m->codecCtx, codec) < 0) break;
                #endif

            /* Non-raw encoders need a buffer for the encoded bitstream;
             * 4 bytes per pixel is used as a generous upper bound.
             * NOTE(review): the av_malloc result is not checked here. */
            if (!(m->formatCtx->oformat->flags & AVFMT_RAWPICTURE)) {
                m->output_size = m->codecCtx->width * m->codecCtx->height * 4;
                m->output_buffer = (uint8_t*)av_malloc(m->output_size);
            }

            /* frame1/buffer1: frame in the encoder's pixel format;
             * frame0/buffer0: RGB32 source frame supplied by callers. */
            if (!alloc_frame(&m->frame1, &m->buffer1, m->codecCtx->pix_fmt, m->codecCtx->width, m->codecCtx->height))
                break;
            if (!alloc_frame(&m->frame0, &m->buffer0, PIX_FMT_RGB32, m->codecCtx->width, m->codecCtx->height))
                break;
        }

        /* Open the output file and write the container header. */
        #if LIBAVFORMAT_VERSION_MAJOR >= 54
            if (avio_open(&m->formatCtx->pb, path, AVIO_FLAG_WRITE) < 0) break;
            avformat_write_header(m->formatCtx, NULL);
        #else
            if (url_fopen(&m->formatCtx->pb, path, URL_WRONLY) < 0) break;
            av_write_header(m->formatCtx);
        #endif

        /* Converter from the caller-facing RGB32 frames to the encoder
         * pixel format; SWS_POINT is the cheapest scaler (source and
         * destination sizes are identical, so no real scaling occurs). */
        m->swsCtx = sws_getContext( m->codecCtx->width, m->codecCtx->height, PIX_FMT_RGB32,
                                    m->codecCtx->width, m->codecCtx->height, m->codecCtx->pix_fmt,
                                    SWS_POINT, NULL, NULL, NULL );
        if (!m->swsCtx)
            break;

        return new libav_encoder(m);
    }
    delete m;
    return 0;
}
Beispiel #20
0
/*
 * Create and initialise an ffmpeg-backed video decoder instance.
 *
 * Selects a codec from the stream type / compressor / SDP format info,
 * allocates the codec context and decode frame, and -- where the
 * bitstream configuration (sprop-parameter-sets for H.264, the VOL
 * header for MPEG-4) is available out-of-band -- extracts the frame
 * size and opens the codec immediately, feeding the config data
 * through the decoder.  Codecs whose size is only known in-band are
 * opened lazily in ffmpeg_decode().
 *
 * Returns the new codec_data_t, or NULL if the codec fails to open.
 */
static codec_data_t *ffmpeg_create (const char *stream_type,
                                    const char *compressor,
                                    int type,
                                    int profile,
                                    format_list_t *media_fmt,
                                    video_info_t *vinfo,
                                    const uint8_t *userdata,
                                    uint32_t ud_size,
                                    video_vft_t *vft,
                                    void *ifptr)
{
    ffmpeg_codec_t *ffmpeg;

    ffmpeg = MALLOC_STRUCTURE(ffmpeg_codec_t);
    memset(ffmpeg, 0, sizeof(*ffmpeg));

    ffmpeg->m_vft = vft;
    ffmpeg->m_ifptr = ifptr;
    avcodec_init();
    avcodec_register_all();
    av_log_set_level(AV_LOG_QUIET);

    ffmpeg->m_codecId = ffmpeg_find_codec(stream_type, compressor, type,
                                          profile, media_fmt, userdata, ud_size);

    // must have a codecID - we checked it earlier
    ffmpeg->m_codec = avcodec_find_decoder(ffmpeg->m_codecId);
    ffmpeg->m_c = avcodec_alloc_context();
    ffmpeg->m_picture = avcodec_alloc_frame();
    bool open_codec = true;     // open the codec before returning?
    bool run_userdata = false;  // feed config data through the decoder?
    bool free_userdata = false; // do we own the userdata buffer below?

    switch (ffmpeg->m_codecId) {
    case CODEC_ID_MJPEG:
        break;
    case CODEC_ID_H264:
        // need to find height and width from the SDP sprop parameter sets
        if (media_fmt != NULL && media_fmt->fmt_param != NULL) {
            userdata = h264_sdp_parse_sprop_param_sets(media_fmt->fmt_param,
                       &ud_size,
                       ffmpeg->m_vft->log_msg);
            if (userdata != NULL) free_userdata = true;
            ffmpeg_message(LOG_DEBUG, "ffmpeg", "sprop len %d", ud_size);
        }
        if (ud_size > 0) {
            ffmpeg_message(LOG_DEBUG, "ffmpeg", "userdata len %d", ud_size);
            open_codec = ffmpeg_find_h264_size(ffmpeg, userdata, ud_size);
            ffmpeg_message(LOG_DEBUG, "ffmpeg", "open codec is %d", open_codec);
            run_userdata = true;
        } else {
            // no out-of-band config: open lazily once we see a header
            open_codec = false;
        }
        break;
    case CODEC_ID_MPEG4: {
        fmtp_parse_t *fmtp = NULL;
        open_codec = false;
        if (media_fmt != NULL) {
            fmtp = parse_fmtp_for_mpeg4(media_fmt->fmt_param,
                                        ffmpeg->m_vft->log_msg);
            if (fmtp->config_binary != NULL) {
                // take ownership of the decoded config blob
                userdata = fmtp->config_binary;
                ud_size = fmtp->config_binary_len;
                fmtp->config_binary = NULL;
                free_userdata = true;
            }
        }

        if (ud_size > 0) {
            // parse the VOL header out of the config to learn the frame size
            uint8_t *vol = MP4AV_Mpeg4FindVol((uint8_t *)userdata, ud_size);
            u_int8_t TimeBits;
            u_int16_t TimeTicks;
            u_int16_t FrameDuration;
            u_int16_t FrameWidth;
            u_int16_t FrameHeight;
            u_int8_t  aspectRatioDefine;
            u_int8_t  aspectRatioWidth;
            u_int8_t  aspectRatioHeight;
            if (vol) {
                if (MP4AV_Mpeg4ParseVol(vol,
                                        ud_size - (vol - userdata),
                                        &TimeBits,
                                        &TimeTicks,
                                        &FrameDuration,
                                        &FrameWidth,
                                        &FrameHeight,
                                        &aspectRatioDefine,
                                        &aspectRatioWidth,
                                        &aspectRatioHeight)) {
                    ffmpeg->m_c->width = FrameWidth;
                    ffmpeg->m_c->height = FrameHeight;
                    open_codec = true;
                    run_userdata = true;
                }
            }
        }
        if (fmtp != NULL) {
            free_fmtp_parse(fmtp);
        }
    }
    break;
    case CODEC_ID_SVQ3:
        // NOTE(review): extradata points at the caller's userdata buffer
        // without copying -- the caller must keep it alive; confirm.
        ffmpeg->m_c->extradata = (void *)userdata;
        ffmpeg->m_c->extradata_size = ud_size;
        if (vinfo != NULL) {
            ffmpeg->m_c->width = vinfo->width;
            ffmpeg->m_c->height = vinfo->height;
        }
        break;
    default:
        break;
    }
    if (open_codec) {
        if (avcodec_open(ffmpeg->m_c, ffmpeg->m_codec) < 0) {
            ffmpeg_message(LOG_CRIT, "ffmpeg", "failed to open codec");
            // Fix: release the sprop/config copy we own before bailing out;
            // this was previously leaked on the error path.
            if (free_userdata) {
                CHECK_AND_FREE(userdata);
            }
            // NOTE(review): the ffmpeg structure (and m_c/m_picture) is
            // still leaked here; freeing it needs the counterpart of
            // MALLOC_STRUCTURE -- confirm and add.
            return NULL;
        }
        ffmpeg_message(LOG_DEBUG, "ffmpeg", "pixel format is %d",
                       ffmpeg->m_c->pix_fmt);
        ffmpeg->m_codec_opened = true;
        if (run_userdata) {
            // push the out-of-band config through the decoder so it can
            // pick up SPS/PPS or VOL state before real frames arrive
            uint32_t offset = 0;
            do {
                int got_picture;
                offset += avcodec_decode_video(ffmpeg->m_c,
                                               ffmpeg->m_picture,
                                               &got_picture,
                                               (uint8_t *)userdata + offset,
                                               ud_size - offset);
            } while (offset < ud_size);
        }

    }

    if (free_userdata) {
        CHECK_AND_FREE(userdata);
    }
    ffmpeg->m_did_pause = 1;
    return ((codec_data_t *)ffmpeg);
}
Beispiel #21
0
/*
 * Minimal video decode demo: open the file given in argv[1], find the
 * first video stream, decode frames, convert each decoded frame to
 * RGB24 with swscale, and save the first five frames to disk as PPM
 * files via SaveFrame().
 *
 * Returns 0 on success, -1 on any setup failure.
 */
int main (int argc, const char * argv[])
{
    AVFormatContext *pFormatCtx = NULL; /* fix: must be NULL before avformat_open_input */
    int             i, videoStream;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFrame         *pFrame; 
    AVFrame         *pFrameRGB;
    AVPacket        packet;
    int             frameFinished;
    int             numBytes;
    uint8_t         *buffer;

    // Register all formats and codecs
    av_register_all();

    // Open video file
    AVInputFormat *iformat = NULL;
    AVDictionary *format_opts = NULL;
    if (avformat_open_input(&pFormatCtx, argv[1], iformat, &format_opts)!=0)
        return -1; // Couldn't open file
    av_dict_free(&format_opts); // fix: unconsumed options were leaked

    // Retrieve stream information
    if(av_find_stream_info(pFormatCtx)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], false);

    // Find the first video stream
    videoStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
        {
            videoStream=i;
            break;
        }
    if(videoStream==-1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx=pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL)
        return -1; // Codec not found

    // Open codec
    if(avcodec_open(pCodecCtx, pCodec)<0)
        return -1; // Could not open codec

    // Hack to correct wrong frame rates that seem to be generated by some codecs
    if(pCodecCtx->time_base.num>1000 && pCodecCtx->time_base.den==1)
		pCodecCtx->time_base.den=1000;
		
    // Allocate video frame
    pFrame=avcodec_alloc_frame();
    if(pFrame==NULL) // fix: this allocation was unchecked
        return -1;

    // Allocate an AVFrame structure
    pFrameRGB=avcodec_alloc_frame();
    if(pFrameRGB==NULL)
        return -1;

    // Determine required buffer size and allocate buffer
    numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
        pCodecCtx->height);

    buffer=malloc(numBytes);
    if(buffer==NULL) // fix: this allocation was unchecked
        return -1;

    // Assign appropriate parts of buffer to image planes in pFrameRGB
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
        pCodecCtx->width, pCodecCtx->height);

    // Read frames and save first five frames to disk
    i=0;
    av_init_packet(&packet);
    while(av_read_frame(pFormatCtx, &packet)>=0)
    {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream)
        {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
                &packet);

            // Did we get a video frame?
            if(frameFinished)
            {
				static struct SwsContext *img_convert_ctx;

				// Convert the image into RGB format that PPM files expect;
				// the conversion context is created once and reused.
				if(img_convert_ctx == NULL) {
					int w = pCodecCtx->width;
					int h = pCodecCtx->height;
					
					img_convert_ctx = sws_getContext(w, h, 
									pCodecCtx->pix_fmt, 
									w, h, PIX_FMT_RGB24, SWS_BICUBIC,
									NULL, NULL, NULL);
					if(img_convert_ctx == NULL) {
						fprintf(stderr, "Cannot initialize the conversion context!\n");
						exit(1);
					}
				}
				// sws_scale returns the output slice height; errors are
				// not treated as fatal here (see old ffmpeg behavior).
				sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, 
						  pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
                // Save the frame to disk.  Fix: pre-increment so exactly
                // five frames (1..5) are written, as the comment promises;
                // the old `i++<=5` wrote six.
                if(++i<=5)
                    SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }

    // Free the RGB image
    free(buffer);
    av_free(pFrameRGB);

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    av_close_input_file(pFormatCtx);

    return 0;
}
Beispiel #22
0
/*
 * Decode one buffer of compressed video.
 *
 * Flow: (1) lazily open the codec from in-band headers if ffmpeg_create
 * could not; (2) wait for the first sync (I) frame; (3) decode the
 * buffer; (4) for PTS-stamped input, convert PTS to DTS per codec
 * (MPEG-2 via temporal reference, MPEG-4 via VOP type, H.264 via slice
 * type); (5) hand the decoded picture to the renderer, converting to
 * YUV 4:2:0 if the decoder produced another pixel format.
 *
 * Always returns buflen (the whole buffer is considered consumed),
 * even on error paths.  `from_rtp`, `sync_frame` and `ud` are unused
 * here.
 */
static int ffmpeg_decode (codec_data_t *ptr,
                          frame_timestamp_t *pts,
                          int from_rtp,
                          int *sync_frame,
                          uint8_t *buffer,
                          uint32_t buflen,
                          void *ud)
{
    ffmpeg_codec_t *ffmpeg = (ffmpeg_codec_t *)ptr;
    uint32_t bytes_used = 0;
    int got_picture = 0;
    uint64_t ts = pts->msec_timestamp;

    //ffmpeg_message(LOG_ERR, "ffmpeg", "%u timestamp "U64, buflen, ts);
    if (ffmpeg->m_codec_opened == false) {
        // look for header, like above, and open it
        bool open_codec = true;
        switch (ffmpeg->m_codecId) {
        case CODEC_ID_H264:
            // H.264 needs the frame size from an SPS before opening
            open_codec = ffmpeg_find_h264_size(ffmpeg, buffer, buflen);
            break;
        default:
            break;
        }
        if (open_codec) {
            if (avcodec_open(ffmpeg->m_c, ffmpeg->m_codec) < 0) {
                ffmpeg_message(LOG_CRIT, "ffmpeg", "failed to open codec");
                return buflen;
            }
            ffmpeg->m_codec_opened = true;
            ffmpeg_message(LOG_ERR, "ffmpeg", "opened codec");
        } else {
            // still no header in this buffer -- drop it and wait
            ffmpeg_message(LOG_ERR, "ffmpeg", "no open %u "U64, buflen, ts);
            return buflen;
        }
    }

    // look and see if we have read the I frame.
    // Frames before the first sync point cannot be decoded correctly.
    if (ffmpeg->m_got_i == false) {
        if (ffmpeg_frame_is_sync(ptr, buffer, buflen, NULL) == 0) {
            return buflen;
        }
        ffmpeg->m_got_i = true;
    }

    // Decode loop: keep feeding the remainder of the buffer until it
    // is consumed or the decoder reports an error (-1).
    int ret;
    do {
        int local_got_picture;
        ret = avcodec_decode_video(ffmpeg->m_c,
                                   ffmpeg->m_picture,
                                   &local_got_picture,
                                   buffer + bytes_used,
                                   buflen - bytes_used);
        bytes_used += ret;
        //ffmpeg_message(LOG_CRIT, "ffmpeg", "used %d %d", ret, local_got_picture);
        got_picture |= local_got_picture;
    } while (ret != -1 && bytes_used < buflen);

    // Presentation timestamps must be converted to decode order (DTS)
    // so frames are displayed with the right timing despite B-frame
    // reordering.  The conversion is codec-specific.
    if (pts->timestamp_is_pts) {
        //ffmpeg_message(LOG_ERR, "ffmpeg", "pts timestamp "U64, ts);
        if (ffmpeg->m_codecId == CODEC_ID_MPEG2VIDEO) {
            // Lazily learn the frame rate from the sequence header;
            // it is needed by the PTS->DTS conversion below.
            if (ffmpeg->pts_convert.frame_rate == 0.0) {
                int have_mpeg2;
                uint32_t h, w;
                double bitrate, aspect_ratio;
                uint8_t profile;
                MP4AV_Mpeg3ParseSeqHdr(buffer, buflen,
                                       &have_mpeg2,
                                       &h, &w,
                                       &ffmpeg->pts_convert.frame_rate,
                                       &bitrate, &aspect_ratio,
                                       &profile);
            }

            // Use the picture header's type and temporal reference to
            // map this PTS onto a DTS.
            int ftype;
            int header = MP4AV_Mpeg3FindPictHdr(buffer, buflen, &ftype);
            if (header >= 0) {
                uint16_t temp_ref = MP4AV_Mpeg3PictHdrTempRef(buffer + header);
                uint64_t ret;
                if (got_picture == 0 ||
                        mpeg3_find_dts_from_pts(&ffmpeg->pts_convert,
                                                ts,
                                                ftype,
                                                temp_ref,
                                                &ret) < 0) {
                    ffmpeg->have_cached_ts = false;
                    return buflen;
                }
#if 0
                ffmpeg->m_vft->log_msg(LOG_DEBUG, "ffmpeg", "pts "U64" dts "U64" temp %u type %u %u",
                                       ts, ret,
                                       temp_ref, ftype, got_picture);
#endif
                ts = ret;
                //	ffmpeg_message(LOG_ERR, "ffmpeg", "type %d ref %u "U64, ftype, temp_ref, ret);
            }
        } else if (ffmpeg->m_codecId == CODEC_ID_MPEG4) {
            // MPEG-4: derive DTS from the VOP coding type (I/P/B).
            uint8_t *vopstart = MP4AV_Mpeg4FindVop(buffer, buflen);
            if (vopstart) {
                int ftype = MP4AV_Mpeg4GetVopType(vopstart, buflen);
                uint64_t dts;
                if (MP4AV_calculate_dts_from_pts(&ffmpeg->pts_to_dts,
                                                 ts,
                                                 ftype,
                                                 &dts) < 0) {
                    ffmpeg->have_cached_ts = false;
#ifdef DEBUG_FFMPEG_PTS
                    ffmpeg_message(LOG_DEBUG, "ffmpeg", "type %d %d pts "U64" failed to calc",
                                   ftype, got_picture, ts);
#endif
                    return buflen;
                }
#ifdef DEBUG_FFMPEG_PTS
                ffmpeg_message(LOG_DEBUG, "ffmpeg", "type %d %d pts "U64" dts "U64,
                               ftype, got_picture, ts, dts);
#endif
                ts = dts;
            }
        } else if (ffmpeg->m_codecId == CODEC_ID_H264) {
            // H.264: walk the NAL units looking for a B slice; that
            // determines whether this access unit reorders.
            uint8_t *nal_ptr = buffer;
            uint32_t len = buflen;
            bool have_b_nal = false;
            do {
                if (h264_nal_unit_type_is_slice(h264_nal_unit_type(nal_ptr))) {
                    uint8_t slice_type;
                    if (h264_find_slice_type(nal_ptr, len, &slice_type, false) >= 0) {
                        have_b_nal = H264_TYPE_IS_B(slice_type);
                    }
                }
                uint32_t offset = h264_find_next_start_code(nal_ptr, len);
                if (offset == 0) {
                    len = 0;
                } else {
                    nal_ptr += offset;
                    len -= offset;
                }
            } while (len > 0 && have_b_nal == false);
            uint64_t dts;
            if (MP4AV_calculate_dts_from_pts(&ffmpeg->pts_to_dts,
                                             ts,
                                             have_b_nal ? VOP_TYPE_B : VOP_TYPE_P,
                                             &dts) < 0) {
                ffmpeg->have_cached_ts = false;
#ifdef DEBUG_FFMPEG_PTS
                ffmpeg_message(LOG_DEBUG, "ffmpeg", "pts "U64" failed to calc",
                               ts);
#endif
                return buflen;
            }
            ts = dts;
        }
    }
    if (got_picture != 0) {
        // First decoded picture: tell the renderer the video geometry.
        if (ffmpeg->m_video_initialized == false) {
            double aspect;
            if (ffmpeg->m_c->sample_aspect_ratio.den == 0) {
                aspect = 0.0; // don't have one
            } else {
                aspect = av_q2d(ffmpeg->m_c->sample_aspect_ratio);
            }
            if (ffmpeg->m_c->width == 0) {
                return buflen;
            }
            ffmpeg->m_vft->video_configure(ffmpeg->m_ifptr,
                                           ffmpeg->m_c->width,
                                           ffmpeg->m_c->height,
                                           VIDEO_FORMAT_YUV,
                                           aspect);
            ffmpeg->m_video_initialized = true;
        }

        if (ffmpeg->m_c->pix_fmt != PIX_FMT_YUV420P) {
            // convert the image from whatever it is to YUV 4:2:0
            AVPicture from, to;
            int ret;
            // get the buffer to copy into (put it right into the ring buffer)
            ret = ffmpeg->m_vft->video_get_buffer(ffmpeg->m_ifptr,
                                                  &to.data[0],
                                                  &to.data[1],
                                                  &to.data[2]);
            if (ret == 0) {
                return buflen;
            }
            // set up the AVPicture structures
            // YUV420P: full-width luma, half-width chroma strides.
            to.linesize[0] = ffmpeg->m_c->width;
            to.linesize[1] = ffmpeg->m_c->width / 2;
            to.linesize[2] = ffmpeg->m_c->width / 2;
            for (int ix = 0; ix < 4; ix++) {
                from.data[ix] = ffmpeg->m_picture->data[ix];
                from.linesize[ix] = ffmpeg->m_picture->linesize[ix];
            }

            img_convert(&to, PIX_FMT_YUV420P,
                        &from, ffmpeg->m_c->pix_fmt,
                        ffmpeg->m_c->width, ffmpeg->m_c->height);
            // use the cached timestamp if the previous call buffered one
            ffmpeg->m_vft->video_filled_buffer(ffmpeg->m_ifptr,
                                               ffmpeg->have_cached_ts ?
                                               ffmpeg->cached_ts : ts);
        } else {
            // already YUV 4:2:0 -- hand the decoder's planes over directly
            ffmpeg->m_vft->video_have_frame(ffmpeg->m_ifptr,
                                            ffmpeg->m_picture->data[0],
                                            ffmpeg->m_picture->data[1],
                                            ffmpeg->m_picture->data[2],
                                            ffmpeg->m_picture->linesize[0],
                                            ffmpeg->m_picture->linesize[1],
                                            ffmpeg->have_cached_ts ?
                                            ffmpeg->cached_ts : ts);
        }
        ffmpeg->cached_ts = ts;
    } else {
        // no picture emitted (decoder is buffering): remember this
        // timestamp so it can be attached to the next emitted frame
        ffmpeg->cached_ts = ts;
        ffmpeg->have_cached_ts = true;
    }
#ifdef DEBUG_FFMPEG_FRAME
    ffmpeg_message(LOG_DEBUG, "ffmpeg", "used %u of %u", bytes_used, buflen);
#endif
    return (buflen);
}
Beispiel #23
0
/*
 * Initialise the video encoder half of the recorder.
 *
 * Chooses lossless H.264 (libx264rgb/libx264, qp=0) when the user asked
 * for H.264 recording, FFV1 otherwise; maps the emulator pixel format
 * onto the software scaler's input format; configures and opens the
 * codec; and allocates the encode output buffer plus the conversion
 * frame.
 *
 * Returns true on success, false on any failure.
 */
static bool ffemu_init_video(struct ff_video_info *video, const struct ffemu_params *param)
{
   AVCodec *codec = NULL;
   if (g_settings.video.h264_record)
   {
      codec = avcodec_find_encoder_by_name("libx264rgb");
      // Older versions of FFmpeg have RGB encoding in libx264.
      if (!codec)
         codec = avcodec_find_encoder_by_name("libx264");
   }
   else
      codec = avcodec_find_encoder_by_name("ffv1");

   if (!codec)
      return false;

   video->encoder = codec;

   // Map the source pixel format to a scaler input format + bytes/pixel.
   switch (param->pix_fmt)
   {
      case FFEMU_PIX_XRGB1555:
         video->scaler.in_fmt = SCALER_FMT_0RGB1555;
         video->pix_size = 2;
         break;

      case FFEMU_PIX_BGR24:
         video->scaler.in_fmt = SCALER_FMT_BGR24;
         video->pix_size = 3;
         break;

      case FFEMU_PIX_ARGB8888:
         video->scaler.in_fmt = SCALER_FMT_ARGB8888;
         video->pix_size = 4;
         break;

      default:
         return false;
   }

   // Encoder-side pixel format: BGR24 for libx264rgb, RGB32 for FFV1.
   if (g_settings.video.h264_record)
   {
      video->pix_fmt = PIX_FMT_BGR24;
      video->scaler.out_fmt = SCALER_FMT_BGR24;
   }
   else
   {
      video->pix_fmt = PIX_FMT_RGB32;
      video->scaler.out_fmt = SCALER_FMT_ARGB8888;
   }

#ifdef HAVE_FFMPEG_ALLOC_CONTEXT3
   video->codec = avcodec_alloc_context3(codec);
#else
   video->codec = avcodec_alloc_context();
   avcodec_get_context_defaults(video->codec);
#endif

   video->codec->width = param->out_width;
   video->codec->height = param->out_height;
   video->codec->time_base = av_d2q(1.0 / param->fps, 1000000); // Arbitrary big number.
   video->codec->sample_aspect_ratio = av_d2q(param->aspect_ratio * param->out_height / param->out_width, 255);
   video->codec->pix_fmt = video->pix_fmt;

#ifdef HAVE_FFMPEG_AVCODEC_OPEN2
   AVDictionary *opts = NULL;
#endif

   if (g_settings.video.h264_record)
   {
      video->codec->thread_count = 3;
#ifdef HAVE_FFMPEG_AVCODEC_OPEN2
      // qp=0 requests lossless x264.  Fix: `opts` is only declared when
      // HAVE_FFMPEG_AVCODEC_OPEN2 is set, so this call must be guarded
      // too -- previously it broke the build against older FFmpeg.
      av_dict_set(&opts, "qp", "0", 0);
#endif
   }
   else
      video->codec->thread_count = 2;

#ifdef HAVE_FFMPEG_AVCODEC_OPEN2
   bool opened = avcodec_open2(video->codec, codec, &opts) == 0;
   // Fix: free the options dict on failure as well; it used to leak
   // when avcodec_open2 failed.
   if (opts)
      av_dict_free(&opts);
   if (!opened)
      return false;
#else
   if (avcodec_open(video->codec, codec) != 0)
      return false;
#endif

   // Allocate a big buffer :p ffmpeg API doesn't seem to give us some clues how big this buffer should be.
   video->outbuf_size = 1 << 23;
   video->outbuf = (uint8_t*)av_malloc(video->outbuf_size);
   if (!video->outbuf)
      return false;

   size_t size = avpicture_get_size(video->pix_fmt, param->out_width, param->out_height);
   video->conv_frame_buf = (uint8_t*)av_malloc(size);
   video->conv_frame = avcodec_alloc_frame();
   if (!video->conv_frame_buf || !video->conv_frame)
      return false;
   avpicture_fill((AVPicture*)video->conv_frame, video->conv_frame_buf, video->pix_fmt, param->out_width, param->out_height);

   return true;
}
Beispiel #24
0
/*
 * Set up the MPEG-1 video dumper: find and open the encoder, allocate
 * the encode output buffer, the YUV 4:2:0 destination picture (outpic,
 * aliased by the AVFrame `pic` for the encode call) and the packed
 * RGB24 source picture (inpic), and open the raw output file.
 *
 * All state lives in file-scope globals (avc, avctx, pic, outpic,
 * inpic, output_buffer, video_outf).  Any failure prints a message and
 * exits the process, matching the function's existing error style.
 */
void init_dumper( int width, int height )
{  
  // Effective capture rate: machine rate divided by the frame skip.
  double fps = Machine->drv->frames_per_second / (double)frame_halver;

  avcodec_init();
  avcodec_register_all();
#ifdef AVICAPTURE_DEBUG
  av_log_set_level (99);
#endif

  avc = avcodec_find_encoder( CODEC_ID_MPEG1VIDEO );
  if (avc == NULL)
  {
  	  printf ("cannot find MPEG encoder\n");
     exit (1);
  }

  avctx = avcodec_alloc_context();
  if (avctx == NULL) // fix: was used below without a check
  {
     printf ("cannot allocate codec context\n");
     exit (1);
  }

  /* sample parameters */
  avctx->me_method = ME_LOG;
  avctx->pix_fmt = PIX_FMT_YUV420P;
  avctx->bit_rate = 2500000;
  avctx->width = width;
  avctx->height = height;
  avctx->time_base.num = 1;
  // NOTE(review): fps is a double; assigning it to the integer
  // denominator truncates fractional rates (e.g. 59.94 -> 59).
  // Confirm whether an exact rational time base is wanted here.
  avctx->time_base.den = fps;
  avctx->gop_size=10;
  avctx->max_b_frames=1;
  avctx->draw_horiz_band = NULL;
  avctx->idct_algo = FF_IDCT_AUTO;

  int ret = avcodec_open( avctx, avc );
  if (ret)
    {
      printf("FAILED TO OPEN ENCODER, ret=%d, errno=%d\n", ret, errno);
      exit( 1 );
    }
  
  int size=height*width;
  
  pic = avcodec_alloc_frame();
  
  output_buffer=(char *)malloc(BUFFSIZE); /* Find where this value comes from */
  
  /* YUV 420 Planar: full-size Y plane followed by quarter-size U and V. */
  outpic.data[0]=(unsigned char *)malloc(size*3/2);
  // fix: these allocations were used unchecked; fail loudly instead
  if (pic == NULL || output_buffer == NULL || outpic.data[0] == NULL)
  {
    printf ("out of memory setting up encoder buffers\n");
    exit (1);
  }
  outpic.data[1]=outpic.data[0]+size;
  outpic.data[2]=outpic.data[1]+size/4;
  outpic.data[3]=NULL;
  outpic.linesize[0]=width;
  outpic.linesize[1]=outpic.linesize[2]=width/2;
  outpic.linesize[3]=0;
  
  pic->data[0]=outpic.data[0];  /* Points to data portion of outpic     */
  pic->data[1]=outpic.data[1];  /* Since encode_video takes an AVFrame, */
  pic->data[2]=outpic.data[2];  /* and img_convert takes an AVPicture   */
  pic->data[3]=outpic.data[3];
  
  pic->linesize[0]=outpic.linesize[0]; /* This doesn't change */
  pic->linesize[1]=outpic.linesize[1];
  pic->linesize[2]=outpic.linesize[2];
  pic->linesize[3]=outpic.linesize[3];
  
  inpic.data[0]=(unsigned char *)malloc(size*3); /* RGB24 packed in 1 plane */
  if (inpic.data[0] == NULL) // fix: was unchecked
  {
    printf ("out of memory setting up RGB buffer\n");
    exit (1);
  }
  inpic.data[1]=inpic.data[2]=inpic.data[3]=NULL;
  inpic.linesize[0]=width*3;
  inpic.linesize[1]=inpic.linesize[2]=inpic.linesize[3]=0;

  video_outf = fopen("video.outf","wb");
  if (video_outf == NULL)
  {
    printf ("failed to open output video file\n");
    exit (1);
  }
}
Beispiel #25
0
static int decode_example(const char *filename)
{
    AVFormatContext *fctx = NULL;
    AVCodec *codec;
    AVCodecContext *avctx;
    int video_st = -1;
    int i, got_pic;
    AVFrame *picture, *tmp_picture;
    int size;
    uint8_t *tmp_buf;
    int ret = 0;

    avformat_open_input(&fctx, filename, NULL, NULL);
    if (fctx == NULL)
        return AVERROR(1);

    av_find_stream_info(fctx);

    av_dump_format(fctx, 0, filename, 0);

    for (i = 0; i < fctx->nb_streams; i++) {
        if (fctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_st = i;
            break;
        }
    }

    avctx = fctx->streams[video_st]->codec;

    codec = avcodec_find_decoder_by_name("libdm365_h264");
    if (codec == NULL) {
        av_log(avctx, AV_LOG_ERROR, "unsupported codec\n");
        return AVERROR(1);
    }

    if (avcodec_open(avctx, codec) < 0) {
        av_log(avctx, AV_LOG_ERROR, "cannot open codec\n");
        return AVERROR(1);
    }

    picture = avcodec_alloc_frame();
    tmp_picture = avcodec_alloc_frame();

    size = avpicture_get_size(PIX_FMT_YUV420P, avctx->width, avctx->height);
    tmp_buf = av_malloc(size);
    if (tmp_buf == NULL) {
        ret = AVERROR(ENOMEM);
        goto decode_cleanup;
    }
    avpicture_fill((AVPicture *)tmp_picture, tmp_buf,
            PIX_FMT_NV12, avctx->width, avctx->height);

    for (i = 0; i < 10; i++) {
        AVPacket pkt;
        int nb;
        char fname[32];
        int factor = 2;

        if (av_read_frame(fctx, &pkt) < 0)
            break;

        nb = avcodec_decode_video2(avctx, picture, &got_pic, &pkt);
        if (nb < 0) {
            av_log(avctx, AV_LOG_ERROR, "error in decoding\n");
            goto decode_cleanup;
        }
        printf("Decoded frame: %d\n", i);

        my_scale((AVPicture *) picture, avctx->width, avctx->height,
                (AVPicture *) tmp_picture, factor);

        sprintf(fname, "frame%02d.pgm", i+1);
        pgm_save(picture->data[0], picture->linesize[0],
                avctx->width, avctx->height, fname);

        sprintf(fname, "frame%02d.bmp", i+1);
        save_image((AVPicture *)tmp_picture, avctx->pix_fmt,
                avctx->width/factor, avctx->height/factor, fname);
    }

decode_cleanup:
    av_free(picture);
    av_free(tmp_picture->data[0]);
    av_free(tmp_picture);
    av_close_input_file(fctx);
    avcodec_close(avctx);
    return ret;
}
Beispiel #26
0
// Open the camera input (a file, device, or network URL held in mPath),
// locate its first video stream, open a decoder for it, allocate the
// raw/converted frames, and validate the configured image size.
// Returns -1 if the input cannot be opened; other failures call Fatal().
// NOTE(review): this block appears truncated in this source chunk — the
// function body does not close before the next example marker.
int FfmpegCamera::OpenFfmpeg() {

    Debug ( 2, "OpenFfmpeg called." );

    // Record when the open started so a watchdog / interrupt callback can
    // detect a stalled connect attempt while mIsOpening is true.
    mOpenStart = time(NULL);
    mIsOpening = true;

    // Open the input, not necessarily a file
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    Debug ( 1, "Calling av_open_input_file" );
    if ( av_open_input_file( &mFormatContext, mPath.c_str(), NULL, 0, NULL ) !=0 )
#else
    Debug ( 1, "Calling avformat_open_input" );

    // Newer API: allocate the context ourselves so the interrupt callback
    // can be installed before the (possibly blocking) open call.
    mFormatContext = avformat_alloc_context( );
    mFormatContext->interrupt_callback.callback = FfmpegInterruptCallback;
    mFormatContext->interrupt_callback.opaque = this;

    if ( avformat_open_input( &mFormatContext, mPath.c_str(), NULL, NULL ) !=0 )
#endif
    {
        mIsOpening = false;
        // NOTE(review): libavformat does not reliably set errno, so the
        // strerror(errno) text here may be unrelated to the real failure.
        Error( "Unable to open input %s due to: %s", mPath.c_str(), strerror(errno) );
        return -1;
    }

    mIsOpening = false;
    Debug ( 1, "Opened input" );

    // Locate stream info from avformat_open_input
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    Debug ( 1, "Calling av_find_stream_info" );
    if ( av_find_stream_info( mFormatContext ) < 0 )
#else
    Debug ( 1, "Calling avformat_find_stream_info" );
    if ( avformat_find_stream_info( mFormatContext, 0 ) < 0 )
#endif
        Fatal( "Unable to find stream info from %s due to: %s", mPath.c_str(), strerror(errno) );
    
    Debug ( 1, "Got stream info" );

    // Find first video stream present
    mVideoStreamId = -1;
    for (unsigned int i=0; i < mFormatContext->nb_streams; i++ )
    {
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
        if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
#else
        if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
#endif
        {
            mVideoStreamId = i;
            break;
        }
    }
    if ( mVideoStreamId == -1 )
        Fatal( "Unable to locate video stream in %s", mPath.c_str() );

    Debug ( 1, "Found video stream" );

    // The codec context is owned by the stream/format context; it is not
    // allocated here and must not be freed separately.
    mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;

    // Try and get the codec from the codec context
    if ( (mCodec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
        Fatal( "Can't find codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Found decoder" );

    // Open the codec
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 7, 0)
    Debug ( 1, "Calling avcodec_open" );
    if ( avcodec_open( mCodecContext, mCodec ) < 0 )
#else
    Debug ( 1, "Calling avcodec_open2" );
    if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
#endif
        Fatal( "Unable to open codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Opened codec" );

    // Allocate space for the native video frame
    mRawFrame = avcodec_alloc_frame();

    // Allocate space for the converted video frame
    mFrame = avcodec_alloc_frame();
    
    if(mRawFrame == NULL || mFrame == NULL)
        Fatal( "Unable to allocate frame for %s", mPath.c_str() );

    Debug ( 1, "Allocated frames" );
    
    // Sanity-check that the capture buffer size configured for this camera
    // matches what the target pixel format/dimensions actually require.
    int pSize = avpicture_get_size( imagePixFormat, width, height );
    if( (unsigned int)pSize != imagesize) {
        Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize);
    }

    Debug ( 1, "Validated imagesize" );
    
#if HAVE_LIBSWSCALE
    Debug ( 1, "Calling sws_isSupportedInput" );
    // Decode the pix_fmt value as a FourCC for the error message.
    if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
        Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff));
    }
Beispiel #27
0
/* Decode a single JPEG image from the byte stream 'f'.
 * 'alloc_cb' is invoked (via jpeg_get_buffer) once the image dimensions
 * are known, so the caller can allocate the destination picture.
 * Returns 0 on success, -1 on any error. */
static int jpeg_read(ByteIOContext *f, 
                     int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
{
    AVCodecContext *c;
    AVFrame *picture, picture1;
    int len, size, got_picture, i;
    uint8_t *inbuf_ptr, inbuf[IO_BUF_SIZE];
    JpegOpaque jctx;

    jctx.alloc_cb = alloc_cb;
    jctx.opaque = opaque;
    jctx.ret_code = -1; /* default return code is error */
    
    c = avcodec_alloc_context();
    if (!c)
        return -1;
    picture= avcodec_alloc_frame();
    if (!picture) {
        av_free(c);
        return -1;
    }
    c->opaque = &jctx;
    c->get_buffer = jpeg_get_buffer;
    c->flags |= CODEC_FLAG_TRUNCATED; /* we don't send complete frames */
    if (avcodec_open(c, &mjpeg_decoder) < 0)
        goto fail1;
    /* BUGFIX: got_picture was read uninitialized when the stream ended
       before a complete frame was decoded. */
    got_picture = 0;
    for(;;) {
        size = get_buffer(f, inbuf, sizeof(inbuf));
        if (size == 0)
            break;
        inbuf_ptr = inbuf;
        while (size > 0) {
            len = avcodec_decode_video(c, &picture1, &got_picture, 
                                       inbuf_ptr, size);
            if (len < 0)
                goto fail;
            if (got_picture)
                goto the_end;
            size -= len;
            inbuf_ptr += len;
        }
    }
 the_end:
    /* BUGFIX: if we reached EOF without decoding a frame, 'picture1' holds
       no valid data; copying from it would be undefined behavior. */
    if (!got_picture)
        goto fail;
    /* XXX: currently, the mjpeg decoder does not use AVFrame, so we
       must do it by hand */
    if (jpeg_get_buffer(c, picture) < 0)
        goto fail;
    for(i=0;i<3;i++) {
        int w, h;
        w = c->width;
        h = c->height;
        if (i >= 1) {
            /* chroma planes are subsampled depending on the pixel format */
            switch(c->pix_fmt) {
            default:
            case PIX_FMT_YUV420P:
                w = (w + 1) >> 1;
                h = (h + 1) >> 1;
                break;
            case PIX_FMT_YUV422P:
                w = (w + 1) >> 1;
                break;
            case PIX_FMT_YUV444P:
                break;
            }
        }
        jpeg_img_copy(picture->data[i], picture->linesize[i],
                 picture1.data[i], picture1.linesize[i],
                 w, h);
    }
    jctx.ret_code = 0;
 fail:
    avcodec_close(c);
 fail1:
    av_free(picture);
    av_free(c);
    return jctx.ret_code;
}
Beispiel #28
0
/* Decode up to 'max_length' seconds of the first audio stream of
 * 'file_name' and feed the samples to 'chromaprint_ctx'.
 * 'buffer1'/'buffer2' are caller-supplied scratch buffers of at least
 * BUFFER_SIZE bytes; 'duration' receives the stream duration in seconds.
 * Returns 1 on success, 0 on failure. */
int decode_audio_file(ChromaprintContext *chromaprint_ctx, int16_t *buffer1, int16_t *buffer2, const char *file_name, int max_length, int *duration)
{
	int i, ok = 0, remaining, length, consumed, buffer_size, codec_ctx_opened = 0;
	AVFormatContext *format_ctx = NULL;
	AVCodecContext *codec_ctx = NULL;
	AVCodec *codec = NULL;
	AVStream *stream = NULL;
	AVPacket packet, packet_temp;
#ifdef HAVE_AV_AUDIO_CONVERT
	AVAudioConvert *convert_ctx = NULL;
#endif
	int16_t *buffer;

	/* "-" conventionally means standard input */
	if (!strcmp(file_name, "-")) {
		file_name = "pipe:0";
	}

#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 2, 0)
	if (av_open_input_file(&format_ctx, file_name, NULL, 0, NULL) != 0) {
#else
	if (avformat_open_input(&format_ctx, file_name, NULL, NULL) != 0) {
#endif
		fprintf(stderr, "ERROR: couldn't open the file\n");
		goto done;
	}

	if (av_find_stream_info(format_ctx) < 0) {
		fprintf(stderr, "ERROR: couldn't find stream information in the file\n");
		goto done;
	}

	/* Use the first audio stream found */
	for (i = 0; i < format_ctx->nb_streams; i++) {
		codec_ctx = format_ctx->streams[i]->codec;
		if (codec_ctx && codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
			stream = format_ctx->streams[i];
			break;
		}
	}
	if (!stream) {
		fprintf(stderr, "ERROR: couldn't find any audio stream in the file\n");
		goto done;
	}

	codec = avcodec_find_decoder(codec_ctx->codec_id);
	if (!codec) {
		fprintf(stderr, "ERROR: unknown codec\n");
		goto done;
	}

	if (avcodec_open(codec_ctx, codec) < 0) {
		fprintf(stderr, "ERROR: couldn't open the codec\n");
		goto done;
	}
	codec_ctx_opened = 1;

	if (codec_ctx->channels <= 0) {
		fprintf(stderr, "ERROR: no channels found in the audio stream\n");
		goto done;
	}

	/* Chromaprint wants interleaved signed 16-bit samples; set up a
	 * converter when the decoder produces a different sample format. */
	if (codec_ctx->sample_fmt != AV_SAMPLE_FMT_S16) {
#ifdef HAVE_AV_AUDIO_CONVERT
		convert_ctx = av_audio_convert_alloc(AV_SAMPLE_FMT_S16, codec_ctx->channels,
		                                     codec_ctx->sample_fmt, codec_ctx->channels, NULL, 0);
		if (!convert_ctx) {
			fprintf(stderr, "ERROR: couldn't create sample format converter\n");
			goto done;
		}
#else
		fprintf(stderr, "ERROR: unsupported sample format\n");
		goto done;
#endif
	}

	*duration = stream->time_base.num * stream->duration / stream->time_base.den;

	av_init_packet(&packet);
	av_init_packet(&packet_temp);

	remaining = max_length * codec_ctx->channels * codec_ctx->sample_rate;
	chromaprint_start(chromaprint_ctx, codec_ctx->sample_rate, codec_ctx->channels);

	while (1) {
		if (av_read_frame(format_ctx, &packet) < 0) {
			break;
		}

		/* packet_temp is a decode cursor into 'packet'; ownership of the
		 * underlying buffer stays with 'packet'. */
		packet_temp.data = packet.data;
		packet_temp.size = packet.size;

		while (packet_temp.size > 0) {
			buffer_size = BUFFER_SIZE;
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(52, 23, 0)
			consumed = avcodec_decode_audio2(codec_ctx,
				buffer1, &buffer_size, packet_temp.data, packet_temp.size);
#else
			consumed = avcodec_decode_audio3(codec_ctx,
				buffer1, &buffer_size, &packet_temp);
#endif

			if (consumed < 0) {
				break;
			}

			packet_temp.data += consumed;
			packet_temp.size -= consumed;

			if (buffer_size <= 0) {
				if (buffer_size < 0) {
					fprintf(stderr, "WARNING: size returned from avcodec_decode_audioX is too small\n");
				}
				continue;
			}
			if (buffer_size > BUFFER_SIZE) {
				fprintf(stderr, "WARNING: size returned from avcodec_decode_audioX is too large\n");
				continue;
			}

#ifdef HAVE_AV_AUDIO_CONVERT
			if (convert_ctx) {
				const void *ibuf[6] = { buffer1 };
				void *obuf[6] = { buffer2 };
#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(51, 8, 0)
				int istride[6] = { av_get_bits_per_sample_format(codec_ctx->sample_fmt) / 8 };
#else
				int istride[6] = { av_get_bytes_per_sample(codec_ctx->sample_fmt) };
#endif
				int ostride[6] = { 2 };
				int len = buffer_size / istride[0];
				if (av_audio_convert(convert_ctx, obuf, ostride, ibuf, istride, len) < 0) {
					break;
				}
				buffer = buffer2;
				buffer_size = len * ostride[0];
			}
			else {
				buffer = buffer1;
			}
#else
			buffer = buffer1;
#endif

			length = MIN(remaining, buffer_size / 2);
			if (!chromaprint_feed(chromaprint_ctx, buffer, length)) {
				fprintf(stderr, "ERROR: fingerprint calculation failed\n");
				/* BUGFIX: release the current packet before leaving the loop */
				av_free_packet(&packet);
				goto done;
			}

			if (max_length) {
				remaining -= length;
				if (remaining <= 0) {
					/* BUGFIX: release the current packet before leaving the loop */
					av_free_packet(&packet);
					goto finish;
				}
			}
		}

		if (packet.data) {
			av_free_packet(&packet);
		}
	}

finish:
	if (!chromaprint_finish(chromaprint_ctx)) {
		fprintf(stderr, "ERROR: fingerprint calculation failed\n");
		goto done;
	}

	ok = 1;

done:
	if (codec_ctx_opened) {
		avcodec_close(codec_ctx);
	}
	if (format_ctx) {
		av_close_input_file(format_ctx);
	}
#ifdef HAVE_AV_AUDIO_CONVERT
	if (convert_ctx) {
		av_audio_convert_free(convert_ctx);
	}
#endif
	return ok;
}

/* Command-line entry point: parse options, then compute and print a
 * Chromaprint fingerprint (and duration) for each listed audio file.
 * Returns 0 on success, 2 on usage/allocation errors. */
int fpcalc_main(int argc, char **argv)
{
	int i, j, max_length = 120, num_file_names = 0, raw = 0, raw_fingerprint_size, duration;
	int16_t *buffer1, *buffer2;
	int32_t *raw_fingerprint;
	char *file_name, *fingerprint, **file_names;
	ChromaprintContext *chromaprint_ctx;
	int algo = CHROMAPRINT_ALGORITHM_DEFAULT;

	file_names = malloc(argc * sizeof(char *));
	/* BUGFIX: malloc result was used unchecked */
	if (!file_names) {
		fprintf(stderr, "ERROR: out of memory\n");
		return 2;
	}
	for (i = 1; i < argc; i++) {
		char *arg = argv[i];
		if (!strcmp(arg, "-length") && i + 1 < argc) {
			max_length = atoi(argv[++i]);
		}
		else if (!strcmp(arg, "-version") || !strcmp(arg, "-v")) {
			printf("fpcalc version %s\n", chromaprint_get_version());
			free(file_names); /* BUGFIX: was leaked on this early return */
			return 0;
		}
		else if (!strcmp(arg, "-raw")) {
			raw = 1;
		}
		else if (!strcmp(arg, "-algo") && i + 1 < argc) {
			const char *v = argv[++i];
			if (!strcmp(v, "test1")) { algo = CHROMAPRINT_ALGORITHM_TEST1; }
			else if (!strcmp(v, "test2")) { algo = CHROMAPRINT_ALGORITHM_TEST2; }
			else if (!strcmp(v, "test3")) { algo = CHROMAPRINT_ALGORITHM_TEST3; }
			else if (!strcmp(v, "test4")) { algo = CHROMAPRINT_ALGORITHM_TEST4; }
			else {
				fprintf(stderr, "WARNING: unknown algorithm, using the default\n");
			}
		}
		else if (!strcmp(arg, "-set") && i + 1 < argc) {
			/* handled in the second pass below, after the context exists */
			i += 1;
		}
		else {
			file_names[num_file_names++] = argv[i];
		}
	}

	if (!num_file_names) {
		printf("usage: %s [OPTIONS] FILE...\n\n", argv[0]);
		printf("Options:\n");
		printf("  -version      print version information\n");
		printf("  -length SECS  length of the audio data used for fingerprint calculation (default 120)\n");
		printf("  -raw          output the raw uncompressed fingerprint\n");
		printf("  -algo NAME    version of the fingerprint algorithm\n");
		free(file_names);
		return 2;
	}

	av_register_all();
	av_log_set_level(AV_LOG_ERROR);

	buffer1 = av_malloc(BUFFER_SIZE + 16);
	buffer2 = av_malloc(BUFFER_SIZE + 16);
	chromaprint_ctx = chromaprint_new(algo);
	/* BUGFIX: allocation results were used unchecked */
	if (!buffer1 || !buffer2 || !chromaprint_ctx) {
		fprintf(stderr, "ERROR: out of memory\n");
		if (chromaprint_ctx) {
			chromaprint_free(chromaprint_ctx);
		}
		av_free(buffer1);
		av_free(buffer2);
		free(file_names);
		return 2;
	}

	/* Second pass: apply -set NAME=VALUE options to the new context */
	for (i = 1; i < argc; i++) {
		char *arg = argv[i];
		if (!strcmp(arg, "-set") && i + 1 < argc) {
			char *name = argv[++i];
			char *value = strchr(name, '=');
			if (value) {
				*value++ = '\0';
				chromaprint_set_option(chromaprint_ctx, name, atoi(value));
			}
		}
	}

	for (i = 0; i < num_file_names; i++) {
		file_name = file_names[i];
		if (!decode_audio_file(chromaprint_ctx, buffer1, buffer2, file_name, max_length, &duration)) {
			fprintf(stderr, "ERROR: unable to calculate fingerprint for file %s, skipping\n", file_name);
			continue;
		}
		if (i > 0) {
			printf("\n");
		}
		printf("FILE=%s\n", file_name);
		printf("DURATION=%d\n", duration);
		if (raw) {
			if (!chromaprint_get_raw_fingerprint(chromaprint_ctx, (void **)&raw_fingerprint, &raw_fingerprint_size)) {
				fprintf(stderr, "ERROR: unable to calculate fingerprint for file %s, skipping\n", file_name);
				continue;
			}
			printf("FINGERPRINT=");
			for (j = 0; j < raw_fingerprint_size; j++) {
				printf("%d%s", raw_fingerprint[j], j + 1 < raw_fingerprint_size ? "," : "");
			}
			printf("\n");
			chromaprint_dealloc(raw_fingerprint);
		}
		else {
			if (!chromaprint_get_fingerprint(chromaprint_ctx, &fingerprint)) {
				fprintf(stderr, "ERROR: unable to calculate fingerprint for file %s, skipping\n", file_name);
				continue;
			}
			printf("FINGERPRINT=%s\n", fingerprint);
			chromaprint_dealloc(fingerprint);
		}
	}

	chromaprint_free(chromaprint_ctx);
	av_free(buffer1);
	av_free(buffer2);
	free(file_names);

	return 0;
}
Beispiel #29
0
int main( int argc, char *argv[])  
{  
	
	char * filename = NULL; 
	if( argc != 2)
	{
		printf("[useage:] %s ***.mp3 \n", argv[0] );
		return 0;
	}
	else
	{
		filename = argv[1]; 
	}



    // 注册编解码器  
    av_register_all();  
  
    AVFormatContext * pFormatCtx = avformat_alloc_context();   // 文件容器上下文  
   	 
      
    // 打开输入文件  
    if (avformat_open_input(&pFormatCtx, filename, 0, NULL) != 0)  
    {  
        printf("can't open file");  
        return -1;  
    }  
    if (av_find_stream_info(pFormatCtx) < 0) // 检查在文件中的流的信息  
    {  
		printf("error! can not find the stream's info \n");
        return -1;  
    }  

    av_dump_format(pFormatCtx, 0, filename, 0); // 显示pfmtctx->streams里的信息  
      
    int i, audioStream;  
    AVCodecContext * pCodecCtx;  

    // 找到第一个音频流  
    audioStream = -1;  
    for (int i = 0; i < pFormatCtx->nb_streams; ++i)  //找到音频、视频对应的stream  
    {  
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)  
        {  
            audioStream = i;  
            break;  
        }  
    }  
    if (audioStream == -1) // 有音频  
    {  
        printf("input file has no audio stream\n");  
        return -1;  
    }  
      
    // 获得音频流的解码器上下文  
    pCodecCtx = pFormatCtx->streams[audioStream]->codec;   
      
    // 根据解码器上下文找到解码器  
    AVCodec * pCodec = NULL ;  
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);    
    if (pCodec == NULL)  
    {  
		printf("error! no Codec found \n" );
        return -1;  
    }  
  
    // Inform the codec that we can handle truncated bitstreams  
    // bitstreams where frame boundaries can fall in the middle of packets  
    if (pCodec->capabilities & CODEC_CAP_TRUNCATED)  
    {  
        pCodecCtx->flags |= CODEC_CAP_TRUNCATED;  
    }  
  
    // 打开解码器  
    if (avcodec_open(pCodecCtx, pCodec) < 0)  
    {  
		printf("error! avcodec_open failed. \n ");
        return -1;  
    }  

	printf(" 	[bit_rate]    = %d \r\n", pCodecCtx->bit_rate);
	printf(" 	[sample_rate] = %d \r\n", pCodecCtx->sample_rate);
	printf(" 	[channels]    = %d \r\n", pCodecCtx->channels);
	printf(" 	[code_name]    = %s \r\n", pCodecCtx->codec->name);
	printf(" 	[block_align]  = %d\n",pCodecCtx->block_align);


    // Hack to correct wrong frame rates that seem to be generated by some   
    // codecs  
    FILE * fp = fopen("out.pcm", "wb");  
  
    //AVFrame * pFrame;  
    //pFrame = avcodec_alloc_frame();  
    //   
    AVPacket packet;  
    uint8_t * pktdata;  
    int pktsize;  
    int out_size = AVCODEC_MAX_AUDIO_FRAME_SIZE * 100;  
  
  	printf(" before decode, out_size=%d, MAX_FRAME_SIZE=%d\n", out_size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
    long start = clock(); //开始解码时间  
       
	/**
	* test a mp3 file,
	* while loop: 14572
	*/
	int count =0;
//	int frame_size = pCodecCtx->sample_rate * pCodecCtx->channels ;
//	printf("***** add size control, frame_size = %d \n", frame_size );

    while(av_read_frame(pFormatCtx, &packet) >= 0) //pFormatCtx中调用对应格式的packet获取函数  
    {  
        if (packet.stream_index == audioStream) //Detect read packet is audio stream? 
        {  
            pktdata = packet.data;  
            pktsize = packet.size;  
            while (pktsize > 0)  
            {  
                out_size = AVCODEC_MAX_AUDIO_FRAME_SIZE * 100;  
                int len = avcodec_decode_audio3(pCodecCtx, (short *)inbuf, &out_size, &packet); // 解码  
                if (len < 0)  
                {  
                    printf("error! while decoding files\n");  
                    break;  
                }  
               if (out_size > 0)  
                {  
                    fwrite(inbuf, 1, out_size, fp);  //write pcm to file: ***.pcm
					fflush( fp );
                }  

			  	printf(" decoding %d: len=%d, out_size=%d, pktsize=%d, pktdata=%d \n",\
						count++, len, out_size, pktsize, *pktdata );
                pktsize -= len;  
                pktdata += len;  
            }  
            av_free_packet(&packet);  
        }  
    } 
          
    long end = clock();  
    printf("cost time :%f\n",(double)(end-start)/(double)CLOCKS_PER_SEC);  
  
    fclose(fp);  
    avcodec_close(pCodecCtx);  
    avformat_close_input(&pFormatCtx);  
  
    return 0;  
}  
Beispiel #30
0
/* Open and initialize a decoder for stream 'stream_index' of the movie
 * attached to 'is'.  For audio streams the SDL audio device is opened
 * and unpaused as well; for video streams the decode thread is started.
 * Returns 0 on success, -1 on failure. */
int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *fmt_ctx = is->pFormatCtx;
    AVCodecContext *dec_ctx;
    AVCodec *decoder;
    SDL_AudioSpec desired, obtained;

    /* Reject out-of-range stream indices up front. */
    if(stream_index < 0 || stream_index >= fmt_ctx->nb_streams)
        return -1;

    /* The codec context lives inside the stream; it is not owned here. */
    dec_ctx = fmt_ctx->streams[stream_index]->codec;

    if(dec_ctx->codec_type == CODEC_TYPE_AUDIO)
    {
        /* Describe the output format we want from SDL, matching the
           stream's sample rate and channel layout. */
        desired.freq = dec_ctx->sample_rate;
        desired.format = AUDIO_S16SYS;
        desired.channels = dec_ctx->channels;
        desired.silence = 0;
        desired.samples = SDL_AUDIO_BUFFER_SIZE;
        desired.callback = audio_callback;
        desired.userdata = is;

        if(SDL_OpenAudio(&desired, &obtained) < 0)
        {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = obtained.size;
    }

    decoder = avcodec_find_decoder(dec_ctx->codec_id);
    if(!decoder || (avcodec_open(dec_ctx, decoder) < 0))
    {
        fprintf(stderr, "Unsupported codec!\n");
        return -1;
    }

    switch(dec_ctx->codec_type)
    {
    case CODEC_TYPE_AUDIO:
        /* Record the audio stream and start playback. */
        is->audioStream = stream_index;
        is->audio_st = fmt_ctx->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;
        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        /* Record the video stream, seed the frame timing state, and
           spawn the video decoding thread. */
        is->videoStream = stream_index;
        is->video_st = fmt_ctx->streams[stream_index];

        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->frame_last_delay = 40e-3;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        dec_ctx->get_buffer = our_get_buffer;
        dec_ctx->release_buffer = our_release_buffer;
        break;
    default:
        break;
    }
    return 0;
}