int SWDecoder::decodeFrames()
{
	int videoStream = -1;
	int i;
	for(i = 0; i < mState.pFormatCtx->nb_streams; i++)
	{
		if(mState.pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO)
		{
			videoStream = i;
			break;
		}
	}
	if(videoStream == -1)
	{
		std::cerr << "SWDecoder::decodeFrames: no video stream found" << std::endl;
		return -1;
	}
	
	AVCodecContext *pCodecCtx = mState.pFormatCtx->streams[videoStream]->codec;
	AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if(!pCodec)
	{
		std::cerr << "SWDecoder::decodeFrames: no codec found" << std::endl;
		return -1;
	}
	
	if(avcodec_open(pCodecCtx, pCodec)<0)
	{
		std::cerr << "SWDecoder::decodeFrames: error opening codec" << std::endl;
		return -1;
	}
	
	AVFrame *pFrame = avcodec_alloc_frame();
	AVFrame *pFrameRGB = avcodec_alloc_frame();
	
	int numBytes = avpicture_get_size(PIX_FMT_RGB32, pCodecCtx->width, pCodecCtx->height);
	
	AVPacket packet;
	int frameFinished;
	while(av_read_frame(mState.pFormatCtx, &packet) >= 0 && !mFinish)
	{
		if(packet.stream_index == videoStream)
		{
			avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, packet.data, packet.size);
			if(frameFinished)
			{
				pBuffer_t displaybuffer = acquireDVRBuffer();
				if(displaybuffer)
				{ /* a buffer is available; when none is, the frame is skipped so the DVR cannot overflow */
					struct SwsContext *img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
																	pCodecCtx->pix_fmt,
																	pCodecCtx->width, pCodecCtx->height,
																	PIX_FMT_RGB32,
																	SWS_BICUBIC,
																	0, 0, 0);
					if(!img_convert_ctx)
					{
						std::cerr << "SWDecoder::decodeFrames: error setting up image convert context" << std::endl;
					}
					else
					{
						if(displaybuffer->size() < numBytes)
						{
							displaybuffer->resize(numBytes);
						}
						avpicture_fill(reinterpret_cast<AVPicture *>(pFrameRGB), &(*displaybuffer)[0], PIX_FMT_RGB32, pCodecCtx->width, pCodecCtx->height);
						sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, 
												   pFrameRGB->data, pFrameRGB->linesize);
						sws_freeContext(img_convert_ctx);
						/* enqueue for display */
						displaybuffer->width(pCodecCtx->width);
						displaybuffer->height(pCodecCtx->height);
						returnDVRBuffer(displaybuffer);
					}
				}
				else
				{
					std::cout << "all buffers full, skipping..." << std::endl;
				}
			}
		}
		av_free_packet(&packet);
	}
	
	av_free(pFrameRGB);
	av_free(pFrame);
	avcodec_close(pCodecCtx);

	return 0;
}
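Note: this example uses the legacy decode API (avcodec_decode_video, avpicture_fill, PIX_FMT_RGB32), which later FFmpeg releases removed. A minimal sketch of the same read/decode loop against the send/receive API added in FFmpeg 3.1, assuming fmt_ctx, dec_ctx and video_stream are set up as above:

/* Sketch only: post-3.1 decode loop (not part of the original source). */
AVPacket *pkt = av_packet_alloc();
AVFrame *frame = av_frame_alloc();

while (av_read_frame(fmt_ctx, pkt) >= 0) {
    if (pkt->stream_index == video_stream &&
        avcodec_send_packet(dec_ctx, pkt) == 0) {
        /* one packet can yield zero or more frames */
        while (avcodec_receive_frame(dec_ctx, frame) == 0) {
            /* convert/enqueue the frame here, e.g. via sws_scale() */
        }
    }
    av_packet_unref(pkt);
}

av_frame_free(&frame);
av_packet_free(&pkt);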
Example #2
int main(int argc, char **argv) {
    AVFormatContext *pFormatCtx = NULL;
    int i, videoStream;
    AVCodecContext *pCodecCtx = NULL;
    AVCodec *pCodec = NULL;
    AVFrame *pFrame = NULL;
    AVFrame *pFrameRGB = NULL;
    AVPacket packet;
    int frameFinished;
    int numBytes;
    uint8_t *buffer = NULL;

    AVDictionary *optionsDict = NULL;
    struct SwsContext *sws_ctx = NULL;
  

    if(argc < 2) {
	printf("Please provide a movie file\n");
	return -1;
    }
    // Register all formats and codecs
    av_register_all();
  
    // Open video file
    if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
	return -1; // Couldn't open file
  
    // Retrieve stream information
    if(avformat_find_stream_info(pFormatCtx, NULL)<0)
	return -1; // Couldn't find stream information
  
    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], 0);
  
    // Find the first video stream
    videoStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
	if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
	    videoStream=i;
	    break;
	}
    if(videoStream==-1)
	return -1; // Didn't find a video stream
  
    // Get a pointer to the codec context for the video stream
    pCodecCtx=pFormatCtx->streams[videoStream]->codec;
  
    // Find the decoder for the video stream
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL) {
	fprintf(stderr, "Unsupported codec!\n");
	return -1; // Codec not found
    }
    // Open codec
    if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0)
	return -1; // Could not open codec
  
    // Allocate video frame
    pFrame=avcodec_alloc_frame();
  
    // Allocate an AVFrame structure
    pFrameRGB=avcodec_alloc_frame();
    if(pFrameRGB==NULL)
	return -1;
  
    // Determine required buffer size and allocate buffer
    numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
				pCodecCtx->height);
    buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

    
    sws_ctx =
	sws_getContext
	(
	 pCodecCtx->width,
	 pCodecCtx->height,
	 pCodecCtx->pix_fmt,
	 pCodecCtx->width,
	 pCodecCtx->height,
	 PIX_FMT_RGB24,
	 SWS_BILINEAR,
	 NULL,
	 NULL,
	 NULL
	 );
    

    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
		   pCodecCtx->width, pCodecCtx->height);

    // Read frames and save first five frames to disk
    i=0;
    while(av_read_frame(pFormatCtx, &packet)>=0) {
	// Is this a packet from the video stream?
	if(packet.stream_index==videoStream) {
	    // Decode video frame
	    avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
				  &packet);
      
	    // Did we get a video frame?
	    if(frameFinished) {
		// Convert the image from its native format to RGB
		sws_scale
		    (
		     sws_ctx,
		     (uint8_t const * const *)pFrame->data,
		     pFrame->linesize,
		     0,
		     pCodecCtx->height,
		     pFrameRGB->data,
		     pFrameRGB->linesize
		     );

		printf("Read frame\n");
		// Save the frame to disk
		if(++i<=5)
		    SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height,
			      i);
		else
		    break;
	    }
	}
    
	// Free the packet that was allocated by av_read_frame
	av_free_packet(&packet);
    }
  
    // Free the RGB image
    av_free(buffer);
    av_free(pFrameRGB);
  
    // Free the YUV frame
    av_free(pFrame);
  
    // Close the codec
    avcodec_close(pCodecCtx);
  
    // Close the video file
    avformat_close_input(&pFormatCtx);
  
    return 0;

}
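SaveFrame() is not defined in this listing; in the tutorial this example derives from, it writes each RGB24 frame as a binary PPM file. A sketch consistent with the call above:

/* Sketch: write an RGB24 frame as a binary PPM named frameN.ppm. */
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) {
    char szFilename[32];
    FILE *pFile;
    int y;

    sprintf(szFilename, "frame%d.ppm", iFrame);
    pFile = fopen(szFilename, "wb");
    if (pFile == NULL)
        return;

    /* PPM header: magic number, dimensions, max color value */
    fprintf(pFile, "P6\n%d %d\n255\n", width, height);

    /* write pixel rows one at a time, honoring the line stride */
    for (y = 0; y < height; y++)
        fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1, width * 3, pFile);

    fclose(pFile);
}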
Example #3
void vineoOpen( Vineo *v, char *file )
{
    // NOTE FIXME: if anything goes wrong in this function, unload cleanly

    if( av_open_input_file( &v->fmt_ctx, file, NULL, 0, NULL ) != 0 )
    {
        printf( "Error @ vineoOpen() @ av_open_input_file()\n" );
        return;
    }

    if( av_find_stream_info( v->fmt_ctx ) < 0 )
    {
        printf( "Error @ vineoOpen() @ av_find_stream_info()\n" );
        return;
    }


    //dump_format( v->fmt_ctx, 0, file, 0 );


    // do we have a start time offset?
    if( v->fmt_ctx->start_time != AV_NOPTS_VALUE ) {
		v->start_time = v->fmt_ctx->start_time;
	}


    // find streams
    int i;

    for( i = 0; i < v->fmt_ctx->nb_streams; i++ )
    {
        if( v->fmt_ctx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO && v->idx_video < 0 ) {
            v->idx_video = i;
        }

        if( v->fmt_ctx->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO && v->idx_audio < 0 ) {
            v->idx_audio = i;
        }
    }


    // open video codec
    if( v->idx_video >= 0 )
    {
        v->vid_codec_ctx = v->fmt_ctx->streams[v->idx_video]->codec;
        v->vid_codec = avcodec_find_decoder( v->vid_codec_ctx->codec_id );

        if( !v->vid_codec )
        {
            printf( "Error @ vineoOpen() @ avcodec_find_decoder()\n" );
            v->idx_video = -1;
        }
        else if( avcodec_open( v->vid_codec_ctx, v->vid_codec ) < 0 )
        {
            printf( "Error @ vineoOpen() @ avcodec_open()\n" );
            v->idx_video = -1;
        }

        v->sws = sws_getContext(
            v->vid_codec_ctx->width,
            v->vid_codec_ctx->height,
            v->vid_codec_ctx->pix_fmt,
            v->vid_codec_ctx->width,
            v->vid_codec_ctx->height,
            PIX_FMT_RGBA32,
            SWS_FAST_BILINEAR,
            NULL,
            NULL,
            NULL
        );
    }


    // open audio codec
    if( v->idx_audio >= 0 )
    {
        v->aud_codec_ctx = v->fmt_ctx->streams[v->idx_audio]->codec;
        v->aud_codec = avcodec_find_decoder( v->aud_codec_ctx->codec_id );

        if( !v->aud_codec )
        {
            printf( "Error @ vineoOpen() @ avcodec_find_decoder()\n" );
            v->idx_audio = -1;
        }
        else if( avcodec_open( v->aud_codec_ctx, v->aud_codec ) < 0 )
        {
            printf( "Error @ vineoOpen() @ avcodec_open()\n" );
            v->idx_audio = -1;
        }
    }


    // allocate video frames for decoding
    if( v->idx_video >= 0 )
    {
        v->frame = avcodec_alloc_frame();
        v->frame_rgba = avcodec_alloc_frame();

        if( !v->frame_rgba )
        {
            printf( "Error @ vineoOpen() @ avcodec_alloc_frame()\n" );
            return;
        }

        int b = avpicture_get_size( PIX_FMT_RGBA32, v->vid_codec_ctx->width, v->vid_codec_ctx->height );
        v->frame_buffer = av_malloc( b * sizeof(unsigned char) );

        if( !v->frame_buffer )
        {
            printf( "Error @ vineoOpen() @ av_malloc()\n" );
            return;
        }

        avpicture_fill( (AVPicture *)v->frame_rgba, v->frame_buffer, PIX_FMT_RGBA32, v->vid_codec_ctx->width, v->vid_codec_ctx->height );
    }


    // init audio
    if( v->idx_audio >= 0 )
    {
        v->aud_rate = v->aud_codec_ctx->sample_rate;
        v->aud_channels = v->aud_codec_ctx->channels;
        v->aud_bits = 16; // NOTE ffmpeg always uses 16 bits
        v->aud_format = 0;

        if( v->aud_channels == 1 ) {
            v->aud_format = AL_FORMAT_MONO16;
        }

        if( v->aud_channels == 2 ) {
            v->aud_format = AL_FORMAT_STEREO16;
        }

        if( alIsExtensionPresent("AL_EXT_MCFORMATS") )
        {
            if( v->aud_channels == 4 ) {
                v->aud_format = alGetEnumValue("AL_FORMAT_QUAD16");
            }

            if( v->aud_channels == 6 ) {
                v->aud_format = alGetEnumValue("AL_FORMAT_51CHN16");
            }
        }

        if( v->aud_format == 0 )
        {
            printf( "Error @ vineoOpen() @ Unhandled format (%d channels, %d bits)\n", v->aud_channels, v->aud_bits );
            avcodec_close( v->aud_codec_ctx );
            v->idx_audio = -1;
            return;
        }
        else
        {
            v->buffer_playing = 0;
            v->data = NULL;
            v->data_size = 0;
            v->data_size_max = 0;
            v->dec_data = av_malloc( AVCODEC_MAX_AUDIO_FRAME_SIZE );
            v->dec_data_size = 0;

            if( !v->dec_data )
            {
                printf( "Error @ vineoOpen() @ av_malloc()\n" );
                return;
            }

            v->data_tmp = av_malloc( VINEO_AUDIO_BUFFER_SIZE );

            if( !v->data_tmp )
            {
                printf( "Error @ vineoOpen() @ av_malloc()\n" );
                return;
            }
        }
    }

    v->is_opened = 1;
}
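PIX_FMT_RGBA32 above is an old libavutil name that later releases dropped in favor of PIX_FMT_RGB32 (native-endian packed 32-bit RGB). When building this code against such headers, one option is a compatibility define along these lines (an assumption, not part of the original source):

/* Compatibility shim: map the removed alias onto its replacement.
 * Only add this when the headers no longer provide PIX_FMT_RGBA32. */
#define PIX_FMT_RGBA32 PIX_FMT_RGB32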
Example #4
static gboolean ffaudio_play (const gchar * filename, VFSFile * file)
{
    AUDDBG ("Playing %s.\n", filename);
    if (! file)
        return FALSE;

    AVPacket pkt = {.data = NULL};
    gint errcount;
    gboolean codec_opened = FALSE;
    gint out_fmt;
    gboolean planar;
    gboolean seekable;
    gboolean error = FALSE;

    void *buf = NULL;
    gint bufsize = 0;

    AVFormatContext * ic = open_input_file (filename, file);
    if (! ic)
        return FALSE;

    CodecInfo cinfo;

    if (! find_codec (ic, & cinfo))
    {
        fprintf (stderr, "ffaudio: No codec found for %s.\n", filename);
        goto error_exit;
    }

    AUDDBG("got codec %s for stream index %d, opening\n", cinfo.codec->name, cinfo.stream_idx);

    if (avcodec_open2 (cinfo.context, cinfo.codec, NULL) < 0)
        goto error_exit;

    codec_opened = TRUE;

    switch (cinfo.context->sample_fmt)
    {
        case AV_SAMPLE_FMT_U8: out_fmt = FMT_U8; planar = FALSE; break;
        case AV_SAMPLE_FMT_S16: out_fmt = FMT_S16_NE; planar = FALSE; break;
        case AV_SAMPLE_FMT_S32: out_fmt = FMT_S32_NE; planar = FALSE; break;
        case AV_SAMPLE_FMT_FLT: out_fmt = FMT_FLOAT; planar = FALSE; break;

        case AV_SAMPLE_FMT_U8P: out_fmt = FMT_U8; planar = TRUE; break;
        case AV_SAMPLE_FMT_S16P: out_fmt = FMT_S16_NE; planar = TRUE; break;
        case AV_SAMPLE_FMT_S32P: out_fmt = FMT_S32_NE; planar = TRUE; break;
        case AV_SAMPLE_FMT_FLTP: out_fmt = FMT_FLOAT; planar = TRUE; break;

    default:
        fprintf (stderr, "ffaudio: Unsupported audio format %d\n", (int) cinfo.context->sample_fmt);
        goto error_exit;
    }

    /* Open audio output */
    AUDDBG("opening audio output\n");

    if (aud_input_open_audio(out_fmt, cinfo.context->sample_rate, cinfo.context->channels) <= 0)
    {
        error = TRUE;
        goto error_exit;
    }

    AUDDBG("setting parameters\n");

    aud_input_set_bitrate(ic->bit_rate);

    errcount = 0;
    seekable = ffaudio_codec_is_seekable(cinfo.codec);

    while (! aud_input_check_stop ())
    {
        int seek_value = aud_input_check_seek ();

        if (seek_value >= 0 && seekable)
        {
            if (av_seek_frame (ic, -1, (gint64) seek_value * AV_TIME_BASE /
             1000, AVSEEK_FLAG_ANY) < 0)
            {
                _ERROR("error while seeking\n");
            } else
                errcount = 0;

            seek_value = -1;
        }

        AVPacket tmp;
        gint ret;

        /* Read next frame (or more) of data */
        if ((ret = av_read_frame(ic, &pkt)) < 0)
        {
            if (ret == AVERROR_EOF)
            {
                AUDDBG("eof reached\n");
                break;
            }
            else
            {
                if (++errcount > 4)
                {
                    _ERROR("av_read_frame error %d, giving up.\n", ret);
                    break;
                } else
                    continue;
            }
        } else
            errcount = 0;

        /* Ignore any other substreams */
        if (pkt.stream_index != cinfo.stream_idx)
        {
            av_free_packet(&pkt);
            continue;
        }

        /* Decode and play packet/frame */
        memcpy(&tmp, &pkt, sizeof(tmp));
        while (tmp.size > 0 && ! aud_input_check_stop ())
        {
            /* Check for seek request and bail out if we have one */
            if (seek_value < 0)
                seek_value = aud_input_check_seek ();

            if (seek_value >= 0)
                break;

#if CHECK_LIBAVCODEC_VERSION (55, 28, 1)
            AVFrame * frame = av_frame_alloc ();
#else
            AVFrame * frame = avcodec_alloc_frame ();
#endif
            int decoded = 0;
            int len = avcodec_decode_audio4 (cinfo.context, frame, & decoded, & tmp);

            if (len < 0)
            {
                fprintf (stderr, "ffaudio: decode_audio() failed, code %d\n", len);
                break;
            }

            tmp.size -= len;
            tmp.data += len;

            if (! decoded)
            {
                /* free the frame before skipping, else each skipped
                 * packet chunk leaks one AVFrame */
#if CHECK_LIBAVCODEC_VERSION (55, 28, 1)
                av_frame_free (& frame);
#else
                avcodec_free_frame (& frame);
#endif
                continue;
            }

            gint size = FMT_SIZEOF (out_fmt) * cinfo.context->channels * frame->nb_samples;

            if (planar)
            {
                if (bufsize < size)
                {
                    buf = g_realloc (buf, size);
                    bufsize = size;
                }

                audio_interlace ((const void * *) frame->data, out_fmt,
                 cinfo.context->channels, buf, frame->nb_samples);
                aud_input_write_audio (buf, size);
            }
            else
                aud_input_write_audio (frame->data[0], size);

#if CHECK_LIBAVCODEC_VERSION (55, 28, 1)
            av_frame_free (& frame);
#else
            avcodec_free_frame (& frame);
#endif
        }

        if (pkt.data)
            av_free_packet(&pkt);
    }

error_exit:
    if (pkt.data)
        av_free_packet(&pkt);
    if (codec_opened)
        avcodec_close(cinfo.context);
    if (ic != NULL)
        close_input_file(ic);

    g_free (buf);

    return ! error;
}
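CHECK_LIBAVCODEC_VERSION comes from the plugin's own headers and is not shown here; presumably it is a version gate along these lines:

/* Sketch: compile-time check against libavcodec's packed version number. */
#define CHECK_LIBAVCODEC_VERSION(a, b, c) \
    (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(a, b, c))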
Example #5
jint naMain(JNIEnv *pEnv, jobject pObj, jobject pMainAct, jstring pFileName, jint pNumOfFrames) {
	AVFormatContext *pFormatCtx = NULL;
	int             i, videoStream;
	AVCodecContext  *pCodecCtx = NULL;
	AVCodec         *pCodec = NULL;
	AVFrame         *pFrame = NULL;
	AVFrame         *pFrameRGBA = NULL;
	AVPacket        packet;
	int             frameFinished;
	jobject			bitmap;
	void* 			buffer;

	AVDictionary    *optionsDict = NULL;
	struct SwsContext      *sws_ctx = NULL;
	char *videoFileName;

	// Register all formats and codecs
	av_register_all();

	//get C string from JNI jstring
	videoFileName = (char *)(*pEnv)->GetStringUTFChars(pEnv, pFileName, NULL);

	// Open video file
	if(avformat_open_input(&pFormatCtx, videoFileName, NULL, NULL)!=0)
		return -1; // Couldn't open file

	// Retrieve stream information
	if(avformat_find_stream_info(pFormatCtx, NULL)<0)
		return -1; // Couldn't find stream information

	// Dump information about file onto standard error
	av_dump_format(pFormatCtx, 0, videoFileName, 0);

	// Find the first video stream
	videoStream=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++) {
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
			videoStream=i;
			break;
		}
	}
	if(videoStream==-1)
		return -1; // Didn't find a video stream

	// Get a pointer to the codec context for the video stream
	pCodecCtx=pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1; // Codec not found
	}
	// Open codec
	if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0)
		return -1; // Could not open codec

	// Allocate video frame
	pFrame=avcodec_alloc_frame();

	// Allocate an AVFrame structure
	pFrameRGBA=avcodec_alloc_frame();
	if(pFrameRGBA==NULL)
		return -1;

	//create a bitmap as the buffer for pFrameRGBA
	bitmap = createBitmap(pEnv, pCodecCtx->width, pCodecCtx->height);
	if (AndroidBitmap_lockPixels(pEnv, bitmap, &buffer) < 0)
		return -1;
	//get the scaling context
	sws_ctx = sws_getContext
    (
        pCodecCtx->width,
        pCodecCtx->height,
        pCodecCtx->pix_fmt,
        pCodecCtx->width,
        pCodecCtx->height,
        AV_PIX_FMT_RGBA,
        SWS_BILINEAR,
        NULL,
        NULL,
        NULL
    );

	// Assign appropriate parts of bitmap to image planes in pFrameRGBA
	// Note that pFrameRGBA is an AVFrame, but AVFrame is a superset
	// of AVPicture
	avpicture_fill((AVPicture *)pFrameRGBA, buffer, AV_PIX_FMT_RGBA,
		 pCodecCtx->width, pCodecCtx->height);

	// Read frames and save first five frames to disk
	i=0;
	while(av_read_frame(pFormatCtx, &packet)>=0) {
		// Is this a packet from the video stream?
		if(packet.stream_index==videoStream) {
			// Decode video frame
			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
			   &packet);
			// Did we get a video frame?
			if(frameFinished) {
				// Convert the image from its native format to RGBA
				sws_scale
				(
					sws_ctx,
					(uint8_t const * const *)pFrame->data,
					pFrame->linesize,
					0,
					pCodecCtx->height,
					pFrameRGBA->data,
					pFrameRGBA->linesize
				);

				// Save the frame to disk
				if(++i<=pNumOfFrames) {
					SaveFrame(pEnv, pMainAct, bitmap, pCodecCtx->width, pCodecCtx->height, i);
					LOGI("save frame %d", i);
				}
			}
		}
		// Free the packet that was allocated by av_read_frame
		av_free_packet(&packet);
	}

	//unlock the bitmap
	AndroidBitmap_unlockPixels(pEnv, bitmap);

	// Free the RGB image
	av_free(pFrameRGBA);

	// Free the YUV frame
	av_free(pFrame);

	// Close the codec
	avcodec_close(pCodecCtx);

	// Close the video file
	avformat_close_input(&pFormatCtx);

	// Release the C string obtained from the jstring argument
	(*pEnv)->ReleaseStringUTFChars(pEnv, pFileName, videoFileName);

	return 0;
}
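createBitmap() is a helper that is not part of this listing. Below is a sketch of how such a helper can allocate an android.graphics.Bitmap through JNI; the class and method signatures are the Android SDK's, while the helper itself and its ARGB_8888 choice are assumptions:

/* Sketch: allocate an ARGB_8888 Bitmap of the given size via JNI.
 * Local references are not cleaned up here, for brevity. */
jobject createBitmap(JNIEnv *pEnv, int width, int height) {
    jclass bitmapCls = (*pEnv)->FindClass(pEnv, "android/graphics/Bitmap");
    jmethodID createBitmapFn = (*pEnv)->GetStaticMethodID(
        pEnv, bitmapCls, "createBitmap",
        "(IILandroid/graphics/Bitmap$Config;)Landroid/graphics/Bitmap;");

    /* Bitmap.Config.valueOf("ARGB_8888") */
    jclass configCls = (*pEnv)->FindClass(pEnv, "android/graphics/Bitmap$Config");
    jmethodID valueOfFn = (*pEnv)->GetStaticMethodID(
        pEnv, configCls, "valueOf",
        "(Ljava/lang/String;)Landroid/graphics/Bitmap$Config;");
    jstring configName = (*pEnv)->NewStringUTF(pEnv, "ARGB_8888");
    jobject config = (*pEnv)->CallStaticObjectMethod(pEnv, configCls,
                                                     valueOfFn, configName);

    return (*pEnv)->CallStaticObjectMethod(pEnv, bitmapCls, createBitmapFn,
                                           width, height, config);
}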
Example #6
static ImBuf * avi_fetchibuf (struct anim *anim, int position) {
	ImBuf *ibuf = NULL;
	int *tmp;
	int y;
	
	if (anim == NULL) return (NULL);

#if defined(_WIN32) && !defined(FREE_WINDOWS)
	if (anim->avistreams) {
		LPBITMAPINFOHEADER lpbi;

		if (anim->pgf) {
			lpbi = AVIStreamGetFrame(anim->pgf, position + AVIStreamStart(anim->pavi[anim->firstvideo]));
			if (lpbi) {
				ibuf = IMB_ibImageFromMemory((unsigned char *) lpbi, 100, IB_rect);
//Oh brother...
			}
		}
	} else {
#else
	if (1) {
#endif
		ibuf = IMB_allocImBuf (anim->x, anim->y, 24, IB_rect);

		tmp = AVI_read_frame (anim->avi, AVI_FORMAT_RGB32, position,
			AVI_get_stream(anim->avi, AVIST_VIDEO, 0));
		
		if (tmp == NULL) {
			printf ("Error reading frame from AVI");
			IMB_freeImBuf (ibuf);
			return NULL;
		}

		for (y=0; y < anim->y; y++) {
			memcpy (&(ibuf->rect)[((anim->y-y)-1)*anim->x],  &tmp[y*anim->x],  
				anim->x * 4);
		}
		
		MEM_freeN (tmp);
	}
	
	ibuf->profile = IB_PROFILE_SRGB;
	
	return ibuf;
}

#ifdef WITH_FFMPEG

extern void do_init_ffmpeg(void);

static int startffmpeg(struct anim * anim) {
	int            i, videoStream;

	AVCodec *pCodec;
	AVFormatContext *pFormatCtx;
	AVCodecContext *pCodecCtx;
	int frs_num;
	double frs_den;
	int streamcount;

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
	/* The following for color space determination */
	int srcRange, dstRange, brightness, contrast, saturation;
	int *table;
	const int *inv_table;
#endif

	if (anim == 0) return(-1);

	streamcount = anim->streamindex;

	do_init_ffmpeg();

	if(av_open_input_file(&pFormatCtx, anim->name, NULL, 0, NULL)!=0) {
		return -1;
	}

	if(av_find_stream_info(pFormatCtx)<0) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	av_dump_format(pFormatCtx, 0, anim->name, 0);


	/* Find the video stream */
	videoStream = -1;

	for(i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type
		   == AVMEDIA_TYPE_VIDEO) {
			if (streamcount > 0) {
				streamcount--;
				continue;
			}
			videoStream = i;
			break;
		}

	if(videoStream==-1) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	pCodecCtx = pFormatCtx->streams[videoStream]->codec;

	/* Find the decoder for the video stream */
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec == NULL) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	pCodecCtx->workaround_bugs = 1;

	if(avcodec_open(pCodecCtx, pCodec) < 0) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	anim->duration = ceil(pFormatCtx->duration
		* av_q2d(pFormatCtx->streams[videoStream]->r_frame_rate) 
		/ AV_TIME_BASE);

	frs_num	= pFormatCtx->streams[videoStream]->r_frame_rate.num;
	frs_den = pFormatCtx->streams[videoStream]->r_frame_rate.den;

	frs_den *= AV_TIME_BASE;

	while (frs_num % 10 == 0 && frs_den >= 2.0 && frs_num > 10) {
		frs_num /= 10;
		frs_den /= 10;
	}

	anim->frs_sec = frs_num;
	anim->frs_sec_base = frs_den;

	anim->params = 0;

	anim->x = pCodecCtx->width;
	anim->y = pCodecCtx->height;
	anim->interlacing = 0;
	anim->orientation = 0;
	anim->framesize = anim->x * anim->y * 4;

	anim->curposition = -1;
	anim->last_frame = 0;
	anim->last_pts = -1;
	anim->next_pts = -1;
	anim->next_undecoded_pts = -1;
	anim->next_packet.stream_index = -1;

	anim->pFormatCtx = pFormatCtx;
	anim->pCodecCtx = pCodecCtx;
	anim->pCodec = pCodec;
	anim->videoStream = videoStream;

	anim->pFrame = avcodec_alloc_frame();
	anim->pFrameDeinterlaced = avcodec_alloc_frame();
	anim->pFrameRGB = avcodec_alloc_frame();

	if (avpicture_get_size(PIX_FMT_RGBA, anim->x, anim->y)
		!= anim->x * anim->y * 4) {
		fprintf (stderr,
			 "ffmpeg has changed alloc scheme ... ARGHHH!\n");
		avcodec_close(anim->pCodecCtx);
		av_close_input_file(anim->pFormatCtx);
		av_free(anim->pFrameRGB);
		av_free(anim->pFrameDeinterlaced);
		av_free(anim->pFrame);
		anim->pCodecCtx = NULL;
		return -1;
	}

	if (anim->ib_flags & IB_animdeinterlace) {
		avpicture_fill((AVPicture*) anim->pFrameDeinterlaced, 
				   MEM_callocN(avpicture_get_size(
						   anim->pCodecCtx->pix_fmt,
						   anim->x, anim->y), 
					   "ffmpeg deinterlace"), 
				   anim->pCodecCtx->pix_fmt, anim->x, anim->y);
	}

	if (pCodecCtx->has_b_frames) {
		anim->preseek = 25; /* FIXME: detect gopsize ... */
	} else {
		anim->preseek = 0;
	}
	
	anim->img_convert_ctx = sws_getContext(
		anim->pCodecCtx->width,
		anim->pCodecCtx->height,
		anim->pCodecCtx->pix_fmt,
		anim->pCodecCtx->width,
		anim->pCodecCtx->height,
		PIX_FMT_RGBA,
		SWS_FAST_BILINEAR | SWS_PRINT_INFO,
		NULL, NULL, NULL);
		
	if (!anim->img_convert_ctx) {
		fprintf (stderr,
			 "Can't transform color space??? Bailing out...\n");
		avcodec_close(anim->pCodecCtx);
		av_close_input_file(anim->pFormatCtx);
		av_free(anim->pFrameRGB);
		av_free(anim->pFrameDeinterlaced);
		av_free(anim->pFrame);
		anim->pCodecCtx = NULL;
		return -1;
	}

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
	/* Try do detect if input has 0-255 YCbCR range (JFIF Jpeg MotionJpeg) */
	if (!sws_getColorspaceDetails(anim->img_convert_ctx, (int**)&inv_table, &srcRange,
		&table, &dstRange, &brightness, &contrast, &saturation)) {

		srcRange = srcRange || anim->pCodecCtx->color_range == AVCOL_RANGE_JPEG;
		inv_table = sws_getCoefficients(anim->pCodecCtx->colorspace);

		if(sws_setColorspaceDetails(anim->img_convert_ctx, (int *)inv_table, srcRange,
			table, dstRange, brightness, contrast, saturation)) {

			printf("Warning: Could not set libswscale colorspace details.\n");
			}
	}
	else {
		printf("Warning: Could not set libswscale colorspace details.\n");
	}
#endif
		
	return (0);
}
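do_init_ffmpeg() is declared extern above; a sketch of a one-time initializer consistent with how it is called (the guard flag is an assumption):

/* Sketch: register FFmpeg's formats and codecs exactly once. */
void do_init_ffmpeg(void)
{
    static int ffmpeg_initialized = 0;

    if (!ffmpeg_initialized) {
        ffmpeg_initialized = 1;
        av_register_all();
    }
}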
Example #7
void video_decode_example(const char *outfilename, const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int frame, size, got_picture, len;
    FILE *f;
    AVFrame *picture;
    uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE], *inbuf_ptr;
    char buf[1024];

    /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
    memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    printf("Video decoding\n");

    /* find the mpeg1 video decoder */
    codec = avcodec_find_decoder(CODEC_ID_MPEG1VIDEO);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    c= avcodec_alloc_context();
    picture= avcodec_alloc_frame();

    if(codec->capabilities&CODEC_CAP_TRUNCATED)
        c->flags|= CODEC_FLAG_TRUNCATED; /* we don't send complete frames */

    /* for some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized here because this information is not
       available in the bitstream */

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* the codec gives us the frame size, in samples */

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "could not open %s\n", filename);
        exit(1);
    }

    frame = 0;
    for(;;) {
        size = fread(inbuf, 1, INBUF_SIZE, f);
        if (size == 0)
            break;

        /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
           and this is the only method to use them because you cannot
           know the compressed data size before analysing it.

           BUT some other codecs (msmpeg4, mpeg4) are inherently frame
           based, so you must call them with all the data for one
           frame exactly. You must also initialize 'width' and
           'height' before initializing them. */

        /* NOTE2: some codecs allow the raw parameters (frame size,
           sample rate) to be changed at any frame. We handle this, so
           you should also take care of it */

        /* here, we use a stream based decoder (mpeg1video), so we
           feed decoder and see if it could decode a frame */
        inbuf_ptr = inbuf;
        while (size > 0) {
            len = avcodec_decode_video(c, picture, &got_picture,
                                       inbuf_ptr, size);
            if (len < 0) {
                fprintf(stderr, "Error while decoding frame %d\n", frame);
                exit(1);
            }
            if (got_picture) {
                printf("saving frame %3d\n", frame);
                fflush(stdout);

                /* the picture is allocated by the decoder. no need to
                   free it */
                snprintf(buf, sizeof(buf), outfilename, frame);
                pgm_save(picture->data[0], picture->linesize[0],
                         c->width, c->height, buf);
                frame++;
            }
            size -= len;
            inbuf_ptr += len;
        }
    }

    /* some codecs, such as MPEG, transmit the I and P frame with a
       latency of one frame. You must do the following to have a
       chance to get the last frame of the video */
    len = avcodec_decode_video(c, picture, &got_picture,
                               NULL, 0);
    if (got_picture) {
        printf("saving last frame %3d\n", frame);
        fflush(stdout);

        /* the picture is allocated by the decoder. no need to
           free it */
        snprintf(buf, sizeof(buf), outfilename, frame);
        pgm_save(picture->data[0], picture->linesize[0],
                 c->width, c->height, buf);
        frame++;
    }

    fclose(f);

    avcodec_close(c);
    av_free(c);
    av_free(picture);
    printf("\n");
}
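pgm_save() referenced above is the companion helper from the same api-example; it dumps a single gray plane (here the decoded luma) as a binary PGM:

/* Sketch: write one gray plane as a binary PGM, honoring the stride. */
static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
                     char *filename)
{
    FILE *f;
    int i;

    f = fopen(filename, "wb");
    if (!f)
        return;
    /* PGM header: magic number, dimensions, max gray value */
    fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
    for (i = 0; i < ysize; i++)
        fwrite(buf + i * wrap, 1, xsize, f);
    fclose(f);
}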
Example #8
/* fas_open_video */
fas_error_type fas_open_video (fas_context_ref_type *context_ptr, const char *file_path)
{
  int stream_idx;
  int numBytes;
  fas_context_ref_type fas_context;
  AVCodec *codec;

  if (NULL == context_ptr)
    return private_show_error("NULL context pointer provided", FAS_INVALID_ARGUMENT);

  *context_ptr = NULL; // set returned context to NULL in case of error

  fas_context = (fas_context_ref_type) malloc(sizeof(fas_context_type));

  if (NULL == fas_context)
    return private_show_error("unable to allocate buffer", FAS_OUT_OF_MEMORY);

  memset(fas_context, 0, sizeof(fas_context_type));

  fas_context->is_video_active        = FAS_TRUE;
  fas_context->is_frame_available     = FAS_TRUE;
  fas_context->current_frame_index    = FIRST_FRAME_INDEX - 1;
  fas_context->current_dts            = AV_NOPTS_VALUE;
  fas_context->previous_dts           = AV_NOPTS_VALUE;
  fas_context->keyframe_packet_dts    = AV_NOPTS_VALUE;
  fas_context->first_dts              = AV_NOPTS_VALUE;
  fas_context->seek_table = seek_init_table(-1); /* default starting size */

  if (av_open_input_file(&(fas_context->format_context), file_path, NULL, 0, NULL ) != 0)
  {
    fas_close_video(fas_context);
    return private_show_error("failure to open file", FAS_UNSUPPORTED_FORMAT);
  }

  if (av_find_stream_info (fas_context->format_context) < 0)
  {
    fas_close_video(fas_context);
    return private_show_error("could not extract stream information", FAS_UNSUPPORTED_FORMAT);
  }

  if (SHOW_WARNING_MESSAGES)
    av_dump_format(fas_context->format_context, 0, file_path, 0);

  for (stream_idx = 0; stream_idx < fas_context->format_context->nb_streams; stream_idx++)
  {
    if (fas_context->format_context->streams[stream_idx]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
    {
      fas_context->stream_idx = stream_idx;
      fas_context->codec_context  = fas_context->format_context->streams[stream_idx]->codec;
      break;
    }
  }

  if (fas_context->codec_context == 0)
  {
    fas_close_video(fas_context);
    return private_show_error("failure to find a video stream", FAS_UNSUPPORTED_FORMAT);
  }

  codec = avcodec_find_decoder(fas_context->codec_context->codec_id);

  if (!codec)
  {
    fas_context->codec_context = 0;
    fas_close_video(fas_context);
    return private_show_error("failed to find correct video codec", FAS_UNSUPPORTED_CODEC);
  }

  if (avcodec_open(fas_context->codec_context, codec) < 0)
  {
    fas_context->codec_context = 0;
    fas_close_video(fas_context);
    return private_show_error("failed to open codec", FAS_UNSUPPORTED_CODEC);
  }

  fas_context->frame_buffer = avcodec_alloc_frame();
  if (fas_context->frame_buffer == NULL)
  {
    fas_close_video(fas_context);
    return private_show_error("failed to allocate frame buffer", FAS_OUT_OF_MEMORY);
  }

  fas_context->rgb_frame_buffer = avcodec_alloc_frame();
  if (fas_context->rgb_frame_buffer == NULL)
  {
    fas_close_video(fas_context);
    return private_show_error("failed to allocate rgb frame buffer", FAS_OUT_OF_MEMORY);
  }
  numBytes = avpicture_get_size(PIX_FMT_RGB24, fas_context->codec_context->width, fas_context->codec_context->height);
  fas_context->rgb_buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
  avpicture_fill((AVPicture *)fas_context->rgb_frame_buffer, fas_context->rgb_buffer, PIX_FMT_RGB24, fas_context->codec_context->width, fas_context->codec_context->height);

  fas_context->gray8_frame_buffer = avcodec_alloc_frame();
  if (fas_context->gray8_frame_buffer == NULL)
  {
    fas_close_video(fas_context);
    return private_show_error("failed to allocate gray8 frame buffer", FAS_OUT_OF_MEMORY);
  }

  fas_context->rgb_buffer = 0;
  fas_context->gray8_buffer = 0;
  fas_context->rgb_already_converted = FAS_FALSE;
  fas_context->gray8_already_converted = FAS_FALSE;

  *context_ptr = fas_context;

  if (FAS_SUCCESS != fas_step_forward(*context_ptr))
    return private_show_error("failure decoding first frame", FAS_NO_MORE_FRAMES);

  if (!fas_frame_available(*context_ptr))
    return private_show_error("couldn't find a first frame (no valid frames in video stream)", FAS_NO_MORE_FRAMES);

  return FAS_SUCCESS;
}
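private_show_error() is used on every failure path above but not shown; a sketch, assuming it simply logs the message and passes the error code through:

/* Sketch: log the message and hand the fas error code back to the caller. */
static fas_error_type private_show_error(const char *message, fas_error_type error)
{
    fprintf(stderr, "fas error: %s\n", message);
    return error;
}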
Example #9
/*****************************************************************************
 * InitVideo: initialize the video decoder
 *****************************************************************************
 * the ffmpeg codec will be opened, some memory allocated. The vout is not yet
 * opened (done after the first decoded frame).
 *****************************************************************************/
int InitVideoDec( decoder_t *p_dec, AVCodecContext *p_context,
                      AVCodec *p_codec, int i_codec_id, const char *psz_namecodec )
{
    decoder_sys_t *p_sys;
    int i_val;

    /* Allocate the memory needed to store the decoder's structure */
    if( ( p_dec->p_sys = p_sys = calloc( 1, sizeof(decoder_sys_t) ) ) == NULL )
        return VLC_ENOMEM;

    p_codec->type = AVMEDIA_TYPE_VIDEO;
    p_context->codec_type = AVMEDIA_TYPE_VIDEO;
    p_context->codec_id = i_codec_id;
    p_sys->p_context = p_context;
    p_sys->p_codec = p_codec;
    p_sys->i_codec_id = i_codec_id;
    p_sys->psz_namecodec = psz_namecodec;
    p_sys->p_ff_pic = avcodec_alloc_frame();
    p_sys->b_delayed_open = true;
    p_sys->p_va = NULL;
    vlc_sem_init( &p_sys->sem_mt, 0 );

    /* ***** Fill p_context with init values ***** */
    p_sys->p_context->codec_tag = ffmpeg_CodecTag( p_dec->fmt_in.i_original_fourcc ?: p_dec->fmt_in.i_codec );

    /*  ***** Get configuration of ffmpeg plugin ***** */
    p_sys->p_context->workaround_bugs =
        var_InheritInteger( p_dec, "ffmpeg-workaround-bugs" );
    p_sys->p_context->error_recognition =
        var_InheritInteger( p_dec, "ffmpeg-error-resilience" );

    if( var_CreateGetBool( p_dec, "grayscale" ) )
        p_sys->p_context->flags |= CODEC_FLAG_GRAY;

    i_val = var_CreateGetInteger( p_dec, "ffmpeg-vismv" );
    if( i_val ) p_sys->p_context->debug_mv = i_val;

    i_val = var_CreateGetInteger( p_dec, "ffmpeg-lowres" );
    if( i_val > 0 && i_val <= 2 ) p_sys->p_context->lowres = i_val;

    i_val = var_CreateGetInteger( p_dec, "ffmpeg-skiploopfilter" );
    if( i_val >= 4 ) p_sys->p_context->skip_loop_filter = AVDISCARD_ALL;
    else if( i_val == 3 ) p_sys->p_context->skip_loop_filter = AVDISCARD_NONKEY;
    else if( i_val == 2 ) p_sys->p_context->skip_loop_filter = AVDISCARD_BIDIR;
    else if( i_val == 1 ) p_sys->p_context->skip_loop_filter = AVDISCARD_NONREF;

    if( var_CreateGetBool( p_dec, "ffmpeg-fast" ) )
        p_sys->p_context->flags2 |= CODEC_FLAG2_FAST;

    /* ***** ffmpeg frame skipping ***** */
    p_sys->b_hurry_up = var_CreateGetBool( p_dec, "ffmpeg-hurry-up" );

    switch( var_CreateGetInteger( p_dec, "ffmpeg-skip-frame" ) )
    {
        case -1:
            p_sys->p_context->skip_frame = AVDISCARD_NONE;
            break;
        case 0:
            p_sys->p_context->skip_frame = AVDISCARD_DEFAULT;
            break;
        case 1:
            p_sys->p_context->skip_frame = AVDISCARD_NONREF;
            break;
        case 2:
            p_sys->p_context->skip_frame = AVDISCARD_NONKEY;
            break;
        case 3:
            p_sys->p_context->skip_frame = AVDISCARD_ALL;
            break;
        default:
            p_sys->p_context->skip_frame = AVDISCARD_NONE;
            break;
    }
    p_sys->i_skip_frame = p_sys->p_context->skip_frame;

    switch( var_CreateGetInteger( p_dec, "ffmpeg-skip-idct" ) )
    {
        case -1:
            p_sys->p_context->skip_idct = AVDISCARD_NONE;
            break;
        case 0:
            p_sys->p_context->skip_idct = AVDISCARD_DEFAULT;
            break;
        case 1:
            p_sys->p_context->skip_idct = AVDISCARD_NONREF;
            break;
        case 2:
            p_sys->p_context->skip_idct = AVDISCARD_NONKEY;
            break;
        case 3:
            p_sys->p_context->skip_idct = AVDISCARD_ALL;
            break;
        default:
            p_sys->p_context->skip_idct = AVDISCARD_NONE;
            break;
    }
    p_sys->i_skip_idct = p_sys->p_context->skip_idct;

    /* ***** ffmpeg direct rendering ***** */
    p_sys->b_direct_rendering = false;
    p_sys->i_direct_rendering_used = -1;
    if( var_CreateGetBool( p_dec, "ffmpeg-dr" ) &&
       (p_sys->p_codec->capabilities & CODEC_CAP_DR1) &&
        /* No idea why ... but this fixes flickering on some TSCC streams */
        p_sys->i_codec_id != CODEC_ID_TSCC &&
#if (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT( 52, 68, 2 ) ) && (LIBAVCODEC_VERSION_INT < AV_VERSION_INT( 52, 100, 1 ) )
        /* avcodec native vp8 decode doesn't handle EMU_EDGE flag, and I
           don't have idea howto implement fallback to libvpx decoder */
        p_sys->i_codec_id != CODEC_ID_VP8 &&
#endif
        !p_sys->p_context->debug_mv )
    {
        /* Some codecs set pix_fmt only after the 1st frame has been decoded,
         * so we need to do another check in ffmpeg_GetFrameBuf() */
        p_sys->b_direct_rendering = true;
    }

    /* ffmpeg doesn't properly release old pictures when frames are skipped */
    //if( p_sys->b_hurry_up ) p_sys->b_direct_rendering = false;
    if( p_sys->b_direct_rendering )
    {
        msg_Dbg( p_dec, "trying to use direct rendering" );
        p_sys->p_context->flags |= CODEC_FLAG_EMU_EDGE;
    }
    else
    {
        msg_Dbg( p_dec, "direct rendering is disabled" );
    }

    /* Always use our get_buffer wrapper so we can calculate the
     * PTS correctly */
    p_sys->p_context->get_buffer = ffmpeg_GetFrameBuf;
    p_sys->p_context->reget_buffer = ffmpeg_ReGetFrameBuf;
    p_sys->p_context->release_buffer = ffmpeg_ReleaseFrameBuf;
    p_sys->p_context->opaque = p_dec;

#ifdef HAVE_AVCODEC_MT
    int i_thread_count = var_InheritInteger( p_dec, "ffmpeg-threads" );
    if( i_thread_count <= 0 )
        i_thread_count = vlc_GetCPUCount();
    msg_Dbg( p_dec, "allowing %d thread(s) for decoding", i_thread_count );
    p_sys->p_context->thread_count = i_thread_count;
#endif

#ifdef HAVE_AVCODEC_VA
    const bool b_use_hw = var_CreateGetBool( p_dec, "ffmpeg-hw" );
    if( b_use_hw &&
        (i_codec_id == CODEC_ID_MPEG1VIDEO || i_codec_id == CODEC_ID_MPEG2VIDEO ||
         i_codec_id == CODEC_ID_MPEG4 ||
         i_codec_id == CODEC_ID_H264 ||
         i_codec_id == CODEC_ID_VC1 || i_codec_id == CODEC_ID_WMV3) )
    {
#ifdef HAVE_AVCODEC_MT
        if( p_sys->p_context->thread_type & FF_THREAD_FRAME )
        {
            msg_Warn( p_dec, "threaded frame decoding is not compatible with ffmpeg-hw, disabled" );
            p_sys->p_context->thread_type &= ~FF_THREAD_FRAME;
        }
#endif
        p_sys->p_context->get_format = ffmpeg_GetFormat;
    }
#endif

    /* ***** misc init ***** */
    p_sys->i_pts = VLC_TS_INVALID;
    p_sys->b_has_b_frames = false;
    p_sys->b_first_frame = true;
    p_sys->b_flush = false;
    p_sys->i_late_frames = 0;

    /* Set output properties */
    p_dec->fmt_out.i_cat = VIDEO_ES;
    if( GetVlcChroma( &p_dec->fmt_out.video, p_context->pix_fmt ) != VLC_SUCCESS )
    {
        /* we are doomed. but not really, because most codecs set their pix_fmt later on */
        p_dec->fmt_out.i_codec = VLC_CODEC_I420;
    }
    p_dec->fmt_out.i_codec = p_dec->fmt_out.video.i_chroma;

    /* Setup palette */
    memset( &p_sys->palette, 0, sizeof(p_sys->palette) );
    if( p_dec->fmt_in.video.p_palette )
    {
        p_sys->palette.palette_changed = 1;

        for( int i = 0; i < __MIN( AVPALETTE_COUNT, p_dec->fmt_in.video.p_palette->i_entries ); i++ )
        {
            union {
                uint32_t u;
                uint8_t a[4];
            } c;
            c.a[0] = p_dec->fmt_in.video.p_palette->palette[i][0];
            c.a[1] = p_dec->fmt_in.video.p_palette->palette[i][1];
            c.a[2] = p_dec->fmt_in.video.p_palette->palette[i][2];
            c.a[3] = p_dec->fmt_in.video.p_palette->palette[i][3];

            p_sys->palette.palette[i] = c.u;
        }
        p_sys->p_context->palctrl = &p_sys->palette;

        p_dec->fmt_out.video.p_palette = malloc( sizeof(video_palette_t) );
        if( p_dec->fmt_out.video.p_palette )
            *p_dec->fmt_out.video.p_palette = *p_dec->fmt_in.video.p_palette;
    }
    else if( p_sys->i_codec_id != CODEC_ID_MSVIDEO1 && p_sys->i_codec_id != CODEC_ID_CINEPAK )
    {
        p_sys->p_context->palctrl = &p_sys->palette;
    }

    /* ***** init this codec with special data ***** */
    ffmpeg_InitCodec( p_dec );

    /* ***** Open the codec ***** */
    if( ffmpeg_OpenCodec( p_dec ) < 0 )
    {
        msg_Err( p_dec, "cannot open codec (%s)", p_sys->psz_namecodec );
        av_free( p_sys->p_ff_pic );
        vlc_sem_destroy( &p_sys->sem_mt );
        free( p_sys );
        return VLC_EGENERIC;
    }

#ifdef ANDROID
    p_sys->i_decode_called_count = 0;
    p_sys->i_decode_total_time = 0;
    p_sys->i_decode_average_time = 0;
    p_sys->i_decode_last_time = 0;
    p_sys->i_display_date_head = 0;
    p_sys->i_decode_may_suck = 0;
#endif

    return VLC_SUCCESS;
}
Example #10
static int netcam_read_rtsp_image(netcam_context_ptr netcam)
{
  if (netcam->rtsp == NULL) {
    if (rtsp_connect(netcam) < 0) {
      return -1;
    }
  }

  AVCodecContext *cc = netcam->rtsp->codec_context;
  AVFormatContext *fc = netcam->rtsp->format_context;
  netcam_buff_ptr buffer;

  /* Point to our working buffer. */
  buffer = netcam->receiving;
  buffer->used = 0;

  AVFrame *frame = avcodec_alloc_frame();

  AVPacket packet;
  
  av_init_packet(&packet);

  packet.data = NULL;
  packet.size = 0;

  int size_decoded = 0;
  static int usual_size_decoded = 0;

  while (size_decoded == 0 && av_read_frame(fc, &packet) >= 0) {

    if(packet.stream_index != netcam->rtsp->video_stream_index) {
      // not our packet, skip
      continue;
    }

    size_decoded = decode_packet(&packet, buffer, frame, cc);
  }

  if (size_decoded == 0) {
    // something went wrong, end of stream? free the packet and frame
    // before bailing out so they don't leak on this path
    av_free_packet(&packet);
    av_free(frame);
    MOTION_LOG(ERR, TYPE_NETCAM, SHOW_ERRNO, "%s: invalid frame!");
    return -1;
  }

  if (size_decoded != usual_size_decoded) {
    MOTION_LOG(WRN, TYPE_NETCAM, SHOW_ERRNO, "%s: unusual frame size of %d!", size_decoded);
    usual_size_decoded = size_decoded;
  }

  // at this point, we are finished with the packet and frame, so free them.
  av_free_packet(&packet);
  av_free(frame);
  
  struct timeval curtime;
  
  if (gettimeofday(&curtime, NULL) < 0) {
    MOTION_LOG(WRN, TYPE_NETCAM, SHOW_ERRNO, "%s: gettimeofday");
  }
  
  netcam->receiving->image_time = curtime;
  
  /*
   * Calculate our "running average" time for this netcam's
   * frame transmissions (except for the first time).
   * Note that the average frame time is held in microseconds.
   */
  if (netcam->last_image.tv_sec) {
    netcam->av_frame_time = ((9.0 * netcam->av_frame_time) + 1000000.0 *
			     (curtime.tv_sec - netcam->last_image.tv_sec) +
			     (curtime.tv_usec- netcam->last_image.tv_usec)) / 10.0;
    
    MOTION_LOG(DBG, TYPE_NETCAM, NO_ERRNO, "%s: Calculated frame time %f",
	       netcam->av_frame_time);
  }
  
  netcam->last_image = curtime;
  
  netcam_buff *xchg;
  
  /*
   * read is complete - set the current 'receiving' buffer atomically
   * as 'latest', and make the buffer previously in 'latest' become
   * the new 'receiving'.
   */
  pthread_mutex_lock(&netcam->mutex);

  xchg = netcam->latest;
  netcam->latest = netcam->receiving;
  netcam->receiving = xchg;
  netcam->imgcnt++;
  
  /*
   * We have a new frame ready.  We send a signal so that
   * any thread (e.g. the motion main loop) waiting for the
   * next frame to become available may proceed.
   */
  pthread_cond_signal(&netcam->pic_ready);
  
  pthread_mutex_unlock(&netcam->mutex);
  
  return 0;
}
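decode_packet() is a helper not shown in this listing. A sketch for the same libavcodec era; buffer->ptr and the flat-copy strategy are assumptions (only buffer->used appears in the caller):

/* Sketch: decode one packet and, when a full frame comes out, copy its
 * planes into the netcam buffer as one contiguous image. */
static int decode_packet(AVPacket *packet, netcam_buff_ptr buffer,
                         AVFrame *frame, AVCodecContext *cc)
{
    int check = 0;
    int frame_size;

    if (avcodec_decode_video2(cc, frame, &check, packet) < 0 || !check)
        return 0;  /* no complete frame decoded from this packet */

    frame_size = avpicture_get_size(cc->pix_fmt, cc->width, cc->height);

    avpicture_layout((const AVPicture *)frame, cc->pix_fmt,
                     cc->width, cc->height,
                     (unsigned char *)buffer->ptr, frame_size);
    buffer->used = frame_size;

    return frame_size;
}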
Example #11
bool QVideoDecoder::load(const QString & filename)
{
	//if(!QFile::exists(filename))
	//	return false;

	//QMutexLocker locker(&mutex);
    static int debugCounter = 0;

	 AVInputFormat *inFmt = NULL;
	 AVFormatParameters formatParams;
	memset(&formatParams, 0, sizeof(AVFormatParameters));

	 QString fileTmp = filename;
	 //if(debugCounter ++ <= 0)
		//fileTmp = "vfwcap://0";
	if(fileTmp == "C:/dummy.txt")
		fileTmp = "vfwcap://0";
	qDebug() << "[DEBUG] QVideoDecoder::load(): starting with fileTmp:"<<fileTmp;
	 bool customInputFormat = false;
	 if(fileTmp.indexOf("://") > -1)
	 {
		 QStringList list = fileTmp.split("://");
		 if(list.size() == 2)
		 {
			 qDebug() << "[DEBUG] QVideoDecoder::load(): input format args:"<<list;
			 fileTmp = list[1];
			 if(fileTmp.isEmpty())
				fileTmp = "0";
			 avdevice_register_all();
			 QString fmt = list[0];
			 if(fmt == "cap")
				fmt = "vfwcap";
			 inFmt = av_find_input_format(qPrintable(fmt));
			 if( !inFmt )
			 {
				   qDebug() << "[ERROR] QVideoDecoder::load(): Unable to find input format:"<<list[0];
				   return false;
			 }


			 formatParams.time_base.num = 1;
			 formatParams.time_base.den = 25;

			 customInputFormat = true;
		}

	}




	// Open video file
	 //
	if(av_open_input_file(&m_av_format_context, qPrintable(fileTmp), inFmt, 0, &formatParams) != 0)
	//if(av_open_input_file(&m_av_format_context, "1", inFmt, 0, NULL) != 0)
	{
		qDebug() << "[WARN] QVideoDecoder::load(): av_open_input_file() failed, fileTmp:"<<fileTmp;
		return false;
	}

	// Retrieve stream information
	if(!customInputFormat)
	    if(av_find_stream_info(m_av_format_context) < 0)
	    {
		    qDebug() << "[WARN] QVideoDecoder::load(): av_find_stream_info() failed.";
		    return false;
	    }


	int i;

	// Find the first video stream
	m_video_stream = -1;
	m_audio_stream = -1;
	for(i = 0; i < m_av_format_context->nb_streams; i++)
	{
		if(m_av_format_context->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO)
		{
			m_video_stream = i;
		}
		if(m_av_format_context->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO)
		{
			m_audio_stream = i;
		}
	}
	if(m_video_stream == -1)
	{
		qDebug() << "[WARN] QVideoDecoder::load(): Cannot find video stream.";
		return false;
	}

	// Get a pointer to the codec context for the video and audio streams
	m_video_codec_context = m_av_format_context->streams[m_video_stream]->codec;
// 	m_video_codec_context->get_buffer = our_get_buffer;
// 	m_video_codec_context->release_buffer = our_release_buffer;

	// Find the decoder for the video stream
	m_video_codec =avcodec_find_decoder(m_video_codec_context->codec_id);
	if(m_video_codec == NULL)
	{
		qDebug() << "[WARN] QVideoDecoder::load(): avcodec_find_decoder() failed.";
		return false;
	}

	// Open codec
	if(avcodec_open(m_video_codec_context, m_video_codec) < 0)
	{
		qDebug() << "[WARN] QVideoDecoder::load(): avcodec_open() failed.";
		return false;
	}

	// Allocate video frame
	m_av_frame = avcodec_alloc_frame();

	// Allocate an AVFrame structure
	m_av_rgb_frame =avcodec_alloc_frame();
	if(m_av_rgb_frame == NULL)
	{
		qDebug() << "[WARN] QVideoDecoder::load(): avcodec_alloc_frame() failed.";
		return false;
	}

	// Determine required buffer size and allocate buffer
	//int num_bytes = avpicture_get_size(PIX_FMT_RGB32, m_video_codec_context->width, m_video_codec_context->height);
	int num_bytes = avpicture_get_size(RAW_PIX_FMT, m_video_codec_context->width, m_video_codec_context->height);


	m_buffer = (uint8_t *)av_malloc(num_bytes * sizeof(uint8_t));

	// Assign appropriate parts of buffer to image planes in pFrameRGB
	// Note that pFrameRGB is an AVFrame, but AVFrame is a superset of AVPicture
	//avpicture_fill((AVPicture *)m_av_rgb_frame, m_buffer, PIX_FMT_RGB32, m_video_codec_context->width, m_video_codec_context->height);
	avpicture_fill((AVPicture *)m_av_rgb_frame, m_buffer, RAW_PIX_FMT, m_video_codec_context->width, m_video_codec_context->height);

	if(m_audio_stream != -1)
	{
		m_audio_codec_context = m_av_format_context->streams[m_audio_stream]->codec;

// 		// Set audio settings from codec info
// 		wanted_spec.freq = m_audio_codec_context->sample_rate;
// 		wanted_spec.format = AUDIO_S16SYS;
// 		wanted_spec.channels = m_audio_codec_context->channels;
// 		wanted_spec.silence = 0;
// 		wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
// 		//wanted_spec.callback = audio_callback;
// 		wanted_spec.userdata = m_audio_codec_context;
//
// 		if(SDL_OpenAudio(&wanted_spec, &spec) < 0)
// 		{
// 			//error
// 			return false;
// 		}
//
		m_audio_codec = avcodec_find_decoder(m_audio_codec_context->codec_id);
		if(!m_audio_codec)
		{
			//unsupported codec
			return false;
		}
		avcodec_open(m_audio_codec_context, m_audio_codec);
	}

	m_timebase = m_av_format_context->streams[m_video_stream]->time_base;

	calculateVideoProperties();

	m_initial_decode = true;

	decode();

	m_video->m_video_loaded = true;

	return true;
}
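RAW_PIX_FMT is a project-level define that is not part of this listing; the commented-out lines above suggest it replaced the hard-coded PIX_FMT_RGB32, e.g. (an assumption):

/* Assumption: project-wide raw output pixel format, standing in for the
 * PIX_FMT_RGB32 calls commented out above. */
#define RAW_PIX_FMT PIX_FMT_RGB32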
Example #12
bool CvCapture_FFMPEG::open( const char* _filename )
{
    unsigned i;
    bool valid = false;

    close();

    /* register all codecs, demux and protocols */
    av_register_all();

#ifndef _DEBUG
    // av_log_level = AV_LOG_QUIET;
#endif

    int err = av_open_input_file(&ic, _filename, NULL, 0, NULL);
    if (err < 0) {
	    CV_WARN("Error opening file");
	    goto exit_func;
    }
    err = av_find_stream_info(ic);
    if (err < 0) {
	    CV_WARN("Could not find codec parameters");
	    goto exit_func;
    }
    for(i = 0; i < ic->nb_streams; i++) {
#if LIBAVFORMAT_BUILD > 4628
        AVCodecContext *enc = ic->streams[i]->codec;
#else
        AVCodecContext *enc = &ic->streams[i]->codec;
#endif

        if( CODEC_TYPE_VIDEO == enc->codec_type && video_stream < 0) {
            AVCodec *codec = avcodec_find_decoder(enc->codec_id);
            if (!codec || avcodec_open(enc, codec) < 0)
                goto exit_func;
            video_stream = i;
            video_st = ic->streams[i];
            picture = avcodec_alloc_frame();

            rgb_picture.data[0] = (uint8_t*)cvAlloc(
                                    avpicture_get_size( PIX_FMT_BGR24,
                                    enc->width, enc->height ));
            avpicture_fill( (AVPicture*)&rgb_picture, rgb_picture.data[0],
                    PIX_FMT_BGR24, enc->width, enc->height );

            cvInitImageHeader( &frame, cvSize( enc->width,
                                       enc->height ), 8, 3, 0, 4 );
            cvSetData( &frame, rgb_picture.data[0],
                               rgb_picture.linesize[0] );
            break;
        }
    }

    if(video_stream >= 0) valid = true;

    // perform check if source is seekable via ffmpeg's seek function av_seek_frame(...)
    err = av_seek_frame(ic, video_stream, 10, 0);
    if (err < 0)
    {
        filename=(char*)malloc(strlen(_filename)+1);
        strcpy(filename, _filename);
        // reopen videofile to 'seek' back to first frame
        reopen();
    }
    else
    {
        // seek seems to work, so we don't need the filename,
        // but we still need to seek back to filestart
        filename=NULL;
        av_seek_frame(ic, video_stream, 0, 0);
    }
exit_func:

    if( !valid )
        close();

    return valid;
}
Example #13
/**
 * ffmpeg_open
 *      Opens an mpeg file using the new libavformat method. Both mpeg1
 *      and mpeg4 are supported. However, if the current ffmpeg version doesn't allow
 *      mpeg1 with non-standard framerate, the open will fail. Timelapse is a special
 *      case and is tested separately.
 *
 *  Returns
 *      A new allocated ffmpeg struct or NULL if any error happens.
 */
struct ffmpeg *ffmpeg_open(char *ffmpeg_video_codec, char *filename,
                           unsigned char *y, unsigned char *u, unsigned char *v,
                           int width, int height, int rate, int bps, int vbr)
{
    AVCodecContext *c;
    AVCodec *codec;
    struct ffmpeg *ffmpeg;
    int is_mpeg1;
    int ret;
    /*
     * Allocate space for our ffmpeg structure. This structure contains all the
     * codec and image information we need to generate movies.
     * FIXME when motion exits we should close the movie to ensure that
     * ffmpeg is freed.
     */
    ffmpeg = mymalloc(sizeof(struct ffmpeg));
    memset(ffmpeg, 0, sizeof(struct ffmpeg));

    ffmpeg->vbr = vbr;

    /* Store codec name in ffmpeg->codec, with buffer overflow check. */
    snprintf(ffmpeg->codec, sizeof(ffmpeg->codec), "%s", ffmpeg_video_codec);

    /* Allocation the output media context. */
#ifdef HAVE_AVFORMAT_ALLOC_CONTEXT
    ffmpeg->oc = avformat_alloc_context();
#elif defined HAVE_AV_AVFORMAT_ALLOC_CONTEXT
    ffmpeg->oc = av_alloc_format_context();
#else
    ffmpeg->oc = av_mallocz(sizeof(AVFormatContext));
#endif

    if (!ffmpeg->oc) {
        MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Memory error while allocating"
                   " output media context");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    /* Setup output format */
    ffmpeg->oc->oformat = get_oformat(ffmpeg_video_codec, filename);
    if (!ffmpeg->oc->oformat) {
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    snprintf(ffmpeg->oc->filename, sizeof(ffmpeg->oc->filename), "%s", filename);

    /* Create a new video stream and initialize the codecs. */
    ffmpeg->video_st = NULL;
    if (ffmpeg->oc->oformat->video_codec != CODEC_ID_NONE) {
#if defined FF_API_NEW_AVIO 
        ffmpeg->video_st = avformat_new_stream(ffmpeg->oc, NULL /* Codec */);
#else
        ffmpeg->video_st = av_new_stream(ffmpeg->oc, 0);
#endif
        if (!ffmpeg->video_st) {
            MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: av_new_stream - could"
                       " not alloc stream");
            ffmpeg_cleanups(ffmpeg);
            return NULL;
        }
    } else {
        /* We did not get a proper video codec. */
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Failed to obtain a proper"
                   " video codec");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    ffmpeg->c     = c = AVSTREAM_CODEC_PTR(ffmpeg->video_st);
    c->codec_id   = ffmpeg->oc->oformat->video_codec;
#if LIBAVCODEC_VERSION_MAJOR < 53    
    c->codec_type = CODEC_TYPE_VIDEO;
#else
    c->codec_type = AVMEDIA_TYPE_VIDEO;
#endif    
    is_mpeg1      = c->codec_id == CODEC_ID_MPEG1VIDEO;

    if (strcmp(ffmpeg_video_codec, "ffv1") == 0)
        c->strict_std_compliance = -2;

    /* Uncomment to allow non-standard framerates. */
    //c->strict_std_compliance = -1;

    /* Set default parameters */
    c->bit_rate = bps;
    c->width    = width;
    c->height   = height;
#if LIBAVCODEC_BUILD >= 4754
    /* Frame rate = 1/time_base, so we set 1/rate, not rate/1 */
    c->time_base.num = 1;
    c->time_base.den = rate;
#else
    c->frame_rate      = rate;
    c->frame_rate_base = 1;
#endif /* LIBAVCODEC_BUILD >= 4754 */

    MOTION_LOG(INF, TYPE_ENCODER, NO_ERRNO, "%s FPS %d",
               rate);

    if (vbr)
        c->flags |= CODEC_FLAG_QSCALE;

    /*
     * Set codec specific parameters.
     * Set intra frame distance in frames depending on codec.
     */
    c->gop_size = is_mpeg1 ? 10 : 12;

    /* Some formats want stream headers to be separate. */
    if (!strcmp(ffmpeg->oc->oformat->name, "mp4") ||
        !strcmp(ffmpeg->oc->oformat->name, "mov") ||
        !strcmp(ffmpeg->oc->oformat->name, "3gp")) {
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

#if defined FF_API_NEW_AVIO
// pass the options to avformat_write_header directly
#else
    /* Set the output parameters (must be done even if no parameters). */
    if (av_set_parameters(ffmpeg->oc, NULL) < 0) {
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: av_set_parameters error:"
                   " Invalid output format parameters");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }
#endif

    /* Dump the format settings.  This shows how the various streams relate to each other. */
    //dump_format(ffmpeg->oc, 0, filename, 1);

    /*
     * Now that all the parameters are set, we can open the video
     * codec and allocate the necessary encode buffers.
     */
    codec = avcodec_find_encoder(c->codec_id);

    if (!codec) {
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Codec %s not found",
                   ffmpeg_video_codec);
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    /* Set the picture format - needed in ffmpeg starting around April-May 2005 */
    c->pix_fmt = PIX_FMT_YUV420P;

    /* Get a mutex lock. */
    pthread_mutex_lock(&global_lock);

    /* Open the codec */
#if defined FF_API_NEW_AVIO
    ret = avcodec_open2(c, codec, NULL /* options */ );
#else
    ret = avcodec_open(c, codec);
#endif

    if (ret < 0) {
        /* Release the lock. */
        pthread_mutex_unlock(&global_lock);
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: avcodec_open - could not open codec %s",
                   ffmpeg_video_codec);
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    /* Release the lock. */
    pthread_mutex_unlock(&global_lock);

    ffmpeg->video_outbuf = NULL;

    if (!(ffmpeg->oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /*
         * Allocate output buffer
         * XXX: API change will be done
         * ffmpeg->video_outbuf_size = 200000
         */
        ffmpeg->video_outbuf_size = ffmpeg->c->width * 512;
        ffmpeg->video_outbuf = mymalloc(ffmpeg->video_outbuf_size);
    }

    /* Allocate the encoded raw picture. */
    ffmpeg->picture = avcodec_alloc_frame();

    if (!ffmpeg->picture) {
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: avcodec_alloc_frame -"
                   " could not alloc frame");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    /* Set variable bitrate if requested. */
    if (ffmpeg->vbr)
        ffmpeg->picture->quality = ffmpeg->vbr;


    /* Set the frame data. */
    ffmpeg->picture->data[0] = y;
    ffmpeg->picture->data[1] = u;
    ffmpeg->picture->data[2] = v;
    ffmpeg->picture->linesize[0] = ffmpeg->c->width;
    ffmpeg->picture->linesize[1] = ffmpeg->c->width / 2;
    ffmpeg->picture->linesize[2] = ffmpeg->c->width / 2;

    /* Open the output file, if needed. */
    if (!(ffmpeg->oc->oformat->flags & AVFMT_NOFILE)) {
        char file_proto[256];

        /*
         * Use append file protocol for mpeg1, to get the append behavior from
         * url_fopen, but no protocol (=> default) for other codecs.
         */
        if (is_mpeg1)
#if defined FF_API_NEW_AVIO
            snprintf(file_proto, sizeof(file_proto), "%s", filename);
#else
            snprintf(file_proto, sizeof(file_proto), APPEND_PROTO ":%s", filename);
#endif
        else
Пример #14
0
/* ============ Internal functions ================= */
int tdav_codec_mp4ves_open_encoder(tdav_codec_mp4ves_t* self)
{
	int ret, size;
	int32_t max_bw_kpbs;
	if(!self->encoder.codec && !(self->encoder.codec = avcodec_find_encoder(CODEC_ID_MPEG4))){
		TSK_DEBUG_ERROR("Failed to find mp4v encoder");
		return -1;
	}

	if(self->encoder.context){
		TSK_DEBUG_ERROR("Encoder already opened");
		return -1;
	}
	self->encoder.context = avcodec_alloc_context();
	avcodec_get_context_defaults(self->encoder.context);
	
	self->encoder.context->pix_fmt		= PIX_FMT_YUV420P;
	self->encoder.context->time_base.num  = 1;
	self->encoder.context->time_base.den  = TMEDIA_CODEC_VIDEO(self)->in.fps;
	self->encoder.context->width = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.height : TMEDIA_CODEC_VIDEO(self)->out.width;
	self->encoder.context->height = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.width : TMEDIA_CODEC_VIDEO(self)->out.height;
	self->encoder.context->mb_decision = FF_MB_DECISION_RD;
	self->encoder.context->noise_reduction = 250;
	self->encoder.context->flags |= CODEC_FLAG_QSCALE;
	self->encoder.context->global_quality = FF_QP2LAMBDA * self->encoder.quality;
	
	max_bw_kpbs = TSK_CLAMP(
		0,
		tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height, TMEDIA_CODEC_VIDEO(self)->out.fps), 
		self->encoder.max_bw_kpbs
	);
	self->encoder.context->bit_rate = (max_bw_kpbs * 1024);// bps
	self->encoder.context->rtp_payload_size = MP4V_RTP_PAYLOAD_SIZE;
	self->encoder.context->opaque = tsk_null;
	self->encoder.context->profile = self->profile>>4;
	self->encoder.context->level = self->profile & 0x0F;
	self->encoder.context->gop_size = (TMEDIA_CODEC_VIDEO(self)->in.fps * MP4V_GOP_SIZE_IN_SECONDS);
	self->encoder.context->max_b_frames = 0;
	self->encoder.context->b_frame_strategy = 1;
    self->encoder.context->flags |= CODEC_FLAG_AC_PRED;

	// Picture (YUV 420)
	if(!(self->encoder.picture = avcodec_alloc_frame())){
		TSK_DEBUG_ERROR("Failed to create MP4V-ES encoder picture");
		return -2;
	}
	avcodec_get_frame_defaults(self->encoder.picture);
	
	size = avpicture_get_size(PIX_FMT_YUV420P, self->encoder.context->width, self->encoder.context->height);
	if(!(self->encoder.buffer = tsk_calloc(size, sizeof(uint8_t)))){
		TSK_DEBUG_ERROR("Failed to allocate MP4V-ES encoder buffer");
		return -2;
	}
	
	// Open encoder
	if((ret = avcodec_open(self->encoder.context, self->encoder.codec)) < 0){
		TSK_DEBUG_ERROR("Failed to open MP4V-ES encoder");
		return ret;
	}

	TSK_DEBUG_INFO("[MP4V-ES] bitrate=%d bps", self->encoder.context->bit_rate);

	return ret;
}
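
A matching teardown is not shown in this example. A minimal sketch, assuming the same tdav_codec_mp4ves_t fields the open function uses (the library's real close routine may differ), could look like:

static int tdav_codec_mp4ves_close_encoder_sketch(tdav_codec_mp4ves_t* self)
{
	if(self->encoder.context){
		avcodec_close(self->encoder.context); /* undo avcodec_open() */
		av_free(self->encoder.context);
		self->encoder.context = tsk_null;
	}
	if(self->encoder.picture){
		av_free(self->encoder.picture);
		self->encoder.picture = tsk_null;
	}
	TSK_FREE(self->encoder.buffer); /* allocated with tsk_calloc() above */
	return 0;
}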
Пример #15
0
uint32_t MythRAOPConnection::decodeAudioPacket(uint8_t type,
        const QByteArray *buf,
        QList<AudioData> *dest)
{
    const char *data_in = buf->constData();
    int len             = buf->size();
    if (type == AUDIO_RESEND)
    {
        data_in += 4;
        len     -= 4;
    }
    data_in += 12;
    len     -= 12;
    if (len < 16)
        return -1;

    int aeslen = len & ~0xf;
    unsigned char iv[16];
    unsigned char decrypted_data[MAX_PACKET_SIZE];
    memcpy(iv, m_AESIV.constData(), sizeof(iv));
    AES_cbc_encrypt((const unsigned char*)data_in,
                    decrypted_data, aeslen,
                    &m_aesKey, iv, AES_DECRYPT);
    memcpy(decrypted_data + aeslen, data_in + aeslen, len - aeslen);

    AVPacket tmp_pkt;
    AVCodecContext *ctx = m_codeccontext;

    av_init_packet(&tmp_pkt);
    tmp_pkt.data = decrypted_data;
    tmp_pkt.size = len;

    uint32_t frames_added = 0;
    while (tmp_pkt.size > 0)
    {
        AVFrame *frame = avcodec_alloc_frame();
        int got_frame = 0;
        int ret = avcodec_decode_audio4(ctx, frame, &got_frame, &tmp_pkt);

        if (ret < 0)
        {
            LOG(VB_GENERAL, LOG_ERR, LOC + QString("Error decoding audio"));
            return -1;
        }

        if (ret > 0 && got_frame)
        {
            int decoded_size =
                av_samples_get_buffer_size(NULL, ctx->channels,
                                           frame->nb_samples,
                                           ctx->sample_fmt, 1);
            frame->linesize[0] = decoded_size;
            int frames = frame->nb_samples;

            frames_added += frames;
            dest->append(frame);
        }
        tmp_pkt.data += ret;
        tmp_pkt.size -= ret;
    }
    return frames_added;
}
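
The mask len & ~0xf above rounds the payload length down to a multiple of the 16-byte AES block, so only the aligned prefix goes through AES_cbc_encrypt() and the unaligned tail is copied through in the clear. A standalone illustration of that arithmetic (example values, not taken from the code above):

#include <stdio.h>

int main(void)
{
    int len = 37;             /* example payload length */
    int aeslen = len & ~0xf;  /* largest multiple of 16 <= len, here 32 */
    printf("decrypt %d bytes, pass %d bytes through in the clear\n",
           aeslen, len - aeslen); /* prints 32 and 5 */
    return 0;
}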
Пример #16
0
bool FFmpegEncoderOpen(struct FFmpegEncoder* encoder, const char* outfile) {
	AVCodec* acodec = avcodec_find_encoder_by_name(encoder->audioCodec);
	AVCodec* vcodec = avcodec_find_encoder_by_name(encoder->videoCodec);
	if ((encoder->audioCodec && !acodec) || !vcodec || !FFmpegEncoderVerifyContainer(encoder)) {
		return false;
	}

	encoder->currentAudioSample = 0;
	encoder->currentAudioFrame = 0;
	encoder->currentVideoFrame = 0;
	encoder->nextAudioPts = 0;

	AVOutputFormat* oformat = av_guess_format(encoder->containerFormat, 0, 0);
#ifndef USE_LIBAV
	avformat_alloc_output_context2(&encoder->context, oformat, 0, outfile);
#else
	encoder->context = avformat_alloc_context();
	strncpy(encoder->context->filename, outfile, sizeof(encoder->context->filename) - 1);
	encoder->context->filename[sizeof(encoder->context->filename) - 1] = '\0';
	encoder->context->oformat = oformat;
#endif

	if (acodec) {
#ifdef FFMPEG_USE_CODECPAR
		encoder->audioStream = avformat_new_stream(encoder->context, NULL);
		encoder->audio = avcodec_alloc_context3(acodec);
#else
		encoder->audioStream = avformat_new_stream(encoder->context, acodec);
		encoder->audio = encoder->audioStream->codec;
#endif
		encoder->audio->bit_rate = encoder->audioBitrate;
		encoder->audio->channels = 2;
		encoder->audio->channel_layout = AV_CH_LAYOUT_STEREO;
		encoder->audio->sample_rate = encoder->sampleRate;
		encoder->audio->sample_fmt = encoder->sampleFormat;
		AVDictionary* opts = 0;
		av_dict_set(&opts, "strict", "-2", 0);
		if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) {
			encoder->audio->flags |= CODEC_FLAG_GLOBAL_HEADER;
		}
		avcodec_open2(encoder->audio, acodec, &opts);
		av_dict_free(&opts);
#if LIBAVCODEC_VERSION_MAJOR >= 55
		encoder->audioFrame = av_frame_alloc();
#else
		encoder->audioFrame = avcodec_alloc_frame();
#endif
		if (!encoder->audio->frame_size) {
			encoder->audio->frame_size = 1;
		}
		encoder->audioFrame->nb_samples = encoder->audio->frame_size;
		encoder->audioFrame->format = encoder->audio->sample_fmt;
		encoder->audioFrame->pts = 0;
		encoder->resampleContext = avresample_alloc_context();
		av_opt_set_int(encoder->resampleContext, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0);
		av_opt_set_int(encoder->resampleContext, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
		av_opt_set_int(encoder->resampleContext, "in_sample_rate", PREFERRED_SAMPLE_RATE, 0);
		av_opt_set_int(encoder->resampleContext, "out_sample_rate", encoder->sampleRate, 0);
		av_opt_set_int(encoder->resampleContext, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
		av_opt_set_int(encoder->resampleContext, "out_sample_fmt", encoder->sampleFormat, 0);
		avresample_open(encoder->resampleContext);
		encoder->audioBufferSize = (encoder->audioFrame->nb_samples * PREFERRED_SAMPLE_RATE / encoder->sampleRate) * 4;
		encoder->audioBuffer = av_malloc(encoder->audioBufferSize);
		encoder->postaudioBufferSize = av_samples_get_buffer_size(0, encoder->audio->channels, encoder->audio->frame_size, encoder->audio->sample_fmt, 0);
		encoder->postaudioBuffer = av_malloc(encoder->postaudioBufferSize);
		avcodec_fill_audio_frame(encoder->audioFrame, encoder->audio->channels, encoder->audio->sample_fmt, (const uint8_t*) encoder->postaudioBuffer, encoder->postaudioBufferSize, 0);

		/* strcasecmp() returns 0 on a match, so the checks must be negated
		 * for the filter to apply only to MP4-family containers. */
		if (encoder->audio->codec->id == AV_CODEC_ID_AAC &&
		    (!strcasecmp(encoder->containerFormat, "mp4") ||
		        !strcasecmp(encoder->containerFormat, "m4v") ||
		        !strcasecmp(encoder->containerFormat, "mov"))) {
			// MP4 container doesn't support the raw ADTS AAC format that the encoder spits out
#ifdef FFMPEG_USE_NEW_BSF
			av_bsf_alloc(av_bsf_get_by_name("aac_adtstoasc"), &encoder->absf);
			avcodec_parameters_from_context(encoder->absf->par_in, encoder->audio);
			av_bsf_init(encoder->absf);
#else
			encoder->absf = av_bitstream_filter_init("aac_adtstoasc");
#endif
		}
#ifdef FFMPEG_USE_CODECPAR
		avcodec_parameters_from_context(encoder->audioStream->codecpar, encoder->audio);
#endif
	}

#ifdef FFMPEG_USE_CODECPAR
	encoder->videoStream = avformat_new_stream(encoder->context, NULL);
	encoder->video = avcodec_alloc_context3(vcodec);
#else
	encoder->videoStream = avformat_new_stream(encoder->context, vcodec);
	encoder->video = encoder->videoStream->codec;
#endif
	encoder->video->bit_rate = encoder->videoBitrate;
	encoder->video->width = encoder->width;
	encoder->video->height = encoder->height;
	encoder->video->time_base = (AVRational) { VIDEO_TOTAL_LENGTH, GBA_ARM7TDMI_FREQUENCY };
	encoder->video->pix_fmt = encoder->pixFormat;
	encoder->video->gop_size = 60;
	encoder->video->max_b_frames = 3;
	if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) {
		encoder->video->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}
	if (strcmp(vcodec->name, "libx264") == 0) {
		// Try to adaptively figure out when you can use a slower encoder
		if (encoder->width * encoder->height > 1000000) {
			av_opt_set(encoder->video->priv_data, "preset", "superfast", 0);
		} else if (encoder->width * encoder->height > 500000) {
			av_opt_set(encoder->video->priv_data, "preset", "veryfast", 0);
		} else {
			av_opt_set(encoder->video->priv_data, "preset", "faster", 0);
		}
		av_opt_set(encoder->video->priv_data, "tune", "zerolatency", 0);
	}
	avcodec_open2(encoder->video, vcodec, 0);
#if LIBAVCODEC_VERSION_MAJOR >= 55
	encoder->videoFrame = av_frame_alloc();
#else
	encoder->videoFrame = avcodec_alloc_frame();
#endif
	encoder->videoFrame->format = encoder->video->pix_fmt;
	encoder->videoFrame->width = encoder->video->width;
	encoder->videoFrame->height = encoder->video->height;
	encoder->videoFrame->pts = 0;
	_ffmpegSetVideoDimensions(&encoder->d, encoder->iwidth, encoder->iheight);
	av_image_alloc(encoder->videoFrame->data, encoder->videoFrame->linesize, encoder->video->width, encoder->video->height, encoder->video->pix_fmt, 32);
#ifdef FFMPEG_USE_CODECPAR
	avcodec_parameters_from_context(encoder->videoStream->codecpar, encoder->video);
#endif

	avio_open(&encoder->context->pb, outfile, AVIO_FLAG_WRITE);
	return avformat_write_header(encoder->context, 0) >= 0;
}
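
FFmpegEncoderOpen() only sets the pipeline up; the shutdown has to mirror it. A minimal sketch using the same fields (illustrative only — the project's real close function is not reproduced here):

static void FFmpegEncoderFinishSketch(struct FFmpegEncoder* encoder) {
	av_write_trailer(encoder->context);      /* flush buffered packets */
	avio_close(encoder->context->pb);        /* close the file opened with avio_open() */
	avformat_free_context(encoder->context); /* also frees the streams */
	encoder->context = 0;
}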
Пример #17
0
/*****************************************************************************
 * InitVideo: initialize the video decoder
 *****************************************************************************
 * the ffmpeg codec will be opened, some memory allocated. The vout is not yet
 * opened (done after the first decoded frame).
 *****************************************************************************/
int InitVideoDec( decoder_t *p_dec, AVCodecContext *p_context,
                      AVCodec *p_codec, int i_codec_id, const char *psz_namecodec )
{
    decoder_sys_t *p_sys;
    int i_val;

    /* Allocate the memory needed to store the decoder's structure */
    if( ( p_dec->p_sys = p_sys = calloc( 1, sizeof(decoder_sys_t) ) ) == NULL )
        return VLC_ENOMEM;

    p_codec->type = AVMEDIA_TYPE_VIDEO;
    p_context->codec_type = AVMEDIA_TYPE_VIDEO;
    p_context->codec_id = i_codec_id;
    p_sys->p_context = p_context;
    p_sys->p_codec = p_codec;
    p_sys->i_codec_id = i_codec_id;
    p_sys->psz_namecodec = psz_namecodec;
    p_sys->p_ff_pic = avcodec_alloc_frame();
    p_sys->b_delayed_open = true;
    p_sys->p_va = NULL;
    vlc_sem_init( &p_sys->sem_mt, 0 );

    /* ***** Fill p_context with init values ***** */
    p_sys->p_context->codec_tag = ffmpeg_CodecTag( p_dec->fmt_in.i_original_fourcc ?: p_dec->fmt_in.i_codec );

    /*  ***** Get configuration of ffmpeg plugin ***** */
    p_sys->p_context->workaround_bugs =
        var_InheritInteger( p_dec, "avcodec-workaround-bugs" );
    p_sys->p_context->err_recognition =
        var_InheritInteger( p_dec, "avcodec-error-resilience" );

    if( var_CreateGetBool( p_dec, "grayscale" ) )
        p_sys->p_context->flags |= CODEC_FLAG_GRAY;

    /* ***** Output always the frames ***** */
#if LIBAVCODEC_VERSION_CHECK(55, 23, 1, 40, 101)
    p_sys->p_context->flags |= CODEC_FLAG_OUTPUT_CORRUPT;
#endif

    i_val = var_CreateGetInteger( p_dec, "avcodec-vismv" );
    if( i_val ) p_sys->p_context->debug_mv = i_val;

    i_val = var_CreateGetInteger( p_dec, "avcodec-skiploopfilter" );
    if( i_val >= 4 ) p_sys->p_context->skip_loop_filter = AVDISCARD_ALL;
    else if( i_val == 3 ) p_sys->p_context->skip_loop_filter = AVDISCARD_NONKEY;
    else if( i_val == 2 ) p_sys->p_context->skip_loop_filter = AVDISCARD_BIDIR;
    else if( i_val == 1 ) p_sys->p_context->skip_loop_filter = AVDISCARD_NONREF;

    if( var_CreateGetBool( p_dec, "avcodec-fast" ) )
        p_sys->p_context->flags2 |= CODEC_FLAG2_FAST;

    /* ***** libavcodec frame skipping ***** */
    p_sys->b_hurry_up = var_CreateGetBool( p_dec, "avcodec-hurry-up" );

    i_val = var_CreateGetInteger( p_dec, "avcodec-skip-frame" );
    if( i_val >= 4 ) p_sys->p_context->skip_frame = AVDISCARD_ALL;
    else if( i_val == 3 ) p_sys->p_context->skip_frame = AVDISCARD_NONKEY;
    else if( i_val == 2 ) p_sys->p_context->skip_frame = AVDISCARD_BIDIR;
    else if( i_val == 1 ) p_sys->p_context->skip_frame = AVDISCARD_NONREF;
    else if( i_val == -1 ) p_sys->p_context->skip_frame = AVDISCARD_NONE;
    else p_sys->p_context->skip_frame = AVDISCARD_DEFAULT;
    p_sys->i_skip_frame = p_sys->p_context->skip_frame;

    i_val = var_CreateGetInteger( p_dec, "avcodec-skip-idct" );
    if( i_val >= 4 ) p_sys->p_context->skip_idct = AVDISCARD_ALL;
    else if( i_val == 3 ) p_sys->p_context->skip_idct = AVDISCARD_NONKEY;
    else if( i_val == 2 ) p_sys->p_context->skip_idct = AVDISCARD_BIDIR;
    else if( i_val == 1 ) p_sys->p_context->skip_idct = AVDISCARD_NONREF;
    else if( i_val == -1 ) p_sys->p_context->skip_idct = AVDISCARD_NONE;
    else p_sys->p_context->skip_idct = AVDISCARD_DEFAULT;
    p_sys->i_skip_idct = p_sys->p_context->skip_idct;

    /* ***** libavcodec direct rendering ***** */
    p_sys->b_direct_rendering = false;
    p_sys->i_direct_rendering_used = -1;
    if( var_CreateGetBool( p_dec, "avcodec-dr" ) &&
       (p_sys->p_codec->capabilities & CODEC_CAP_DR1) &&
        /* No idea why ... but this fixes flickering on some TSCC streams */
        p_sys->i_codec_id != AV_CODEC_ID_TSCC && p_sys->i_codec_id != AV_CODEC_ID_CSCD &&
        p_sys->i_codec_id != AV_CODEC_ID_CINEPAK &&
        !p_sys->p_context->debug_mv )
    {
        /* Some codecs set pix_fmt only after the 1st frame has been decoded,
         * so we need to do another check in ffmpeg_GetFrameBuf() */
        p_sys->b_direct_rendering = true;
    }

    /* libavcodec doesn't properly release old pictures when frames are skipped */
    //if( p_sys->b_hurry_up ) p_sys->b_direct_rendering = false;
    if( p_sys->b_direct_rendering )
    {
        msg_Dbg( p_dec, "trying to use direct rendering" );
        p_sys->p_context->flags |= CODEC_FLAG_EMU_EDGE;
    }
    else
    {
        msg_Dbg( p_dec, "direct rendering is disabled" );
    }

    p_sys->p_context->get_format = ffmpeg_GetFormat;
    /* Always use our get_buffer wrapper so we can calculate the
     * PTS correctly */
#if LIBAVCODEC_VERSION_MAJOR >= 55
    p_sys->p_context->get_buffer2 = lavc_GetFrame;
#else
    p_sys->p_context->get_buffer = ffmpeg_GetFrameBuf;
    p_sys->p_context->reget_buffer = avcodec_default_reget_buffer;
    p_sys->p_context->release_buffer = ffmpeg_ReleaseFrameBuf;
#endif
    p_sys->p_context->opaque = p_dec;

#ifdef HAVE_AVCODEC_MT
    int i_thread_count = var_InheritInteger( p_dec, "avcodec-threads" );
    if( i_thread_count <= 0 )
    {
        i_thread_count = vlc_GetCPUCount();
        if( i_thread_count > 1 )
            i_thread_count++;

        //FIXME: take in count the decoding time
        i_thread_count = __MIN( i_thread_count, 4 );
    }
    i_thread_count = __MIN( i_thread_count, 16 );
    msg_Dbg( p_dec, "allowing %d thread(s) for decoding", i_thread_count );
    p_sys->p_context->thread_count = i_thread_count;
    p_sys->p_context->thread_safe_callbacks = true;

    switch( i_codec_id )
    {
        case AV_CODEC_ID_MPEG4:
        case AV_CODEC_ID_H263:
            p_sys->p_context->thread_type = 0;
            break;
        case AV_CODEC_ID_MPEG1VIDEO:
        case AV_CODEC_ID_MPEG2VIDEO:
            p_sys->p_context->thread_type &= ~FF_THREAD_SLICE;
            /* fall through */
# if (LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55, 1, 0))
        case AV_CODEC_ID_H264:
        case AV_CODEC_ID_VC1:
        case AV_CODEC_ID_WMV3:
            p_sys->p_context->thread_type &= ~FF_THREAD_FRAME;
# endif
    }

    if( p_sys->p_context->thread_type & FF_THREAD_FRAME )
        p_dec->i_extra_picture_buffers = 2 * p_sys->p_context->thread_count;
#endif

    /* ***** misc init ***** */
    p_sys->i_pts = VLC_TS_INVALID;
    p_sys->b_has_b_frames = false;
    p_sys->b_first_frame = true;
    p_sys->b_flush = false;
    p_sys->i_late_frames = 0;

    /* Set output properties */
    p_dec->fmt_out.i_cat = VIDEO_ES;
    if( GetVlcChroma( &p_dec->fmt_out.video, p_context->pix_fmt ) != VLC_SUCCESS )
    {
        /* we are doomed. but not really, because most codecs set their pix_fmt later on */
        p_dec->fmt_out.i_codec = VLC_CODEC_I420;
    }
    p_dec->fmt_out.i_codec = p_dec->fmt_out.video.i_chroma;

    p_dec->fmt_out.video.orientation = p_dec->fmt_in.video.orientation;

    if( p_dec->fmt_in.video.p_palette ) {
        p_sys->palette_sent = false;
        p_dec->fmt_out.video.p_palette = malloc( sizeof(video_palette_t) );
        if( p_dec->fmt_out.video.p_palette )
            *p_dec->fmt_out.video.p_palette = *p_dec->fmt_in.video.p_palette;
    } else
        p_sys->palette_sent = true;

    /* ***** init this codec with special data ***** */
    ffmpeg_InitCodec( p_dec );

    /* ***** Open the codec ***** */
    if( ffmpeg_OpenCodec( p_dec ) < 0 )
    {
        msg_Err( p_dec, "cannot open codec (%s)", p_sys->psz_namecodec );
        av_free( p_sys->p_ff_pic );
        vlc_sem_destroy( &p_sys->sem_mt );
        free( p_sys );
        return VLC_EGENERIC;
    }

    return VLC_SUCCESS;
}
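
The error path above already shows the required release order. For reference, a sketch of a full teardown mirroring it (the name is hypothetical — VLC's actual EndVideoDec is not shown here):

static void EndVideoDecSketch( decoder_t *p_dec )
{
    decoder_sys_t *p_sys = p_dec->p_sys;

    if( p_sys->p_context )
        avcodec_close( p_sys->p_context ); /* only once ffmpeg_OpenCodec() succeeded */
    av_free( p_sys->p_ff_pic );
    vlc_sem_destroy( &p_sys->sem_mt );
    free( p_sys );
}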
Пример #18
0
// --------------------------------------------------------------------------
//! @brief   Initialize video.
//! @return  Result of initialization
//! @retval  1 Success
//! @retval  0 Failure
// --------------------------------------------------------------------------
int ARDrone::initVideo(void)
{
    // AR.Drone 2.0
    if (version.major == ARDRONE_VERSION_2) {
        // Open the IP address and port
        char filename[256];
        sprintf(filename, "tcp://%s:%d", ip, ARDRONE_VIDEO_PORT);
        if (avformat_open_input(&pFormatCtx, filename, NULL, NULL) < 0) {
            CVDRONE_ERROR("avformat_open_input() was failed. (%s, %d)\n", __FILE__, __LINE__);
            return 0;
        }

        // Retrieve and dump stream information
        avformat_find_stream_info(pFormatCtx, NULL);
        av_dump_format(pFormatCtx, 0, filename, 0);

        // Find the decoder for the video stream
        pCodecCtx = pFormatCtx->streams[0]->codec;
        AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
        if (pCodec == NULL) {
            CVDRONE_ERROR("avcodec_find_decoder() was failed. (%s, %d)\n", __FILE__, __LINE__);
            return 0;
        }

        // Open codec
        if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
            CVDRONE_ERROR("avcodec_open2() was failed. (%s, %d)\n", __FILE__, __LINE__);
            return 0;
        }

        // Allocate video frames and a buffer
        #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
        pFrame = av_frame_alloc();
        pFrameBGR = av_frame_alloc();
        #else
        pFrame = avcodec_alloc_frame();
        pFrameBGR = avcodec_alloc_frame();
        #endif
        bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height) * sizeof(uint8_t));

        // Assign appropriate parts of buffer to image planes in pFrameBGR
        avpicture_fill((AVPicture*)pFrameBGR, bufferBGR, PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);

        // Create a conversion context for BGR output
        pConvertCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_BGR24, SWS_SPLINE, NULL, NULL, NULL);
    }
    // AR.Drone 1.0
    else {
        // Open the IP address and port
        if (!sockVideo.open(ip, ARDRONE_VIDEO_PORT)) {
            CVDRONE_ERROR("UDPSocket::open(port=%d) was failed. (%s, %d)\n", ARDRONE_VIDEO_PORT, __FILE__, __LINE__);
            return 0;
        }

        // Set codec
        pCodecCtx = avcodec_alloc_context3(NULL);
        pCodecCtx->width = 320;
        pCodecCtx->height = 240;

        // Allocate a buffer
        bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height));
    }

    // Allocate an IplImage
    img = cvCreateImage(cvSize(pCodecCtx->width, (pCodecCtx->height == 368) ? 360 : pCodecCtx->height), IPL_DEPTH_8U, 3);
    if (!img) {
        CVDRONE_ERROR("cvCreateImage() was failed. (%s, %d)\n", __FILE__, __LINE__);
        return 0;
    }

    // Clear the image
    cvZero(img);

    // Create a mutex
    mutexVideo = new pthread_mutex_t;
    pthread_mutex_init(mutexVideo, NULL);

    // Create a thread
    threadVideo = new pthread_t;
    if (pthread_create(threadVideo, NULL, runVideo, this) != 0) {
        CVDRONE_ERROR("pthread_create() was failed. (%s, %d)\n", __FILE__, __LINE__);
        return 0;
    }

    return 1;
}
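
For the AR.Drone 2.0 path, the thread started above would typically run a loop over the members initialized here, roughly one iteration per packet. A sketch under that assumption (the real runVideo() is not shown):

AVPacket packet;
int frameFinished = 0;
if (av_read_frame(pFormatCtx, &packet) >= 0) {
    if (packet.stream_index == 0) { // the TCP source carries a single video stream
        avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
        if (frameFinished) {
            pthread_mutex_lock(mutexVideo);
            // Convert the decoded frame to BGR for the IplImage
            sws_scale(pConvertCtx, pFrame->data, pFrame->linesize, 0,
                      pCodecCtx->height, pFrameBGR->data, pFrameBGR->linesize);
            pthread_mutex_unlock(mutexVideo);
        }
    }
    av_free_packet(&packet);
}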
Пример #19
0
/*
 * Video encoding example
 */
void video_encode_example(const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int i, out_size, size, x, y, outbuf_size;
    FILE *f;
    AVFrame *picture;
    uint8_t *outbuf, *picture_buf;

    printf("Video encoding\n");

    /* find the mpeg1 video encoder */
    codec = avcodec_find_encoder(CODEC_ID_MPEG1VIDEO);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    c= avcodec_alloc_context();
    picture= avcodec_alloc_frame();

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* frames per second */
    c->time_base= (AVRational){1,25};
    c->gop_size = 10; /* emit one intra frame every ten frames */
    c->max_b_frames=1;
    c->pix_fmt = PIX_FMT_YUV420P;

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* the codec gives us the frame size, in samples */

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "could not open %s\n", filename);
        exit(1);
    }

    /* alloc image and output buffer */
    outbuf_size = 100000;
    outbuf = malloc(outbuf_size);
    size = c->width * c->height;
    picture_buf = malloc((size * 3) / 2); /* size for YUV 420 */

    picture->data[0] = picture_buf;
    picture->data[1] = picture->data[0] + size;
    picture->data[2] = picture->data[1] + size / 4;
    picture->linesize[0] = c->width;
    picture->linesize[1] = c->width / 2;
    picture->linesize[2] = c->width / 2;

    /* encode 1 second of video */
    for(i=0;i<25;i++) {
        fflush(stdout);
        /* prepare a dummy image */
        /* Y */
        for(y=0;y<c->height;y++) {
            for(x=0;x<c->width;x++) {
                picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
            }
        }

        /* Cb and Cr */
        for(y=0;y<c->height/2;y++) {
            for(x=0;x<c->width/2;x++) {
                picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
                picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5;
            }
        }

        /* encode the image */
        out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
        printf("encoding frame %3d (size=%5d)\n", i, out_size);
        fwrite(outbuf, 1, out_size, f);
    }

    /* get the delayed frames */
    for(; out_size; i++) {
        fflush(stdout);

        out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
        printf("write frame %3d (size=%5d)\n", i, out_size);
        fwrite(outbuf, 1, out_size, f);
    }

    /* add sequence end code to have a real mpeg file */
    outbuf[0] = 0x00;
    outbuf[1] = 0x00;
    outbuf[2] = 0x01;
    outbuf[3] = 0xb7;
    fwrite(outbuf, 1, 4, f);
    fclose(f);
    free(picture_buf);
    free(outbuf);

    avcodec_close(c);
    av_free(c);
    av_free(picture);
    printf("\n");
}
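
A minimal driver for the example, assuming the global registration call of this libavcodec generation:

int main(int argc, char **argv)
{
    avcodec_register_all(); /* old-API global codec registration */
    video_encode_example(argc > 1 ? argv[1] : "test.mpg");
    return 0;
}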
Пример #20
0
static int UpdateInput( ffmpeg_video_real* p )
{
	if (p->Context)
		avcodec_close(p->Context);
    av_free(p->Context);
    av_free(p->Picture);
	if(p->rm.videobuf)
	{
		av_free(p->rm.videobuf);
		p->rm.videobuf = NULL;
	}

	p->RefTime = TIME_UNKNOWN;
	p->Context = NULL;
	p->Picture = NULL;
	BufferClear(&p->Buffer);

	if (p->Codec.In.Format.Type == PACKET_VIDEO)
	{
	    AVCodec *Codec;

		const codecinfo *i;
		for (i=Info;i->Id;++i)
			if (i->Id == p->Codec.Node.Class)
				break;
		if (!i->Id)
			return ERR_INVALID_DATA;
		
		Codec = avcodec_find_decoder(i->CodecId);
		if (!Codec)
			return ERR_INVALID_DATA;

		p->Context = avcodec_alloc_context();
		p->Picture = avcodec_alloc_frame();

		if (!p->Context || !p->Picture)
			return ERR_OUT_OF_MEMORY;

	    if ((p->Codec.In.Format.Format.Video.Pixel.Flags & PF_FRAGMENTED) && 
			(Codec->capabilities & CODEC_CAP_TRUNCATED))
			p->Context->flags|= CODEC_FLAG_TRUNCATED;

		UpdateSettings(p);
		p->Context->palctrl = NULL;
	    p->Context->bit_rate = 0;
		p->Context->extradata = p->Codec.In.Format.Extra;
		p->Context->extradata_size = p->Codec.In.Format.ExtraLength;
		p->Context->width = p->Codec.In.Format.Format.Video.Width;
		p->Context->height = p->Codec.In.Format.Format.Video.Height;
		p->Context->bits_per_coded_sample = p->Codec.In.Format.Format.Video.Pixel.BitCount;
		if (p->Codec.In.Format.Format.Video.Pixel.Palette && 
			p->Codec.In.Format.Format.Video.Pixel.BitCount<=8)
		{
			int i,n = 1 << p->Codec.In.Format.Format.Video.Pixel.BitCount;
			for (i=0;i<n;++i)
				p->Palette.palette[i] = INT32LE(p->Codec.In.Format.Format.Video.Pixel.Palette[i].v);
			p->Palette.palette_changed = 1;
			p->Context->palctrl = &p->Palette;
		}

		p->CodecId = i->CodecId;

	    if (avcodec_open(p->Context,Codec)<0)
		{
			// avoid calling avcodec_close at next UpdateInput
		    av_free(p->Context);
			p->Context = NULL;
			return ERR_INVALID_DATA;
		}

		if (!BuildOutputFormat(p))
			return ERR_INVALID_DATA;

		p->SkipToKey = 1;
		p->DropToKey = 1;
		p->Dropping = 0;
	}

	return ERR_NONE;
}
Пример #21
0
bool MediaConverter::initializeOutput(const char *outputMediaName)
{
	// Initialize the output format
	AVOutputFormat *pOutputFormat = av_guess_format(NULL, outputMediaName, NULL);
	if (NULL == pOutputFormat)
	{
		printf("Could not deduce output format from file extension: using AVI.\n");
		pOutputFormat = av_guess_format("avi", NULL, NULL);
	}
	if (NULL == pOutputFormat)
	{
		printf("Could not find suitable output format.\n");
		return false;
	}
	pOutputFormat->video_codec = mOutputCodecId;
	// Allocate the output media context
	mOutputFormatCtxPtr = avformat_alloc_context();
	if (NULL == mOutputFormatCtxPtr)
	{
		printf("Could not allocate format context.\n");
		return false;
	}
	mOutputFormatCtxPtr->oformat = pOutputFormat;
	strcpy_s(mOutputFormatCtxPtr->filename, sizeof(mOutputFormatCtxPtr->filename), outputMediaName);
	// Find the video encoder
	AVCodec *pOutputCodec = avcodec_find_encoder(pOutputFormat->video_codec);
	if (NULL == pOutputCodec)
	{
		printf("Could not find encoder.\n");
		return false;
	}
	// Open the output stream
	mOutputStreamPtr = avformat_new_stream(mOutputFormatCtxPtr, pOutputCodec);
	if (NULL == mOutputStreamPtr)
	{
		printf("Could not allocate stream.\n");
		return false;
	}
	// Set codec context params
	mOutputCodecCtxPtr = mOutputStreamPtr->codec;
	mOutputCodecCtxPtr->bit_rate = mOutputBitRate;
	mOutputCodecCtxPtr->width = mOutputWidth;
	mOutputCodecCtxPtr->height = mOutputHeight;
	mOutputCodecCtxPtr->time_base.den = mOutputFrameRate;
	mOutputCodecCtxPtr->time_base.num = 1;
	mOutputCodecCtxPtr->gop_size = 12;
	if (NULL != pOutputCodec->pix_fmts)
	{
		mOutputPixFmt = pOutputCodec->pix_fmts[0];
		mOutputCodecCtxPtr->pix_fmt = mOutputPixFmt;
	}
	else
	{
		mOutputPixFmt = DEFAULT_PIX_FMT;
		mOutputCodecCtxPtr->pix_fmt = mOutputPixFmt;
	}
	if (mOutputCodecCtxPtr->codec_id == AV_CODEC_ID_MPEG1VIDEO) 
	{
        /* Needed to avoid using macroblocks in which some coeffs overflow.
         * This does not happen with normal video, it just happens here as
         * the motion of the chroma plane does not match the luma plane. */
        mOutputCodecCtxPtr->mb_decision = 2;
    }
    /* Some formats want stream headers to be separate. */
    if (mOutputFormatCtxPtr->oformat->flags & AVFMT_GLOBALHEADER)
        mOutputCodecCtxPtr->flags |= CODEC_FLAG_GLOBAL_HEADER; /* codec flag, set on the codec context */

	// Open the video encoder
	if (avcodec_open2(mOutputCodecCtxPtr, NULL, NULL) < 0)
	{
		printf("Could not open encoder.\n");
		return false;
	}
	// Allocate output frame
	mOutputFramePtr = avcodec_alloc_frame();
	int frameBuffSize = avpicture_get_size(mOutputPixFmt, mOutputWidth, mOutputHeight);
	uint8_t *frameBuff = (uint8_t*)av_malloc_array(frameBuffSize, sizeof(uint8_t));
	avpicture_fill((AVPicture*)mOutputFramePtr, frameBuff, mOutputPixFmt, mOutputWidth, mOutputHeight);
	// Allocate encode buff
	mEncodeBuffSize = 400000;
	mEncodeBuff = (uint8_t *)av_malloc_array(mEncodeBuffSize, sizeof(uint8_t));

	return true;
}
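
With the output initialized, a per-frame encode step can reuse the buffers allocated above. A sketch with the old avcodec_encode_video() API (MediaConverter's actual conversion loop is not shown):

// Encode one frame from mOutputFramePtr into mEncodeBuff and mux it.
int outSize = avcodec_encode_video(mOutputCodecCtxPtr, mEncodeBuff,
                                   mEncodeBuffSize, mOutputFramePtr);
if (outSize > 0)
{
	AVPacket pkt;
	av_init_packet(&pkt);
	pkt.stream_index = mOutputStreamPtr->index;
	pkt.data = mEncodeBuff;
	pkt.size = outSize;
	if (mOutputCodecCtxPtr->coded_frame && mOutputCodecCtxPtr->coded_frame->key_frame)
		pkt.flags |= AV_PKT_FLAG_KEY;
	av_interleaved_write_frame(mOutputFormatCtxPtr, &pkt);
}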
Пример #22
0
int
film::process ()
{
  int audioSize;
  uint8_t *buffer;
  uint8_t *buffer2;
  int frameFinished;
  int numBytes;
  shot s;
  static struct SwsContext *img_convert_ctx = NULL;

  create_main_dir ();

  string graphpath = this->global_path + "/";
  g = new graph (600, 400, graphpath, threshold, this);
  g->set_title ("Motion quantity");

  /*
   * Register all formats and codecs 
   */
  av_register_all ();

  if (av_open_input_file (&pFormatCtx, input_path.c_str (), NULL, 0, NULL) != 0)
    {
      string error_msg = "Impossible to open file";
      error_msg += input_path;
      shotlog (error_msg);
      return -1;		// Couldn't open file
    }

  /*
   * Retrieve stream information 
   */
  if (av_find_stream_info (pFormatCtx) < 0)
    return -1;			// Couldn't find stream information


  // dump_format (pFormatCtx, 0, path.c_str (), false);
  videoStream = -1;
  audioStream = -1;


  /*
   * Detect streams types 
   */
  for (int j = 0; j < pFormatCtx->nb_streams; j++)
    {
      switch (pFormatCtx->streams[j]->codec->codec_type)
	{
	case AVMEDIA_TYPE_VIDEO:
	  videoStream = j;
	  break;

	case AVMEDIA_TYPE_AUDIO:
	  audioStream = j;
	  break;

	default:
	  break;
	}
    }



  /*
   * Get a pointer to the codec context for the video stream 
   */
  if (audioStream != -1)
    {
      if (audio_set)
        {
          string xml_audio = graphpath + "/" + "audio.xml";
          init_xml (xml_audio);
        }

      pCodecCtxAudio = pFormatCtx->streams[audioStream]->codec;
      pCodecAudio = avcodec_find_decoder (pCodecCtxAudio->codec_id);

      if (pCodecAudio == NULL)
	return -1;		// Codec not found
      if (avcodec_open (pCodecCtxAudio, pCodecAudio) < 0)
	return -1;		// Could not open codec

    }
  update_metadata ();
  /*
   * Find the decoder for the video stream 
   */
  if (videoStream != -1)
    {
      pCodecCtx = pFormatCtx->streams[videoStream]->codec;
      pCodec = avcodec_find_decoder (pCodecCtx->codec_id);

      if (pCodec == NULL)
	return -1;		// Codec not found
      if (avcodec_open (pCodecCtx, pCodec) < 0)
	return -1;		// Could not open codec

      /*
       * Allocate video frame 
       */
      pFrame = avcodec_alloc_frame ();
      pFrameRGB = avcodec_alloc_frame ();
      pFrameRGBprev = avcodec_alloc_frame ();

      /*
       * Determine required buffer size and allocate buffer 
       */
      numBytes = avpicture_get_size (PIX_FMT_RGB24, width, height);

      buffer = (uint8_t *) malloc (sizeof (uint8_t) * numBytes);
      buffer2 = (uint8_t *) malloc (sizeof (uint8_t) * numBytes);

      /*
       * Assign appropriate parts of buffer to image planes in pFrameRGB 
       */
      avpicture_fill ((AVPicture *) pFrameRGB, buffer, PIX_FMT_RGB24, width, height);

      avpicture_fill ((AVPicture *) pFrameRGBprev, buffer2, PIX_FMT_RGB24, width, height);


      /*
       * Set up the first shot
       */
      s.fbegin = 0;
      s.msbegin = 0;
      s.myid = 0;
      shots.push_back (s);



    }


  checknumber = (samplerate * samplearg) / 1000;

  /*
   * Main processing loop over the stream
   */
  this->frame_number = 0;
  while (av_read_frame (pFormatCtx, &packet) >= 0)
    {
      if (packet.stream_index == videoStream)
	{
          AVPacket pkt;
          av_init_packet (&pkt);
          pkt.data = packet.data;
          pkt.size = packet.size;
          avcodec_decode_video2 (pCodecCtx, pFrame, &frameFinished, &pkt);

        if (frameFinished)
	    {
	      // Convert the image into RGB24
	      if (! img_convert_ctx)
		{
		  img_convert_ctx = sws_getContext(width, height, pCodecCtx->pix_fmt,
						   width, height, PIX_FMT_RGB24, SWS_BICUBIC, 
						   NULL, NULL, NULL);
		  if (! img_convert_ctx) 
		  {
		    fprintf(stderr, "Cannot initialize the conversion context!\n");
		    exit(1);
		  }
		}
	      
	      /* API: int sws_scale(SwsContext *c, uint8_t *src, int srcStride[], int srcSliceY, int srcSliceH, uint8_t dst[], int dstStride[] )
	       */
	      sws_scale(img_convert_ctx, pFrame->data, 
			pFrame->linesize, 0, 
			pCodecCtx->height,
			pFrameRGB->data, pFrameRGB->linesize);

	      /*
		Old API doc (cf http://www.dranger.com/ffmpeg/functions.html )
		int img_convert(AVPicture *dst, int dst_pix_fmt,
		                const AVPicture *src, int src_pix_fmt,
				int src_width, int src_height)
	      */
	      /*
	      img_convert ((AVPicture *) pFrameRGB, PIX_FMT_RGB24, (AVPicture *) pFrame, pCodecCtx->pix_fmt, width, height);
	      */

	      this->frame_number++;
	      /* If this is not the first frame */
	      if (this->frame_number > 2)
		{
		  CompareFrame (pFrameRGB, pFrameRGBprev);
		}
	      else
		{
		  /*
		   * First frame: always create the first image in this case
		   */
		  image *begin_i = new image (this, width, height, s.myid, BEGIN);
		  begin_i->create_img_dir ();
		  begin_i->SaveFrame (pFrameRGB);
		  shots.back ().img_begin = begin_i;
		}
	     memcpy (buffer2, buffer, numBytes);
	    }
	}
      if (audio_set && (packet.stream_index == audioStream))
	{
	  process_audio ();
	}
      /*
       * Free the packet that was allocated by av_read_frame 
       */
      if (packet.data != NULL)
	av_free_packet (&packet);
    }

  if (videoStream != -1)
    {
      /* Set up the last frame */
      int lastFrame = this->frame_number;
      shots.back ().fduration = lastFrame - shots.back ().fbegin;
      shots.back ().msduration = int (((shots.back ().fduration) * 1000) / fps);
      duration.mstotal = int (shots.back ().msduration + shots.back ().msbegin);
      image *end_i = new image (this, width, height, shots.back ().myid, END);
      end_i->SaveFrame (pFrameRGB);
      shots.back ().img_end = end_i;

      /*
       * Graph of the motion quantity
       */
      g->init_gd ();
      g->draw_all_canvas ();
      g->draw_color_datas ();
      g->draw_datas ();
      if (video_set)
	{
	  string xml_color = graphpath + "/" + "video.xml";
	  g->write_xml (xml_color);
	}
      g->save ();

      /*
       * Free the RGB images 
       */
      free (buffer);
      free (buffer2);
      av_free (pFrameRGB);
      av_free (pFrame);
      av_free (pFrameRGBprev);
      avcodec_close (pCodecCtx);
    }
  /*
   * Close the codec 
   */
  if (audioStream != -1)
    {
        /* Close the xml file */
      if (audio_set) close_xml ();
      avcodec_close (pCodecCtxAudio);
    }

  /*
   * Close the video file 
   */
  av_close_input_file (pFormatCtx);


  return 0;
}
Пример #23
0
DummySink::DummySink(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId)
  : MediaSink(env),
    fSubsession(subsession) {
  fStreamId = strDup(streamId);
  fReceiveBuffer = new u_int8_t[DUMMY_SINK_RECEIVE_BUFFER_SIZE];
  fReceiveBufferAV = new u_int8_t[DUMMY_SINK_RECEIVE_BUFFER_SIZE+4];
  fReceiveBufferAV[0] = 0;
  fReceiveBufferAV[1] = 0;
  fReceiveBufferAV[2] = 0;
  fReceiveBufferAV[3] = 1;


	av_init_packet(&avpkt);
	avpkt.flags |= AV_PKT_FLAG_KEY;
	avpkt.pts = avpkt.dts = 0;

	/* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
	memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);

	//codec = avcodec_find_decoder(CODEC_ID_MPEG1VIDEO);
	codec = avcodec_find_decoder(CODEC_ID_H264);
	if (!codec) {
		envir() << "codec not found!";
		exit(4);
	}

	c = avcodec_alloc_context3(codec);
	picture = avcodec_alloc_frame();

	if (codec->capabilities & CODEC_CAP_TRUNCATED) {
		c->flags |= CODEC_FLAG_TRUNCATED; // we do not send complete frames
	}
	
	c->width = 640;
	c->height = 360;
	c->pix_fmt = PIX_FMT_YUV420P;

	/* for some codecs width and height MUST be initialized here because this info is not available in the bitstream */

	if (avcodec_open2(c,codec,NULL) < 0) {
		envir() << "could not open codec";
		exit(5);
	}


	//SDL init
	if (
		SDL_Init(
			SDL_INIT_VIDEO |
			SDL_INIT_AUDIO |
			SDL_INIT_TIMER
		)
	) {
		envir() << "Could not initialize SDL - " << SDL_GetError() << "\n";
		exit (10);
	}


	screen = SDL_SetVideoMode(
		c->width,
		c->height,
		24, 
		0
	);
	if (!screen) {
		envir() << "SDL: could not set video mode - exiting\n";
		exit(11);
	}

	bmp = SDL_CreateYUVOverlay(
		c->width,
		c->height,
		SDL_YV12_OVERLAY,
		screen
	);
}
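
The constructor leaves bytes 0..3 of fReceiveBufferAV holding the Annex-B start code 00 00 00 01, so a frame handler only has to copy a received NAL unit right behind it before decoding. A sketch (the class's real afterGettingFrame() is not shown; frameSize is a hypothetical parameter):

	memcpy(fReceiveBufferAV + 4, fReceiveBuffer, frameSize); // NAL unit after the start code
	avpkt.data = fReceiveBufferAV;
	avpkt.size = frameSize + 4;
	int gotPicture = 0;
	if (avcodec_decode_video2(c, picture, &gotPicture, &avpkt) >= 0 && gotPicture) {
		// picture now holds a decoded YUV420P frame for the SDL overlay
	}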
Пример #24
0
static void handle_packet(struct vidsrc_st *st, AVPacket *pkt)
{
	AVFrame *frame = NULL;
	struct vidframe vf;
	struct vidsz sz;
	unsigned i;

	if (st->codec) {
		int got_pict, ret;

#if LIBAVUTIL_VERSION_INT >= ((52<<16)+(20<<8)+100)
		frame = av_frame_alloc();
#else
		frame = avcodec_alloc_frame();
#endif

#if LIBAVCODEC_VERSION_INT >= ((57<<16)+(37<<8)+100)

		ret = avcodec_send_packet(st->ctx, pkt);
		if (ret < 0)
			goto out;

		ret = avcodec_receive_frame(st->ctx, frame);
		if (ret < 0)
			goto out;

		got_pict = true;

#elif LIBAVCODEC_VERSION_INT <= ((52<<16)+(23<<8)+0)
		ret = avcodec_decode_video(st->ctx, frame, &got_pict,
					   pkt->data, pkt->size);
#else
		ret = avcodec_decode_video2(st->ctx, frame,
					    &got_pict, pkt);
#endif
		if (ret < 0 || !got_pict)
			goto out;

		sz.w = st->ctx->width;
		sz.h = st->ctx->height;

		/* check if size changed */
		if (!vidsz_cmp(&sz, &st->sz)) {
			info("avformat: size changed: %d x %d  ---> %d x %d\n",
			     st->sz.w, st->sz.h, sz.w, sz.h);
			st->sz = sz;
		}
	}
	else {
		/* No-codec option is not supported */
		return;
	}

#if LIBAVCODEC_VERSION_INT >= ((53<<16)+(5<<8)+0)
	switch (frame->format) {

	case AV_PIX_FMT_YUV420P:
	case AV_PIX_FMT_YUVJ420P:
		vf.fmt = VID_FMT_YUV420P;
		break;

	default:
		warning("avformat: decode: bad pixel format"
			" (%i) (%s)\n",
			frame->format,
			av_get_pix_fmt_name(frame->format));
		goto out;
	}
#else
	vf.fmt = VID_FMT_YUV420P;
#endif

	vf.size = sz;
	for (i=0; i<4; i++) {
		vf.data[i]     = frame->data[i];
		vf.linesize[i] = frame->linesize[i];
	}

	st->frameh(&vf, st->arg);

 out:
	if (frame) {
#if LIBAVUTIL_VERSION_INT >= ((52<<16)+(20<<8)+100)
		av_frame_free(&frame);
#else
		av_free(frame);
#endif
	}
}
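
The raw integer guards above pack (major, minor, micro) the same way FFmpeg's AV_VERSION_INT macro does: major<<16 | minor<<8 | micro. A quick standalone check:

#include <stdio.h>

#define VERSION_INT(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
    /* 52.20.100 is the libavutil cutoff used in the guards above */
    printf("%d\n", VERSION_INT(52, 20, 100) == ((52<<16)+(20<<8)+100)); /* prints 1 */
    return 0;
}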
Пример #25
0
static av_cold int Faac_encode_init(AVCodecContext *avctx)
{
    FaacAudioContext *s = avctx->priv_data;
    faacEncConfigurationPtr faac_cfg;
    unsigned long samples_input, max_bytes_output;
    int ret;

    /* number of channels */
    if (avctx->channels < 1 || avctx->channels > 6) {
        av_log(avctx, AV_LOG_ERROR, "encoding %d channel(s) is not allowed\n", avctx->channels);
        ret = AVERROR(EINVAL);
        goto error;
    }

    s->faac_handle = faacEncOpen(avctx->sample_rate,
                                 avctx->channels,
                                 &samples_input, &max_bytes_output);
    if (!s->faac_handle) {
        av_log(avctx, AV_LOG_ERROR, "error in faacEncOpen()\n");
        ret = AVERROR_UNKNOWN;
        goto error;
    }

    /* check faac version */
    faac_cfg = faacEncGetCurrentConfiguration(s->faac_handle);
    if (faac_cfg->version != FAAC_CFG_VERSION) {
        av_log(avctx, AV_LOG_ERROR, "wrong libfaac version (compiled for: %d, using %d)\n", FAAC_CFG_VERSION, faac_cfg->version);
        ret = AVERROR(EINVAL);
        goto error;
    }

    /* put the options in the configuration struct */
    switch(avctx->profile) {
        case FF_PROFILE_AAC_MAIN:
            faac_cfg->aacObjectType = MAIN;
            break;
        case FF_PROFILE_UNKNOWN:
        case FF_PROFILE_AAC_LOW:
            faac_cfg->aacObjectType = LOW;
            break;
        case FF_PROFILE_AAC_SSR:
            faac_cfg->aacObjectType = SSR;
            break;
        case FF_PROFILE_AAC_LTP:
            faac_cfg->aacObjectType = LTP;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "invalid AAC profile\n");
            ret = AVERROR(EINVAL);
            goto error;
    }
    faac_cfg->mpegVersion = MPEG4;
    faac_cfg->useTns = 0;
    faac_cfg->allowMidside = 1;
    faac_cfg->bitRate = avctx->bit_rate / avctx->channels;
    faac_cfg->bandWidth = avctx->cutoff;
    if(avctx->flags & CODEC_FLAG_QSCALE) {
        faac_cfg->bitRate = 0;
        faac_cfg->quantqual = avctx->global_quality / FF_QP2LAMBDA;
    }
    faac_cfg->outputFormat = 1;
    faac_cfg->inputFormat = FAAC_INPUT_16BIT;
    if (avctx->channels > 2)
        memcpy(faac_cfg->channel_map, channel_maps[avctx->channels-3],
               avctx->channels * sizeof(int));

    avctx->frame_size = samples_input / avctx->channels;

#if FF_API_OLD_ENCODE_AUDIO
    avctx->coded_frame= avcodec_alloc_frame();
    if (!avctx->coded_frame) {
        ret = AVERROR(ENOMEM);
        goto error;
    }
#endif

    /* Set decoder specific info */
    avctx->extradata_size = 0;
    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {

        unsigned char *buffer = NULL;
        unsigned long decoder_specific_info_size;

        if (!faacEncGetDecoderSpecificInfo(s->faac_handle, &buffer,
                                           &decoder_specific_info_size)) {
            avctx->extradata = av_malloc(decoder_specific_info_size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!avctx->extradata) {
                ret = AVERROR(ENOMEM);
                goto error;
            }
            avctx->extradata_size = decoder_specific_info_size;
            memcpy(avctx->extradata, buffer, avctx->extradata_size);
            faac_cfg->outputFormat = 0;
        }
        free(buffer);
    }

    if (!faacEncSetConfiguration(s->faac_handle, faac_cfg)) {
        av_log(avctx, AV_LOG_ERROR, "libfaac doesn't support this output format!\n");
        ret = AVERROR(EINVAL);
        goto error;
    }

    avctx->delay = FAAC_DELAY_SAMPLES;
    ff_af_queue_init(avctx, &s->afq);

    return 0;
error:
    Faac_encode_close(avctx);
    return ret;
}
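
Note that libfaac's bitRate configuration field is per channel, which is why the context bit rate is divided by the channel count above: a 128000 b/s stereo request becomes 64000 per channel. A trivial check of the arithmetic (example values only):

#include <stdio.h>

int main(void)
{
    int bit_rate = 128000, channels = 2; /* example request */
    printf("%d b/s per channel\n", bit_rate / channels); /* prints 64000 */
    return 0;
}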
Пример #26
0
// init driver
static int init(sh_video_t *sh){
    AVCodecContext *avctx;
    vd_ffmpeg_ctx *ctx;
    AVCodec *lavc_codec;
    int lowres_w=0;
    int do_vis_debug= lavc_param_vismv || (lavc_param_debug&(FF_DEBUG_VIS_MB_TYPE|FF_DEBUG_VIS_QP));
    // slice is rather broken with threads, so disable that combination unless
    // explicitly requested
    int use_slices = vd_use_slices > 0 || (vd_use_slices <  0 && lavc_param_threads <= 1);
    AVDictionary *opts = NULL;

    init_avcodec();

    ctx = sh->context = malloc(sizeof(vd_ffmpeg_ctx));
    if (!ctx)
        return 0;
    memset(ctx, 0, sizeof(vd_ffmpeg_ctx));

    lavc_codec = avcodec_find_decoder_by_name(sh->codec->dll);
    if(!lavc_codec){
        mp_msg(MSGT_DECVIDEO, MSGL_ERR, MSGTR_MissingLAVCcodec, sh->codec->dll);
        uninit(sh);
        return 0;
    }

    if (auto_threads) {
#if (defined(__MINGW32__) && HAVE_PTHREADS)
        lavc_param_threads = pthread_num_processors_np();
#elif defined(__linux__)
        /* Code stolen from x264 :) */
        unsigned int bit;
        int np;
        cpu_set_t p_aff;
        memset( &p_aff, 0, sizeof(p_aff) );
        sched_getaffinity( 0, sizeof(p_aff), &p_aff );
        for( np = 0, bit = 0; bit < sizeof(p_aff); bit++ )
            np += (((uint8_t *)&p_aff)[bit / 8] >> (bit % 8)) & 1;

        lavc_param_threads = np;
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__) || defined(__APPLE__) || defined (__NetBSD__) || defined(__OpenBSD__)
        /* Code stolen from x264 :) */
        int numberOfCPUs;
        size_t length = sizeof( numberOfCPUs );
#if defined (__NetBSD__) || defined(__OpenBSD__)
        int mib[2] = { CTL_HW, HW_NCPU };
        if( sysctl(mib, 2, &numberOfCPUs, &length, NULL, 0) )
#else
        if( sysctlbyname("hw.ncpu", &numberOfCPUs, &length, NULL, 0) )
#endif
        {
            numberOfCPUs = 1;
        }
        lavc_param_threads = numberOfCPUs;
#endif
        if(lavc_codec->id == CODEC_ID_H264 || lavc_codec->id == CODEC_ID_FFH264 
		 || lavc_codec->id == CODEC_ID_MPEG2TS || lavc_codec->id == CODEC_ID_MPEG2VIDEO_XVMC) {
            if (lavc_param_threads > 16) lavc_param_threads = 16;
            if (lavc_param_threads > 1 && (lavc_codec->id == CODEC_ID_MPEG2VIDEO ||
              lavc_codec->id == CODEC_ID_MPEG2TS || lavc_codec->id == CODEC_ID_MPEG2VIDEO_XVMC))
                vd_use_slices = 0;
            mp_msg(MSGT_DECVIDEO, MSGL_INFO, "Spawning %d decoding thread%s...\n", lavc_param_threads, lavc_param_threads == 1 ? "" : "s");
        } else {
            lavc_param_threads = 1;
        }
    }

    if(use_slices && (lavc_codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND) && !do_vis_debug)
        ctx->do_slices=1;

    if(lavc_codec->capabilities&CODEC_CAP_DR1 && !do_vis_debug && lavc_codec->id != CODEC_ID_INTERPLAY_VIDEO && lavc_codec->id != CODEC_ID_VP8)
        ctx->do_dr1=1;
    ctx->nonref_dr = lavc_codec->id == CODEC_ID_H264;
    ctx->ip_count= ctx->b_count= 0;

    ctx->pic = avcodec_alloc_frame();
    ctx->avctx = avcodec_alloc_context3(lavc_codec);
    avctx = ctx->avctx;
    avctx->opaque = sh;
    avctx->codec_id = lavc_codec->id;

    avctx->get_format = get_format;
    if(ctx->do_dr1){
        avctx->flags|= CODEC_FLAG_EMU_EDGE;
        avctx->    get_buffer=     get_buffer;
        avctx->release_buffer= release_buffer;
        avctx->  reget_buffer=     get_buffer;
    }

    avctx->flags|= lavc_param_bitexact;

    avctx->coded_width = sh->disp_w;
    avctx->coded_height= sh->disp_h;
    avctx->workaround_bugs= lavc_param_workaround_bugs;
    switch (lavc_param_error_resilience) {
    case 5:
        avctx->err_recognition |= AV_EF_EXPLODE | AV_EF_COMPLIANT | AV_EF_CAREFUL;
        break;
    case 4:
    case 3:
        avctx->err_recognition |= AV_EF_AGGRESSIVE;
        // Fallthrough
    case 2:
        avctx->err_recognition |= AV_EF_COMPLIANT;
        // Fallthrough
    case 1:
        avctx->err_recognition |= AV_EF_CAREFUL;
    }
    if (lavc_param_gray) avctx->flags |= CODEC_FLAG_GRAY;
#ifdef CODEC_FLAG2_SHOW_ALL
    if(!lavc_param_wait_keyframe) avctx->flags2 |= CODEC_FLAG2_SHOW_ALL;
#endif
    avctx->flags2|= lavc_param_fast;
    avctx->codec_tag= sh->format;
    avctx->stream_codec_tag= sh->video.fccHandler;
    avctx->idct_algo= lavc_param_idct_algo;
    avctx->error_concealment= lavc_param_error_concealment;
    avctx->debug= lavc_param_debug;
    if (lavc_param_debug)
        av_log_set_level(AV_LOG_DEBUG);
    avctx->debug_mv= lavc_param_vismv;
    avctx->skip_top   = lavc_param_skip_top;
    avctx->skip_bottom= lavc_param_skip_bottom;
    if(lavc_param_lowres_str != NULL)
    {
        sscanf(lavc_param_lowres_str, "%d,%d", &lavc_param_lowres, &lowres_w);
        if(lavc_param_lowres < 1 || lavc_param_lowres > 16 || (lowres_w > 0 && avctx->width < lowres_w))
            lavc_param_lowres = 0;
        avctx->lowres = lavc_param_lowres;
    }
    avctx->skip_loop_filter = str2AVDiscard(lavc_param_skip_loop_filter_str);
    avctx->skip_idct        = str2AVDiscard(lavc_param_skip_idct_str);
    avctx->skip_frame       = str2AVDiscard(lavc_param_skip_frame_str);

    if(lavc_avopt){
        if (parse_avopts(avctx, lavc_avopt) < 0 &&
            (!lavc_codec->priv_class ||
             parse_avopts(avctx->priv_data, lavc_avopt) < 0)) {
            mp_msg(MSGT_DECVIDEO, MSGL_ERR, "Your options /%s/ look like gibberish to me pal\n", lavc_avopt);
            uninit(sh);
            return 0;
        }
    }

    skip_idct = avctx->skip_idct;
    skip_frame = avctx->skip_frame;

    mp_dbg(MSGT_DECVIDEO, MSGL_DBG2, "libavcodec.size: %d x %d\n", avctx->width, avctx->height);
    switch (sh->format) {
    case mmioFOURCC('S','V','Q','3'):
    /* SVQ3 extradata can show up as sh->ImageDesc if demux_mov is used, or
       in the phony AVI header if demux_lavf is used. The first case is
       handled here; the second case falls through to the next section. */
        if (sh->ImageDesc) {
            avctx->extradata_size = (*(int *)sh->ImageDesc) - sizeof(int);
            avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
            memcpy(avctx->extradata, ((int *)sh->ImageDesc)+1, avctx->extradata_size);
            break;
        }
        /* fallthrough */

    case mmioFOURCC('A','V','R','n'):
    case mmioFOURCC('M','J','P','G'):
    /* AVRn stores huffman table in AVI header */
    /* Pegasus MJPEG stores it also in AVI header, but it uses the common
       MJPG fourcc :( */
        if (!sh->bih || sh->bih->biSize <= sizeof(*sh->bih))
            break;
        av_dict_set(&opts, "extern_huff", "1", 0);
        avctx->extradata_size = sh->bih->biSize-sizeof(*sh->bih);
        avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        memcpy(avctx->extradata, sh->bih+1, avctx->extradata_size);

#if 0
        {
            int x;
            uint8_t *p = avctx->extradata;

            for (x=0; x<avctx->extradata_size; x++)
                mp_msg(MSGT_DECVIDEO, MSGL_INFO, "[%x] ", p[x]);
            mp_msg(MSGT_DECVIDEO, MSGL_INFO, "\n");
        }
#endif
        break;

    case mmioFOURCC('R', 'V', '1', '0'):
    case mmioFOURCC('R', 'V', '1', '3'):
    case mmioFOURCC('R', 'V', '2', '0'):
    case mmioFOURCC('R', 'V', '3', '0'):
    case mmioFOURCC('R', 'V', '4', '0'):
        if(sh->bih->biSize<sizeof(*sh->bih)+8){
            /* only 1 packet per frame & sub_id from fourcc */
            avctx->extradata_size= 8;
            avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
            ((uint32_t *)avctx->extradata)[0] = 0;
            ((uint32_t *)avctx->extradata)[1] =
                (sh->format == mmioFOURCC('R', 'V', '1', '3')) ? 0x10003001 : 0x10000000;
        } else {
            /* has extra slice header (demux_rm or rm->avi streamcopy) */
            avctx->extradata_size = sh->bih->biSize-sizeof(*sh->bih);
            avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
            memcpy(avctx->extradata, sh->bih+1, avctx->extradata_size);
        }
        avctx->sub_id= AV_RB32(avctx->extradata+4);

//        printf("%X %X %d %d\n", extrahdr[0], extrahdr[1]);
        break;

    default:
        if (!sh->bih || sh->bih->biSize <= sizeof(*sh->bih))
            break;
        avctx->extradata_size = sh->bih->biSize-sizeof(*sh->bih);
        avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        memcpy(avctx->extradata, sh->bih+1, avctx->extradata_size);
        break;
    }

    if(sh->bih)
        avctx->bits_per_coded_sample= sh->bih->biBitCount;

    avctx->thread_count = lavc_param_threads;
    avctx->thread_type = FF_THREAD_FRAME | FF_THREAD_SLICE;
    if(lavc_codec->capabilities & CODEC_CAP_HWACCEL)
        // HACK around badly placed checks in mpeg_mc_decode_init
        set_format_params(avctx, PIX_FMT_XVMC_MPEG2_IDCT);

    /* open it */
    if (avcodec_open2(avctx, lavc_codec, &opts) < 0) {
        mp_msg(MSGT_DECVIDEO, MSGL_ERR, MSGTR_CantOpenCodec);
        uninit(sh);
        return 0;
    }
    av_dict_free(&opts);
    // this is necessary in case get_format was never called and init_vo is
    // too late e.g. for H.264 VDPAU
    set_format_params(avctx, avctx->pix_fmt);
    mp_msg(MSGT_DECVIDEO, MSGL_V, "INFO: libavcodec init OK!\n");
    return 1; //mpcodecs_config_vo(sh, sh->disp_w, sh->disp_h, IMGFMT_YV12);
}
Пример #27
0
int tdav_codec_h264_open_encoder(tdav_codec_h264_t* self)
{
#if HAVE_FFMPEG
	int ret;
	tsk_size_t size;

	if(self->encoder.context){
		TSK_DEBUG_ERROR("Encoder already opened");
		return -1;
	}
    
#if (LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 35, 0))
    if((self->encoder.context = avcodec_alloc_context3(self->encoder.codec))){
        avcodec_get_context_defaults3(self->encoder.context, self->encoder.codec);
    }
#else
    if((self->encoder.context = avcodec_alloc_context())){
        avcodec_get_context_defaults(self->encoder.context);
    }
#endif
    
    if(!self->encoder.context){
        TSK_DEBUG_ERROR("Failed to allocate context");
		return -1;
    }

#if TDAV_UNDER_X86 && LIBAVCODEC_VERSION_MAJOR <= 53
	self->encoder.context->dsp_mask = (FF_MM_MMX | FF_MM_MMXEXT | FF_MM_SSE);
#endif

	self->encoder.context->pix_fmt		= PIX_FMT_YUV420P;
	self->encoder.context->time_base.num  = 1;
	self->encoder.context->time_base.den  = TMEDIA_CODEC_VIDEO(self)->out.fps;
	self->encoder.context->width = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.height : TMEDIA_CODEC_VIDEO(self)->out.width;
	self->encoder.context->height = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.width : TMEDIA_CODEC_VIDEO(self)->out.height;
	self->encoder.max_bw_kpbs = TSK_CLAMP(
		0,
		tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height, TMEDIA_CODEC_VIDEO(self)->out.fps), 
		TMEDIA_CODEC(self)->bandwidth_max_upload
	);
	self->encoder.context->bit_rate = (self->encoder.max_bw_kpbs * 1024);// bps

	self->encoder.context->rc_min_rate = (self->encoder.context->bit_rate >> 3);
	self->encoder.context->rc_max_rate = self->encoder.context->bit_rate;

#if LIBAVCODEC_VERSION_MAJOR <= 53
	self->encoder.context->rc_lookahead = 0;
#endif
	self->encoder.context->global_quality = FF_QP2LAMBDA * self->encoder.quality;
	
#if LIBAVCODEC_VERSION_MAJOR <= 53
    self->encoder.context->partitions = X264_PART_I4X4 | X264_PART_I8X8 | X264_PART_P8X8 | X264_PART_B8X8;
#endif
    self->encoder.context->me_method = ME_UMH;
	self->encoder.context->me_range = 16;
	self->encoder.context->qmin = 10;
	self->encoder.context->qmax = 51;
#if LIBAVCODEC_VERSION_MAJOR <= 53
    self->encoder.context->mb_qmin = self->encoder.context->qmin;
	self->encoder.context->mb_qmax = self->encoder.context->qmax;
#endif
	/* METROPOLIS = G2J.COM TelePresence client. Check Issue 378: No video when calling "TANDBERG/4129 (X8.1.1)" */
#if !METROPOLIS  && 0
	self->encoder.context->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
    self->encoder.context->flags |= CODEC_FLAG_LOW_DELAY;
	switch(TDAV_CODEC_H264_COMMON(self)->profile){
		case profile_idc_baseline:
		default:
			self->encoder.context->profile = FF_PROFILE_H264_BASELINE;
			self->encoder.context->level = TDAV_CODEC_H264_COMMON(self)->level;
			break;
		case profile_idc_main:
			self->encoder.context->profile = FF_PROFILE_H264_MAIN;
			self->encoder.context->level = TDAV_CODEC_H264_COMMON(self)->level;
			break;
	}
	/* must run after the switch above: context->profile is only set there */
	if (self->encoder.context->profile == FF_PROFILE_H264_BASELINE) {
		self->encoder.context->max_b_frames = 0; /* baseline profile forbids B-frames */
	}
	
	/* Comment from libavcodec/libx264.c:
     * Allow x264 to be instructed through AVCodecContext about the maximum
     * size of the RTP payload. For example, this enables the production of
     * payload suitable for the H.264 RTP packetization-mode 0 i.e. single
     * NAL unit per RTP packet.
     */
	self->encoder.context->rtp_payload_size = H264_RTP_PAYLOAD_SIZE;
	self->encoder.context->opaque = tsk_null;
	self->encoder.context->gop_size = (TMEDIA_CODEC_VIDEO(self)->out.fps * TDAV_H264_GOP_SIZE_IN_SECONDS);
	
#if (LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 35, 0))
    if((ret = av_opt_set_int(self->encoder.context->priv_data, "slice-max-size", H264_RTP_PAYLOAD_SIZE, 0))){
	    TSK_DEBUG_ERROR("Failed to set x264 slice-max-size to %d", H264_RTP_PAYLOAD_SIZE);
	}
    if((ret = av_opt_set(self->encoder.context->priv_data, "profile", (self->encoder.context->profile == FF_PROFILE_H264_BASELINE ? "baseline" : "main"), 0))){
	    TSK_DEBUG_ERROR("Failed to set x264 profile");
	}
    if((ret = av_opt_set(self->encoder.context->priv_data, "preset", "veryfast", 0))){
	    TSK_DEBUG_ERROR("Failed to set x264 preset to veryfast");
	}
    if((ret = av_opt_set_int(self->encoder.context->priv_data, "rc-lookahead", 0, 0)) && (ret = av_opt_set_int(self->encoder.context->priv_data, "rc_lookahead", 0, 0))){
        TSK_DEBUG_ERROR("Failed to set x264 rc_lookahead=0");
    }
	if((ret = av_opt_set(self->encoder.context->priv_data, "tune", "animation+zerolatency", 0))){
	    TSK_DEBUG_ERROR("Failed to set x264 tune to zerolatency");
	}
#endif

	// Picture (YUV 420)
	if(!(self->encoder.picture = avcodec_alloc_frame())){
		TSK_DEBUG_ERROR("Failed to create encoder picture");
		return -2;
	}
	avcodec_get_frame_defaults(self->encoder.picture);
	

	size = avpicture_get_size(PIX_FMT_YUV420P, self->encoder.context->width, self->encoder.context->height);
	if(!(self->encoder.buffer = tsk_calloc(size, sizeof(uint8_t)))){
		TSK_DEBUG_ERROR("Failed to allocate encoder buffer");
		return -2;
	}

	// Open encoder
	if((ret = avcodec_open(self->encoder.context, self->encoder.codec)) < 0){
		TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(self)->plugin->desc);
		return ret;
	}
    
    self->encoder.frame_count = 0;

	TSK_DEBUG_INFO("[H.264] bitrate=%d bps", self->encoder.context->bit_rate);

	return ret;
#elif HAVE_H264_PASSTHROUGH
    self->encoder.frame_count = 0;
	return 0;
#endif

	TSK_DEBUG_ERROR("Not expected code called");
	return -1;
}
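/*
 * Sketch (hypothetical helper, not from the original file): the x264
 * private-option pattern used above. Options such as "preset", "tune" and
 * "rc-lookahead" live in the encoder's priv_data and are set through
 * av_opt_set()/av_opt_set_int(); the av_opt_* calls return 0 on success.
 */
static int x264_low_delay_opts_sketch(AVCodecContext *ctx)
{
    int ret;
    if ((ret = av_opt_set(ctx->priv_data, "preset", "veryfast", 0)))
        return ret;
    if ((ret = av_opt_set(ctx->priv_data, "tune", "zerolatency", 0)))
        return ret;
    /* disable lookahead for low latency, matching the encoder above */
    return av_opt_set_int(ctx->priv_data, "rc-lookahead", 0, 0);
}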
Example #28
static av_cold int Faac_encode_init(AVCodecContext *avctx)
{
    FaacAudioContext *s = avctx->priv_data;
    faacEncConfigurationPtr faac_cfg;
    unsigned long samples_input, max_bytes_output;

    /* number of channels */
    if (avctx->channels < 1 || avctx->channels > 6)
        return -1;

    s->faac_handle = faacEncOpen(avctx->sample_rate,
                                 avctx->channels,
                                 &samples_input, &max_bytes_output);
    if (!s->faac_handle) {
        av_log(avctx, AV_LOG_ERROR, "error in faacEncOpen()\n");
        return -1;
    }

    /* check faac version */
    faac_cfg = faacEncGetCurrentConfiguration(s->faac_handle);
    if (faac_cfg->version != FAAC_CFG_VERSION) {
        av_log(avctx, AV_LOG_ERROR, "wrong libfaac version (compiled for: %d, using %d)\n", FAAC_CFG_VERSION, faac_cfg->version);
        faacEncClose(s->faac_handle);
        return -1;
    }

    /* put the options in the configuration struct */
    switch(avctx->profile) {
        case FF_PROFILE_AAC_MAIN:
            faac_cfg->aacObjectType = MAIN;
            break;
        case FF_PROFILE_UNKNOWN:
        case FF_PROFILE_AAC_LOW:
            faac_cfg->aacObjectType = LOW;
            break;
        case FF_PROFILE_AAC_SSR:
            faac_cfg->aacObjectType = SSR;
            break;
        case FF_PROFILE_AAC_LTP:
            faac_cfg->aacObjectType = LTP;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "invalid AAC profile\n");
            faacEncClose(s->faac_handle);
            return -1;
    }
    faac_cfg->mpegVersion = MPEG4;
    faac_cfg->useTns = 0;
    faac_cfg->allowMidside = 1;
    faac_cfg->bitRate = avctx->bit_rate / avctx->channels;
    faac_cfg->bandWidth = avctx->cutoff;
    if(avctx->flags & CODEC_FLAG_QSCALE) {
        faac_cfg->bitRate = 0;
        faac_cfg->quantqual = avctx->global_quality / FF_QP2LAMBDA;
    }
    faac_cfg->outputFormat = 1;
    faac_cfg->inputFormat = FAAC_INPUT_16BIT;

    avctx->frame_size = samples_input / avctx->channels;

    avctx->coded_frame= avcodec_alloc_frame();
    avctx->coded_frame->key_frame= 1;

    /* Set decoder specific info */
    avctx->extradata_size = 0;
    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {

        unsigned char *buffer = NULL;
        unsigned long decoder_specific_info_size;

        if (!faacEncGetDecoderSpecificInfo(s->faac_handle, &buffer,
                                           &decoder_specific_info_size)) {
            avctx->extradata = av_malloc(decoder_specific_info_size + FF_INPUT_BUFFER_PADDING_SIZE);
            avctx->extradata_size = decoder_specific_info_size;
            memcpy(avctx->extradata, buffer, avctx->extradata_size);
            faac_cfg->outputFormat = 0;
        }
#undef free
        free(buffer);
#define free please_use_av_free
    }

    if (!faacEncSetConfiguration(s->faac_handle, faac_cfg)) {
        av_log(avctx, AV_LOG_ERROR, "libfaac doesn't support this output format!\n");
        faacEncClose(s->faac_handle);
        return -1;
    }

    return 0;
}
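/*
 * Sketch (assumed usage, not from the original example): one step of the
 * libfaac encode loop matching the handle opened in Faac_encode_init().
 * faacEncEncode() consumes exactly samples_input 16-bit samples and returns
 * the number of bytes written, 0 while the encoder is still buffering input,
 * or a negative value on error.
 */
static int faac_encode_step(faacEncHandle h,
                            int32_t *pcm, unsigned long samples_input,
                            unsigned char *out, unsigned long max_bytes_output)
{
    return faacEncEncode(h, pcm, (unsigned int)samples_input,
                         out, (unsigned int)max_bytes_output);
}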
Example #29
void *encode_video_thread(void *arg)
{
    INFO("Started encode video thread!");

    av_session_t *_phone = arg;

    _phone->running_encvid = 1;
    //CodecState *cs = get_cs_temp(_phone->av);
    AVPacket pkt1, *packet = &pkt1;
    //int p = 0;
    //int got_packet;
    int video_frame_finished;
    AVFrame *s_video_frame;
    AVFrame *webcam_frame;
    s_video_frame = avcodec_alloc_frame();
    webcam_frame = avcodec_alloc_frame();
    //AVPacket enc_video_packet;

    uint8_t *buffer;
    int numBytes;
    /* Determine required buffer size and allocate buffer */
    numBytes = avpicture_get_size(PIX_FMT_YUV420P, _phone->webcam_decoder_ctx->width, _phone->webcam_decoder_ctx->height);
    buffer = (uint8_t *)av_calloc(numBytes * sizeof(uint8_t), 1);
    avpicture_fill((AVPicture *)s_video_frame, buffer, PIX_FMT_YUV420P, _phone->webcam_decoder_ctx->width,
                   _phone->webcam_decoder_ctx->height);
    _phone->sws_ctx = sws_getContext(_phone->webcam_decoder_ctx->width, _phone->webcam_decoder_ctx->height,
                                     _phone->webcam_decoder_ctx->pix_fmt, _phone->webcam_decoder_ctx->width, _phone->webcam_decoder_ctx->height,
                                     PIX_FMT_YUV420P,
                                     SWS_BILINEAR, NULL, NULL, NULL);


    vpx_image_t *image =
        vpx_img_alloc(NULL, VPX_IMG_FMT_I420, _phone->webcam_decoder_ctx->width, _phone->webcam_decoder_ctx->height, 1);

    //uint32_t frame_counter = 0;
    while (_phone->running_encvid) {

        if (av_read_frame(_phone->video_format_ctx, packet) < 0) {
            printf("error reading frame\n");

            if (_phone->video_format_ctx->pb->error != 0)
                break;

            continue;
        }

        if (packet->stream_index == _phone->video_stream) {
            if (avcodec_decode_video2(_phone->webcam_decoder_ctx, webcam_frame, &video_frame_finished, packet) < 0) {
                printf("couldn't decode\n");
                av_free_packet(packet); /* release the packet before skipping it */
                continue;
            }

            av_free_packet(packet);
            sws_scale(_phone->sws_ctx, (uint8_t const * const *)webcam_frame->data, webcam_frame->linesize, 0,
                      _phone->webcam_decoder_ctx->height, s_video_frame->data, s_video_frame->linesize);
            /* create a new I-frame every 60 frames */
            //++p;
            /*
            if (p == 60) {

                s_video_frame->pict_type = AV_PICTURE_TYPE_BI ;
            } else if (p == 61) {
                s_video_frame->pict_type = AV_PICTURE_TYPE_I ;
                p = 0;
            } else {
                s_video_frame->pict_type = AV_PICTURE_TYPE_P ;
            }*/

            if (video_frame_finished) {
                memcpy(image->planes[VPX_PLANE_Y], s_video_frame->data[0],
                       s_video_frame->linesize[0] * _phone->webcam_decoder_ctx->height);
                memcpy(image->planes[VPX_PLANE_U], s_video_frame->data[1],
                       s_video_frame->linesize[1] * _phone->webcam_decoder_ctx->height / 2);
                memcpy(image->planes[VPX_PLANE_V], s_video_frame->data[2],
                       s_video_frame->linesize[2] * _phone->webcam_decoder_ctx->height / 2);
                toxav_send_video (_phone->av, image);
                //if (avcodec_encode_video2(cs->video_encoder_ctx, &enc_video_packet, s_video_frame, &got_packet) < 0) {
                /*if (vpx_codec_encode(&cs->v_encoder, image, frame_counter, 1, 0, 0) != VPX_CODEC_OK) {
                    printf("could not encode video frame\n");
                    continue;
                }
                ++frame_counter;

                vpx_codec_iter_t iter = NULL;
                vpx_codec_cx_pkt_t *pkt;
                while( (pkt = vpx_codec_get_cx_data(&cs->v_encoder, &iter)) ) {
                    if (pkt->kind == VPX_CODEC_CX_FRAME_PKT)
                        toxav_send_rtp_payload(_phone->av, TypeVideo, pkt->data.frame.buf, pkt->data.frame.sz);
                }*/
                //if (!got_packet) {
                //    continue;
                //}

                //if (!enc_video_packet.data) fprintf(stderr, "video packet data is NULL\n");

                //toxav_send_rtp_payload(_phone->av, TypeVideo, enc_video_packet.data, enc_video_packet.size);

                //av_free_packet(&enc_video_packet);
            }
        } else {
            av_free_packet(packet);
        }
    }

    vpx_img_free(image);

    /* clean up codecs */
    //pthread_mutex_lock(&cs->ctrl_mutex);
    av_free(buffer);
    av_free(webcam_frame);
    av_free(s_video_frame);
    sws_freeContext(_phone->sws_ctx);
    //avcodec_close(webcam_decoder_ctx);
    //avcodec_close(cs->video_encoder_ctx);
    //pthread_mutex_unlock(&cs->ctrl_mutex);

    _phone->running_encvid = -1;

    pthread_exit ( NULL );
}
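/*
 * Note on the plane copies above: they assume the strides chosen by
 * vpx_img_alloc() match the AVFrame linesizes. A stride-aware variant
 * (sketch, hypothetical helper) copies row by row instead:
 */
static void copy_plane(uint8_t *dst, int dst_stride,
                       const uint8_t *src, int src_stride,
                       int width, int height)
{
    int y;
    for (y = 0; y < height; y++)
        memcpy(dst + y * dst_stride, src + y * src_stride, width);
}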
Example #30
/*****************************************************************************
 * InitVideo: initialize the video decoder
 *****************************************************************************
 * the ffmpeg codec will be opened, some memory allocated. The vout is not yet
 * opened (done after the first decoded frame).
 *****************************************************************************/
int E_(InitVideoDec)( decoder_t *p_dec, AVCodecContext *p_context,
                      AVCodec *p_codec, int i_codec_id, char *psz_namecodec )
{
    decoder_sys_t *p_sys;
    vlc_value_t lockval;
    vlc_value_t val;

    var_Get( p_dec->p_libvlc, "avcodec", &lockval );

    /* Allocate the memory needed to store the decoder's structure */
    if( ( p_dec->p_sys = p_sys =
          (decoder_sys_t *)malloc(sizeof(decoder_sys_t)) ) == NULL )
    {
        msg_Err( p_dec, "out of memory" );
        return VLC_EGENERIC;
    }

    p_dec->p_sys->p_context = p_context;
    p_dec->p_sys->p_codec = p_codec;
    p_dec->p_sys->i_codec_id = i_codec_id;
    p_dec->p_sys->psz_namecodec = psz_namecodec;
    p_sys->p_ff_pic = avcodec_alloc_frame();

    /* ***** Fill p_context with init values ***** */
    /* FIXME: remove when ffmpeg deals properly with avc1 */
    if( p_dec->fmt_in.i_codec != VLC_FOURCC('a','v','c','1') )
    /* End FIXME */
    p_sys->p_context->codec_tag = ffmpeg_CodecTag( p_dec->fmt_in.i_codec );
    p_sys->p_context->width  = p_dec->fmt_in.video.i_width;
    p_sys->p_context->height = p_dec->fmt_in.video.i_height;
    p_sys->p_context->bits_per_sample = p_dec->fmt_in.video.i_bits_per_pixel;

    /*  ***** Get configuration of ffmpeg plugin ***** */
    p_sys->p_context->workaround_bugs =
        config_GetInt( p_dec, "ffmpeg-workaround-bugs" );
    p_sys->p_context->error_resilience =
        config_GetInt( p_dec, "ffmpeg-error-resilience" );

    var_Create( p_dec, "grayscale", VLC_VAR_BOOL | VLC_VAR_DOINHERIT );
    var_Get( p_dec, "grayscale", &val );
    if( val.b_bool ) p_sys->p_context->flags |= CODEC_FLAG_GRAY;

    var_Create( p_dec, "ffmpeg-vismv", VLC_VAR_INTEGER | VLC_VAR_DOINHERIT );
    var_Get( p_dec, "ffmpeg-vismv", &val );
#if LIBAVCODEC_BUILD >= 4698
    if( val.i_int ) p_sys->p_context->debug_mv = val.i_int;
#endif

    var_Create( p_dec, "ffmpeg-lowres", VLC_VAR_INTEGER | VLC_VAR_DOINHERIT );
    var_Get( p_dec, "ffmpeg-lowres", &val );
#if LIBAVCODEC_BUILD >= 4723
    if( val.i_int > 0 && val.i_int <= 2 ) p_sys->p_context->lowres = val.i_int;
#endif

    /* ***** ffmpeg frame skipping ***** */
    var_Create( p_dec, "ffmpeg-hurry-up", VLC_VAR_BOOL | VLC_VAR_DOINHERIT );
    var_Get( p_dec, "ffmpeg-hurry-up", &val );
    p_sys->b_hurry_up = val.b_bool;

    /* ***** ffmpeg direct rendering ***** */
    p_sys->b_direct_rendering = 0;
    var_Create( p_dec, "ffmpeg-dr", VLC_VAR_BOOL | VLC_VAR_DOINHERIT );
    var_Get( p_dec, "ffmpeg-dr", &val );
    if( val.b_bool && (p_sys->p_codec->capabilities & CODEC_CAP_DR1) &&
        ffmpeg_PixFmtToChroma( p_sys->p_context->pix_fmt ) &&
        /* Apparently direct rendering doesn't work with YUV422P */
        p_sys->p_context->pix_fmt != PIX_FMT_YUV422P &&
        /* H264 uses too many reference frames */
        p_sys->i_codec_id != CODEC_ID_H264 &&
        !(p_sys->p_context->width % 16) && !(p_sys->p_context->height % 16) &&
#if LIBAVCODEC_BUILD >= 4698
        !p_sys->p_context->debug_mv )
#else
        1 )
#endif
    {
        /* Some codecs set pix_fmt only after the 1st frame has been decoded,
         * so we need to do another check in ffmpeg_GetFrameBuf() */
        p_sys->b_direct_rendering = 1;
    }

#ifdef LIBAVCODEC_PP
    p_sys->p_pp = NULL;
    p_sys->b_pp = p_sys->b_pp_async = p_sys->b_pp_init = VLC_FALSE;
    p_sys->p_pp = E_(OpenPostproc)( p_dec, &p_sys->b_pp_async );
#endif

    /* ffmpeg doesn't properly release old pictures when frames are skipped */
    //if( p_sys->b_hurry_up ) p_sys->b_direct_rendering = 0;
    if( p_sys->b_direct_rendering )
    {
        msg_Dbg( p_dec, "using direct rendering" );
        p_sys->p_context->flags |= CODEC_FLAG_EMU_EDGE;
    }

    /* Always use our get_buffer wrapper so we can calculate the
     * PTS correctly */
    p_sys->p_context->get_buffer = ffmpeg_GetFrameBuf;
    p_sys->p_context->release_buffer = ffmpeg_ReleaseFrameBuf;
    p_sys->p_context->opaque = p_dec;

    /* ***** init this codec with special data ***** */
    if( p_dec->fmt_in.i_extra )
    {
        int i_size = p_dec->fmt_in.i_extra;

        if( p_sys->i_codec_id == CODEC_ID_SVQ3 )
        {
            uint8_t *p;

            p_sys->p_context->extradata_size = i_size + 12;
            p = p_sys->p_context->extradata  =
                malloc( p_sys->p_context->extradata_size );

            memcpy( &p[0],  "SVQ3", 4 );
            memset( &p[4], 0, 8 );
            memcpy( &p[12], p_dec->fmt_in.p_extra, i_size );

            /* Now remove all atoms before the SMI one */
            if( p_sys->p_context->extradata_size > 0x5a &&
                strncmp( &p[0x56], "SMI ", 4 ) )
            {
                uint8_t *psz = &p[0x52];

                while( psz < &p[p_sys->p_context->extradata_size - 8] )
                {
                    int i_size = GetDWBE( psz );
                    if( i_size <= 1 )
                    {
                        /* FIXME handle 1 as long size */
                        break;
                    }
                    if( !strncmp( &psz[4], "SMI ", 4 ) )
                    {
                        memmove( &p[0x52], psz,
                                 &p[p_sys->p_context->extradata_size] - psz );
                        break;
                    }

                    psz += i_size;
                }
            }
        }
        else if( p_dec->fmt_in.i_codec == VLC_FOURCC( 'R', 'V', '1', '0' ) ||
                 p_dec->fmt_in.i_codec == VLC_FOURCC( 'R', 'V', '1', '3' ) ||
                 p_dec->fmt_in.i_codec == VLC_FOURCC( 'R', 'V', '2', '0' ) )
        {
            if( p_dec->fmt_in.i_extra == 8 )
            {
                p_sys->p_context->extradata_size = 8;
                p_sys->p_context->extradata = malloc( 8 );

                memcpy( p_sys->p_context->extradata,
                        p_dec->fmt_in.p_extra,
                        p_dec->fmt_in.i_extra );
                p_sys->p_context->sub_id= ((uint32_t*)p_dec->fmt_in.p_extra)[1];

                msg_Warn( p_dec, "using extra data for RV codec sub_id=%08x",
                          p_sys->p_context->sub_id );
            }
        }
        /* FIXME: remove when ffmpeg deals properly with avc1 */
        else if( p_dec->fmt_in.i_codec == VLC_FOURCC('a','v','c','1') )
        {
            ;
        }
        /* End FIXME */
        else
        {
            p_sys->p_context->extradata_size = i_size;
            p_sys->p_context->extradata =
                malloc( i_size + FF_INPUT_BUFFER_PADDING_SIZE );
            memcpy( p_sys->p_context->extradata,
                    p_dec->fmt_in.p_extra, i_size );
            memset( &((uint8_t*)p_sys->p_context->extradata)[i_size],
                    0, FF_INPUT_BUFFER_PADDING_SIZE );
        }
    }

    /* ***** misc init ***** */
    p_sys->input_pts = p_sys->input_dts = 0;
    p_sys->i_pts = 0;
    p_sys->b_has_b_frames = VLC_FALSE;
    p_sys->b_first_frame = VLC_TRUE;
    p_sys->i_late_frames = 0;
    p_sys->i_buffer = 0;
    p_sys->i_buffer_orig = 1;
    p_sys->p_buffer_orig = p_sys->p_buffer = malloc( p_sys->i_buffer_orig );

    /* Set output properties */
    p_dec->fmt_out.i_cat = VIDEO_ES;
    p_dec->fmt_out.i_codec = ffmpeg_PixFmtToChroma( p_context->pix_fmt );

    /* Setup palette */
#if LIBAVCODEC_BUILD >= 4688
    if( p_dec->fmt_in.video.p_palette )
        p_sys->p_context->palctrl =
            (AVPaletteControl *)p_dec->fmt_in.video.p_palette;
    else
        p_sys->p_context->palctrl = &palette_control;
#endif

    /* ***** Open the codec ***** */
    vlc_mutex_lock( lockval.p_address );
    if( avcodec_open( p_sys->p_context, p_sys->p_codec ) < 0 )
    {
        vlc_mutex_unlock( lockval.p_address );
        msg_Err( p_dec, "cannot open codec (%s)", p_sys->psz_namecodec );
        free( p_sys );
        return VLC_EGENERIC;
    }
    vlc_mutex_unlock( lockval.p_address );
    msg_Dbg( p_dec, "ffmpeg codec (%s) started", p_sys->psz_namecodec );


    return VLC_SUCCESS;
}
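/*
 * The extradata idiom that recurs throughout these examples, written out
 * once as a sketch (hypothetical helper): allocate size plus
 * FF_INPUT_BUFFER_PADDING_SIZE and keep the tail zeroed so libavcodec's
 * bitstream reader may safely over-read past the end.
 */
static int set_extradata_sketch(AVCodecContext *ctx,
                                const uint8_t *data, int size)
{
    ctx->extradata = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!ctx->extradata)
        return -1;                  /* caller reports the allocation failure */
    memcpy(ctx->extradata, data, size);
    ctx->extradata_size = size;     /* padding stays zeroed and uncounted */
    return 0;
}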