Exemplo n.º 1
0
/* GObject finalize handler: drain the encoders, write the container
 * trailer, tear down the muxer state held in user_data, then chain up
 * to the parent class finalizer. */
static void
finalize (GObject *object)
{
  GeglProperties *o = GEGL_PROPERTIES (object);
  Priv           *p = (Priv *) o->user_data;

  if (p != NULL)
    {
      /* Push any buffered samples/frames out before the trailer. */
      flush_audio (o);
      flush_video (o);

      /* The trailer must be written while the codec contexts from
       * header-writing are still open. */
      av_write_trailer (p->oc);

      if (p->video_st != NULL)
        close_video (p, p->oc, p->video_st);
      if (p->audio_st != NULL)
        close_audio (p, p->oc, p->audio_st);

      avio_closep (&p->oc->pb);
      avformat_free_context (p->oc);

      g_free (p);
      o->user_data = NULL;
    }

  /* Chain up to the parent class' finalize. */
  G_OBJECT_CLASS (g_type_class_peek_parent (G_OBJECT_GET_CLASS (object)))->finalize (object);
}
/* Finish writing the output media file: write the container trailer,
 * close the video codec, release every stream, close the output I/O
 * handle (unless the format is file-less) and free the muxer context.
 *
 * Operates on the file-scope muxing state: `oc`, `video_st`, `fmt`. */
void close_video_file()
{
    unsigned int i;  /* BUG FIX: the loop index was never declared;
                      * unsigned to match AVFormatContext.nb_streams */

    /* write the trailer, if any.  the trailer must be written
     * before you close the CodecContexts open when you wrote the
     * header; otherwise write_trailer may try to use memory that
     * was freed on av_codec_close() */
    av_write_trailer(oc);

    /* close each codec */
    if (video_st)
        close_video(oc, video_st);

    /* free the streams */
    for (i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    if (!(fmt->flags & AVFMT_NOFILE)) {
        /* close the output file */
        url_fclose(oc->pb);
    }

    /* free the format context itself */
    av_free(oc);
}
/* Tear down the muxer owned by the caller: write the trailer, close the
 * video/audio codecs, free the streams, close the output file (unless
 * the format is file-less) and free the AVFormatContext.  Every
 * out-parameter is reset to NULL so the caller cannot reuse the freed
 * objects.
 *
 * Relies on the file-scope `fmt` (the AVOutputFormat used for muxing).
 * The trailer must be written before the CodecContexts opened for the
 * header are closed; otherwise av_write_trailer() may touch memory that
 * was freed by av_codec_close(). */
void finalizeAVFormatContext(AVFormatContext **out_oc, AVStream **out_video_st, AVFrame **out_picture, AVStream **out_audio_st, int16_t **out_samples){
	unsigned int i;  /* BUG FIX: loop index was never declared */

	if(!out_oc || !out_video_st || !out_picture || !out_audio_st || !out_samples){
		LOGE("finalizeAVFormatContext argument is null");
		return;  /* BUG FIX: previously fell through and dereferenced NULL */
	}

	AVFormatContext *oc = *out_oc;
	AVStream *video_st = *out_video_st;
	AVFrame *picture = *out_picture;
	AVStream *audio_st = *out_audio_st;
	int16_t *samples = *out_samples;

	av_write_trailer(oc);

	/* Close each codec. */
	if (video_st)
		close_video(oc, video_st, picture);
	if (audio_st)
		close_audio(oc, audio_st, samples);

	/* Free the streams. */
	for (i = 0; i < oc->nb_streams; i++) {
		av_freep(&oc->streams[i]->codec);
		av_freep(&oc->streams[i]);
	}

	if (!(fmt->flags & AVFMT_NOFILE))
		/* Close the output file. */
		avio_close(oc->pb);

	/* Free the format context itself. */
	av_free(oc);

	/* Null out the caller's pointers — everything above is now freed. */
	*out_oc = NULL;
	*out_video_st = NULL;
	*out_picture = NULL;
	*out_audio_st = NULL;
	*out_samples = NULL;
}
Exemplo n.º 4
0
/* Shut down a webm playback context: signal the worker threads to quit,
 * join them, close the decoders, destroy the demuxer and release the
 * pack file handle and the context itself.  Order matters: threads are
 * joined before the contexts they use are closed. */
void webm_close(webm_context *ctx)
{
    /* NOTE(review): quit_video looks like a flag polled by the worker
     * threads — presumably it should be atomic/volatile; confirm. */
    quit_video = 1;
    thread_join(ctx->the_demux_thread);
    thread_join(ctx->the_video_thread);
    close_video(&(ctx->video_ctx));
    /* The audio thread and track are optional. */
    if (ctx->the_audio_thread) thread_join(ctx->the_audio_thread);
    if (ctx->audio_track >= 0) close_audio(&(ctx->audio_ctx));
    nestegg_destroy(ctx->nestegg_ctx);
    closepackfile(ctx->packhandle);
    free(ctx);
}
Exemplo n.º 5
0
void CloseVideo()
{
    if (GraphicsDriver)
    {
        void (SPHERE_STDCALL * close_video)();

        close_video = (void (SPHERE_STDCALL *)())dlsym(GraphicsDriver, "CloseVideo");

        if (close_video != NULL)
            close_video();

        dlclose(GraphicsDriver);
    }
}
Exemplo n.º 6
0
// Re-attach the motion-test view to the currently selected video slot.
// Any existing connection is closed first.  Returns TRUE on success.
BOOL CDlgMotion::connect_video()
{
	close_video();

	const int slot = m_cbo_slot.GetCurSel();
	if (slot < 0 || slot >= g_window_count)
		return FALSE;

	if (!m_motion_test.connect_video(g_server_id, slot))
		return FALSE;

	m_motion_test.show_motion(TRUE);
	return TRUE;
}
Exemplo n.º 7
0
//close and clean up the video, audio, and input systems on the platform
//  returns int - negative on failure, otherwise success
int platform_close(void) {
  int rc;

  /* Shut the subsystems down in order, stopping at the first failure
   * and propagating its (negative) result code. */
  if ((rc = close_video()) < 0)
    return rc;
  if ((rc = close_audio()) < 0)
    return rc;
  if ((rc = close_input()) < 0)
    return rc;

  return 0;
}
Exemplo n.º 8
0
/* Finalize an encoding job: write the container trailer, close the
 * codecs, free the streams, close the output (file-based formats only)
 * and release the format context plus the job's private writer object.
 *
 * With NEW_M2TS defined the job's own writer (jobSpec.p) owns the output
 * file, so the libavformat trailer/file steps are skipped. */
void shut_down(EncoderJob &jobSpec) {
	/* write the trailer, if any.  the trailer must be written
	* before you close the CodecContexts open when you wrote the
	* header; otherwise write_trailer may try to use memory that
	* was freed on av_codec_close() */

#ifdef NEW_M2TS
	// nothing
	jobSpec.p->CloseFile();
#else 
	av_write_trailer(jobSpec.oc);
#endif

	/* close each codec */ // one of these (or both) are failing.
	/* NOTE(review): the outbuf checks presumably guard against closing a
	 * codec whose buffers were never allocated — confirm against
	 * close_video()/close_audio(). */
	if (jobSpec.video_st && jobSpec.video_outbuf) close_video(jobSpec, jobSpec.oc, jobSpec.video_st);
	if (jobSpec.audio_st && jobSpec.audio_outbuf) close_audio(jobSpec, jobSpec.oc, jobSpec.audio_st);

	/* free the streams */
	for(unsigned int i = 0; i < jobSpec.oc->nb_streams; i++) {
		av_freep(&jobSpec.oc->streams[i]->codec);
		av_freep(&jobSpec.oc->streams[i]);
	}


#ifdef NEW_M2TS
	// nothing
#else 
	if (!(jobSpec.fmt->flags & AVFMT_NOFILE)) {
		// close the output file
		url_fclose(jobSpec.oc->pb);
	}
#endif

	// free the format context; the private writer is heap-owned by the job
	av_free(jobSpec.oc);
	delete jobSpec.p;
}
Exemplo n.º 9
0
/* API example: generate synthetic audio and video, encode them and mux
 * them into the file named on the command line.  The container format
 * is guessed from the file extension (MPEG as a fallback).
 * Returns 0 on success, 1 on error. */
int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    double audio_pts, video_pts;
    unsigned int i;  /* matches AVFormatContext.nb_streams (unsigned) */
    int ret;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* Autodetect the output format from the name. default is MPEG. */
    fmt = av_guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        fmt = av_guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        return 1;
    }

    /* Allocate the output media context. */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        return 1;
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt->video_codec);
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, fmt->audio_codec);
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_st);
    if (audio_st)
        open_audio(oc, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

    /* Write the stream header, if any.
     * BUG FIX: the return value was previously ignored, so a failed
     * header write went undetected and the trailer would later be
     * written to a context whose header never made it out. */
    ret = avformat_write_header(oc, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when writing header to '%s'\n", filename);
        return 1;
    }

    for (;;) {
        /* Compute current audio and video time. */
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = (double)video_st->pts.val * video_st->time_base.num /
                        video_st->time_base.den;
        else
            video_pts = 0.0;

        /* Stop once every present stream has reached the target duration. */
        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;

        /* Write interleaved audio and video frames, always advancing the
         * stream that is furthest behind.  (The redundant `video_st &&`
         * term inside the condition was removed; it is implied by the
         * `!video_st ||` alternative.) */
        if (!video_st || (audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    /* Free the streams. */
    for (i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the format context */
    av_free(oc);

    return 0;
}
Exemplo n.º 10
0
/* media file output */
/* Example: encode a synthetic MPEG-4 video stream and mux it into
 * E:\muxing.mp4.  Encoding stops at stream EOF or after 10000 frames.
 * Returns 0 on success, 1 on error. */
int main(int argc, char **argv)
{
	const char *filename;
	AVOutputFormat *fmt;
	AVFormatContext *oc;
	AVStream *video_st = NULL;
	AVCodec *video_codec = NULL;
	int ret;

	/* Initialize libavcodec, and register all codecs and formats. */
	av_register_all();

	filename = "E:\\muxing.mp4";
	/* Allocate the output media context; the muxer is guessed from the
	 * file extension, falling back to MPEG. */
	avformat_alloc_output_context2(&oc, NULL, NULL, filename);

	if (!oc) {
		printf("Could not deduce output format from file extension: using MPEG.\n");
		avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
	}
	if (!oc)
		return 1;

	/* Output format chosen by the muxer. */
	fmt = oc->oformat;

	/* Force MPEG-4 video regardless of the container's default codec. */
	//fmt->video_codec = AV_CODEC_ID_H264;
	fmt->video_codec = AV_CODEC_ID_MPEG4;

	/* Add the video stream and set its codec parameters. */
	if (fmt->video_codec != AV_CODEC_ID_NONE)
		video_st = add_stream(oc, &video_codec, fmt->video_codec);

	/* Now that all the parameters are set, we can open the video codec
	 * and allocate the necessary encode buffers. */
	if (video_st)
		open_video(oc, video_codec, video_st);

	av_dump_format(oc, 0, filename, 1);

	/* open the output file, if needed */
	if (!(fmt->flags & AVFMT_NOFILE)) {

		ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);

		if (ret < 0) {
			char buf[256];
			av_strerror(ret, buf, sizeof(buf));
			fprintf(stderr, "Could not open '%s': %s\n", filename, buf);
			return 1;
		}
	}

	/* Write the stream header, if any. */
	ret = avformat_write_header(oc, NULL);

	if (ret < 0) {
		char buf[256];
		av_strerror(ret, buf, sizeof(buf));
		fprintf(stderr, "Error occurred when opening output file: %s\n", buf);
		return 1;
	}

	/* Encode frames until the stream reaches EOF or 10000 frames.
	 * BUG FIX: the original loop carried an unreachable
	 * `if (!flush && (!video_st)) flush = 1;` branch (the loop guard
	 * already guarantees video_st != NULL) plus a redundant re-check of
	 * the loop condition; both were removed.  The flush flag therefore
	 * could never become 1, so 0 is passed directly. */
	while (video_st && !video_is_eof) {
		write_video_frame(oc, video_st, 0);

		if (frame_count == 10000)
			break;
	}

	/* Write the trailer, if any. The trailer must be written before you
	* close the CodecContexts open when you wrote the header; otherwise
	* av_write_trailer() may try to use memory that was freed on
	* av_codec_close(). */

	av_write_trailer(oc);

	/* Close each codec. */
	if (video_st)
		close_video(oc, video_st);

	if (!(fmt->flags & AVFMT_NOFILE))
		/* Close the output file. */
		avio_close(oc->pb);

	/* free the format context and its streams */
	avformat_free_context(oc);

	return 0;
}
Exemplo n.º 11
0
/* Create a media file `filename` in container `format`, add a single
 * MJPEG video stream, write frames until STREAM_NB_FRAMES, then write
 * the trailer and free everything.  Returns 0 on success; exits the
 * process on fatal setup errors.
 *
 * NOTE(review): written against an old FFmpeg API generation
 * (CODEC_ID_*, URL_WRONLY); modern avio_open() expects AVIO_FLAG_WRITE
 * — confirm against the FFmpeg version actually linked. */
int ff_example(const char *filename, const char *format)
{
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *video_st;
    double video_pts;
    int i;

    fmt = av_guess_format(format, NULL, NULL);
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        exit(1);
    }

    /* NOTE(review): this mutates the shared AVOutputFormat returned by
     * av_guess_format(), affecting every later user of that format. */
    fmt->video_codec = CODEC_ID_MJPEG;

    /* allocate the output media context */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        exit(1);
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    video_st = NULL;
    if (fmt->video_codec != CODEC_ID_NONE)
        video_st = add_video_stream(oc, fmt->video_codec);

    av_dump_format(oc, 0, filename, 1);

    /* now that all the parameters are set, we can open the audio and
       video codecs and allocate the necessary encode buffers */
    if (video_st)
        open_video(oc, video_st);

    /* NOTE(review): no AVFMT_NOFILE check here, unlike the sibling
     * examples — file-less formats would fail this open. */
    if (avio_open(&oc->pb, filename, URL_WRONLY) < 0) {
        fprintf(stderr, "Could not open '%s'\n", filename);
        exit(1);
    }


    /* write the stream header, if any */
    /* NOTE(review): return value unchecked — header failures go silent. */
    avformat_write_header(oc, NULL);

    for(;;) {

        /* current video time, derived from the stream's pts counter */
        video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
        printf("pts: %f\n", video_pts);

        if (frame_count > STREAM_NB_FRAMES)
            break;

        /* write interleaved audio and video frames */
        if (write_video_frame(oc, video_st) < 0)
            break;
    }

    printf("%d frames written\n", frame_count);

    /* trailer must precede closing the codecs opened for the header */
    av_write_trailer(oc);

    /* close each codec */
    if (video_st)
        close_video(oc, video_st);

    /* free the streams */
    for(i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    avio_close(oc->pb);

    /* free the format context */
    av_free(oc);

    return 0;
}
Exemplo n.º 12
0
/* Canonical libavformat muxing example: generate a synthetic audio and
 * video stream, encode both, and mux them into the file named on the
 * command line.  The container is guessed from the file extension
 * (MPEG fallback).  Returns 0 on success, 1 on error. */
int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    AVCodec *audio_codec, *video_codec;
    double audio_time, video_time;
    int flush, ret;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;

    if (fmt->video_codec != AV_CODEC_ID_NONE)
        video_st = add_stream(oc, &video_codec, fmt->video_codec);
    if (fmt->audio_codec != AV_CODEC_ID_NONE)
        audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_codec, video_st);
    if (audio_st)
        open_audio(oc, audio_codec, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    /* Encode until both streams (when present) report EOF.  Once each
     * stream passes STREAM_DURATION, `flush` is raised so the encoders
     * drain their buffered frames. */
    flush = 0;
    while ((video_st && !video_is_eof) || (audio_st && !audio_is_eof)) {
        /* Compute current audio and video time; INFINITY makes a missing
         * or finished stream lose the "furthest behind" comparison. */
        audio_time = (audio_st && !audio_is_eof) ? audio_st->pts.val * av_q2d(audio_st->time_base) : INFINITY;
        video_time = (video_st && !video_is_eof) ? video_st->pts.val * av_q2d(video_st->time_base) : INFINITY;

        if (!flush &&
            (!audio_st || audio_time >= STREAM_DURATION) &&
            (!video_st || video_time >= STREAM_DURATION)) {
            flush = 1;
        }

        /* write interleaved audio and video frames */
        if (audio_st && !audio_is_eof && audio_time <= video_time) {
            write_audio_frame(oc, audio_st, flush);
        } else if (video_st && !video_is_eof && video_time < audio_time) {
            write_video_frame(oc, video_st, flush);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the format context and its streams */
    avformat_free_context(oc);

    return 0;
}
Exemplo n.º 13
0
// Close the dialog's video preview; delegates to close_video().
void CDlgMask::close()
{
	close_video();
}
int main(int argc, char ** argv)
{
	if(argc < 4) {
		printf("\nScrub, you need to specify a bitrate, number of frames, and server."
				"\nLike this: pixieHD 350 1000 rtmp://domain.com/live/matt\n"
				"\nNOTE, it is: progname bitrate frames server\n\n"
				"The bitrate is understood to be kbits/sec.\n"
				"You should enter frames or else you the program will\n"
				"continue to stream until you forcefully close it.\n"
				"THANK YOU: while(1) { /* stream! */ }\n");
		return 0;
	}
	printf("\nYou have set the following options:\n\n%5cbitrate: %s,"
			"\n%5cframes: %s\n%5cserver: %s\n\n",
			' ',argv[1],' ',argv[2],' ',argv[3]);
	
	/*int p;
	printf("Initializing noob options");
	for(p=0; p<3; ++p) {
		printf("%5c",'.');
		Sleep(1500);
	}
	printf("\n\n");

	char *input;
	printf("You hating on my GFX or wat? Please Answer: ");
	input = getline();

	printf("\n\n");
	printf("Your answer: ");

	size_t input_len = strlen(input);
	for(p=0; p<input_len; ++p) {
		Sleep(300);
		printf("%c",input[p]);
	}
	printf("\nkk here we go...");
	Sleep(1000);*/

	printf("\n\nPress the CONTROL key to begin streaming or ESC key to QUIT.\n\n");
    while (1)
    {
	   if (ButtonPress(VK_ESCAPE)) {
          printf("Quit.\n\n");
          break;
       } else if (ButtonPress(VK_CONTROL)) {
    	   // Decoder local variable declaration
    	   	AVFormatContext *pFormatCtx = NULL;
    	   	int i, videoStream;
    	   	AVCodecContext *pCodecCtx = NULL;
    	   	AVCodec *pCodec;
    	   	AVFrame *pFrame;
    	   	AVPacket packet;
    	   	int frameFinished;

    	   	// Encoder local variable declaration
    	   	const char *filename;
    	   	AVOutputFormat *fmt;
    	   	AVFormatContext *oc;
    	   	AVStream *video_st;
    	   	AVCodec *video_codec;
    	   	int ret; unsigned int frame_count, frame_count2;
    	   	StreamInfo sInfo;

    	   	size_t max_frames = strtol(argv[2], NULL, 0);

    	   	// Register all formats, codecs and network
    	   	av_register_all();
    	   	avcodec_register_all();
    	   	avformat_network_init();

    	   	// Setup mux
    	   	//filename = "output_file.flv";
    	   	//filename = "rtmp://chineseforall.org/live/beta";
    	   	filename = argv[3];
    	   	fmt = av_guess_format("flv", filename, NULL);
    	   	if (fmt == NULL) {
    	   		printf("Could not guess format.\n");
    	   		return -1;
    	   	}
    	   	// allocate the output media context
    	   	oc = avformat_alloc_context();
    	   	if (oc == NULL) {
    	   		printf("could not allocate context.\n");
    	   		return -1;
    	   	}


    	   HDC hScreen = GetDC(GetDesktopWindow());
		   ScreenX = GetDeviceCaps(hScreen, HORZRES);
		   ScreenY = GetDeviceCaps(hScreen, VERTRES);

		   // Temp. hard-code the resolution
		   int new_width = 1024, new_height = 576;
		   double v_ratio = 1.7786458333333333333333333333333;

    	   	// Set output format context to the format ffmpeg guessed
    	   	oc->oformat = fmt;

    	   	// Add the video stream using the h.264
    	   	// codec and initialize the codec.
    	   	video_st = NULL;
    	   	sInfo.width = new_width;
    	   	sInfo.height = new_height;
    	   	sInfo.pix_fmt = AV_PIX_FMT_YUV420P;
    	   	sInfo.frame_rate = 10;
    	   	sInfo.bitrate = strtol(argv[1], NULL, 0)*1000;
    	   	video_st = add_stream(oc, &video_codec, AV_CODEC_ID_H264, &sInfo);

    	   	// Now that all the parameters are set, we can open the audio and
    	   	// video codecs and allocate the necessary encode buffers.
    	   	if (video_st)
    	   		open_video(oc, video_codec, video_st);

    	   	/* open the output file, if needed */
    	   	if (!(fmt->flags & AVFMT_NOFILE)) {
    	   		ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
    	   		if (ret < 0) {
    	   			fprintf(stderr, "Could not open '%s': %s\n", filename, av_err2str(ret));
    	   			return 1;
    	   		}
    	   	}

    	   	// dump output format
    	   	av_dump_format(oc, 0, filename, 1);

    	   	// Write the stream header, if any.
    	   	ret = avformat_write_header(oc, NULL);
    	   	if (ret < 0) {
    	   		fprintf(stderr, "Error occurred when opening output file: %s\n", av_err2str(ret));
    	   		return 1;
    	   	}

    	   	// Read frames, decode, and re-encode
    	   	frame_count = 1;
    	   	frame_count2 = 1;

		   HDC hdcMem = CreateCompatibleDC (hScreen);
		   HBITMAP hBitmap = CreateCompatibleBitmap(hScreen, ScreenX, ScreenY);
		   HGDIOBJ hOld;
		   BITMAPINFOHEADER bmi = {0};
		   bmi.biSize = sizeof(BITMAPINFOHEADER);
		   bmi.biPlanes = 1;
		   bmi.biBitCount = 32;
		   bmi.biWidth = ScreenX;
		   bmi.biHeight = -ScreenY;
		   bmi.biCompression = BI_RGB;
		   bmi.biSizeImage = 0;// 3 * ScreenX * ScreenY;


		   if(ScreenData)
			   free(ScreenData);
		   ScreenData = (BYTE*)malloc(4 * ScreenX * ScreenY);
		   AVPacket pkt;

		   clock_t start_t = GetTickCount();
		   long long wait_time = 0;

		   uint64_t total_size;

    	   while(1) {
			hOld = SelectObject(hdcMem, hBitmap);
			BitBlt(hdcMem, 0, 0, ScreenX, ScreenY, hScreen, 0, 0, SRCCOPY);
			SelectObject(hdcMem, hOld);

			GetDIBits(hdcMem, hBitmap, 0, ScreenY, ScreenData, (BITMAPINFO*)&bmi, DIB_RGB_COLORS);

			//calculate the bytes needed for the output image
			int nbytes = avpicture_get_size(AV_PIX_FMT_YUV420P, new_width, new_height);

			//create buffer for the output image
			uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes);

			//create ffmpeg frame structures.  These do not allocate space for image data,
			//just the pointers and other information about the image.
			AVFrame* inpic = avcodec_alloc_frame();
			AVFrame* outpic = avcodec_alloc_frame();

			//this will set the pointers in the frame structures to the right points in
			//the input and output buffers.
			avpicture_fill((AVPicture*)inpic, ScreenData, AV_PIX_FMT_RGB32, ScreenX, ScreenY);
			avpicture_fill((AVPicture*)outpic, outbuffer, AV_PIX_FMT_YUV420P, new_width, new_height);

			//create the conversion context
			struct SwsContext *fooContext = sws_getContext(ScreenX, ScreenY, AV_PIX_FMT_RGB32, new_width, new_height, AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);

			//perform the conversion
			sws_scale(fooContext, inpic->data, inpic->linesize, 0, ScreenY, outpic->data, outpic->linesize);
			
			// Initialize a new frame
			AVFrame* newFrame = avcodec_alloc_frame();

			int size = avpicture_get_size(video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);
			uint8_t* picture_buf = av_malloc(size);

			avpicture_fill((AVPicture *) newFrame, picture_buf, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);

			// Copy only the frame content without additional fields
			av_picture_copy((AVPicture*) newFrame, (AVPicture*) outpic, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);

			// encode the image
			int got_output;
			av_init_packet(&pkt);
			pkt.data = NULL; // packet data will be allocated by the encoder
			pkt.size = 0;

			// Set the frame's pts (this prevents the warning notice 'non-strictly-monotonic PTS')
			newFrame->pts = frame_count2;

			ret = avcodec_encode_video2(video_st->codec, &pkt, newFrame, &got_output);
			if (ret < 0) {
				fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
				exit(1);
			}

			if (got_output) {
				if (video_st->codec->coded_frame->key_frame)
					pkt.flags |= AV_PKT_FLAG_KEY;
				pkt.stream_index = video_st->index;

				if (pkt.pts != AV_NOPTS_VALUE)
					pkt.pts = av_rescale_q(pkt.pts, video_st->codec->time_base, video_st->time_base);
				if (pkt.dts != AV_NOPTS_VALUE)
					pkt.dts = av_rescale_q(pkt.dts, video_st->codec->time_base, video_st->time_base);

				// Write the compressed frame to the media file.
				ret = av_interleaved_write_frame(oc, &pkt);

				fprintf(stderr, "encoded frame #%d\n", frame_count);
				frame_count++;
			} else {
				ret = 0;
			}
			if (ret != 0) {
				fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
				exit(1);
			}

			++frame_count2;

			// Free the YUV picture frame we copied from the
			// decoder to eliminate the additional fields
			// and other packets/frames used
			av_free(picture_buf);
			av_free_packet(&pkt);
			av_free(newFrame);

			//free memory
			av_free(outbuffer);
			av_free(inpic);
			av_free(outpic);


          if(frame_count == max_frames) {
			/* Write the trailer, if any. The trailer must be written before you
			 * close the CodecContexts open when you wrote the header; otherwise
			 * av_write_trailer() may try to use memory that was freed on
			 * av_codec_close().
			 */
			av_write_trailer(oc);

			/* Close the video codec (encoder) */
			if (video_st) {
				close_video(oc, video_st);
			}
			// Free the output streams.
			for (i = 0; i < oc->nb_streams; i++) {
				av_freep(&oc->streams[i]->codec);
				av_freep(&oc->streams[i]);
			}
			if (!(fmt->flags & AVFMT_NOFILE)) {
				/* Close the output file. */
				avio_close(oc->pb);
			}
			/* free the output format context */
			av_free(oc);

			ReleaseDC(GetDesktopWindow(),hScreen);
			DeleteDC(hdcMem);

			printf("\n\nPress the CONTROL key to begin streaming or ESC key to QUIT.\n\n");
			break;
          }
       }
       }
    }
    return 0;
}
/* Transcoder: decode input_file.wmv, re-encode each video frame with
 * H.264 and mux the result into output_file.flv (or an RTMP URL).
 * Returns 0 on success, negative/1 on error. */
int main(int argc, char *argv[]) {
	/* Decoder local variable declaration */
	AVFormatContext *pFormatCtx = NULL;
	int i, videoStream;
	AVCodecContext *pCodecCtx = NULL;
	AVCodec *pCodec;
	AVFrame *pFrame;
	AVPacket packet;
	int frameFinished;

	/* Encoder local variable declaration */
	const char *filename;
	AVOutputFormat *fmt;
	AVFormatContext *oc;
	AVStream *video_st;
	AVCodec *video_codec;
	int ret, frame_count;
	StreamInfo sInfo;

	/* Register all formats, codecs and network */
	av_register_all();
	avcodec_register_all();
	avformat_network_init();

	/* Open video file */
	if (avformat_open_input(&pFormatCtx, "input_file.wmv", NULL, NULL) != 0)
		return -1; /* Couldn't open file */

	/* Retrieve stream information */
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
		return -1; /* Couldn't find stream information */

	/* Dump information about file onto standard error */
	av_dump_format(pFormatCtx, 0, "input_file.wmv", 0);

	/* Find the first video stream */
	videoStream = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			videoStream = i;
			break;
		}
	if (videoStream == -1)
		return -1; /* Didn't find a video stream */

	/* Get a pointer to the codec context for the video stream */
	pCodecCtx = pFormatCtx->streams[videoStream]->codec;

	/* Find the decoder for the video stream */
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1; /* Codec not found */
	}
	/* Open codec (decoder) */
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
		return -1; /* Could not open codec */

	/* Allocate video frame */
	pFrame = avcodec_alloc_frame();

	/* Setup mux */
	filename = "output_file.flv";

	/* To stream to a media server (e.g. FMS) */
	/* filename = "rtmp://chineseforall.org/live/beta"; */

	fmt = av_guess_format("flv", filename, NULL);
	if (fmt == NULL) {
		printf("Could not guess format.\n");
		return -1;
	}
	/* allocate the output media context */
	oc = avformat_alloc_context();
	if (oc == NULL) {
		printf("could not allocate context.\n");
		return -1;
	}

	/* Set output format context to the format ffmpeg guessed */
	oc->oformat = fmt;

	/* Add the video stream using the h.264 codec and initialize it
	 * with the source dimensions.
	 * BUG FIX: originally indexed with the leftover loop variable `i`;
	 * use videoStream explicitly so the code does not depend on how the
	 * search loop above terminated. */
	video_st = NULL;
	sInfo.width = pFormatCtx->streams[videoStream]->codec->width;
	sInfo.height = pFormatCtx->streams[videoStream]->codec->height;
	sInfo.pix_fmt = AV_PIX_FMT_YUV420P;
	sInfo.frame_rate = 30;
	sInfo.bitrate = 450*1000;
	video_st = add_stream(oc, &video_codec, AV_CODEC_ID_H264, &sInfo);

	/* Open the encoder and allocate the necessary encode buffers. */
	if (video_st)
		open_video(oc, video_codec, video_st);

	/* open the output file, if needed */
	if (!(fmt->flags & AVFMT_NOFILE)) {
		ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
		if (ret < 0) {
			fprintf(stderr, "Could not open '%s': %s\n", filename, av_err2str(ret));
			return 1;
		}
	}

	/* dump output format */
	av_dump_format(oc, 0, filename, 1);

	/* Write the stream header, if any. */
	ret = avformat_write_header(oc, NULL);
	if (ret < 0) {
		fprintf(stderr, "Error occurred when opening output file: %s\n", av_err2str(ret));
		return 1;
	}

	/* Read frames, decode, and re-encode */
	frame_count = 1;
	while (av_read_frame(pFormatCtx, &packet) >= 0) {
		/* Is this a packet from the video stream? */
		if (packet.stream_index == videoStream) {
			/* Decode video frame */
			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

			/* Did we get a video frame? */
			if (frameFinished) {

				/* Copy into a frame that carries only the fields the
				 * encoder needs. */
				AVFrame* newFrame = avcodec_alloc_frame();

				int size = avpicture_get_size(video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);
				uint8_t* picture_buf = av_malloc(size);

				avpicture_fill((AVPicture *) newFrame, picture_buf, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);

				av_picture_copy((AVPicture*) newFrame, (AVPicture*) pFrame, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);

				/* encode the image */
				AVPacket pkt;
				int got_output;
				av_init_packet(&pkt);
				pkt.data = NULL; /* packet data will be allocated by the encoder */
				pkt.size = 0;

				/* Monotonic pts avoids the 'non-strictly-monotonic PTS' warning. */
				newFrame->pts = frame_count;

				ret = avcodec_encode_video2(video_st->codec, &pkt, newFrame, &got_output);
				if (ret < 0) {
					fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
					exit(1);
				}

				if (got_output) {
					if (video_st->codec->coded_frame->key_frame)
						pkt.flags |= AV_PKT_FLAG_KEY;
					pkt.stream_index = video_st->index;

					/* Rescale timestamps from codec to stream time base. */
					if (pkt.pts != AV_NOPTS_VALUE)
						pkt.pts = av_rescale_q(pkt.pts, video_st->codec->time_base, video_st->time_base);
					if (pkt.dts != AV_NOPTS_VALUE)
						pkt.dts = av_rescale_q(pkt.dts, video_st->codec->time_base, video_st->time_base);

					/* Write the compressed frame to the media file. */
					ret = av_interleaved_write_frame(oc, &pkt);
				} else {
					ret = 0;
				}
				if (ret != 0) {
					fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
					exit(1);
				}

				fprintf(stderr, "encoded frame #%d\n", frame_count);
				frame_count++;

				/* Release the per-frame buffers and frame struct. */
				av_free(picture_buf);
				av_free_packet(&pkt);
				av_free(newFrame);
			}
		}

		/* Free the packet that was allocated by av_read_frame */
		av_free_packet(&packet);
	}

	/* Write the trailer, if any. The trailer must be written before you
	 * close the CodecContexts open when you wrote the header; otherwise
	 * av_write_trailer() may try to use memory that was freed on
	 * av_codec_close(). */
	av_write_trailer(oc);

	/* Close the video codec (encoder) */
	if (video_st)
		close_video(oc, video_st);
	/* Free the output streams. */
	for (i = 0; i < oc->nb_streams; i++) {
		av_freep(&oc->streams[i]->codec);
		av_freep(&oc->streams[i]);
	}
	if (!(fmt->flags & AVFMT_NOFILE))
		/* Close the output file. */
		avio_close(oc->pb);
	/* free the output format context */
	av_free(oc);

	/* Free the YUV frame populated by the decoder */
	av_free(pFrame);

	/* Close the video codec (decoder) */
	avcodec_close(pCodecCtx);

	/* Close the input video file */
	avformat_close_input(&pFormatCtx);

	return 0;
}
Exemplo n.º 16
0
// Close the dialog's video preview; delegates to close_video().
void CDlgIPCFeature::close()
{
	close_video();
}
Exemplo n.º 17
0
// Close the dialog's video preview; delegates to close_video().
void CDlgOsd::close()
{
	close_video();
}
Exemplo n.º 18
0
// Close the dialog's video preview; delegates to close_video().
void CDlgMotion::close()
{
	close_video();
}