Code example #1
File: ff-save.c  Project: OpenCL/GEGL-OpenCL-old
static gboolean
process (GeglOperation       *operation,
         GeglBuffer          *input,
         const GeglRectangle *result,
         gint                 level)
{
  GeglProperties *o = GEGL_PROPERTIES (operation);
  Priv           *p = (Priv*)o->user_data;
  static gint     inited = 0;

  g_assert (input);

  if (p == NULL)
    init (o);
  p = (Priv*)o->user_data;

  p->width = result->width;
  p->height = result->height;
  p->input = input;

  if (!inited)
    {
      tfile (o);
      inited = 1;
    }

  write_video_frame (o, p->oc, p->video_st);
  if (p->audio_st)
    write_audio_frame (o, p->oc, p->audio_st);

  return TRUE;
}
Code example #2
File: writeffmpeg.c  Project: BHCLL/blendocv
static void write_audio_frames(double to_pts)
{
	int finished = 0;

	while (audio_stream && !finished) {
		if((audio_time >= to_pts) ||
		   (write_audio_frame())) {
			finished = 1;
		}
	}
}
Code example #3
// Send available frames to the encoder
void push_audio(EncoderJob &jobSpec, MediaFrame &frame) {
	if (frame.AudioSize > 0) {
		// This function only writes frames until it reaches sync with the video pts.
		// also, it updates the AudioSamplesConsumed in 'frame'.
		write_audio_frame(jobSpec, frame, jobSpec.oc, jobSpec.audio_st);

		if (frame.ForceAudioConsumption && frame.VideoSize == 0) {
			jobSpec.FrameCount++; // if we're doing audio-only, push frames here.
		}
	}
}
Code example #4
File: writeffmpeg.c  Project: RiazAhamed/NewBlender
static void write_audio_frames(FFMpegContext *context, double to_pts)
{
	int finished = 0;

	while (context->audio_stream && !finished) {
		if ((context->audio_time >= to_pts) ||
		    (write_audio_frame(context)))
		{
			finished = 1;
		}
	}
}
Code example #5
File: VideoLivRecord.cpp  Project: u-stone/CodeBase
void CVideoLivRecord::write_arbitrary_av()
{
	av_dump_format(m_pAVFormatContext, 0, "E:\\test.mp4", 1);

	while (m_bEncodeAudio || m_bEncodeVideo){
		if (m_bEncodeVideo && (!m_bEncodeAudio || av_compare_ts(m_videoNextpts, m_pVideoStream->codec->time_base, 
			m_audioNextpts, m_pAudioStream->codec->time_base) <= 0)){
			m_bEncodeVideo = !write_video_frame(m_pVideoStream, 0, 0);
		} else {
			m_bEncodeAudio = !write_audio_frame(m_pAudioStream, 0, 0);
		}
	}
}
Code example #6
int write_audio_frames()
{
	int ret = 0;
	
	n_audio_source_sample = n_audio_source_sample_size;
	ret = get_audio_sample(p_audio_source,p_audio_source_sample,&n_audio_source_sample);
	if(ret < 0)
	{
		fprintf(stderr, "avencoder: get_audio_sample fail..\n");
		return -1;
	}
	else if(ret == 0)
	{
		//fprintf(stderr, "avencoder: get_audio_sample no audio sample..\n");
		/* no captured sample available: substitute one buffer of silence */
		n_audio_source_sample = 4608;
		memset(p_audio_source_sample,0,n_audio_source_sample);
	}
	//else if(ret > 0)
	//	fprintf(stderr ,"avencoder: get_audio_sample yes audio sample..\n");

	//printf("n_audio_source_sample = %d\n",n_audio_source_sample);

	n_audio_encode_sample = n_audio_encode_sample_size;
	ret = encode_audio_sample(p_audio_encoder,p_audio_source_sample,n_audio_source_sample,p_audio_encode_sample,&n_audio_encode_sample);
	if(ret < 0)
	{
		fprintf(stderr, "avencoder: encode_audio_sample fail..\n");
		return -2;
	}

	//printf("n_audio_encode_sample = %d\n",n_audio_encode_sample);


	ret = write_audio_frame(p_avmuxer,p_audio_encode_sample,n_audio_encode_sample);
	if(ret < 0)
	{
		fprintf(stderr, "avencoder: write_audio_frame fail..\n");
		return -3;
	}
	return 0;
}
Code example #7
File: mux.c  Project: lorddoskias/omxtranscode
void
*writer_thread(void *thread_ctx) {

    struct transcoder_ctx_t *ctx = (struct transcoder_ctx_t *) thread_ctx;
    AVStream *video_stream = NULL, *audio_stream = NULL;
    AVFormatContext *output_context = init_output_context(ctx, &video_stream, &audio_stream);
    struct mux_state_t mux_state = {0};

    //from omxtx
    mux_state.pts_offset = av_rescale_q(ctx->input_context->start_time, AV_TIME_BASE_Q, output_context->streams[ctx->video_stream_index]->time_base);

#if 0
    FILE *out_file;

    out_file = fopen(ctx->output_filename, "wb");
    if (out_file == NULL) {
        printf("error creating output file. DYING \n");
        exit(1);
    }
#endif

    //write stream header if any
    avformat_write_header(output_context, NULL);

    //do not start doing anything until we get an encoded packet
    pthread_mutex_lock(&ctx->pipeline.video_encode.is_running_mutex);
    while (!ctx->pipeline.video_encode.is_running) {
        pthread_cond_wait(&ctx->pipeline.video_encode.is_running_cv, &ctx->pipeline.video_encode.is_running_mutex);
    }

    while (!ctx->pipeline.video_encode.eos || !ctx->processed_audio_queue->queue_finished) {
        //FIXME a memory barrier is required here so that we don't race 
        //on above variables 

        //fill a buffer with video data 
        OERR(OMX_FillThisBuffer(ctx->pipeline.video_encode.h, omx_get_next_output_buffer(&ctx->pipeline.video_encode)));

        write_audio_frame(output_context, audio_stream, ctx); //write full audio frame 
        //FIXME no guarantee that we have a full frame per packet?
        write_video_frame(output_context, video_stream, ctx, &mux_state); //write full video frame
        //encoded_video_queue is being filled by the previous command

#if 0
        struct packet_t *encoded_packet = packet_queue_get_next_item(&ctx->pipeline.encoded_video_queue);
        fwrite(encoded_packet->data, 1, encoded_packet->data_length, out_file);
        packet_queue_free_packet(encoded_packet, 1);
#endif

    }

    av_write_trailer(output_context);

    //free all the resources
    avcodec_close(video_stream->codec);
    avcodec_close(audio_stream->codec);
    /* Free the streams. */
    for (int i = 0; i < output_context->nb_streams; i++) {
        av_freep(&output_context->streams[i]->codec);
        av_freep(&output_context->streams[i]);
    }

    if (!(output_context->oformat->flags & AVFMT_NOFILE)) {
        /* Close the output file. */
        avio_close(output_context->pb);
    }
       

    /* free the stream */
    av_free(output_context);
    free(mux_state.pps);
    free(mux_state.sps);
#if 0
    fclose(out_file);
#endif
    return NULL;
}
Code example #8
File: output-example.c  Project: JSinglan/libav
int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    double audio_pts, video_pts;
    int i;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* Autodetect the output format from the name. default is MPEG. */
    fmt = av_guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        fmt = av_guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        return 1;
    }

    /* Allocate the output media context. */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        return 1;
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt->video_codec);
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, fmt->audio_codec);
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_st);
    if (audio_st)
        open_audio(oc, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

    /* Write the stream header, if any. */
    avformat_write_header(oc, NULL);

    for (;;) {
        /* Compute current audio and video time. */
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = (double)video_st->pts.val * video_st->time_base.num /
                        video_st->time_base.den;
        else
            video_pts = 0.0;

        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames */
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    /* Free the streams. */
    for (i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    av_free(oc);

    return 0;
}
Code example #9
File: __main.cpp  Project: f-v-m/ffmpegForUnity
int main(int argc, char **argv)
{
    OutputStream audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec;
    int ret;
    int have_audio = 0;
    int encode_audio = 0;
    AVDictionary *opt = NULL;
    
    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();
    avformat_network_init();
    
    if (argc < 2) {
        printf("usage: %s output_file\n", argv[0]);
        return 1;
    }
    av_dict_set(&opt, "strict", "experimental", 0);
    
    filename = argv[1];
    if (argc > 3 && !strcmp(argv[2], "-flags")) {
        av_dict_set(&opt, argv[2]+1, argv[3], 0);
    }
    
    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, "sdp", filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;
    
    fmt = oc->oformat;
    
    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }
    
    /* Now that all the parameters are set, we can open the audio
     * codec and allocate the necessary encode buffers. */
    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);
    
    av_dump_format(oc, 0, filename, 1);
    
    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }
    
    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }
    
    while (encode_audio) {
        /* encode audio frames until write_audio_frame() reports completion */
        encode_audio = !write_audio_frame(oc, &audio_st);
    }
    
    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);
    
    /* Close each codec. */
    close_stream(oc, &audio_st);
    
    if (!(fmt->flags & AVFMT_NOFILE))
    /* Close the output file. */
        avio_close(oc->pb);
    
    /* free the stream */
    avformat_free_context(oc);
    
    return 0;
}
Code example #10
File: muxing.c  Project: Gnate/FFmpeg-Android
int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    AVCodec *audio_codec, *video_codec;
    double audio_time, video_time;
    int flush, ret;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;

    if (fmt->video_codec != AV_CODEC_ID_NONE)
        video_st = add_stream(oc, &video_codec, fmt->video_codec);
    if (fmt->audio_codec != AV_CODEC_ID_NONE)
        audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_codec, video_st);
    if (audio_st)
        open_audio(oc, audio_codec, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    flush = 0;
    while ((video_st && !video_is_eof) || (audio_st && !audio_is_eof)) {
        /* Compute current audio and video time. */
        audio_time = (audio_st && !audio_is_eof) ? audio_st->pts.val * av_q2d(audio_st->time_base) : INFINITY;
        video_time = (video_st && !video_is_eof) ? video_st->pts.val * av_q2d(video_st->time_base) : INFINITY;

        if (!flush &&
            (!audio_st || audio_time >= STREAM_DURATION) &&
            (!video_st || video_time >= STREAM_DURATION)) {
            flush = 1;
        }

        /* write interleaved audio and video frames */
        if (audio_st && !audio_is_eof && audio_time <= video_time) {
            write_audio_frame(oc, audio_st, flush);
        } else if (video_st && !video_is_eof && video_time < audio_time) {
            write_video_frame(oc, video_st, flush);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
Code example #11
int32_t CEncoder::enc_loop(void)
{
	int video_frames = 0;

	int video_index = 0;
	int video_queue_index = 0;
	int last_video_index = -1;
	uint64_t audio_total_write = 0;
	
	uint64_t allow_audio_samples = g_enc_opt.m_VideoDisable ? INT64_MAX : 0;
	int audio_buffer_point = 0;
	int audio_buffer_channels = g_enc_opt.m_EncAudioIsPlanar ? g_enc_opt.m_EncAudioChannels : 1;
	int audio_buffer_max_bytes = AUDIO_BUFFER_SEC * g_enc_opt.m_EncAudioSampleRate * g_enc_opt.m_EncAudioPacketBytes;
	int audio_buffer_frame_bytes = audio_frame_size * g_enc_opt.m_EncAudioPacketBytes;
	uint64_t audio_samples_per_sec = audio_sample_rate;

	int32_t video_eof = g_enc_opt.m_VideoDisable;
	int32_t audio_eof = g_enc_opt.m_AudioDisable;

	while ((video_eof == 0) || (audio_eof == 0))
	{
		if (*m_pAbort) return ABORT_FLAG;
		if (video_eof == 0)
		{
			while ((m_VStream->m_Eof == 0) && (m_VStream->m_Queued == 0))
			{
				if (*m_pAbort) return ABORT_FLAG;
				Sleep(1);
			}

			if (m_VStream->m_Queued == 0)
			{
				assert(m_VStream->m_Eof);
				video_eof = 1;
				ending_video_codec();
				if (m_AStream)
				{
					allow_audio_samples = INT64_MAX;
				}
				else if (audio_eof == 0)
				{
					// end the silent audio track
					audio_eof = 1;
					ending_audio_codec();
				}
				continue;
			}

			video_index = m_VStream->m_Queue[video_queue_index];
			video_queue_index = (video_queue_index + 1) & (MAX_VIDEO_QUEUE - 1);
			if (write_video_frame(last_video_index == video_index, video_index) < 0)
			{
				return -1;
			}
			if (last_video_index != video_index)
			{
				if (last_video_index != -1)
					InterlockedDecrement(&m_VStream->m_Buffered);
				last_video_index = video_index;
			}
			InterlockedDecrement(&m_VStream->m_Queued);
			video_frames++;
			allow_audio_samples = (uint64_t)video_frames * audio_samples_per_sec * (uint64_t)g_enc_opt.m_FrameNum / (uint64_t)g_enc_opt.m_FrameDen;
		}

		if (*m_pAbort) return ABORT_FLAG;
		if ((audio_eof == 0) && (m_AStream))
		{
			while (audio_total_write < allow_audio_samples)
			{

				while ((m_AStream->m_Eof == 0) && (m_AStream->m_Buffered < audio_frame_size))
				{
					if (*m_pAbort) return ABORT_FLAG;
					Sleep(1);
				}
				
				if (m_AStream->m_Buffered < audio_frame_size)
				{
					assert(m_AStream->m_Eof);
					m_AStream->m_Buffered = 0;
					audio_eof = 1;
					ending_audio_codec();
					break;
				}

				for(int i = 0; i < audio_buffer_channels; i++)
				{
					audio_frame.data[i] = &m_AStream->m_Buffers[i][audio_buffer_point];
					audio_frame.linesize[i] = audio_buffer_frame_bytes;
				}
				
				if ((audio_buffer_point + audio_buffer_frame_bytes) < audio_buffer_max_bytes)
				{
					audio_buffer_point += audio_buffer_frame_bytes;
				}
				else
				{
					int32_t l = audio_buffer_point + audio_buffer_frame_bytes - audio_buffer_max_bytes;
					for(int i = 0; i < audio_buffer_channels; i++)
					{
						memcpy(&m_AStream->m_Buffers[i][audio_buffer_max_bytes], m_AStream->m_Buffers[i], l);
					}
					audio_buffer_point = l;
				}
				
				if (write_audio_frame() < 0)
				{
					return -1;
				}

				InterlockedExchangeAdd((volatile long *)&m_AStream->m_Buffered, -audio_frame_size);
				audio_total_write += audio_frame_size;
			}
		}
		else if (audio_eof == 0)
		{
			// write silence to keep pace with the video
			while (audio_total_write < allow_audio_samples)
			{
				for(int i = 0; i < audio_buffer_channels; i++)
				{
					audio_frame.data[i] = m_AudioBuffer;
					audio_frame.linesize[i] = audio_buffer_frame_bytes;
				}
				if (write_audio_frame() < 0)
				{
					return -1;
				}
				audio_total_write += audio_frame_size;
			}
		}
				
		if (video_eof == 0)
		{
			uint64_t t = (uint64_t)video_frames * AV_TIME_BASE_LL * (uint64_t)g_enc_opt.m_FrameNum / (uint64_t)g_enc_opt.m_FrameDen;
			InterlockedExchange64(m_Time, t);
		}
		else if (audio_eof == 0)
		{
			int64_t t = audio_total_write * AV_TIME_BASE_LL / audio_samples_per_sec;
			InterlockedExchange64(m_Time, t);
		}
	}

	return 0;
}
Code example #12
File: muxing.c  Project: KuMiMusic/FFmpeg
int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc < 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];
    if (argc > 3 && !strcmp(argv[2], "-flags")) {
        av_dict_set(&opt, argv[2]+1, argv[3], 0);
    }

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);

    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
                (!encode_audio || av_compare_ts(video_st.next_pts, video_st.st->codec->time_base,
                                                audio_st.next_pts, audio_st.st->codec->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !write_audio_frame(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
Code example #13
File: VideoLivRecord.cpp  Project: u-stone/CodeBase
void CVideoLivRecord::write_audio_data(char* pBuffer, LONG len)
{
	write_audio_frame(m_pAudioStream, pBuffer, len);
}
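
Note: every snippet above delegates to a project-specific write_audio_frame() whose body is not shown. For reference, here is a minimal sketch of what such a helper typically looks like against the legacy FFmpeg API (AVStream::codec, avcodec_encode_audio2()) that these examples target. OutputStream and get_audio_frame() are assumptions borrowed from code example #12 (muxing.c); this is an illustrative sketch, not the exact helper used by any of the projects above.

/* Hypothetical sketch: encode one audio frame and interleave the packet.
 * OutputStream and get_audio_frame() are assumed to match example #12.
 * Returns 0 while frames remain, 1 once the encoder is fully flushed. */
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c = ost->st->codec;
    AVPacket pkt = { 0 };                   /* data and size must be 0 */
    AVFrame *frame = get_audio_frame(ost);  /* NULL once input is exhausted */
    int got_packet = 0;

    av_init_packet(&pkt);
    if (avcodec_encode_audio2(c, &pkt, frame, &got_packet) < 0) {
        fprintf(stderr, "Error encoding audio frame\n");
        exit(1);
    }

    if (got_packet) {
        /* rescale packet timestamps from the codec to the stream time base */
        av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
        pkt.stream_index = ost->st->index;
        if (av_interleaved_write_frame(oc, &pkt) < 0) {
            fprintf(stderr, "Error while writing audio frame\n");
            exit(1);
        }
    }

    return (frame || got_packet) ? 0 : 1;
}

The return convention is what drives the callers: examples #2, #4, #9 and #12 loop on constructs like encode_audio = !write_audio_frame(oc, &audio_st), so the helper must return 0 while it still has frames to emit and nonzero once the encoder has been flushed.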