Example #1
int BKE_ffmpeg_append(void *context_v, RenderData *rd, int start_frame, int frame, int *pixels,
                      int rectx, int recty, const char *suffix, ReportList *reports)
{
	FFMpegContext *context = context_v;
	AVFrame *avframe;
	int success = 1;

	PRINT("Writing frame %i, render width=%d, render height=%d\n", frame, rectx, recty);

/* why is this done before writing the video frame and again at end_ffmpeg? */
//	write_audio_frames(frame / (((double)rd->frs_sec) / rd->frs_sec_base));

	if (context->video_stream) {
		avframe = generate_video_frame(context, (unsigned char *) pixels, reports);
		success = (avframe && write_video_frame(context, rd, frame - start_frame, avframe, reports));

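		/* Auto-split: once the file grows past FFMPEG_AUTOSPLIT_SIZE, close it and continue in a new file. */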
		if (context->ffmpeg_autosplit) {
			if (avio_tell(context->outfile->pb) > FFMPEG_AUTOSPLIT_SIZE) {
				end_ffmpeg_impl(context, true);
				context->ffmpeg_autosplit_count++;
				success &= start_ffmpeg_impl(context, rd, rectx, recty, suffix, reports);
			}
		}
	}

#ifdef WITH_AUDASPACE
	write_audio_frames(context, (frame - start_frame) / (((double)rd->frs_sec) / (double)rd->frs_sec_base));
#endif
	return success;
}
Example #2
static gboolean
process (GeglOperation       *operation,
         GeglBuffer          *input,
         const GeglRectangle *result,
         gint                 level)
{
  GeglProperties *o = GEGL_PROPERTIES (operation);
  Priv           *p = (Priv*)o->user_data;
  static gint     inited = 0;

  g_assert (input);

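  /* o->user_data holds the private encoder state; create it lazily on first use. */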
  if (p == NULL)
    init (o);
  p = (Priv*)o->user_data;

  p->width = result->width;
  p->height = result->height;
  p->input = input;

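  /* tfile() appears to open the output target; guard so it runs only once. */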
  if (!inited)
    {
      tfile (o);
      inited = 1;
    }

  write_video_frame (o, p->oc, p->video_st);
  if (p->audio_st)
    write_audio_frame (o, p->oc, p->audio_st);

  return  TRUE;
}
Example #3
void reply_avframe(AVPacket *pkt, AVCodec *codec) {
  ei_x_buff x;
  ei_x_new_with_version(&x);
  struct video_frame r;

  r.content = codec->type == AVMEDIA_TYPE_VIDEO ? frame_content_video :
    codec->type == AVMEDIA_TYPE_AUDIO ? frame_content_audio : 0;

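  /* MPEG-TS timestamps tick at 90 kHz, so dividing by 90.0 yields milliseconds. */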
  r.dts = pkt->dts / 90.0;
  r.pts = pkt->pts / 90.0;
  r.stream_id = 0;
  r.codec = codec->id == AV_CODEC_ID_H264 ? frame_codec_h264 :
    codec->id == AV_CODEC_ID_AAC ? frame_codec_aac : 0;

  r.flavor = pkt->flags & CODEC_FLAG_GLOBAL_HEADER ? frame_flavor_config :
    pkt->flags & AV_PKT_FLAG_KEY ? frame_flavor_keyframe : 
    frame_flavor_frame;

  r.track_id = codec->type == AVMEDIA_TYPE_VIDEO ? 1 : 2;
  r.body.data = pkt->data;
  r.body.size = pkt->size;
  write_video_frame(&x, r);
  write_x(&x);
  ei_x_free(&x);
}
Example #4
int append_ffmpeg(RenderData *rd, int frame, int *pixels, int rectx, int recty, ReportList *reports) 
{
	AVFrame* avframe;
	int success = 1;

	fprintf(stderr, "Writing frame %i, "
		"render width=%d, render height=%d\n", frame,
		rectx, recty);

// why is this done before writing the video frame and again at end_ffmpeg?
//	write_audio_frames(frame / (((double)rd->frs_sec) / rd->frs_sec_base));

	if(video_stream)
	{
		avframe= generate_video_frame((unsigned char*) pixels, reports);
		success= (avframe && write_video_frame(rd, avframe, reports));

		if (ffmpeg_autosplit) {
			if (avio_tell(outfile->pb) > FFMPEG_AUTOSPLIT_SIZE) {
				end_ffmpeg();
				ffmpeg_autosplit_count++;
				success &= start_ffmpeg_impl(rd, rectx, recty, reports);
			}
		}
	}

#ifdef WITH_AUDASPACE
	write_audio_frames((frame - rd->sfra) / (((double)rd->frs_sec) / rd->frs_sec_base));
#endif
	return success;
}
Example #5
void Java_com_richitec_imeeting_video_ECVideoEncoder_processRawFrame(
		JNIEnv* env, jobject thiz, jbyteArray buffer, jint width, jint height,
		jint rotateDegree) {
	if (!qvo || !is_video_encode_ready) {
		return;
	}
//	D("process raw frame - width: %d height: %d", width, height);

	jint rotateWidth, rotateHeight;

	AVCodecContext *c = qvo->video_stream->codec;

	jbyte *p_buffer_array = (*env)->GetByteArrayElements(env, buffer, 0);

//	D("process raw frame - rotate degree: %d", rotateDegree);

	unsigned char * p_rotated_buffer = rotateYUV420SP(p_buffer_array, width,
			height, rotateDegree, &rotateWidth, &rotateHeight);
	if (!p_rotated_buffer) {
		(*env)->ReleaseByteArrayElements(env, buffer, p_buffer_array,
				JNI_ABORT);
		return;
	}

	avpicture_fill((AVPicture *) tmp_picture, p_rotated_buffer, src_pix_fmt,
			rotateWidth, rotateHeight);
//	D("avpicture fill ok");
	(*env)->ReleaseByteArrayElements(env, buffer, p_buffer_array, JNI_ABORT);

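	/* Rescale the rotated frame to the encoder's size and pixel format. */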
	img_convert_ctx = sws_getCachedContext(img_convert_ctx, rotateWidth,
			rotateHeight, src_pix_fmt, qvo->width, qvo->height, c->pix_fmt,
			SWS_BILINEAR, NULL, NULL, NULL);
	sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize, 0,
			rotateHeight, raw_picture->data, raw_picture->linesize);

	int out_size = write_video_frame(qvo, raw_picture);

//	D(
//			"stream pts val: %lld time base: %d / %d", qvo->video_stream->pts.val, qvo->video_stream->time_base.num, qvo->video_stream->time_base.den);
//	double video_pts = (double) qvo->video_stream->pts.val
//			* qvo->video_stream->time_base.num
//			/ qvo->video_stream->time_base.den;
//	D("write video frame - size: %d video pts: %f", out_size, video_pts);

	raw_picture->pts++;

	free(p_rotated_buffer);

	if (out_size == -2) {
		// network interrupted
		call_void_method(env, thiz, "onVideoLiveDisconnected");
	}
}
Example #6
void CVideoLivRecord::write_arbitrary_av()
{
	av_dump_format(m_pAVFormatContext, 0, "E:\\test.mp4", 1);

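	// Interleave by timestamp: encode whichever stream is currently behind.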
	while (m_bEncodeAudio || m_bEncodeVideo){
		if (m_bEncodeVideo && (!m_bEncodeAudio || av_compare_ts(m_videoNextpts, m_pVideoStream->codec->time_base, 
			m_audioNextpts, m_pAudioStream->codec->time_base) <= 0)){
			m_bEncodeVideo = !write_video_frame(m_pVideoStream, 0, 0);
		} else {
			m_bEncodeAudio = !write_audio_frame(m_pAudioStream, 0, 0);
		}
	}
}
Example #7
int reap_filter(struct liveStream *ctx)
{
	int ret = 0;
	/* pull filtered frames from the filtergraph */
	while (1)
	{
		int i = 0;
		int nb_frames = 1;
		take_filter_lock(&ctx->filter_lock);
		ret = av_buffersink_get_frame_flags(ctx->out_filter, ctx->OutFrame,AV_BUFFERSINK_FLAG_NO_REQUEST);
		give_filter_lock(&ctx->filter_lock);
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
		{
			ret = 0;
			break;
		}
		if (ret < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "nothing in buffer sink\n");
			ret = -1;
			break;
		}
		if (ctx->OutFrame->pts != AV_NOPTS_VALUE)
		{
			ctx->OutFrame->pts = av_rescale_q(ctx->OutFrame->pts, ctx->out_filter->inputs[0]->time_base, ctx->oc->streams[0]->codec->time_base);
		}
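		/** Fill pts gaps by duplicating the frame so output advances one frame per tick */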
		nb_frames += ctx->OutFrame->pts - ctx->sync_out_pts;
		/** If this frame is already behind the output clock, write it just once */
		if(nb_frames < 0)
			nb_frames = 1;
		/** Sometimes an insanely large gap shows up; clamp it here until fixed in ffmpeg itself */
		if(nb_frames > 15)
			nb_frames = 1;

		for( i = 0;i < nb_frames;i++)
		{
			ctx->OutFrame->pts = ctx->sync_out_pts;
			if (ctx->OutFrame->pts != AV_NOPTS_VALUE)
			{
				ctx->OutFrame->pts = av_rescale_q(ctx->OutFrame->pts, ctx->oc->streams[0]->codec->time_base, ctx->oc->streams[0]->time_base);
			}
			write_video_frame(ctx->oc,ctx->oc->streams[0],ctx->OutFrame);
			ctx->sync_out_pts++;
		}
		av_frame_unref(ctx->OutFrame);
	}
	return ret;
}
Example #8
void CVideoLivRecord::write_video_data(void* pBuffer, LONG len)
{
	write_video_frame(m_pVideoStream, pBuffer, len);
}
Example #9
int32_t CEncoder::enc_loop(void)
{
	int video_frames = 0;

	int video_index = 0;
	int video_queue_index = 0;
	int last_video_index = -1;
	uint64_t audio_total_write = 0;
	
	uint64_t allow_audio_samples = g_enc_opt.m_VideoDisable ? INT64_MAX : 0;
	int audio_buffer_point = 0;
	int audio_buffer_channels = g_enc_opt.m_EncAudioIsPlanar ? g_enc_opt.m_EncAudioChannels : 1;
	int audio_buffer_max_bytes = AUDIO_BUFFER_SEC * g_enc_opt.m_EncAudioSampleRate * g_enc_opt.m_EncAudioPacketBytes;
	int audio_buffer_frame_bytes = audio_frame_size * g_enc_opt.m_EncAudioPacketBytes;
	uint64_t audio_samples_per_sec = audio_sample_rate;

	int32_t video_eof = g_enc_opt.m_VideoDisable;
	int32_t audio_eof = g_enc_opt.m_AudioDisable;

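	// Mux loop: write one video frame, then enough audio samples to keep A/V in sync.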
	while ((video_eof == 0) || (audio_eof == 0))
	{
		if (*m_pAbort) return ABORT_FLAG;
		if (video_eof == 0)
		{
			while ((m_VStream->m_Eof == 0) && (m_VStream->m_Queued == 0))
			{
				if (*m_pAbort) return ABORT_FLAG;
				Sleep(1);
			}

			if (m_VStream->m_Queued == 0)
			{
				assert(m_VStream->m_Eof);
				video_eof = 1;
				ending_video_codec();
				if (m_AStream)
				{
					allow_audio_samples = INT64_MAX;
				}
				else if (audio_eof == 0)
				{
					// End the silence track
					audio_eof = 1;
					ending_audio_codec();
				}
				continue;
			}

			video_index = m_VStream->m_Queue[video_queue_index];
			video_queue_index = (video_queue_index + 1) & (MAX_VIDEO_QUEUE - 1);
			if (write_video_frame(last_video_index == video_index, video_index) < 0)
			{
				return -1;
			}
			if (last_video_index != video_index)
			{
				if (last_video_index != -1)
					InterlockedDecrement(&m_VStream->m_Buffered);
				last_video_index = video_index;
			}
			InterlockedDecrement(&m_VStream->m_Queued);
			video_frames ++;
			allow_audio_samples = (uint64_t)video_frames * audio_samples_per_sec * (uint64_t)g_enc_opt.m_FrameNum / (uint64_t)g_enc_opt.m_FrameDen;
		}

		if (*m_pAbort) return ABORT_FLAG;
		if ((audio_eof == 0) && (m_AStream))
		{
			while (audio_total_write < allow_audio_samples)
			{

				while ((m_AStream->m_Eof == 0) && (m_AStream->m_Buffered < audio_frame_size))
				{
					if (*m_pAbort) return ABORT_FLAG;
					Sleep(1);
				}
				
				if (m_AStream->m_Buffered < audio_frame_size)
				{
					assert(m_AStream->m_Eof);
					m_AStream->m_Buffered = 0;
					audio_eof = 1;
					ending_audio_codec();
					break;
				}

				for(int i = 0; i < audio_buffer_channels; i++)
				{
					audio_frame.data[i] = &m_AStream->m_Buffers[i][audio_buffer_point];
					audio_frame.linesize[i] = audio_buffer_frame_bytes;
				}
				
				if ((audio_buffer_point + audio_buffer_frame_bytes) < audio_buffer_max_bytes)
				{
					audio_buffer_point += audio_buffer_frame_bytes;
				}
				else
				{
					int32_t l = audio_buffer_point + audio_buffer_frame_bytes - audio_buffer_max_bytes;
					for(int i = 0; i < audio_buffer_channels; i++)
					{
						memcpy(&m_AStream->m_Buffers[i][audio_buffer_max_bytes], m_AStream->m_Buffers[i], l);
					}
					audio_buffer_point = l;
				}
				
				if (write_audio_frame() < 0)
				{
					return -1;
				}

				InterlockedExchangeAdd((volatile long *)&m_AStream->m_Buffered, -audio_frame_size);
				audio_total_write += audio_frame_size;
			}
		}
		else if (audio_eof == 0)
		{
			// Write silence
			while (audio_total_write < allow_audio_samples)
			{
				for(int i = 0; i < audio_buffer_channels; i++)
				{
					audio_frame.data[i] = m_AudioBuffer;
					audio_frame.linesize[i] = audio_buffer_frame_bytes;
				}
				if (write_audio_frame() < 0)
				{
					return -1;
				}
				audio_total_write += audio_frame_size;
			}
		}
				
		if (video_eof == 0)
		{
			uint64_t t = (uint64_t)video_frames * AV_TIME_BASE_LL * (uint64_t)g_enc_opt.m_FrameNum / (uint64_t)g_enc_opt.m_FrameDen;
			InterlockedExchange64(m_Time, t);
		}
		else if (audio_eof == 0)
		{
			int64_t t = audio_total_write * AV_TIME_BASE_LL / audio_samples_per_sec;
			InterlockedExchange64(m_Time,  t);
		}
	}

	return 0;
}
Example #10
int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    double audio_pts, video_pts;
    int i;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* Autodetect the output format from the name. default is MPEG. */
    fmt = av_guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        fmt = av_guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        return 1;
    }

    /* Allocate the output media context. */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        return 1;
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt->video_codec);
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, fmt->audio_codec);
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_st);
    if (audio_st)
        open_audio(oc, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

    /* Write the stream header, if any. */
    avformat_write_header(oc, NULL);

    for (;;) {
        /* Compute current audio and video time. */
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = (double)video_st->pts.val * video_st->time_base.num /
                        video_st->time_base.den;
        else
            video_pts = 0.0;

        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames */
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    /* Free the streams. */
    for (i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    av_free(oc);

    return 0;
}
Example #11
void
*writer_thread(void *thread_ctx) {

    struct transcoder_ctx_t *ctx = (struct transcoder_ctx_t *) thread_ctx;
    AVStream *video_stream = NULL, *audio_stream = NULL;
    AVFormatContext *output_context = init_output_context(ctx, &video_stream, &audio_stream);
    struct mux_state_t mux_state = {0};

    //from omxtx
    mux_state.pts_offset = av_rescale_q(ctx->input_context->start_time, AV_TIME_BASE_Q, output_context->streams[ctx->video_stream_index]->time_base);

#if 0
    FILE *out_file;

    out_file = fopen(ctx->output_filename, "wb");
    if (out_file == NULL) {
        printf("error creating output file. DYING \n");
        exit(1);
    }
#endif

    //write stream header if any
    avformat_write_header(output_context, NULL);

    //do not start doing anything until we get an encoded packet
    pthread_mutex_lock(&ctx->pipeline.video_encode.is_running_mutex);
    while (!ctx->pipeline.video_encode.is_running) {
        pthread_cond_wait(&ctx->pipeline.video_encode.is_running_cv, &ctx->pipeline.video_encode.is_running_mutex);
    }

    while (!ctx->pipeline.video_encode.eos || !ctx->processed_audio_queue->queue_finished) {
        //FIXME a memory barrier is required here so that we don't race 
        //on above variables 

        //fill a buffer with video data 
        OERR(OMX_FillThisBuffer(ctx->pipeline.video_encode.h, omx_get_next_output_buffer(&ctx->pipeline.video_encode)));

        write_audio_frame(output_context, audio_stream, ctx); //write full audio frame 
        //FIXME no guarantee that we have a full frame per packet?
        write_video_frame(output_context, video_stream, ctx, &mux_state); //write full video frame
        //encoded_video_queue is being filled by the previous command

#if 0
        struct packet_t *encoded_packet = packet_queue_get_next_item(&ctx->pipeline.encoded_video_queue);
        fwrite(encoded_packet->data, 1, encoded_packet->data_length, out_file);
        packet_queue_free_packet(encoded_packet, 1);
#endif

    }

    av_write_trailer(output_context);

    //free all the resources
    avcodec_close(video_stream->codec);
    avcodec_close(audio_stream->codec);
    /* Free the streams. */
    for (int i = 0; i < output_context->nb_streams; i++) {
        av_freep(&output_context->streams[i]->codec);
        av_freep(&output_context->streams[i]);
    }

    if (!(output_context->oformat->flags & AVFMT_NOFILE)) {
        /* Close the output file. */
        avio_close(output_context->pb);
    }
       

    /* free the stream */
    av_free(output_context);
    free(mux_state.pps);
    free(mux_state.sps);
#if 0
    fclose(out_file);
#endif
}
Example #12
static void split_video(const char *infilename,
                        const char *outfmt,
                        int gop_size,
                        int chunk_size,
                        int skip,
                        long long length,
                        AVDictionary *_opt)
{
    DecoderContext *dc;
    EncoderContext *ec;

    AVFrame *frame;
    int width, height;
    long long frame_count = 0, out_frame_num = 0;
    int chunk_count = 0;
    char outfilename[MAX_FILENAME_LEN];
    AVDictionary *opt = NULL;
    AVRational framerate;
    enum AVPixelFormat pix_fmt;

    av_dict_copy(&opt, _opt, 0);

    // Initialize the decoder
    dc = init_decoder(infilename);

    // Extract parms needed by encoder
    width = dc->codecCtx->width;
    height = dc->codecCtx->height;
    framerate = dc->codecCtx->framerate;
    pix_fmt = dc->codecCtx->pix_fmt;

    // Skip input frames

    if (skip > 0)
        fprintf(stderr, "Skipping %d frames\n", skip);

    while (skip > 0) {
        // TODO: I'd rather not decode the frames, but this will take some work to
        //       refactor
        if (!read_frame(dc)) {
            fprintf(stderr, "No more frames available, skip = %d\n", skip);
            exit(0);
        }
        --skip;
    }

    // Initialize output
    fprintf(stderr, "\rWriting chunk %05d", chunk_count);
    fflush(stderr);

    snprintf(outfilename, MAX_FILENAME_LEN, outfmt, chunk_count++);
    ec = init_encoder(outfilename, gop_size, width, height, framerate, pix_fmt, opt);

    while (length <= 0 || frame_count < length) {
        frame = read_frame(dc);
        if (!frame)
            break;

        if (out_frame_num == chunk_size) {
            close_encoder(ec);

            fprintf(stderr, "\rWriting chunk %05d", chunk_count);
            fflush(stderr);

            snprintf(outfilename, MAX_FILENAME_LEN, outfmt, chunk_count++);
            ec = init_encoder(outfilename, gop_size, width, height, framerate, pix_fmt, opt);
            out_frame_num = 0;
        }

        set_pict_type(frame, gop_size, out_frame_num);
        frame->pts = out_frame_num++;
        frame_count++;

        write_video_frame(ec, frame);
    }

    close_encoder(ec);
    close_decoder(dc);

    fprintf(stderr, "\nRead %lld frames\n", frame_count);
    fprintf(stderr, "Wrote %d chunks of %d frames each (last chunk: %lld frames)\n", chunk_count, chunk_size, out_frame_num);
    fprintf(stderr, "  for a total of %lld frames\n", (chunk_count-1) * chunk_size + out_frame_num);
}
Example #13
C_RESULT video_stage_ffmpeg_recorder_transform(video_stage_ffmpeg_recorder_config_t *cfg, vp_api_io_data_t *in, vp_api_io_data_t *out)
{
	 time_t temptime;
	 struct timeval tv;
	 struct tm *atm;
	 long long int current_timestamp_us;
	 static long long int first_frame_timestamp_us=0;
	 static int frame_counter=0;
	 int i;
	 int frame_size;
	 static int flag_video_file_open=0;

	 vp_os_mutex_lock( &out->lock );
	 vp_api_picture_t* picture = (vp_api_picture_t *) in->buffers;

	gettimeofday(&tv,NULL);

	 temptime = (time_t)tv.tv_sec;
	 atm = localtime(&temptime);  //atm = localtime(&tv.tv_sec);

	 current_timestamp_us = tv.tv_sec *1000000 + tv.tv_usec;


  if( out->status == VP_API_STATUS_INIT )
  {
    out->numBuffers   = 1;
    out->indexBuffer  = 0;
    out->lineSize     = NULL;
    //out->buffers      = (int8_t **) vp_os_malloc( sizeof(int8_t *) );
  }

  out->size     = in->size;
  out->status   = in->status;
  out->buffers  = in->buffers;

  if( in->status == VP_API_STATUS_ENDED ) {
    out->status = in->status;
  }
  else if(in->status == VP_API_STATUS_STILL_RUNNING) {
    out->status = VP_API_STATUS_PROCESSING;
  }
  else {
    out->status = in->status;
  }



	if(cfg->startRec==VIDEO_RECORD_HOLD)
	{
		/* Create a new video file */

		sprintf(video_filename_ffmpeg, "%s/video_%04d%02d%02d_%02d%02d%02d_w%i_h%i.mp4",
				VIDEO_FILE_DEFAULT_PATH,
				atm->tm_year+1900, atm->tm_mon+1, atm->tm_mday,
				atm->tm_hour, atm->tm_min, atm->tm_sec,
				picture->width,
				picture->height);

		create_video_file(video_filename_ffmpeg, picture->width,picture->height);
		flag_video_file_open=1;

		cfg->startRec=VIDEO_RECORD_START;

		first_frame_timestamp_us = current_timestamp_us;
		frame_counter=1;
	}

  if( out->size > 0 && out->status == VP_API_STATUS_PROCESSING && cfg->startRec==VIDEO_RECORD_START)
  {
	  frame_size = ( previous_frame.width * previous_frame.height )*3/2;

	  /* Send the previous frame to FFMPEG */
	  if (previous_frame.buffer!=NULL)
		{
		  /* Compute the number of frames to store to achieve 60 FPS
		   * This should be computed using the timestamp of the first frame
		   * to avoid error accumulation.
		   */
			int current_frame_number = (current_timestamp_us - first_frame_timestamp_us) / 16666;
			int nb_frames_to_write = current_frame_number - previous_frame.frame_number;

			if (picture_to_encode!=NULL){
				picture_to_encode->data[0] = picture_to_encode->base[0] = picture->y_buf;
				picture_to_encode->data[1] = picture_to_encode->base[1] = picture->cb_buf;
				picture_to_encode->data[2] = picture_to_encode->base[2] = picture->cr_buf;

				picture_to_encode->linesize[0] = picture->width;
				picture_to_encode->linesize[1] = picture->width/2;
				picture_to_encode->linesize[2] = picture->width/2;
			}

			for (i=0;i<nb_frames_to_write;i++)
			{
				//printf("Storing %i frames\n",nb_frames_to_write);
				write_video_frame(oc, video_st);
			}

			/* Pass infos to next iteration */
			previous_frame.frame_number = current_frame_number;
		}

	  /* Create a buffer to hold the current frame */
		//if (0)
		{
	  if (previous_frame.buffer!=NULL && (previous_frame.width!=picture->width || previous_frame.height!=picture->height))
		{
			vp_os_free(previous_frame.buffer);
			previous_frame.buffer=NULL;
		}
		if (previous_frame.buffer==NULL)
		{
			previous_frame.width = picture->width;
			previous_frame.height = picture->height;
			frame_size = ( previous_frame.width * previous_frame.height )*3/2;
			printf("Allocating previous frame.\n");
			previous_frame.buffer=vp_os_malloc( frame_size );
		}

	/* Copy the current frame in a buffer so it can be encoded at next stage call */
		if (previous_frame.buffer!=NULL)
		{
			char * dest = previous_frame.buffer;
			int size = picture->width*picture->height;
			vp_os_memcpy(dest,picture->y_buf,size);

			dest+=size;
			size /= 4;
			vp_os_memcpy(dest,picture->cb_buf,size);

			dest+=size;
			vp_os_memcpy(dest,picture->cr_buf,size);
		}
		}
  }


  else
	{
		if(cfg->startRec==VIDEO_RECORD_STOP && flag_video_file_open)
		{
			close_video_file();
			flag_video_file_open=0;
		}
	}

  vp_os_mutex_unlock( &out->lock );

  return C_OK;
}
Example #14
int ff_example(const char *filename, const char *format)
{
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *video_st;
    double video_pts;
    int i;

    fmt = av_guess_format(format, NULL, NULL);
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        exit(1);
    }

    fmt->video_codec = CODEC_ID_MJPEG;

    /* allocate the output media context */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        exit(1);
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    video_st = NULL;
    if (fmt->video_codec != CODEC_ID_NONE)
        video_st = add_video_stream(oc, fmt->video_codec);

    av_dump_format(oc, 0, filename, 1);

    /* now that all the parameters are set, we can open the audio and
       video codecs and allocate the necessary encode buffers */
    if (video_st)
        open_video(oc, video_st);

    if (avio_open(&oc->pb, filename, URL_WRONLY) < 0) {
        fprintf(stderr, "Could not open '%s'\n", filename);
        exit(1);
    }


    /* write the stream header, if any */
    avformat_write_header(oc, NULL);

    for(;;) {

        video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
        printf("pts: %f\n", video_pts);

        if (frame_count > STREAM_NB_FRAMES)
            break;

        /* write interleaved audio and video frames */
        if (write_video_frame(oc, video_st) < 0)
            break;
    }

    printf("%d frames written\n", frame_count);

    av_write_trailer(oc);

    /* close each codec */
    if (video_st)
        close_video(oc, video_st);

    /* free the streams */
    for(i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    avio_close(oc->pb);

    /* free the stream */
    av_free(oc);

    return 0;
}
Example #15
/* media file output */
int main(int argc, char **argv)
{
	const char *filename;
	AVOutputFormat *fmt;
	AVFormatContext *oc;
	AVStream *video_st = NULL;
	AVCodec *video_codec = NULL;
	double video_time;
	int flush, ret;

	/* Initialize libavcodec, and register all codecs and formats. */
	av_register_all();

	filename = "E:\\muxing.mp4";
	/* allocate the output media context */
	avformat_alloc_output_context2(&oc, NULL, NULL, filename);

	if (!oc) {
		printf("Could not deduce output format from file extension: using MPEG.\n");
		avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
	}
	if (!oc)
		return 1;


	fmt = oc->oformat; // set the output format used for muxing

	/* Add the audio and video streams using the default format codecs
	* and initialize the codecs. */

	//fmt->video_codec = AV_CODEC_ID_H264;
	fmt->video_codec = AV_CODEC_ID_MPEG4;

	if (fmt->video_codec != AV_CODEC_ID_NONE)
		video_st = add_stream(oc, &video_codec, fmt->video_codec); // add_stream(AVFormatContext *oc, AVCodec **codec,enum AVCodecID codec_id)
	// helper that sets the codec parameters

	/* Now that all the parameters are set, we can open the audio and
	* video codecs and allocate the necessary encode buffers. */
	if (video_st)
		open_video(oc, video_codec, video_st); // (AVFormatContext *oc, AVCodec *codec, AVStream *st)
	// open the codec and set up the frame

	av_dump_format(oc, 0, filename, 1); // debugging helper that prints the format info


	/* open the output file, if needed */
	if (!(fmt->flags & AVFMT_NOFILE)) {

		ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);

		if (ret < 0) {
			char buf[256];
			av_strerror(ret, buf, sizeof(buf));
			fprintf(stderr, "Could not open '%s': %s\n", filename, buf);
			return 1;
		}
	}

	/* Write the stream header, if any. */
	ret = avformat_write_header(oc, NULL); // Allocate the stream private data and write the stream header to an output media file. 
	// create the header -> write the body -> finish up

	if (ret < 0) {
		char buf[256];
		av_strerror(ret, buf, sizeof(buf));
		fprintf(stderr, "Error occurred when opening output file: %s\n", buf);
		return 1;
	}

	flush = 0;

	while ((video_st && !video_is_eof)) {

		if (!flush && (!video_st)) {
			flush = 1;
		}
		if (video_st && !video_is_eof) {
			write_video_frame(oc, video_st, flush); // write the body (the frames)
		}

		if (frame_count == 10000)
			break;
	}

	/* Write the trailer, if any. The trailer must be written before you
	* close the CodecContexts open when you wrote the header; otherwise
	* av_write_trailer() may try to use memory that was freed on
	* av_codec_close(). */

	av_write_trailer(oc);

	/* Close each codec. */
	if (video_st)
		close_video(oc, video_st);

	if (!(fmt->flags & AVFMT_NOFILE))
		/* Close the output file. */
		avio_close(oc->pb);

	/* free the stream */
	avformat_free_context(oc);

	return 0;
}
Example #16
int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    AVCodec *audio_codec, *video_codec;
    double audio_time, video_time;
    int flush, ret;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;

    if (fmt->video_codec != AV_CODEC_ID_NONE)
        video_st = add_stream(oc, &video_codec, fmt->video_codec);
    if (fmt->audio_codec != AV_CODEC_ID_NONE)
        audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_codec, video_st);
    if (audio_st)
        open_audio(oc, audio_codec, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    flush = 0;
    while ((video_st && !video_is_eof) || (audio_st && !audio_is_eof)) {
        /* Compute current audio and video time. */
        audio_time = (audio_st && !audio_is_eof) ? audio_st->pts.val * av_q2d(audio_st->time_base) : INFINITY;
        video_time = (video_st && !video_is_eof) ? video_st->pts.val * av_q2d(video_st->time_base) : INFINITY;

        if (!flush &&
            (!audio_st || audio_time >= STREAM_DURATION) &&
            (!video_st || video_time >= STREAM_DURATION)) {
            flush = 1;
        }

        /* write interleaved audio and video frames */
        if (audio_st && !audio_is_eof && audio_time <= video_time) {
            write_audio_frame(oc, audio_st, flush);
        } else if (video_st && !video_is_eof && video_time < audio_time) {
            write_video_frame(oc, video_st, flush);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
Example #17
int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* Autodetect the output format from the name. default is MPEG. */
    fmt = av_guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        fmt = av_guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        return 1;
    }

    /* Allocate the output media context. */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        return 1;
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_video_stream(&video_st, oc, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_audio_stream(&audio_st, oc, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, &video_st);
    if (have_audio)
        open_audio(oc, &audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

    /* Write the stream header, if any. */
    avformat_write_header(oc, NULL);

    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
            (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
                                            audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !process_audio_stream(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
Example #18
int write_video_frames()
{
	int ret = -1;
	int x = 0;
	int y = 0;
	int w = 0;
	int h = 0;
	
	// Grab a sample from the video source
	n_video_source_sample = n_video_source_sample_size;
	if(first_frame == 0)
	{
		ret = get_video_sample_inc(p_video_source,p_video_source_sample,&n_video_source_sample,&x,&y,&w,&h);
		if(ret < 0)
		{		 
			fprintf(stderr ,"avencoder: get_video_sample_inc fail..ret=%d\n",ret);		 
			return -1;	  
		}
		
	}
	else //first_frame
	{
		ret = get_video_sample_all(p_video_source,p_video_source_sample,&n_video_source_sample,&x,&y,&w,&h);
		if(ret < 0)
		{		 
			fprintf(stderr ,"avencoder: get_video_sample_all fail..ret=%d\n",ret);		 
			return -1;	  
		}
		first_frame = 0;
	}
	
	/*
	struct timeval tv;
	long time_us;
	gettimeofday(&tv,NULL);
	time_us = tv.tv_sec*1000 + tv.tv_usec/1000;
	printf("time_us=%ld x=%d,y=%d,w=%d,h=%d, n_video_source_sample = %d\n",time_us,x,y,w,h,n_video_source_sample);
	*/

	
	//printf("n_video_source_sample = %d\n",n_video_source_sample);

	// Encode the video sample
	n_video_encode_sample = n_video_encode_sample_size;
	ret = encode_video_sample_inc(p_video_encoder,p_video_source_sample,n_video_source_sample,p_video_encode_sample,&n_video_encode_sample,x,y,w,h);
	if(ret < 0)
	{		 
		fprintf(stderr ,"avencoder: encode_video_sample fail..\n");		 
		return -2;	  
	}
	//printf("n_video_encode_sample = %d\n",n_video_encode_sample);
			
	// Write the encoded sample to the muxer
	ret = write_video_frame(p_avmuxer,p_video_encode_sample,n_video_encode_sample);
	if(ret < 0)
	{		 
		fprintf(stderr ,"avencoder: write_video_frame fail..\n");		 
		return -3;	  
	}
	return 0;
}
Example #19
int main(int argc, char **argv)
{
    if (argc < 2) {
        printf("usage: %s output_file\n"
                       "API example program to output a media file with libavformat.\n"
                       "This program generates a synthetic video stream, encodes and\n"
                       "muxes them into a file named output_file.\n"
                       "The output format is automatically guessed according to the file extension.\n"
                       "Raw images can also be output by using '%%d' in the filename.\n"
                       "\n", argv[0]);
        return 1;
    }

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    std::string filename = argv[1];

    /* allocate the output media context */
    AVFormatContext *oc = NULL;
    avformat_alloc_output_context2(&oc, NULL, "mp4", NULL);
    if (!oc) {
        printf("fail to generate mp4 format context");
        return 1;
    }

    /* Add the video streams using the default format codecs and initialize the codecs. */
    OutputStream video_st = {0};
    AVCodec *video_codec = NULL;
    if (oc->oformat->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, oc->oformat->video_codec);
    }

    /* Now that all the parameters are set, we can open the
     * video codecs and allocate the necessary encode buffers. */
    AVDictionary *opt = NULL;
    if (video_codec != NULL)
        open_video(oc, video_codec, &video_st, opt);

    av_dump_format(oc, 0, filename.c_str(), 1);

    /* open the output file, if needed */
    int ret = 0;
    if (!(oc->oformat->flags & AVFMT_NOFILE)) {
        if ((ret = avio_open(&oc->pb, filename.c_str(), AVIO_FLAG_WRITE)) < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename.c_str(), av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    AVDictionary *movflags = NULL;
    av_dict_set(&movflags, "movflags", "empty_moov+default_base_moof+frag_keyframe", 0);
    if ((ret = avformat_write_header(oc, &movflags)) < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n", av_err2str(ret));
        return 1;
    }
    av_dict_free(&movflags);

    // Generate raw video frame, encode them and mux into container
    bool encode_video = true;
    while (encode_video) {
        encode_video = write_video_frame(oc, &video_st);
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_codec != NULL)
        close_stream(oc, &video_st);

    if (!(oc->oformat->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_closep(&oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
Example #20
int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc < 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];
    if (argc > 3 && !strcmp(argv[2], "-flags")) {
        av_dict_set(&opt, argv[2]+1, argv[3], 0);
    }

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);

    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
                (!encode_audio || av_compare_ts(video_st.next_pts, video_st.st->codec->time_base,
                                                audio_st.next_pts, audio_st.st->codec->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !write_audio_frame(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
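
All of the examples above call a project-specific write_video_frame() whose definition is not shown, and the older snippets (#10, #14, #16) rely on APIs that modern FFmpeg has removed (AVStream.pts.val, AVStream.codec, CODEC_ID_*, URL_WRONLY). For orientation only, here is a minimal sketch of what such a helper typically looks like with the current send/receive encoding API; every name below is illustrative and not taken from any of the projects above.

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Minimal sketch (assumes oc, enc and st were set up elsewhere):
 * encode one frame and mux every packet the encoder hands back. */
static int write_video_frame(AVFormatContext *oc, AVCodecContext *enc,
                             AVStream *st, AVFrame *frame)
{
    /* Passing frame == NULL puts the encoder into flush mode. */
    int ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        return ret;

    AVPacket *pkt = av_packet_alloc();
    if (!pkt)
        return AVERROR(ENOMEM);

    while ((ret = avcodec_receive_packet(enc, pkt)) >= 0) {
        /* Rescale timestamps from the codec time base to the stream time base. */
        av_packet_rescale_ts(pkt, enc->time_base, st->time_base);
        pkt->stream_index = st->index;
        ret = av_interleaved_write_frame(oc, pkt); /* also unrefs pkt */
        if (ret < 0)
            break;
    }
    av_packet_free(&pkt);
    /* AVERROR(EAGAIN)/AVERROR_EOF from receive_packet mean "no packet available right now", not failure. */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}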