Code Example #1
int seg_write_frame(Segment_U *seg_union, int input_width, int input_height, int flag, void *yuv_data) {

	Output_Context *ptr_output_ctx = seg_union->output_ctx;



	// Wrap the raw UYVY422 capture buffer in the AVFrame (no copy is made).
	avpicture_fill((AVPicture *)seg_union->picture_capture, (uint8_t *)yuv_data,
			PIX_FMT_UYVY422, input_width, input_height);

	//encode video (note: this concerns the input stream)
	ptr_output_ctx->sync_ipts = (double)seg_union->picture_capture_no / CAPTURE_FRAME_RATE; //convert to seconds


	//first swscale
	sws_scale(ptr_output_ctx->img_convert_ctx,
			(const uint8_t* const *) seg_union->picture_capture->data,
			seg_union->picture_capture->linesize, 0,
			input_height,
			ptr_output_ctx->encoded_yuv_pict->data,
			ptr_output_ctx->encoded_yuv_pict->linesize);

	//then encode the converted frame (no input context is needed here)
	encode_video_frame(ptr_output_ctx, ptr_output_ctx->encoded_yuv_pict, NULL);


	return 0;
}
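
Note: encode_video_frame itself is not shown in any of these listings; presumably it maps the seconds-based sync_ipts onto the encoder's time_base before handing the frame to the codec. A minimal sketch of that conversion, assuming the field names used above (an illustration, not code from the project):

#include <stdint.h>
#include <libavutil/rational.h>

/* Assumed step inside encode_video_frame: map a timestamp in seconds onto
 * the encoder's time_base, e.g.
 *   encoded_yuv_pict->pts = seconds_to_pts(sync_ipts, codec->time_base); */
static int64_t seconds_to_pts(double seconds, AVRational time_base)
{
    return (int64_t)(seconds / av_q2d(time_base) + 0.5);
}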
Code Example #2
// Send available frames to the encoder
void push_video(EncoderJob &jobSpec, MediaFrame &frame) {
	if (frame.VideoSize > 0) {
		// For this to work smoothly, the audio is always kept behind the video; this is handled
		// in the 'write_audio_frame' function, and by writing PTS values back to the jobSpec.
		encode_video_frame(jobSpec, frame);
		jobSpec.FrameCount++;
	}
}
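
The comment above describes the interleaving policy: decoded audio is held back and only written once the video PTS has caught up, with write_audio_frame publishing PTS state back through jobSpec. A hypothetical sketch of such a gate; the PendingAudio queue and last_video_pts are illustrative names, not part of the original project:

#include <stdint.h>
#include <stddef.h>

/* Hypothetical gate: audio frames sit in a PTS-ordered queue and are only
 * released for write_audio_frame while they still trail the last written
 * video PTS, keeping audio behind video as the comment above describes. */
typedef struct { int64_t pts; /* ... decoded samples ... */ } PendingAudio;

static size_t releasable_audio(const PendingAudio *pending, size_t count,
                               int64_t last_video_pts)
{
    size_t n = 0;
    while (n < count && pending[n].pts <= last_video_pts)
        n++;
    return n; /* number of queued frames safe to write now */
}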
Code Example #3
File: ffmpeg_consumer.cpp Project: TELE-TWIN/Server
	void send(const safe_ptr<core::read_frame>& frame)
	{
		encode_executor_.begin_invoke([=]
		{		
			boost::timer frame_timer;

			encode_video_frame(*frame);

			if (!key_only_)
				encode_audio_frame(*frame);

			graph_->set_value("frame-time", frame_timer.elapsed()*format_desc_.fps*0.5);			
		});
	}
Code Example #4
File: test_main.c Project: chris-magic/transcode_seg
int main(int argc, char *argv[]) {

	/* First, open the input file and obtain its stream information. */
	INPUT_CONTEXT *ptr_input_ctx;

	if ((ptr_input_ctx = malloc(sizeof(INPUT_CONTEXT))) == NULL) {
		printf("ptr_input_ctx malloc failed.\n");
		exit(MEMORY_MALLOC_FAIL);
	}

	/* Second, open the output file and set up its stream information. */
	OUTPUT_CONTEXT *ptr_output_ctx;
	if ((ptr_output_ctx = malloc(sizeof(OUTPUT_CONTEXT))) == NULL) {
		printf("ptr_output_ctx malloc failed.\n");
		exit(MEMORY_MALLOC_FAIL);
	}

	//init input file, and get input file information
	init_input(ptr_input_ctx, argv[1]);

	//init output file, and set output file information
	init_output(ptr_output_ctx, argv[2], ptr_input_ctx);

	//open video and audio ,set video_out_buf and audio_out_buf
	open_stream_codec(ptr_output_ctx);


    // open the output file, if needed
    if (!(ptr_output_ctx->fmt->flags & AVFMT_NOFILE)) {		//for mp4 or mpegts, this must be performed
        if (avio_open(&ptr_output_ctx->ptr_format_ctx->pb, argv[2], AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", argv[2]);
            exit(OPEN_MUX_FILE_FAIL);
        }
    }

    // write the stream header, if any
    avformat_write_header(ptr_output_ctx->ptr_format_ctx ,NULL);

    printf("ptr_output_ctx->ptr_format_ctx->nb_streams = %d \n\n" ,ptr_output_ctx->ptr_format_ctx->nb_streams);  //streams number in output file

    ptr_output_ctx->img_convert_ctx = sws_getContext(
    		ptr_input_ctx->video_codec_ctx->width, ptr_input_ctx->video_codec_ctx->height, PIX_FMT_YUV420P,
    		ptr_output_ctx->video_stream->codec->width, ptr_output_ctx->video_stream->codec->height, PIX_FMT_YUV420P,
    		SWS_BICUBIC, NULL, NULL, NULL);


    printf("src_width = %d ,src_height = %d \n" ,ptr_input_ctx->video_codec_ctx->width ,ptr_input_ctx->video_codec_ctx->height);
    printf("dts_width = %d ,dts_height = %d \n" ,ptr_output_ctx->video_stream->codec->width ,
    		ptr_output_ctx->video_stream->codec->height);


	printf("before av_read_frame ...\n");
	/*************************************************************************/
	/* decoder loop                                                          */
	/*************************************************************************/
	while(av_read_frame(ptr_input_ctx->ptr_format_ctx ,&ptr_input_ctx->pkt) >= 0){

		if (ptr_input_ctx->pkt.stream_index == ptr_input_ctx->video_index) {


			//decode video packet
			int got_picture = 0;
			ptr_input_ctx->mark_have_frame = 0;
			avcodec_decode_video2(ptr_input_ctx->video_codec_ctx,
					ptr_input_ctx->yuv_frame, &got_picture, &ptr_input_ctx->pkt);

			if (got_picture) {
				//encode video: frame time in seconds = PTS in the input
				//stream's time_base, minus the container start offset
				ptr_output_ctx->sync_ipts = av_q2d(ptr_input_ctx->ptr_format_ctx->streams[ptr_input_ctx->video_index]->time_base) *
						ptr_input_ctx->yuv_frame->best_effort_timestamp
						- (double)ptr_input_ctx->ptr_format_ctx->start_time / AV_TIME_BASE;

				//first swscale
				sws_scale(ptr_output_ctx->img_convert_ctx ,
						(const uint8_t* const*)ptr_input_ctx->yuv_frame->data ,ptr_input_ctx->yuv_frame->linesize ,
						0 ,
						ptr_input_ctx->video_codec_ctx->height ,
						ptr_output_ctx->encoded_yuv_pict->data ,ptr_output_ctx->encoded_yuv_pict->linesize);

				//then encode the converted frame
				encode_video_frame(ptr_output_ctx, ptr_output_ctx->encoded_yuv_pict, ptr_input_ctx);
			}

		} else if (ptr_input_ctx->pkt.stream_index == ptr_input_ctx->audio_index) {
			//decode audio packet; save the packet pointers so they can be
			//restored before the packet is freed
			uint8_t *tmp_data = ptr_input_ctx->pkt.data;
			int tmp_size = ptr_input_ctx->pkt.size;
			while (ptr_input_ctx->pkt.size > 0) {
				int got_frame = 0;
				int len = avcodec_decode_audio4(ptr_input_ctx->audio_codec_ctx,
						ptr_input_ctx->audio_decode_frame, &got_frame,
						&ptr_input_ctx->pkt);

				if (len < 0) { //decode failed, skip the rest of this packet
					fprintf(stderr, "Error while decoding audio frame\n");
					break;
				}

				if (got_frame) {
					//get the size in bytes of the decoded audio data
					int data_size = av_samples_get_buffer_size(NULL,
							ptr_input_ctx->audio_codec_ctx->channels,
							ptr_input_ctx->audio_decode_frame->nb_samples,
							ptr_input_ctx->audio_codec_ctx->sample_fmt, 1);
					ptr_input_ctx->audio_size = data_size; //audio data size

					//encode audio: feed the encoder one full frame at a time
					int frame_bytes = ptr_output_ctx->audio_stream->codec->frame_size
										* av_get_bytes_per_sample(ptr_output_ctx->audio_stream->codec->sample_fmt)
										* ptr_output_ctx->audio_stream->codec->channels;
					uint8_t *audio_buf = ptr_input_ctx->audio_decode_frame->data[0];

					while (data_size >= frame_bytes) {

						encode_audio_frame(ptr_output_ctx, audio_buf, frame_bytes);
						data_size -= frame_bytes;
						audio_buf += frame_bytes;
					}

				} else { //decoder consumed data but produced no frame yet
					printf("======>avcodec_decode_audio4 ,no data ..\n");
				}

				//advance past the consumed bytes even when no frame was
				//produced; a 'continue' here would loop forever
				ptr_input_ctx->pkt.size -= len;
				ptr_input_ctx->pkt.data += len;
			}

			//restore the saved packet pointers before the packet is freed
			ptr_input_ctx->pkt.data = tmp_data;
			ptr_input_ctx->pkt.size = tmp_size;
		}

		av_free_packet(&ptr_input_ctx->pkt); //release the packet from av_read_frame

	}//endwhile


	printf("before flush ,ptr_output_ctx->ptr_format_ctx->nb_streams = %d \n\n" ,ptr_output_ctx->ptr_format_ctx->nb_streams);
	encode_flush(ptr_output_ctx ,ptr_output_ctx->ptr_format_ctx->nb_streams);

	printf("before wirite tailer ...\n\n");

	av_write_trailer(ptr_output_ctx->ptr_format_ctx );

	/*free memory*/
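	/* A minimal cleanup sketch: the original listing stops at the comment
	 * above, so the calls below are an assumption based on the contexts
	 * opened earlier in this function. */
	if (!(ptr_output_ctx->fmt->flags & AVFMT_NOFILE))
		avio_close(ptr_output_ctx->ptr_format_ctx->pb);
	avformat_free_context(ptr_output_ctx->ptr_format_ctx);
	free(ptr_output_ctx);
	free(ptr_input_ctx);

	return 0;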
}
Code Example #5
/* XXX: This class should really pass the picture parameters by a separate API
 * so that we can, by contract, enforce that the frame size can't suddenly
 * change on us. */
void Java_org_devtcg_rojocam_ffmpeg_RtpOutputContext_nativeWriteFrame(JNIEnv *env,
        jclass clazz, jint nativeInt, jbyteArray data, jlong frameTime,
        jint frameFormat, jint frameWidth, jint frameHeight,
        jint frameBitsPerPixel) {
    RtpOutputContext *rtpContext = (RtpOutputContext *)nativeInt;
    AVFormatContext *avContext;
    AVCodecContext *codec;
    AVStream *outputStream;
    AVPacket pkt;
    jbyte *data_c;
    int max_packet_size;
    uint8_t *rtp_data;
    int rtp_data_len;

    avContext = rtpContext->avContext;
    outputStream = avContext->streams[0];
    codec = outputStream->codec;

    if (rtpContext->tempFrame == NULL) {
        if (!first_frame_init(env, rtpContext, frameFormat,
                frameWidth, frameHeight)) {
            LOGE("Error initializing encoding buffers, cannot stream");
            return;
        }
    }

    data_c = (*env)->GetByteArrayElements(env, data, NULL);

    /* Convert the input arguments into an AVPacket, as if it had been read
     * from the ffmpeg libraries; in reality it was handed to us directly as
     * a raw video frame, so no demuxing was needed. */
    int frameDuration = frameTime - rtpContext->lastFrameTime;
    bool frameEncoded = encode_video_frame(rtpContext,
            outputStream, rtpContext->tempFrame, rtpContext->imgConvert,
            rtpContext->tempEncodedBuf, sizeof(rtpContext->tempEncodedBuf),
            data_c, frameTime, frameDuration, frameFormat,
            frameWidth, frameHeight, frameBitsPerPixel, &pkt);
    rtpContext->lastFrameTime = frameTime;

    (*env)->ReleaseByteArrayElements(env, data, data_c, JNI_ABORT);

    if (frameEncoded) {
#if PROFILE_WRITE_FRAME
        struct timeval then;
        gettimeofday(&then, NULL);
#endif

        max_packet_size = url_get_max_packet_size(rtpContext->urlContext);
        url_open_dyn_packet_buf(&avContext->pb, max_packet_size);

        avContext->pb->seekable = 0;

        /* This organizes our encoded packet into RTP packet segments (but it
         * doesn't actually send anything over the network yet). */
        if (av_write_frame(avContext, &pkt) < 0) {
            jniThrowException(env, "java/io/IOException", "Error writing frame to output");
        }

        /* Actually deliver the packetized RTP data to the remote peer. */
        rtp_data_len = url_close_dyn_buf(avContext->pb, &rtp_data);
        exhaustive_send(rtpContext->urlContext, rtp_data, rtp_data_len);
        av_free(rtp_data);

        /* XXX: I dunno, ffserver.c does this... */
        outputStream->codec->frame_number++;

#if PROFILE_WRITE_FRAME
        store_elapsed(&rtpContext->write_time, &then);
#endif
    } else {
#if PROFILE_WRITE_FRAME /* the original read PROFILE_WRITE_TIME, presumably a typo */
        rtpContext->write_time = 0;
#endif
    }

#if PROFILE_WRITE_FRAME
    //LOGI("resample@%ld ms; encode@%ld ms; write@%ld ms",
    //        rtpContext->resampling_time, rtpContext->encoding_time,
    //        rtpContext->write_time);
#endif
}
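
A note on the delivery step above: a buffer opened with url_open_dyn_packet_buf stores every packet written into it back-to-back, each preceded by a 32-bit big-endian length prefix, and url_close_dyn_buf hands the whole thing back. exhaustive_send is not shown in this listing; a hypothetical sketch of what it presumably does with that buffer (url_write and AV_RB32 are the era-appropriate ffmpeg APIs; the function body is an assumption):

#include "libavutil/intreadwrite.h"

/* Hypothetical: walk the length-prefixed buffer from url_close_dyn_buf and
 * send one datagram per stored RTP packet. */
static void exhaustive_send_sketch(URLContext *h, uint8_t *buf, int len)
{
    while (len > 4) {
        int pkt_size = AV_RB32(buf);  /* length prefix written by the dyn buf */
        buf += 4;
        len -= 4;
        url_write(h, buf, pkt_size);  /* one RTP packet per send */
        buf += pkt_size;
        len -= pkt_size;
    }
}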
Code Example #6
int seg_transcode_main(Segment_U *seg_union) {

	Output_Context *ptr_output_ctx = seg_union->output_ctx;

    if (!(ptr_output_ctx->fmt->flags & AVFMT_NOFILE)) {		//for mp4 or mpegts, this must be performed
        if (avio_open(&(ptr_output_ctx->ptr_format_ctx->pb), seg_union->ts_name, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", seg_union->ts_name);
            exit(OPEN_MUX_FILE_FAIL);
        }
    }
    // write the stream header, if any
    avformat_write_header(ptr_output_ctx->ptr_format_ctx ,NULL);
    write_m3u8_header(ptr_output_ctx);
    int i;
    for (i = 0; i < seg_union->input_nb; i++) {

    	/*initialize input file information*/
    	init_input(seg_union->input_ctx, seg_union->input_file[i]);
    	Input_Context *ptr_input_ctx = seg_union->input_ctx;

		ptr_output_ctx->img_convert_ctx = sws_getContext(
				ptr_input_ctx->video_codec_ctx->width, ptr_input_ctx->video_codec_ctx->height, PIX_FMT_YUV420P,
				ptr_output_ctx->video_stream->codec->width, ptr_output_ctx->video_stream->codec->height, PIX_FMT_YUV420P,
				SWS_BICUBIC, NULL, NULL, NULL);


		printf("src_width = %d ,src_height = %d \n" ,ptr_input_ctx->video_codec_ctx->width ,ptr_input_ctx->video_codec_ctx->height);
		printf("dts_width = %d ,dts_height = %d \n" ,ptr_output_ctx->video_stream->codec->width ,
				ptr_output_ctx->video_stream->codec->height);

		printf("before av_read_frame ...\n");
		/*************************************************************************/
		/* decoder loop                                                          */
		/*************************************************************************/
		while(av_read_frame(ptr_input_ctx->ptr_format_ctx ,&ptr_input_ctx->pkt) >= 0){

			if (ptr_input_ctx->pkt.stream_index == ptr_input_ctx->video_index) {

				//decode video packet
				int got_picture = 0;
				avcodec_decode_video2(ptr_input_ctx->video_codec_ctx,
						ptr_input_ctx->yuv_frame, &got_picture, &ptr_input_ctx->pkt);

				if (got_picture) {
					//encode video (note: this concerns the input stream)
					//frame time in seconds: the input-stream PTS minus the
					//container start offset, plus the accumulated duration
					//of the files already processed
					ptr_output_ctx->sync_ipts = av_q2d(ptr_input_ctx->ptr_format_ctx->streams[ptr_input_ctx->video_index]->time_base) *
							ptr_input_ctx->yuv_frame->best_effort_timestamp
							- (double)ptr_input_ctx->ptr_format_ctx->start_time / AV_TIME_BASE
							+ ptr_output_ctx->base_ipts;    //current packet time in seconds

					//printf("ptr_output_ctx->sync_ipts = %f \n" ,ptr_output_ctx->sync_ipts);

					//first swscale
					sws_scale(ptr_output_ctx->img_convert_ctx ,
							(const uint8_t* const*)ptr_input_ctx->yuv_frame->data ,ptr_input_ctx->yuv_frame->linesize ,
							0 ,
							ptr_input_ctx->video_codec_ctx->height ,
							ptr_output_ctx->encoded_yuv_pict->data ,ptr_output_ctx->encoded_yuv_pict->linesize);

					//then encode the converted frame
					encode_video_frame(ptr_output_ctx, ptr_output_ctx->encoded_yuv_pict, ptr_input_ctx);
				}

			} else if (ptr_input_ctx->pkt.stream_index == ptr_input_ctx->audio_index) {
				//printf("audio ...\n");
				//decode audio packet; save the packet pointers so they can
				//be restored before the packet is freed
				uint8_t *tmp_data = ptr_input_ctx->pkt.data;
				int tmp_size = ptr_input_ctx->pkt.size;
				while (ptr_input_ctx->pkt.size > 0) {
					int got_frame = 0;
					int len = avcodec_decode_audio4(ptr_input_ctx->audio_codec_ctx,
							ptr_input_ctx->audio_decode_frame, &got_frame,
							&ptr_input_ctx->pkt);

					if (len < 0) { //decode failed, skip the rest of this packet
						fprintf(stderr, "Error while decoding audio frame\n");
						break;
					}

					if (got_frame) {
						//encode the audio data ,and write the data into the output
//						do_audio_out(ptr_output_ctx ,ptr_input_ctx ,ptr_input_ctx->audio_decode_frame);
					} else { //decoder consumed data but produced no frame yet
						printf("======>avcodec_decode_audio4 ,no data ..\n");
					}

					//advance past the consumed bytes even when no frame was
					//produced; a 'continue' here would loop forever
					ptr_input_ctx->pkt.size -= len;
					ptr_input_ctx->pkt.data += len;
				}

				//restore the saved packet pointers before freeing the packet
				ptr_input_ctx->pkt.size = tmp_size;
				ptr_input_ctx->pkt.data = tmp_data;

			}

			av_free_packet(&ptr_input_ctx->pkt);

		}//endwhile

		//duration of this input in seconds: integer seconds plus the
		//fractional remainder (the first division is integer division)
		double file_duration = ptr_input_ctx->ptr_format_ctx->duration / AV_TIME_BASE
					+ (double)(ptr_input_ctx->ptr_format_ctx->duration % AV_TIME_BASE) / AV_TIME_BASE;


		ptr_output_ctx->base_ipts += file_duration;  //accumulated duration of the files completed so far
		printf("end while ......, base_ipts = %f .............> \n", ptr_output_ctx->base_ipts);
		ptr_output_ctx->audio_resample = 0;
		sws_freeContext(ptr_output_ctx->img_convert_ctx);
		free_input(ptr_input_ctx);
    } //end for

	printf("before flush ,ptr_output_ctx->ptr_format_ctx->nb_streams = %d \n\n" ,ptr_output_ctx->ptr_format_ctx->nb_streams);
	encode_flush(ptr_output_ctx ,ptr_output_ctx->ptr_format_ctx->nb_streams);

	write_m3u8_tailer(ptr_output_ctx);
	printf("before wirite tailer ...\n\n");
	av_write_trailer(ptr_output_ctx->ptr_format_ctx );

	return 0;
}