void run()
{
	// Processing pipeline: populate the list, order it, then apply the cuts.
	fillList();
	sortList();
	cutList();

	// Debug output, disabled by default:
	//printList();
}
Example #2
0
int H264::init()
{
	AVCodec* decoder = avcodec_find_decoder(stream()->codec->codec_id);
	if(!decoder)
		return error("Could not find decoder");
	
	if(avcodec_open2(stream()->codec, decoder, NULL) != 0)
		return error("Could not open decoder");
	
	m_h = (H264Context*)stream()->codec->priv_data;
	
	m_codec = avcodec_find_encoder(stream()->codec->codec_id);
	if(!m_codec)
		return error("Could not find encoder");
	
	outputStream()->codec = avcodec_alloc_context3(m_codec);
	avcodec_copy_context(outputStream()->codec, stream()->codec);
	
	outputStream()->sample_aspect_ratio = outputStream()->codec->sample_aspect_ratio;
	outputStream()->codec->thread_type = 0;
	outputStream()->codec->thread_count = 1;
	
	AVCodecContext* ctx = outputStream()->codec;
// 	ctx->bit_rate = 3 * 500 * 1024;
// 	ctx->rc_max_rate = 0;
// 	ctx->rc_buffer_size = 0;
// 	ctx->gop_size = 40;
// 	ctx->coder_type = 1;
// 	ctx->me_cmp = 1;
// 	ctx->me_range = 16;
	ctx->colorspace = AVCOL_SPC_BT709;
// 	ctx->flags2 |= CODEC_FLAG2_8X8DCT;
	
	m_nc = cutList().nextCutPoint(0);
	m_isCutout = m_nc->direction == CutPoint::IN;
	
	m_startDecodeOffset = av_rescale_q(7, (AVRational){1,1}, stream()->time_base);
	
	m_encodeBuffer = (uint8_t*)av_malloc(ENCODE_BUFSIZE);
	
	m_encoding = false;
	m_decoding = false;
	m_syncing = false;
	m_syncPoint = -1;
	
	return 0;
}
Example #3
0
int GenericAudio::init()
{
	AVCodec* codec = avcodec_find_decoder(stream()->codec->codec_id);
	
	if(!codec)
		return error("Could not find decoder");
	
	if(avcodec_open2(stream()->codec, codec, 0) != 0)
		return error("Could not open decoder");
	
	// avcodec_find_decoder does not take sample_fmt into account,
	// so we have to find the decoder ourself...
	AVCodec* encoder = findCodec(
		stream()->codec->codec_id,
		stream()->codec->sample_fmt
	);
	
	if(!encoder)
		return error("Could not find encoder");
	
	outputStream()->disposition = stream()->disposition;
	av_dict_copy(&outputStream()->metadata, stream()->metadata, 0);
	
	outputStream()->codec = avcodec_alloc_context3(encoder);
	avcodec_copy_context(outputStream()->codec, stream()->codec);
	
	if(avcodec_open2(outputStream()->codec, encoder, 0) != 0)
		return error("Could not open encoder");
	
	// Allocate sample buffer
	m_cutout_buf = (int16_t*)av_malloc(BUFSIZE);
	m_cutin_buf = (int16_t*)av_malloc(BUFSIZE);
	if(!m_cutout_buf || !m_cutin_buf)
		return error("Could not allocate sample buffer");
	
	m_nc = cutList().nextCutPoint(0);
	m_cutout = m_nc->direction == CutPoint::IN;
	
	return 0;
}
Example #4
0
/**
 * Process one input H.264 packet.
 *
 * State machine for frame-accurate cutting: packets are copied verbatim
 * while outside a cut region; near a cut point the decoder is switched on,
 * frames around the cut-in are re-encoded, and once the decoder delivers a
 * keyframe (sync point) the stream switches back to plain packet copying.
 *
 * @param packet input packet; timestamps are rewritten to relative values.
 * @return 0 on success, the result of error(...) on failure.
 */
int H264::handlePacket(AVPacket* packet)
{
	// NOTE(fix): gotFrame must be zero-initialized — it is read below even
	// when m_decoding is false and avcodec_decode_video2() never ran.
	int gotFrame = 0;
	int bytes;
	H264Context* h = (H264Context*)stream()->codec->priv_data;
	
	// Transform timestamps to relative timestamps
	packet->dts = pts_rel(packet->dts);
	packet->pts = pts_rel(packet->pts);
	
	// Track SPS/PPS NAL units while decoding (needed to prefix copied output)
	if(m_decoding && !m_syncing)
		parseNAL(packet->data, packet->size);
	
	// Switch the decoder on shortly before the next cut point
	if(!m_decoding)
	{
		if(m_nc && m_nc->time - packet->pts < m_startDecodeOffset)
		{
			log_debug("Switching decoder on at PTS %'10lld (m_nc: %'10lld)",
				packet->pts, m_nc->time);
			m_decoding = true;
			
			avcodec_flush_buffers(stream()->codec);
		}
	}
	
	if(m_decoding)
	{
		if(avcodec_decode_video2(stream()->codec, &m_frame, &gotFrame, packet) < 0)
			return error("Could not decode packet");
	}
	
	// Cut point handling: stop at cut-out, open the encoder at cut-in
	if(!m_encoding && m_nc)
	{
		if(m_nc->direction == CutPoint::OUT && m_nc->time < packet->dts)
		{
			m_decoding = false;
			m_isCutout = true;
			
			int64_t current_time = m_nc->time;
			m_nc = cutList().nextCutPoint(packet->dts);
			
			if(m_nc)
				setTotalCutout(m_nc->time - (current_time - totalCutout()));
			else
				setActive(false); // last cutpoint reached
		}
		else if(m_nc->direction == CutPoint::IN && m_nc->time <= packet->dts)
		{
			m_encoding = true;
			m_encFrameCount = 0;
			
			log_debug("Opening encoder for frame with PTS %'10lld", packet->dts);

			AVDictionary* opts = 0;
			av_dict_set(&opts, "profile", "main", 0);
			av_dict_set(&opts, "preset", "ultrafast", 0);
			
			if(avcodec_open2(outputStream()->codec, m_codec, &opts) != 0)
				return error("Could not open encoder");
		}
	}
	
	// After enough re-encoded frames, start syncing on the next input keyframe
	if(m_encoding && m_encFrameCount > 20 && (packet->flags & AV_PKT_FLAG_KEY) && h->s.current_picture_ptr)
	{
		m_syncing = true;
		m_syncPoint = packet->pts;
		
		log_debug("SYNC: start with keyframe packet PTS %'10lld", m_syncPoint);
// 		log_debug("SYNC: frame_num of first original frame is %d",
// 				h->s.current_picture_ptr->frame_num
// 		);
	}

	if(m_syncing)
	{
		log_debug("decode=%d, gotFrame=%d, keyframe=%d, t=%d", m_decoding, gotFrame, m_frame.key_frame, m_frame.pict_type);
	}
	
	// Sync point reached: the decoder produced an I-frame, so drain the
	// encoder, flush buffered packets and switch back to copy mode.
	if(m_syncing && gotFrame && m_frame.pict_type == 1)
	{
		// Flush out encoder
		while(1)
		{
			log_debug("SYNC: Flushing out encoder");
			bytes = avcodec_encode_video(
				outputStream()->codec,
				m_encodeBuffer, ENCODE_BUFSIZE,
				NULL
			);
			outputStream()->codec->has_b_frames = 6;
			
			if(!bytes)
				break;
			
			int64_t pts = av_rescale_q(outputStream()->codec->coded_frame->pts,
					outputStream()->codec->time_base, outputStream()->time_base
				);
			
			// Drop encoder output that overlaps the copied stream
			if(pts + totalCutout() >= m_syncPoint)
			{
				log_debug("SYNC: (encoder) Skipping PTS %'10lld >= sync point %'10lld",
					pts + totalCutout(), m_syncPoint
				);
				continue;
			}
			
			if(writeOutputPacket(m_encodeBuffer, bytes, pts) != 0)
				return error("SYNC: (encoder) Could not write packet");
		}
		log_debug("SYNC: closing encoder");
		avcodec_close(outputStream()->codec);
		
		// Flush out sync buffer
		for(int i = 0; i < m_syncBuffer.size(); ++i)
		{
			log_debug("SYNC: writing packet from buffer");

			AVPacket* packet = &m_syncBuffer[i];
			if(packet->pts < m_syncPoint)
			{
				log_debug("SYNC: (buffer) Skipping PTS %'10lld < sync point %'10lld",
					packet->pts, m_syncPoint
				);
				continue;
			}
			
			if(writeInputPacket(packet) != 0)
				return error("SYNC: (buffer) Could not write packet");
		}
		m_syncBuffer.clear();
		
		m_encoding = false;
		m_isCutout = false;
		m_decoding = false;
		m_syncing = false;
		
		log_debug("SYNC: finished, got keyframe from decoder with PTS %'10lld", packet->dts);
		
		m_nc = cutList().nextCutPoint(packet->dts);
	}
	
	// Still syncing: stash the packet until the sync point is reached
	if(m_syncing)
	{
		m_syncBuffer.push_back(copyPacket(*packet));
	}
	
	// Re-encode decoded frames inside the transition region
	if(m_encoding && gotFrame)
	{
		setFrameFields(&m_frame, packet->dts - totalCutout());
		
		bytes = avcodec_encode_video(
			outputStream()->codec,
			m_encodeBuffer, ENCODE_BUFSIZE,
			&m_frame
		);
		outputStream()->codec->has_b_frames = 6;
		
		if(bytes)
		{
			writeOutputPacket(
				m_encodeBuffer, bytes,
				av_rescale_q(outputStream()->codec->coded_frame->pts,
					outputStream()->codec->time_base, outputStream()->time_base
				)
			);
			m_encFrameCount++;
		}
	}
	
	// Plain copy mode (outside cut regions and not re-encoding)
	if(!m_isCutout && !m_encoding)
	{
		if(m_syncPoint > 0 && packet->pts < m_syncPoint)
		{
			log_debug("COPY: Skipping packet with PTS %'10lld", packet->pts);
			return 0;
		}
		
		// Prefix the first copied packet with the saved SPS/PPS so the
		// decoder can pick up the stream after the re-encoded section
		if(m_sps.data || m_pps.data)
		{
			int size = packet->size + m_sps.size + m_pps.size;
			uint8_t* buf = (uint8_t*)malloc(size);
			int off = 0;
			
			memcpy(buf + off, m_sps.data, m_sps.size);
			off += m_sps.size;
			memcpy(buf + off, m_pps.data, m_pps.size);
			off += m_pps.size;
			
			memcpy(buf + off, packet->data, packet->size);
			
			writeOutputPacket(buf, size, packet->pts - totalCutout());
			
			free(m_sps.data); m_sps.data = 0;
			free(m_pps.data); m_pps.data = 0;
			free(buf);
			return 0;
		}
		
// 		log_debug("COPY: packet with PTS %'10lld", packet->pts);
		outputStream()->codec->has_b_frames = 6;
		if(writeInputPacket(packet) != 0)
		{
			log_debug("PTS buffer:");
			
			for(int i = 0; i < outputStream()->codec->has_b_frames; ++i)
				log_debug(" %s", tstoa(outputStream()->pts_buffer[i]));
			
			return error("Could not copy input packet (has_b_frames: %d, max_b_frames: %d)",
				outputStream()->codec->has_b_frames, outputStream()->codec->max_b_frames
			);
		}
	}
	
	return 0;
}
Example #5
0
/**
 * Process one input audio packet.
 *
 * Packets fully outside a cut region are copied verbatim. A packet that
 * straddles a cut-out point is decoded and its leading samples saved; the
 * packet straddling the following cut-in point is decoded, its head replaced
 * with the saved samples (padded/truncated to keep the packet flow), and
 * re-encoded in place before being written out.
 *
 * @param packet input packet; pts is rewritten to a relative timestamp.
 * @return 0 on success, the result of error(...) on failure.
 */
int GenericAudio::handlePacket(AVPacket* packet)
{
	packet->pts = pts_rel(packet->pts);
	int64_t current_time = packet->pts;
	
	// Packet straddles the cut-out point: decode it and remember how many
	// leading samples belong before the cut.
	if(m_nc && current_time + packet->duration > m_nc->time
		&& m_nc->direction == CutPoint::OUT
		&& current_time < m_nc->time)
	{
		log_debug("%'10lld: Packet across the cut-out point", current_time);
		
		int frame_size = BUFSIZE; // in: buffer size, out: decoded bytes
		if(avcodec_decode_audio3(stream()->codec, m_cutout_buf, &frame_size, packet) < 0)
			return error("Could not decode audio stream");
		
		int64_t total_samples = frame_size / sizeof(int16_t);
		int64_t needed_time = m_nc->time - current_time;
		int64_t needed_samples = av_rescale(needed_time, total_samples, packet->duration);
		
		log_debug("%'10lld: taking %lld of %lld samples", current_time, needed_samples, total_samples);
		
		m_saved_samples = needed_samples;
		
		return 0;
	}
	
	// Packet straddles the cut-in point: splice the saved cut-out samples in
	// front of the samples after the cut and re-encode the packet.
	if(m_nc && current_time + packet->duration > m_nc->time
		&& m_nc->direction == CutPoint::IN
		&& current_time < m_nc->time)
	{
		log_debug("%'10lld: Packet across cut-in point", current_time);
		
		int frame_size = BUFSIZE; // in: buffer size, out: decoded bytes
		if(avcodec_decode_audio3(stream()->codec, m_cutin_buf, &frame_size, packet) < 0)
			return error("Could not decode audio stream");
		
		int64_t total_samples = frame_size / sizeof(int16_t);
		int64_t time_off = m_nc->time - current_time;
		int64_t sample_off = av_rescale(time_off, total_samples, packet->duration);
		int64_t needed_samples = total_samples - sample_off;
		
		log_debug("%'10lld: taking %lld of %lld samples", current_time, needed_samples, total_samples);
		
		// NOTE(fix): memcpy takes a byte count; sample_off is a count of
		// int16_t samples, so it must be scaled by the sample size.
		memcpy(m_cutin_buf, m_cutout_buf, sample_off * sizeof(int16_t));
		
		if(sample_off < m_saved_samples)
			log_warning("Dropping %lld samples to preserve packet flow",
				m_saved_samples - sample_off
			);
		else
		{
			log_warning("Inserting %lld silence samples to preserve packet flow",
				sample_off - m_saved_samples
			);
			// Zero-fill the gap between the saved samples and the cut-in
			for(int i = m_saved_samples; i < sample_off; ++i)
				m_cutin_buf[i] = 0;
		}
		
		// Re-encode the spliced samples back into the packet's own buffer
		int bytes = avcodec_encode_audio(outputStream()->codec, packet->data, packet->size, m_cutin_buf);
		
		if(bytes < 0)
			return error("Could not encode audio frame");
		
		packet->size = bytes;
	}
	
	// Passed a cut-out point: start dropping packets
	if(m_nc && current_time > m_nc->time
		&& !m_cutout && m_nc->direction == CutPoint::OUT)
	{
		m_cutout = true;
		int64_t cutout_time = m_nc->time;
		m_nc = cutList().nextCutPoint(current_time);
		
		log_debug("CUT-OUT at %'10lld", current_time);
		
		if(m_nc)
			setTotalCutout(m_nc->time - (cutout_time - totalCutout()));
		else
		{
			log_debug("No next cutpoint, deactivating...");
			setActive(false);
		}
	}
	
	// Passed a cut-in point: resume writing packets
	if(m_nc && current_time >= m_nc->time
		&& m_cutout && m_nc->direction == CutPoint::IN)
	{
		log_debug("CUT-IN at %'10lld", current_time);
		m_cutout = false;
		m_nc = cutList().nextCutPoint(current_time);
	}
	
	if(!m_cutout)
	{
		// Tolerate sporadic write failures; give up after 50 in a row
		if(writeInputPacket(packet) != 0)
		{
			if(++m_outputErrorCount > 50)
			{
				return error("Could not write input packet");
			}
		}
		else
		{
			m_outputErrorCount = 0;
		}
	}
	
	return 0;
}