Example #1
static void encode(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt,
                   FILE *output)
{
    int ret;

    /* send the frame for encoding */
    ret = avcodec_send_frame(ctx, frame);
    if (ret < 0) {
        fprintf(stderr, "Error sending the frame to the encoder\n");
        exit(1);
    }

    /* read all the available output packets (in general there may be any
     * number of them) */
    while (ret >= 0) {
        ret = avcodec_receive_packet(ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            fprintf(stderr, "Error encoding audio frame\n");
            exit(1);
        }

        fwrite(pkt->data, 1, pkt->size, output);
        av_packet_unref(pkt);
    }
}
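
For reference, here is a minimal sketch (not taken from any of the listed projects) of how a helper like encode() above is typically driven: each frame is submitted in turn, and a final call with a NULL frame puts the encoder into drain mode so the remaining buffered packets are flushed. get_next_frame() is a hypothetical placeholder for the caller's frame source.

    /* hypothetical driver loop for the encode() helper above */
    AVFrame *frame;
    while ((frame = get_next_frame()) != NULL)  /* placeholder frame source */
        encode(ctx, frame, pkt, output);

    /* a NULL frame enters drain mode; encode() returns once
     * avcodec_receive_packet() reports AVERROR_EOF */
    encode(ctx, NULL, pkt, output);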
Example #2
static void vaapi_destroy(void *data)
{
	struct vaapi_encoder *enc = data;

	if (enc->initialized) {
		AVPacket pkt   = {0};
		int      r_pkt = 1;

		while (r_pkt) {
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(57, 40, 101)
			if (avcodec_receive_packet(enc->context, &pkt) < 0)
				break;
#else
			if (avcodec_encode_video2(enc->context, &pkt, NULL,
					    &r_pkt) < 0)
				break;
#endif

			if (r_pkt)
				av_packet_unref(&pkt);
		}
	}

	avcodec_close(enc->context);
	av_frame_unref(enc->vframe);
	av_frame_free(&enc->vframe);
	av_buffer_unref(&enc->vaframes_ref);
	av_buffer_unref(&enc->vadevice_ref);
	da_free(enc->buffer);
	bfree(enc->header);
	bfree(enc->sei);

	bfree(enc);
}
Example #3
/**
 * Encode one frame worth of audio to the output file.
 * @param      frame                 Samples to be encoded
 * @param      output_format_context Format context of the output file
 * @param      output_codec_context  Codec context of the output file
 * @param[out] data_present          Indicates whether data has been
 *                                   encoded
 * @return Error code (0 if successful)
 */
int Transcode::encode_audio_frame(AVFrame *frame,
                              AVFormatContext *output_format_context,
                              AVCodecContext *output_codec_context,
                              int *data_present)
{
    /* Packet used for temporary storage. */
    AVPacket output_packet;
    int error;
    init_packet(&output_packet);

    /* Set a timestamp based on the sample rate for the container. */
    if (frame) {
        frame->pts = pts;
        pts += frame->nb_samples;
    }

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 48, 0)
    /* Encode the audio frame and store it in the temporary packet.
     * The output audio stream encoder is used to do this. */
    if ((error = avcodec_encode_audio2(output_codec_context, &output_packet,
                                       frame, data_present)) < 0) {
        fprintf(stderr, "Could not encode frame (error '%s')\n",
                av_cplus_err2str(error));
        av_packet_unref(&output_packet);
        return error;
    }
#else
    *data_present = 0;
    error = avcodec_send_frame(output_codec_context, frame);
    if (error != AVERROR_EOF && error != AVERROR(EAGAIN) && error != 0) {
        fprintf(stderr, "Could not send frame (error '%s')\n",
                av_cplus_err2str(error));
        return error;
    }

    if ((error = avcodec_receive_packet(output_codec_context, &output_packet)) == 0)
        *data_present = 1;

    if (error != AVERROR_EOF && error != AVERROR(EAGAIN) && error != 0) {
        fprintf(stderr, "Could not receive packet (error '%s')\n",
                av_cplus_err2str(error));
        return error;
    }
#endif

    /* Write one audio frame from the temporary packet to the output file. */
    if (*data_present) {
        if ((error = av_write_frame(output_format_context, &output_packet)) < 0) {
            fprintf(stderr, "Could not write frame (error '%s')\n",
                    av_cplus_err2str(error));
            av_packet_unref(&output_packet);
            return error;
        }
        av_packet_unref(&output_packet);
    }

    return 0;
}
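
Note that avcodec_receive_packet() may in general yield zero, one, or several packets per submitted frame, while the version above forwards at most one per call. A sketch of a drain loop reusing the same output_packet and data_present conventions (an illustration, not part of the original project) could look like this:

    /* sketch: drain every packet the encoder currently has ready */
    while ((error = avcodec_receive_packet(output_codec_context, &output_packet)) == 0) {
        *data_present = 1;
        /* hand output_packet to the muxer here, then release it */
        av_packet_unref(&output_packet);
    }
    if (error != AVERROR(EAGAIN) && error != AVERROR_EOF)
        return error; /* genuine encoding error */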
Example #4
    bool flush(AVCodecContext* pCodecContext, AVFormatContext* pOutputContext, AVStream* pOutputStream, const std::string& filename)
    {
        while(true)
        {
            // Initialize the packet
            AVPacket packet = {0};
            av_init_packet(&packet);

            int r = avcodec_receive_packet(pCodecContext, &packet);
            if(r == AVERROR(EAGAIN) || r == AVERROR_EOF)
            {
                return true;
            }
            else if(r < 0)
            {
                error(filename, "Can't retrieve packet");
                return false;
            }

            // rescale output packet timestamp values from codec to stream timebase
            av_packet_rescale_ts(&packet, pCodecContext->time_base, pOutputStream->time_base);
            packet.stream_index = pOutputStream->index;
            r = av_interleaved_write_frame(pOutputContext, &packet);
            if(r < 0)
            {
                char msg[1024];
                av_make_error_string(msg, 1024, r);
                error(filename, "Failed when writing encoded frame to file");
                return false;
            }
        }
    }
Example #5
static int encode_write(AVFrame *frame)
{
    int ret = 0;
    AVPacket enc_pkt;

    av_init_packet(&enc_pkt);
    enc_pkt.data = NULL;
    enc_pkt.size = 0;

    if ((ret = avcodec_send_frame(encoder_ctx, frame)) < 0) {
        fprintf(stderr, "Error during encoding. Error code: %s\n", av_err2str(ret));
        goto end;
    }
    while (1) {
        ret = avcodec_receive_packet(encoder_ctx, &enc_pkt);
        if (ret)
            break;

        enc_pkt.stream_index = 0;
        av_packet_rescale_ts(&enc_pkt, ifmt_ctx->streams[video_stream]->time_base,
                             ofmt_ctx->streams[0]->time_base);
        ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
        if (ret < 0) {
            fprintf(stderr, "Error during writing data to output file. "
                    "Error code: %s\n", av_err2str(ret));
            return -1;
        }
    }

end:
    if (ret == AVERROR_EOF)
        return 0;
    ret = ((ret == AVERROR(EAGAIN)) ? 0:-1);
    return ret;
}
Example #6
static int ReceivePacket(AVCodecContext* avctx, AVPacket* pkt, int* got_packet)
{
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 37, 100)
  return avcodec_encode_video2(avctx, pkt, nullptr, got_packet);
#else
  *got_packet = 0;
  int error = avcodec_receive_packet(avctx, pkt);
  if (!error)
    *got_packet = 1;
  if (error == AVERROR(EAGAIN))
    return 0;

  return error;
#endif
}
Example #7
vod_status_t
audio_encoder_flush(
	void* context)
{
	audio_encoder_state_t* state = context;
	AVPacket output_packet;
	vod_status_t rc;
	int avrc;

	avrc = avcodec_send_frame(state->encoder, NULL);
	if (avrc < 0)
	{
		vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
			"audio_encoder_flush: avcodec_send_frame failed %d", avrc);
		return VOD_UNEXPECTED;
	}

	for (;;)
	{
		av_init_packet(&output_packet);
		output_packet.data = NULL; // packet data will be allocated by the encoder
		output_packet.size = 0;

		avrc = avcodec_receive_packet(state->encoder, &output_packet);
		if (avrc == AVERROR_EOF)
		{
			break;
		}

		if (avrc < 0)
		{
			vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
				"audio_encoder_flush: avcodec_receive_packet failed %d", avrc);
			return VOD_UNEXPECTED;
		}

		rc = audio_encoder_write_packet(state, &output_packet);

		av_packet_unref(&output_packet);

		if (rc != VOD_OK)
		{
			return rc;
		}
	}

	return VOD_OK;
}
Example #8
vod_status_t
audio_encoder_write_frame(
	void* context,
	AVFrame* frame)
{
	audio_encoder_state_t* state = context;
	vod_status_t rc;
	AVPacket output_packet;
	int avrc;

	// send frame
	avrc = avcodec_send_frame(state->encoder, frame);

	av_frame_unref(frame);

	if (avrc < 0)
	{
		vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
			"audio_encoder_write_frame: avcodec_send_frame failed %d", avrc);
		return VOD_UNEXPECTED;
	}

	// receive packet
	av_init_packet(&output_packet);
	output_packet.data = NULL; // packet data will be allocated by the encoder
	output_packet.size = 0;

	avrc = avcodec_receive_packet(state->encoder, &output_packet);

	if (avrc == AVERROR(EAGAIN))
	{
		return VOD_OK;
	}

	if (avrc < 0)
	{
		vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
			"audio_encoder_write_frame: avcodec_receive_packet failed %d", avrc);
		return VOD_ALLOC_FAILED;
	}

	rc = audio_encoder_write_packet(state, &output_packet);

	av_packet_unref(&output_packet);

	return rc;
}
Example #9
int CFFmpegImage::EncodeFFmpegFrame(AVCodecContext *avctx, AVPacket *pkt, int *got_packet, AVFrame *frame)
{
  int ret;

  *got_packet = 0;

  ret = avcodec_send_frame(avctx, frame);
  if (ret < 0)
    return ret;

  ret = avcodec_receive_packet(avctx, pkt);
  if (!ret)
    *got_packet = 1;

  if (ret == AVERROR(EAGAIN))
    return 0;

  return ret;
}
Example #10
void _ffmpegPostVideoFrame(struct mAVStream* stream, const color_t* pixels, size_t stride) {
	struct FFmpegEncoder* encoder = (struct FFmpegEncoder*) stream;
	if (!encoder->context) {
		return;
	}
	stride *= BYTES_PER_PIXEL;

	AVPacket packet;

	av_init_packet(&packet);
	packet.data = 0;
	packet.size = 0;
#if LIBAVCODEC_VERSION_MAJOR >= 55
	av_frame_make_writable(encoder->videoFrame);
#endif
	encoder->videoFrame->pts = av_rescale_q(encoder->currentVideoFrame, encoder->video->time_base, encoder->videoStream->time_base);
	packet.pts = encoder->videoFrame->pts;
	++encoder->currentVideoFrame;

	sws_scale(encoder->scaleContext, (const uint8_t* const*) &pixels, (const int*) &stride, 0, encoder->iheight, encoder->videoFrame->data, encoder->videoFrame->linesize);

	int gotData;
#ifdef FFMPEG_USE_PACKETS
	avcodec_send_frame(encoder->video, encoder->videoFrame);
	gotData = avcodec_receive_packet(encoder->video, &packet) == 0;
#else
	avcodec_encode_video2(encoder->video, &packet, encoder->videoFrame, &gotData);
#endif
	if (gotData) {
#ifndef FFMPEG_USE_PACKET_UNREF
		if (encoder->video->coded_frame->key_frame) {
			packet.flags |= AV_PKT_FLAG_KEY;
		}
#endif
		packet.stream_index = encoder->videoStream->index;
		av_interleaved_write_frame(encoder->context, &packet);
	}
#ifdef FFMPEG_USE_PACKET_UNREF
	av_packet_unref(&packet);
#else
	av_free_packet(&packet);
#endif
}
Example #11
File: FileOut.cpp  Project: mtone/ACMuxer
void FileOut::encodeFrame(Frame& frame, uint64_t pts) {
   //frame.avFrame.get()->pts = ccv.get()->frame_number;
   auto fr = frame.avFrame.get();
   fr->pts = frame.getRelativePts();
   
   fr->best_effort_timestamp = 0;
   fr->pkt_pts = fr->pts;
   fr->pkt_dts = 0;
   fr->pkt_duration = 100;
   fr->pkt_pos = 0;
   fr->pkt_size = 0;
   pkt->duration = 100;

   int ret = avcodec_send_frame(ccv.get(), frame.avFrame.get());
   frameNo++;
   assert(ret == 0);
   // packet.pts = av_rescale_q ( frame. pts , frameTimeBase,packetTimeBase )
   pkt.get()->pts = av_rescale_q(fr->pts, ccv->time_base, ccv->time_base);
   ret = avcodec_receive_packet(ccv.get(), pkt.get());
   if (ret == AVERROR(EAGAIN)) {
	cout << "AVERROR(EAGAIN) : output is not available right now - user must try to send input" << endl;
   }
   else if (ret == AVERROR_EOF) {
	cout << "AVERROR_EOF: the encoder has been fully flushed, and there will be no more output packets" << endl;
   }
   else if (ret == AVERROR(EINVAL)) {
	cout << "AVERROR(EINVAL) : codec not opened, or it is an encoder" << endl;
   }

   else if (ret == 0) {
	cout << "Success - got packet" << endl;
	if (ret == 0) {
	   pkt->duration = 100;
	   printf("Write frame %3d (size=%5d)\n", frameNo, pkt.get()->size);
	   fwrite(pkt.get()->data, 1, pkt.get()->size, f);
	   av_packet_unref(pkt.get());
	}
   }
   else {
	cout << "Other errors" << endl;
   }
}
Example #12
void FFMPEGWriter::close()
{
#ifdef FFMPEG_OLD_CODE
	int got_packet = true;

	while(got_packet)
	{
		m_packet->data = nullptr;
		m_packet->size = 0;

		av_init_packet(m_packet);

		if(avcodec_encode_audio2(m_codecCtx, m_packet, nullptr, &got_packet))
			AUD_THROW(FileException, "File end couldn't be written, audio encoding failed with ffmpeg.");

		if(got_packet)
		{
			m_packet->flags |= AV_PKT_FLAG_KEY;
			m_packet->stream_index = m_stream->index;
			if(av_write_frame(m_formatCtx, m_packet))
			{
				av_free_packet(m_packet);
				AUD_THROW(FileException, "Final frames couldn't be writen to the file with ffmpeg.");
			}
			av_free_packet(m_packet);
		}
	}
#else
	if(avcodec_send_frame(m_codecCtx, nullptr) < 0)
		AUD_THROW(FileException, "File couldn't be written, audio encoding failed with ffmpeg.");

	while(avcodec_receive_packet(m_codecCtx, m_packet) == 0)
	{
		m_packet->stream_index = m_stream->index;

		if(av_write_frame(m_formatCtx, m_packet) < 0)
			AUD_THROW(FileException, "Frame couldn't be writen to the file with ffmpeg.");
	}
#endif
}
Example #13
File: FileOut.cpp  Project: mtone/ACMuxer
void FileOut::encodeFinish(uint64_t pts) {
   int ret = avcodec_send_frame(ccv.get(), nullptr);
   assert(ret == 0);

   ret = avcodec_receive_packet(ccv.get(), pkt.get());
   if (ret == 0) {
	printf("Write frame %3d (size=%5d)\n", frameNo++, pkt.get()->size);
	fwrite(pkt.get()->data, 1, pkt.get()->size, f);
	av_packet_unref(pkt.get());
   }

   /* get the delayed frames */
   /*for (got_output = 1; got_output; i++) {
   fflush(stdout);

   ret = avcodec_encode_video2(c, &p, NULL, &got_output);
   if (ret < 0) {
   fprintf(stderr, "Error encoding frame\n");
   exit(1);
   }

   if (got_output) {
   printf("Write frame %3d (size=%5d)\n", i, pkt.get()->size);
   fwrite(pkt.get()->data, 1, pkt.get()->size, f);
   av_packet_unref(&p);
   }
   }*/

   /* add sequence end code to have a real MPEG file */
   uint8_t endcode[] = { 0, 0, 1, 0xb7 };
   fwrite(endcode, 1, sizeof(endcode), f);
   fclose(f);

   avcodec_close(ccv.get());
   av_free(ccv.get());
   //av_freep( &frame->data[0]);
   //av_frame_free(&frame);
   printf("\n");
}
Example #14
static BOOL ffmpeg_encode_frame(AVCodecContext* context, AVFrame* in,
                                AVPacket* packet, wStream* out)
{
	int ret;
	/* send the frame with the raw data to the encoder */
	ret = avcodec_send_frame(context, in);

	if (ret < 0)
	{
		const char* err = av_err2str(ret);
		WLog_ERR(TAG, "Error submitting the packet to the encoder %s [%d]",
		         err, ret);
		return FALSE;
	}

	/* read all the output packets (in general there may be any number of them) */
	while (ret >= 0)
	{
		ret = avcodec_receive_packet(context, packet);

		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
			return TRUE;
		else if (ret < 0)
		{
			const char* err = av_err2str(ret);
			WLog_ERR(TAG, "Error during encoding %s [%d]", err, ret);
			return FALSE;
		}

		if (!Stream_EnsureRemainingCapacity(out, packet->size))
			return FALSE;

		Stream_Write(out, packet->data, packet->size);
		av_packet_unref(packet);
	}

	return TRUE;
}
Example #15
int MP4Encoder::EncodeFrame(AVCodecContext *pCodecCtx, AVFrame *pFrame, AVPacket *avPacket) {
    int ret = avcodec_send_frame(pCodecCtx, pFrame);
    if (ret < 0) {
        //failed to send frame for encoding
        return -1;
    }
    while (!ret) {
        ret = avcodec_receive_packet(pCodecCtx, avPacket);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            return 0;
        } else if (ret < 0) {
            //error during encoding
            return -1;
        }
        //printf("Write frame %d, size=%d\n", avPacket->pts, avPacket->size);
        avPacket->stream_index = pStream->index;
        av_packet_rescale_ts(avPacket, pCodecCtx->time_base, pStream->time_base);
        avPacket->pos = -1;
        av_interleaved_write_frame(pFormatCtx, avPacket);
        av_packet_unref(avPacket);
    }
    return 0;
}
Example #16
AUD_NAMESPACE_BEGIN

#if LIBAVCODEC_VERSION_MAJOR < 58
#define FFMPEG_OLD_CODE
#endif

void FFMPEGWriter::encode()
{
	sample_t* data = m_input_buffer.getBuffer();

	if(m_deinterleave)
	{
		m_deinterleave_buffer.assureSize(m_input_buffer.getSize());

		sample_t* dbuf = m_deinterleave_buffer.getBuffer();
		// deinterleave
		int single_size = sizeof(sample_t);
		for(int channel = 0; channel < m_specs.channels; channel++)
		{
			for(int i = 0; i < m_input_buffer.getSize() / AUD_SAMPLE_SIZE(m_specs); i++)
			{
				std::memcpy(((data_t*)dbuf) + (m_input_samples * channel + i) * single_size,
							((data_t*)data) + ((m_specs.channels * i) + channel) * single_size, single_size);
			}
		}

		// convert first
		if(m_input_size)
			m_convert(reinterpret_cast<data_t*>(data), reinterpret_cast<data_t*>(dbuf), m_input_samples * m_specs.channels);
		else
			std::memcpy(data, dbuf, m_input_buffer.getSize());
	}
	else
		// convert first
		if(m_input_size)
			m_convert(reinterpret_cast<data_t*>(data), reinterpret_cast<data_t*>(data), m_input_samples * m_specs.channels);

#ifdef FFMPEG_OLD_CODE
	m_packet->data = nullptr;
	m_packet->size = 0;

	av_init_packet(m_packet);

	av_frame_unref(m_frame);
	int got_packet;
#endif

	m_frame->nb_samples = m_input_samples;
	m_frame->format = m_codecCtx->sample_fmt;
	m_frame->channel_layout = m_codecCtx->channel_layout;

	if(avcodec_fill_audio_frame(m_frame, m_specs.channels, m_codecCtx->sample_fmt, reinterpret_cast<data_t*>(data), m_input_buffer.getSize(), 0) < 0)
		AUD_THROW(FileException, "File couldn't be written, filling the audio frame failed with ffmpeg.");

	AVRational sample_time = { 1, static_cast<int>(m_specs.rate) };
	m_frame->pts = av_rescale_q(m_position - m_input_samples, m_codecCtx->time_base, sample_time);

#ifdef FFMPEG_OLD_CODE
	if(avcodec_encode_audio2(m_codecCtx, m_packet, m_frame, &got_packet))
	{
		AUD_THROW(FileException, "File couldn't be written, audio encoding failed with ffmpeg.");
	}

	if(got_packet)
	{
		m_packet->flags |= AV_PKT_FLAG_KEY;
		m_packet->stream_index = m_stream->index;
		if(av_write_frame(m_formatCtx, m_packet) < 0)
		{
			av_free_packet(m_packet);
			AUD_THROW(FileException, "Frame couldn't be writen to the file with ffmpeg.");
		}
		av_free_packet(m_packet);
	}
#else
	if(avcodec_send_frame(m_codecCtx, m_frame) < 0)
		AUD_THROW(FileException, "File couldn't be written, audio encoding failed with ffmpeg.");

	while(avcodec_receive_packet(m_codecCtx, m_packet) == 0)
	{
		m_packet->stream_index = m_stream->index;

		if(av_write_frame(m_formatCtx, m_packet) < 0)
			AUD_THROW(FileException, "Frame couldn't be writen to the file with ffmpeg.");
	}
#endif
}
Example #17
bool AudioEncoder::EncodeFrame(AVFrameWrapper* frame) {

	if(frame != NULL) {
#if SSR_USE_AVFRAME_NB_SAMPLES
		assert((unsigned int) frame->GetFrame()->nb_samples == GetFrameSize());
#endif
#if SSR_USE_AVFRAME_CHANNELS
		assert(frame->GetFrame()->channels == GetCodecContext()->channels);
#endif
#if SSR_USE_AVFRAME_SAMPLE_RATE
		assert(frame->GetFrame()->sample_rate == GetCodecContext()->sample_rate);
#endif
#if SSR_USE_AVFRAME_FORMAT
		assert(frame->GetFrame()->format == GetCodecContext()->sample_fmt);
#endif
	}

#if SSR_USE_AVCODEC_SEND_RECEIVE

	// send a frame
	AVFrame *avframe = (frame == NULL)? NULL : frame->Release();
	try {
		if(avcodec_send_frame(GetCodecContext(), avframe) < 0) {
			Logger::LogError("[AudioEncoder::EncodeFrame] " + Logger::tr("Error: Sending of audio frame failed!"));
			throw LibavException();
		}
	} catch(...) {
		av_frame_free(&avframe);
		throw;
	}
	av_frame_free(&avframe);

	// try to receive a packet
	for( ; ; ) {
		std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper());
		int res = avcodec_receive_packet(GetCodecContext(), packet->GetPacket());
		if(res == 0) { // we have a packet, send the packet to the muxer
			GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
			IncrementPacketCounter();
		} else if(res == AVERROR(EAGAIN)) { // we have no packet
			return true;
		} else if(res == AVERROR_EOF) { // this is the end of the stream
			return false;
		} else {
			Logger::LogError("[AudioEncoder::EncodeFrame] " + Logger::tr("Error: Receiving of audio packet failed!"));
			throw LibavException();
		}
	}

#elif SSR_USE_AVCODEC_ENCODE_AUDIO2

	// allocate a packet
	std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper());

	// encode the frame
	int got_packet;
	if(avcodec_encode_audio2(GetCodecContext(), packet->GetPacket(), (frame == NULL)? NULL : frame->GetFrame(), &got_packet) < 0) {
		Logger::LogError("[AudioEncoder::EncodeFrame] " + Logger::tr("Error: Encoding of audio frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(got_packet) {

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
		IncrementPacketCounter();
		return true;

	} else {
		return false;
	}

#else

	// encode the frame
	short *data = (frame == NULL)? NULL : (short*) frame->GetFrame()->data[0];
	int bytes_encoded = avcodec_encode_audio(GetCodecContext(), m_temp_buffer.data(), m_temp_buffer.size(), data);
	if(bytes_encoded < 0) {
		Logger::LogError("[AudioEncoder::EncodeFrame] " + Logger::tr("Error: Encoding of audio frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(bytes_encoded > 0) {

		// allocate a packet
		std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper(bytes_encoded));

		// copy the data
		memcpy(packet->GetPacket()->data, m_temp_buffer.data(), bytes_encoded);

		// set the timestamp
		// note: pts will be rescaled and stream_index will be set by Muxer
		if(GetCodecContext()->coded_frame != NULL && GetCodecContext()->coded_frame->pts != (int64_t) AV_NOPTS_VALUE)
			packet->GetPacket()->pts = GetCodecContext()->coded_frame->pts;

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
		IncrementPacketCounter();
		return true;

	} else {
		return false;
	}

#endif

}
Example #18
void _ffmpegPostAudioFrame(struct mAVStream* stream, int16_t left, int16_t right) {
	struct FFmpegEncoder* encoder = (struct FFmpegEncoder*) stream;
	if (!encoder->context || !encoder->audioCodec) {
		return;
	}

	if (encoder->absf && !left) {
		// XXX: AVBSF doesn't like silence. Figure out why.
		left = 1;
	}

	encoder->audioBuffer[encoder->currentAudioSample * 2] = left;
	encoder->audioBuffer[encoder->currentAudioSample * 2 + 1] = right;

	++encoder->currentAudioSample;

	if (encoder->currentAudioSample * 4 < encoder->audioBufferSize) {
		return;
	}

	int channelSize = 2 * av_get_bytes_per_sample(encoder->audio->sample_fmt);
	encoder->currentAudioSample = 0;
#ifdef USE_LIBAVRESAMPLE
	avresample_convert(encoder->resampleContext, 0, 0, 0,
	                   (uint8_t**) &encoder->audioBuffer, 0, encoder->audioBufferSize / 4);

	if (avresample_available(encoder->resampleContext) < encoder->audioFrame->nb_samples) {
		return;
	}
#if LIBAVCODEC_VERSION_MAJOR >= 55
	av_frame_make_writable(encoder->audioFrame);
#endif
	int samples = avresample_read(encoder->resampleContext, encoder->audioFrame->data, encoder->postaudioBufferSize / channelSize);
#else
#if LIBAVCODEC_VERSION_MAJOR >= 55
	av_frame_make_writable(encoder->audioFrame);
#endif
	if (swr_get_out_samples(encoder->resampleContext, encoder->audioBufferSize / 4) < encoder->audioFrame->nb_samples) {
		swr_convert(encoder->resampleContext, NULL, 0, (const uint8_t**) &encoder->audioBuffer, encoder->audioBufferSize / 4);
		return;
	}
	int samples = swr_convert(encoder->resampleContext, encoder->audioFrame->data, encoder->postaudioBufferSize / channelSize,
	                          (const uint8_t**) &encoder->audioBuffer, encoder->audioBufferSize / 4);
#endif

	encoder->audioFrame->pts = av_rescale_q(encoder->currentAudioFrame, encoder->audio->time_base, encoder->audioStream->time_base);
	encoder->currentAudioFrame += samples;

	AVPacket packet;
	av_init_packet(&packet);
	packet.data = 0;
	packet.size = 0;
	packet.pts = encoder->audioFrame->pts;

	int gotData;
#ifdef FFMPEG_USE_PACKETS
	avcodec_send_frame(encoder->audio, encoder->audioFrame);
	gotData = avcodec_receive_packet(encoder->audio, &packet);
	gotData = (gotData == 0) && packet.size;
#else
	avcodec_encode_audio2(encoder->audio, &packet, encoder->audioFrame, &gotData);
#endif
	if (gotData) {
		if (encoder->absf) {
			AVPacket tempPacket;

#ifdef FFMPEG_USE_NEW_BSF
			int success = av_bsf_send_packet(encoder->absf, &packet);
			if (success >= 0) {
				success = av_bsf_receive_packet(encoder->absf, &tempPacket);
			}
#else
			int success = av_bitstream_filter_filter(encoder->absf, encoder->audio, 0,
			    &tempPacket.data, &tempPacket.size,
			    packet.data, packet.size, 0);
#endif

			if (success >= 0) {
#if LIBAVUTIL_VERSION_MAJOR >= 53
				tempPacket.buf = av_buffer_create(tempPacket.data, tempPacket.size, av_buffer_default_free, 0, 0);
#endif

#ifdef FFMPEG_USE_PACKET_UNREF
				av_packet_move_ref(&packet, &tempPacket);
#else
				av_free_packet(&packet);
				packet = tempPacket;
#endif

				packet.stream_index = encoder->audioStream->index;
				av_interleaved_write_frame(encoder->context, &packet);
			}
		} else {
			packet.stream_index = encoder->audioStream->index;
			av_interleaved_write_frame(encoder->context, &packet);
		}
	}
#ifdef FFMPEG_USE_PACKET_UNREF
	av_packet_unref(&packet);
#else
	av_free_packet(&packet);
#endif
}
Example #19
int save_snapshot (const char * filename, const AVCodecContext * picc, const AVFrame * pif) {
    int ok = 0;

    OutputContext * output_context = output_context_new(filename, pif);
    if (!output_context) {
        ok = -1;
        goto failed;
    }

    // write file header
    ok = avformat_write_header(output_context->format_context, NULL);
    if (ok != 0) {
        ok = 1;
        goto failed;
    }

    // prepare pixel conversion
    AVCodecContext * pcc = output_context->codec_context;
    struct SwsContext * psc = sws_getCachedContext(NULL, picc->width, picc->height, picc->pix_fmt, pcc->width, pcc->height, pcc->pix_fmt, SWS_BILINEAR, NULL, NULL, NULL);
    if (!psc) {
        ok = 1;
        goto failed;
    }

    // prepare converted picture
    AVFrame * pf = av_frame_alloc();
    if (!pf) {
        ok = 1;
        goto close_sws;
    }
    pf->width = pcc->width;
    pf->height = pcc->height;
    pf->format = pcc->pix_fmt;
    ok = av_image_alloc(pf->data, pf->linesize, pcc->width, pcc->height, pcc->pix_fmt, 1);
    if (ok < 0) {
        ok = AVUNERROR(ok);
        goto close_frame;
    }
    // convert pixel format
    ok = sws_scale(psc, (const uint8_t * const *)pif->data, pif->linesize, 0, picc->height, pf->data, pf->linesize);
    if (ok <= 0) {
        ok = 1;
        goto close_picture;
    }

    // encode frame
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    ok = avcodec_send_frame(pcc, pf);
    if (ok != 0) {
        ok = AVUNERROR(ok);
        goto close_packet;
    }
    ok = avcodec_receive_packet(pcc, &pkt);
    if (ok != 0) {
        ok = AVUNERROR(ok);
        goto close_packet;
    }
    // NOTE: timestamps do not matter for a single image
    pkt.pts = 0;
    pkt.dts = 0;

    // write encoded frame
    ok = av_interleaved_write_frame(output_context->format_context, &pkt);
    if (ok != 0) {
        ok = 1;
        goto close_file;
    }

    ok = 0;
close_file:
    av_write_trailer(output_context->format_context);
close_packet:
    av_packet_unref(&pkt);
close_picture:
    av_freep(&pf->data[0]);
close_frame:
    av_frame_free(&pf);
close_sws:
    sws_freeContext(psc);
failed:
    output_context_delete(&output_context);
    return ok;
}
Example #20
bool VideoEncoder::EncodeFrame(AVFrameWrapper* frame) {

	if(frame != NULL) {
#if SSR_USE_AVFRAME_WIDTH_HEIGHT
		assert(frame->GetFrame()->width == GetCodecContext()->width);
		assert(frame->GetFrame()->height == GetCodecContext()->height);
#endif
#if SSR_USE_AVFRAME_FORMAT
		assert(frame->GetFrame()->format == GetCodecContext()->pix_fmt);
#endif
#if SSR_USE_AVFRAME_SAR
		assert(frame->GetFrame()->sample_aspect_ratio.num == GetCodecContext()->sample_aspect_ratio.num);
		assert(frame->GetFrame()->sample_aspect_ratio.den == GetCodecContext()->sample_aspect_ratio.den);
#endif
	}

#if SSR_USE_AVCODEC_SEND_RECEIVE

	// send a frame
	AVFrame *avframe = (frame == NULL)? NULL : frame->Release();
	try {
		if(avcodec_send_frame(GetCodecContext(), avframe) < 0) {
			Logger::LogError("[VideoEncoder::EncodeFrame] " + Logger::tr("Error: Sending of video frame failed!"));
			throw LibavException();
		}
	} catch(...) {
		av_frame_free(&avframe);
		throw;
	}
	av_frame_free(&avframe);

	// try to receive a packet
	for( ; ; ) {
		std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper());
		int res = avcodec_receive_packet(GetCodecContext(), packet->GetPacket());
		if(res == 0) { // we have a packet, send the packet to the muxer
			GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
			IncrementPacketCounter();
		} else if(res == AVERROR(EAGAIN)) { // we have no packet
			return true;
		} else if(res == AVERROR_EOF) { // this is the end of the stream
			return false;
		} else {
			Logger::LogError("[VideoEncoder::EncodeFrame] " + Logger::tr("Error: Receiving of video packet failed!"));
			throw LibavException();
		}
	}

#elif SSR_USE_AVCODEC_ENCODE_VIDEO2

	// allocate a packet
	std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper());

	// encode the frame
	int got_packet;
	if(avcodec_encode_video2(GetCodecContext(), packet->GetPacket(), (frame == NULL)? NULL : frame->GetFrame(), &got_packet) < 0) {
		Logger::LogError("[VideoEncoder::EncodeFrame] " + Logger::tr("Error: Encoding of video frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(got_packet) {

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
		IncrementPacketCounter();
		return true;

	} else {
		return false;
	}

#else

	// encode the frame
	int bytes_encoded = avcodec_encode_video(GetCodecContext(), m_temp_buffer.data(), m_temp_buffer.size(), (frame == NULL)? NULL : frame->GetFrame());
	if(bytes_encoded < 0) {
		Logger::LogError("[VideoEncoder::EncodeFrame] " + Logger::tr("Error: Encoding of video frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(bytes_encoded > 0) {

		// allocate a packet
		std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper(bytes_encoded));

		// copy the data
		memcpy(packet->GetPacket()->data, m_temp_buffer.data(), bytes_encoded);

		// set the timestamp
		// note: pts will be rescaled and stream_index will be set by Muxer
		if(GetCodecContext()->coded_frame != NULL && GetCodecContext()->coded_frame->pts != (int64_t) AV_NOPTS_VALUE)
			packet->GetPacket()->pts = GetCodecContext()->coded_frame->pts;

		// set the keyframe flag
		if(GetCodecContext()->coded_frame->key_frame)
			packet->GetPacket()->flags |= AV_PKT_FLAG_KEY;

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
		IncrementPacketCounter();
		return true;

	} else {
		return false;
	}

#endif

}
Example #21
File: af_lavcac3enc.c  Project: chyiz/mpv
static int filter_out(struct af_instance *af)
{
    af_ac3enc_t *s = af->priv;

    if (!s->pending)
        return 0;

    AVFrame *frame = av_frame_alloc();
    if (!frame) {
        MP_FATAL(af, "Could not allocate memory \n");
        return -1;
    }
    int err = -1;

    AVPacket pkt = {0};
    av_init_packet(&pkt);

#if HAVE_AVCODEC_NEW_CODEC_API
    // Send input as long as it wants.
    while (1) {
        err = read_input_frame(af, frame);
        if (err < 0)
            goto done;
        if (err == 0)
            break;
        err = -1;
        int lavc_ret = avcodec_send_frame(s->lavc_actx, frame);
        // On EAGAIN, we're supposed to read remaining output.
        if (lavc_ret == AVERROR(EAGAIN))
            break;
        if (lavc_ret < 0) {
            MP_FATAL(af, "Encode failed.\n");
            goto done;
        }
        s->encoder_buffered += s->input->samples;
        s->input->samples = 0;
    }
    int lavc_ret = avcodec_receive_packet(s->lavc_actx, &pkt);
    if (lavc_ret == AVERROR(EAGAIN)) {
        // Need to buffer more input.
        err = 0;
        goto done;
    }
    if (lavc_ret < 0) {
        MP_FATAL(af, "Encode failed.\n");
        goto done;
    }
#else
    err = read_input_frame(af, frame);
    if (err < 0)
        goto done;
    if (err == 0)
        goto done;
    err = -1;
    int ok;
    int lavc_ret = avcodec_encode_audio2(s->lavc_actx, &pkt, frame, &ok);
    s->input->samples = 0;
    if (lavc_ret < 0 || !ok) {
        MP_FATAL(af, "Encode failed.\n");
        goto done;
    }
#endif

    MP_DBG(af, "avcodec_encode_audio got %d, pending %d.\n",
           pkt.size, s->pending->samples + s->input->samples);

    s->encoder_buffered -= AC3_FRAME_SIZE;

    struct mp_audio *out =
        mp_audio_pool_get(af->out_pool, af->data, s->out_samples);
    if (!out)
        goto done;
    mp_audio_copy_attributes(out, s->pending);

    int frame_size = pkt.size;
    int header_len = 0;
    char hdr[8];

    if (s->cfg_add_iec61937_header && pkt.size > 5) {
        int bsmod = pkt.data[5] & 0x7;
        int len = frame_size;

        frame_size = AC3_FRAME_SIZE * 2 * 2;
        header_len = 8;

        AV_WL16(hdr,     0xF872);   // iec 61937 syncword 1
        AV_WL16(hdr + 2, 0x4E1F);   // iec 61937 syncword 2
        hdr[5] = bsmod;             // bsmod
        hdr[4] = 0x01;              // data-type ac3
        AV_WL16(hdr + 6, len << 3); // number of bits in payload
    }

    if (frame_size > out->samples * out->sstride)
        abort();

    char *buf = (char *)out->planes[0];
    memcpy(buf, hdr, header_len);
    memcpy(buf + header_len, pkt.data, pkt.size);
    memset(buf + header_len + pkt.size, 0,
           frame_size - (header_len + pkt.size));
    swap_16((uint16_t *)(buf + header_len), pkt.size / 2);
    out->samples = frame_size / out->sstride;
    af_add_output_frame(af, out);

    err = 0;
done:
    av_packet_unref(&pkt);
    av_frame_free(&frame);
    update_delay(af);
    return err;
}
Example #22
static bool vaapi_encode(void *data, struct encoder_frame *frame,
		struct encoder_packet *packet, bool *received_packet)
{
	struct vaapi_encoder *enc     = data;
	AVFrame *             hwframe = NULL;
	AVPacket              av_pkt;
	int                   got_packet;
	int                   ret;

	hwframe = av_frame_alloc();
	if (!hwframe) {
		warn("vaapi_encode: failed to allocate hw frame");
		return false;
	}

	ret = av_hwframe_get_buffer(enc->vaframes_ref, hwframe, 0);
	if (ret < 0) {
		warn("vaapi_encode: failed to get buffer for hw frame: %s",
				av_err2str(ret));
		goto fail;
	}

	copy_data(enc->vframe, frame, enc->height, enc->context->pix_fmt);

	enc->vframe->pts = frame->pts;
	hwframe->pts     = frame->pts;
	hwframe->width   = enc->vframe->width;
	hwframe->height  = enc->vframe->height;

	ret = av_hwframe_transfer_data(hwframe, enc->vframe, 0);
	if (ret < 0) {
		warn("vaapi_encode: failed to upload hw frame: %s",
				av_err2str(ret));
		goto fail;
	}

	ret = av_frame_copy_props(hwframe, enc->vframe);
	if (ret < 0) {
		warn("vaapi_encode: failed to copy props to hw frame: %s",
				av_err2str(ret));
		goto fail;
	}

	av_init_packet(&av_pkt);

#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(57, 40, 101)
	ret = avcodec_send_frame(enc->context, hwframe);
	if (ret == 0)
		ret = avcodec_receive_packet(enc->context, &av_pkt);

	got_packet = (ret == 0);

	if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
		ret = 0;
#else
	ret = avcodec_encode_video2(
			enc->context, &av_pkt, hwframe, &got_packet);
#endif
	if (ret < 0) {
		warn("vaapi_encode: Error encoding: %s", av_err2str(ret));
		goto fail;
	}

	if (got_packet && av_pkt.size) {
		if (enc->first_packet) {
			uint8_t *new_packet;
			size_t   size;

			enc->first_packet = false;
			obs_extract_avc_headers(av_pkt.data, av_pkt.size,
					&new_packet, &size, &enc->header,
					&enc->header_size, &enc->sei,
					&enc->sei_size);

			da_copy_array(enc->buffer, new_packet, size);
			bfree(new_packet);
		} else {
			da_copy_array(enc->buffer, av_pkt.data, av_pkt.size);
		}

		packet->pts      = av_pkt.pts;
		packet->dts      = av_pkt.dts;
		packet->data     = enc->buffer.array;
		packet->size     = enc->buffer.num;
		packet->type     = OBS_ENCODER_VIDEO;
		packet->keyframe = obs_avc_keyframe(packet->data, packet->size);
		*received_packet = true;
	} else {
		*received_packet = false;
	}

	av_packet_unref(&av_pkt);
	av_frame_free(&hwframe);
	return true;

fail:
	av_frame_free(&hwframe);
	return false;
}
Example #23
void _ffmpegPostAudioFrame(struct mAVStream* stream, int16_t left, int16_t right) {
	struct FFmpegEncoder* encoder = (struct FFmpegEncoder*) stream;
	if (!encoder->context || !encoder->audioCodec) {
		return;
	}

	encoder->audioBuffer[encoder->currentAudioSample * 2] = left;
	encoder->audioBuffer[encoder->currentAudioSample * 2 + 1] = right;

	++encoder->currentAudioFrame;
	++encoder->currentAudioSample;

	if ((encoder->currentAudioSample * 4) < encoder->audioBufferSize) {
		return;
	}
	encoder->currentAudioSample = 0;

	int channelSize = 2 * av_get_bytes_per_sample(encoder->audio->sample_fmt);
	avresample_convert(encoder->resampleContext,
	    0, 0, 0,
	    (uint8_t**) &encoder->audioBuffer, 0, encoder->audioBufferSize / 4);
	if (avresample_available(encoder->resampleContext) < encoder->audioFrame->nb_samples) {
		return;
	}
#if LIBAVCODEC_VERSION_MAJOR >= 55
	av_frame_make_writable(encoder->audioFrame);
#endif
	avresample_read(encoder->resampleContext, encoder->audioFrame->data, encoder->postaudioBufferSize / channelSize);

	AVRational timeBase = { 1, PREFERRED_SAMPLE_RATE };
	encoder->audioFrame->pts = encoder->nextAudioPts;
	encoder->nextAudioPts = av_rescale_q(encoder->currentAudioFrame, timeBase, encoder->audioStream->time_base);

	AVPacket packet;
	av_init_packet(&packet);
	packet.data = 0;
	packet.size = 0;
	int gotData;
#ifdef FFMPEG_USE_PACKETS
	avcodec_send_frame(encoder->audio, encoder->audioFrame);
	gotData = avcodec_receive_packet(encoder->audio, &packet) == 0;
#else
	avcodec_encode_audio2(encoder->audio, &packet, encoder->audioFrame, &gotData);
#endif
	if (gotData) {
		if (encoder->absf) {
			AVPacket tempPacket = packet;

#ifdef FFMPEG_USE_NEW_BSF
			int success = av_bsf_send_packet(encoder->absf, &packet) && av_bsf_receive_packet(encoder->absf, &packet);
#else
			int success = av_bitstream_filter_filter(encoder->absf, encoder->audio, 0,
			    &tempPacket.data, &tempPacket.size,
			    packet.data, packet.size, 0);
#endif
			if (success > 0) {
#if LIBAVUTIL_VERSION_MAJOR >= 53
				tempPacket.buf = av_buffer_create(tempPacket.data, tempPacket.size, av_buffer_default_free, 0, 0);
#endif
#ifdef FFMPEG_USE_PACKET_UNREF
				av_packet_unref(&packet);
#else
				av_free_packet(&packet);
#endif
			}
			packet = tempPacket;
		}
		packet.stream_index = encoder->audioStream->index;
		av_interleaved_write_frame(encoder->context, &packet);
	}
#ifdef FFMPEG_USE_PACKET_UNREF
		av_packet_unref(&packet);
#else
		av_free_packet(&packet);
#endif
}
Example #24
File: ao_lavc.c  Project: Archer-sys/mpv
static void encode_audio_and_write(struct ao *ao, AVFrame *frame)
{
    // TODO: Can we unify this with the equivalent video code path?
    struct priv *ac = ao->priv;
    AVPacket packet = {0};

#if HAVE_AVCODEC_NEW_CODEC_API
    int status = avcodec_send_frame(ac->codec, frame);
    if (status < 0) {
        MP_ERR(ao, "error encoding at %d %d/%d\n",
               frame ? (int) frame->pts : -1,
               ac->codec->time_base.num,
               ac->codec->time_base.den);
        return;
    }
    for (;;) {
        av_init_packet(&packet);
        status = avcodec_receive_packet(ac->codec, &packet);
        if (status == AVERROR(EAGAIN)) { // No more packets for now.
            if (frame == NULL) {
                MP_ERR(ao, "sent flush frame, got EAGAIN");
            }
            break;
        }
        if (status == AVERROR_EOF) { // No more packets, ever.
            if (frame != NULL) {
                MP_ERR(ao, "sent audio frame, got EOF");
            }
            break;
        }
        if (status < 0) {
            MP_ERR(ao, "error encoding at %d %d/%d\n",
                   frame ? (int) frame->pts : -1,
                   ac->codec->time_base.num,
                   ac->codec->time_base.den);
            break;
        }
        if (frame) {
            if (ac->savepts == AV_NOPTS_VALUE)
                ac->savepts = frame->pts;
        }
        encode_lavc_write_stats(ao->encode_lavc_ctx, ac->codec);
        write_packet(ao, &packet);
        av_packet_unref(&packet);
    }
#else
    av_init_packet(&packet);
    int got_packet = 0;
    int status = avcodec_encode_audio2(ac->codec, &packet, frame, &got_packet);
    if (status < 0) {
        MP_ERR(ao, "error encoding at %d %d/%d\n",
               frame ? (int) frame->pts : -1,
               ac->codec->time_base.num,
               ac->codec->time_base.den);
        return;
    }
    if (!got_packet) {
        return;
    }
    if (frame) {
        if (ac->savepts == AV_NOPTS_VALUE)
            ac->savepts = frame->pts;
    }
    encode_lavc_write_stats(ao->encode_lavc_ctx, ac->codec);
    write_packet(ao, &packet);
    av_packet_unref(&packet);
#endif
}
Example #25
File: encode.c  Project: alfredh/baresip
int h265_encode(struct videnc_state *st, bool update,
		const struct vidframe *frame, uint64_t timestamp)
{
	AVFrame *pict = NULL;
	AVPacket *pkt = NULL;
	uint64_t rtp_ts;
	int i, ret, got_packet = 0, err = 0;

	if (!st || !frame)
		return EINVAL;

	if (!st->ctx || !vidsz_cmp(&st->size, &frame->size) ||
	    st->fmt != frame->fmt) {

		enum AVPixelFormat pix_fmt;

		pix_fmt = vidfmt_to_avpixfmt(frame->fmt);
		if (pix_fmt == AV_PIX_FMT_NONE) {
			warning("h265: encode: pixel format not supported"
				" (%s)\n", vidfmt_name(frame->fmt));
			return ENOTSUP;
		}

		debug("h265: encoder: reset %u x %u (%s)\n",
		      frame->size.w, frame->size.h, vidfmt_name(frame->fmt));

		err = open_encoder(st, &frame->size, pix_fmt);
		if (err)
			return err;

		st->size = frame->size;
		st->fmt = frame->fmt;
	}

	pict = av_frame_alloc();
	if (!pict) {
		err = ENOMEM;
		goto out;
	}

	pict->format = st->ctx->pix_fmt;
	pict->width = frame->size.w;
	pict->height = frame->size.h;
	pict->pts = timestamp;

	for (i=0; i<4; i++) {
		pict->data[i]     = frame->data[i];
		pict->linesize[i] = frame->linesize[i];
	}

	if (update) {
		debug("h265: encoder picture update\n");
		pict->key_frame = 1;
		pict->pict_type = AV_PICTURE_TYPE_I;
	}

#if LIBAVUTIL_VERSION_MAJOR >= 55
	pict->color_range = AVCOL_RANGE_MPEG;
#endif

#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 37, 100)

	pkt = av_packet_alloc();
	if (!pkt) {
		err = ENOMEM;
		goto out;
	}

	ret = avcodec_send_frame(st->ctx, pict);
	if (ret < 0) {
		err = EBADMSG;
		goto out;
	}

	/* NOTE: packet contains 4-byte startcode */
	ret = avcodec_receive_packet(st->ctx, pkt);
	if (ret < 0) {
		info("h265: no packet yet ..\n");
		err = 0;
		goto out;
	}

	got_packet = 1;
#else
	pkt = av_malloc(sizeof(*pkt));
	if (!pkt) {
		err = ENOMEM;
		goto out;
	}

	av_init_packet(pkt);
	av_new_packet(pkt, 65536);

	ret = avcodec_encode_video2(st->ctx, pkt, pict, &got_packet);
	if (ret < 0) {
		err = EBADMSG;
		goto out;
	}
#endif

	if (!got_packet)
		goto out;

	rtp_ts = video_calc_rtp_timestamp_fix(pkt->dts);

	err = packetize_annexb(rtp_ts, pkt->data, pkt->size,
			       st->pktsize, st->pkth, st->arg);
	if (err)
		goto out;

 out:
	if (pict)
		av_free(pict);
	if (pkt)
		av_packet_free(&pkt);

	return err;
}
Example #26
File: FileOut.cpp  Project: mtone/ACMuxer
void FileOut::encodeFrameTest() const {
   // dummy frame
   AVFrame * frame = av_frame_alloc();
   if (!frame) {
	fprintf(stderr, "Could not allocate video frame\n");
	exit(1);
   }
   frame->format = ccv.get()->pix_fmt;
   frame->width = ccv.get()->width;
   frame->height = ccv.get()->height;

   /* the image can be allocated by any means and av_image_alloc() is
   * just the most convenient way if av_malloc() is to be used */
   int got_output;
   int ret = av_image_alloc(frame->data, frame->linesize, ccv.get()->width, ccv.get()->height, ccv.get()->pix_fmt, 32);
   assert(ret >= 0);


   /* encode 1 second of video */
   AVPacket p;
   int i, x, y;
   auto c = ccv.get();

   for (i = 0; i < 100; i++) {
	av_init_packet(&p);
	p.data = NULL;    // packet data will be allocated by the encoder
	p.size = 0;

	fflush(stdout);
	/* prepare a dummy image */
	/* Y */
	for (y = 0; y < c->height; y++) {
	   for (x = 0; x < c->width; x++) {
		frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
	   }
	}

	/* Cb and Cr */
	for (y = 0; y < c->height / 2; y++) {
	   for (x = 0; x < c->width / 2; x++) {
		frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
		frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
	   }
	}

	//frame->pts = i;

	ret = avcodec_send_frame(ccv.get(), frame);
	assert(ret == 0);

	ret = avcodec_receive_packet(ccv.get(), &p);

	if(ret == AVERROR(EAGAIN)) {
	   cout << "AVERROR(EAGAIN) : output is not available right now - user must try to send input" << endl;
	}else if(ret == AVERROR_EOF) {
	   cout << "AVERROR_EOF: the encoder has been fully flushed, and there will be no more output packets" << endl;
	}else if (ret == AVERROR(EINVAL)) {
	   cout << "AVERROR(EINVAL) : codec not opened, or it is an encoder" << endl;
	}else if (ret ==0) {
	   cout << "Success - got packet" << endl;
	   if (ret == 0) {
		printf("Write frame %3d (size=%5d)\n", i, p.size);
		fwrite(p.data, 1, p.size, f);
		av_packet_unref(&p);
	   }
	} else {
	   cout << "Other errors" << endl;
	}
	

	/* encode the image */
	/*ret = avcodec_encode_video2(c, &p, frame, &got_output);
	if (ret < 0) {
	   fprintf(stderr, "Error encoding frame\n");
	   exit(1);
	}

	if (got_output) {
	   printf("Write frame %3d (size=%5d)\n", i, p.size);
	   fwrite(p.data, 1, p.size, f);
	   av_packet_unref(&p);
	}*/
   }


   /* get the delayed frames */
   ret = avcodec_send_frame(ccv.get(), nullptr);
   assert(ret == 0);

   ret = avcodec_receive_packet(ccv.get(), &p);
   if (ret == 0) {
	printf("Write frame %3d (size=%5d)\n", i, p.size);
	fwrite(p.data, 1, p.size, f);
	av_packet_unref(&p);
   }

   /* get the delayed frames */
   /*for (got_output = 1; got_output; i++) {
	fflush(stdout);

	ret = avcodec_encode_video2(c, &p, NULL, &got_output);
	if (ret < 0) {
	   fprintf(stderr, "Error encoding frame\n");
	   exit(1);
	}

	if (got_output) {
	   printf("Write frame %3d (size=%5d)\n", i, p.size);
	   fwrite(p.data, 1, p.size, f);
	   av_packet_unref(&p);
	}
   }*/

   /* add sequence end code to have a real MPEG file */
   uint8_t endcode[] = { 0, 0, 1, 0xb7 };
   fwrite(endcode, 1, sizeof(endcode), f);
   fclose(f);

   avcodec_close(c);
   av_free(c);
   av_freep(&frame->data[0]);
   av_frame_free(&frame);
   printf("\n");



}
Example #27
static bool write_lavc(struct image_writer_ctx *ctx, mp_image_t *image, FILE *fp)
{
    bool success = 0;
    AVFrame *pic = NULL;
    AVPacket pkt = {0};
    int got_output = 0;

    av_init_packet(&pkt);

    struct AVCodec *codec = avcodec_find_encoder(ctx->opts->format);
    AVCodecContext *avctx = NULL;
    if (!codec)
        goto print_open_fail;
    avctx = avcodec_alloc_context3(codec);
    if (!avctx)
        goto print_open_fail;

    avctx->time_base = AV_TIME_BASE_Q;
    avctx->width = image->w;
    avctx->height = image->h;
    avctx->color_range = mp_csp_levels_to_avcol_range(image->params.color.levels);
    avctx->pix_fmt = imgfmt2pixfmt(image->imgfmt);
    // Annoying deprecated garbage for the jpg encoder.
    if (image->params.color.levels == MP_CSP_LEVELS_PC)
        avctx->pix_fmt = replace_j_format(avctx->pix_fmt);
    if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
        MP_ERR(ctx, "Image format %s not supported by lavc.\n",
               mp_imgfmt_to_name(image->imgfmt));
        goto error_exit;
    }
    if (codec->id == AV_CODEC_ID_PNG) {
        avctx->compression_level = ctx->opts->png_compression;
        av_opt_set_int(avctx, "pred", ctx->opts->png_filter,
                       AV_OPT_SEARCH_CHILDREN);
    }

    if (avcodec_open2(avctx, codec, NULL) < 0) {
     print_open_fail:
        MP_ERR(ctx, "Could not open libavcodec encoder for saving images\n");
        goto error_exit;
    }

    pic = av_frame_alloc();
    if (!pic)
        goto error_exit;
    for (int n = 0; n < 4; n++) {
        pic->data[n] = image->planes[n];
        pic->linesize[n] = image->stride[n];
    }
    pic->format = avctx->pix_fmt;
    pic->width = avctx->width;
    pic->height = avctx->height;
    pic->color_range = avctx->color_range;
    if (ctx->opts->tag_csp) {
        pic->color_primaries = mp_csp_prim_to_avcol_pri(image->params.color.primaries);
        pic->color_trc = mp_csp_trc_to_avcol_trc(image->params.color.gamma);
    }

    int ret = avcodec_send_frame(avctx, pic);
    if (ret < 0)
        goto error_exit;
    ret = avcodec_send_frame(avctx, NULL); // send EOF
    if (ret < 0)
        goto error_exit;
    ret = avcodec_receive_packet(avctx, &pkt);
    if (ret < 0)
        goto error_exit;
    got_output = 1;

    fwrite(pkt.data, pkt.size, 1, fp);

    success = !!got_output;
error_exit:
    if (avctx)
        avcodec_close(avctx);
    av_free(avctx);
    av_frame_free(&pic);
    av_packet_unref(&pkt);
    return success;
}