/* Makes duplicates of data, side_data, but does not copy any other fields.
 * Assumes the scalar fields of dst (size, side_data_elems, ...) were already
 * copied from src by the caller — TODO confirm against the caller.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure.
 * NOTE: DUP_DATA is a macro defined elsewhere in this file; the failed_alloc
 * label below is only reachable if that macro jumps there on alloc failure. */
static int copy_packet_data(AVPacket *dst, AVPacket *src)
{
    /* Clear the pointers first so a failure before they are duplicated
     * leaves nothing dangling for av_destruct_packet() to free. */
    dst->data      = NULL;
    dst->side_data = NULL;
    DUP_DATA(dst->data, src->data, dst->size, 1);
    /* dst now owns its payload; make sure it is freed the standard way. */
    dst->destruct = av_destruct_packet;
    if (dst->side_data_elems) {
        int i;
        DUP_DATA(dst->side_data, src->side_data,
                 dst->side_data_elems * sizeof(*dst->side_data), 0);
        /* Zero the freshly copied element array before duplicating each
         * entry: if DUP_DATA fails mid-loop, the cleanup below must only
         * see valid pointers or NULL, never src's pointers. */
        memset(dst->side_data, 0,
               dst->side_data_elems * sizeof(*dst->side_data));
        for (i = 0; i < dst->side_data_elems; i++) {
            DUP_DATA(dst->side_data[i].data, src->side_data[i].data,
                     src->side_data[i].size, 1);
            dst->side_data[i].size = src->side_data[i].size;
            dst->side_data[i].type = src->side_data[i].type;
        }
    }
    return 0;

failed_alloc:
    /* Free whatever was duplicated before the failure. */
    av_destruct_packet(dst);
    return AVERROR(ENOMEM);
}
/* Tear down a VfW capture session: detach the capture callback, disconnect
 * the driver, destroy the capture window, release the synchronization
 * handles and free every queued packet. Always returns 0. */
static int vfw_read_close(AVFormatContext *s)
{
    struct vfw_ctx *ctx = s->priv_data;
    AVPacketList *node;

    if (ctx->hwnd) {
        /* Stop the stream callback before disconnecting the driver. */
        SendMessage(ctx->hwnd, WM_CAP_SET_CALLBACK_VIDEOSTREAM, 0, 0);
        SendMessage(ctx->hwnd, WM_CAP_DRIVER_DISCONNECT, 0, 0);
        DestroyWindow(ctx->hwnd);
    }

    if (ctx->mutex)
        CloseHandle(ctx->mutex);
    if (ctx->event)
        CloseHandle(ctx->event);

    /* Drain the buffered packet list, freeing payload and node alike. */
    for (node = ctx->pktl; node; ) {
        AVPacketList *tmp = node->next;
        av_destruct_packet(&node->pkt);
        av_free(node);
        node = tmp;
    }

    return 0;
}
/* Turn a packet whose payload is not owned (destruct == NULL) into one that
 * owns freshly allocated copies of data and side_data.
 * Returns 0 on success (or if no duplication was needed),
 * AVERROR(ENOMEM) on allocation failure.
 * NOTE: DUP_DATA is a macro defined elsewhere; failed_alloc is only
 * reachable if that macro jumps there on alloc failure. */
int av_dup_packet(AVPacket *pkt)
{
    AVPacket tmp_pkt;

    /* Only duplicate when the packet does not own its data yet. */
    if (pkt->destruct == NULL && pkt->data) {
        /* Keep the original pointers in tmp_pkt; pkt's own pointers are
         * cleared so a failure leaves nothing dangling for cleanup. */
        tmp_pkt = *pkt;

        pkt->data      = NULL;
        pkt->side_data = NULL;
        DUP_DATA(pkt->data, tmp_pkt.data, pkt->size, 1);
        /* pkt now owns its payload; free it the standard way later. */
        pkt->destruct = av_destruct_packet;

        if (pkt->side_data_elems) {
            int i;

            DUP_DATA(pkt->side_data, tmp_pkt.side_data,
                     pkt->side_data_elems * sizeof(*pkt->side_data), 0);
            /* Zero the element array before per-entry duplication so a
             * mid-loop failure never leaves tmp_pkt's pointers in pkt for
             * av_destruct_packet() to free. */
            memset(pkt->side_data, 0,
                   pkt->side_data_elems * sizeof(*pkt->side_data));
            for (i = 0; i < pkt->side_data_elems; i++) {
                DUP_DATA(pkt->side_data[i].data, tmp_pkt.side_data[i].data,
                         tmp_pkt.side_data[i].size, 1);
                pkt->side_data[i].size = tmp_pkt.side_data[i].size;
                pkt->side_data[i].type = tmp_pkt.side_data[i].type;
            }
        }
    }
    return 0;

failed_alloc:
    /* Free whatever was duplicated before the failure. */
    av_destruct_packet(pkt);
    return AVERROR(ENOMEM);
}
/* Makes duplicates of data, side_data, but does not copy any other fields.
 * Assumes pkt's scalar fields (size, buf, side_data_elems, ...) were copied
 * from src by the caller (presumably via *pkt = *src — TODO confirm).
 * dup != 0 requests a shallow side-data copy (pointers shared with src).
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure. */
static int copy_packet_data(AVPacket *pkt, AVPacket *src, int dup)
{
    pkt->data      = NULL;
    pkt->side_data = NULL;
    if (pkt->buf) {
        /* Reference-counted payload: take a new reference instead of
         * copying the bytes. */
        AVBufferRef *ref = av_buffer_ref(src->buf);
        if (!ref)
            return AVERROR(ENOMEM);
        pkt->buf  = ref;
        pkt->data = ref->data;
    } else {
        /* Legacy flat payload: deep-copy via the DUP_DATA macro (defined
         * elsewhere; jumps to failed_alloc on alloc failure). */
        DUP_DATA(pkt->data, src->data, pkt->size, 1, ALLOC_BUF);
    }
#if FF_API_DESTRUCT_PACKET
    /* Compatibility stub for the deprecated destruct callback. */
    pkt->destruct = dummy_destruct_packet;
#endif
    if (pkt->side_data_elems && dup)
        /* Shallow copy: share src's side-data array and buffers. */
        pkt->side_data = src->side_data;
    if (pkt->side_data_elems && !dup) {
        return av_copy_packet_side_data(pkt, src);
    }
    return 0;

failed_alloc:
    av_destruct_packet(pkt);
    return AVERROR(ENOMEM);
}
/* Finalize the a64 muxer: in interleaved mode flush the pending frame by
 * feeding an empty packet through the regular write path, then release the
 * packet that was held back for interleaving. Always returns 0. */
static int a64_write_trailer(struct AVFormatContext *s)
{
    A64MuxerContext *ctx = s->priv_data;
    AVPacket flush_pkt = {0};

    /* An empty packet tells a64_write_packet to emit the buffered frame. */
    if (ctx->interleaved)
        a64_write_packet(s, &flush_pkt);

    /* Drop the backed-up packet, if any. */
    if (ctx->prev_pkt.data)
        av_destruct_packet(&ctx->prev_pkt);

    return 0;
}
/* Writer-thread entry point: pop packets from the global queue and mux them
 * until avpacket_queue_get() returns false/0. Always returns NULL. */
static void *push_packet(void *ctx)
{
    AVFormatContext *s = (AVFormatContext *)ctx;
    AVPacket pkt;
    int ret;  /* NOTE(review): never assigned or read — dead variable. */

    while (avpacket_queue_get(&queue, &pkt, 1)) {
        /* Clearing destruct presumably stops the muxer from freeing the
         * payload, so it is freed manually below — TODO confirm against
         * the FFmpeg version in use. */
        pkt.destruct = NULL;
        /* NOTE(review): return value of av_interleaved_write_frame() is
         * ignored; write errors go unnoticed. */
        av_interleaved_write_frame(s, &pkt);
        /* NOTE(review): calling both av_destruct_packet() and
         * av_free_packet() looks redundant; with destruct == NULL the
         * second call only resets the fields, but whether this is safe
         * (or leaks side data) depends on the FFmpeg version — verify. */
        av_destruct_packet(&pkt);
        av_free_packet(&pkt);
    }
    return NULL;
}
/* Deep-copy src's side data array and every element's payload into pkt.
 * Assumes pkt->side_data_elems already mirrors src's (presumably set by the
 * caller — TODO confirm), since the failure path frees based on pkt's state.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure.
 * NOTE: DUP_DATA is a macro defined elsewhere; failed_alloc is only
 * reachable if that macro jumps there on alloc failure. */
int av_copy_packet_side_data(AVPacket *pkt, AVPacket *src)
{
    if (src->side_data_elems) {
        int i;

        DUP_DATA(pkt->side_data, src->side_data,
                 src->side_data_elems * sizeof(*src->side_data), 0, ALLOC_MALLOC);
        /* Zero the element array before per-entry duplication so a
         * mid-loop failure never leaves src's pointers in pkt for
         * av_destruct_packet() to free. */
        memset(pkt->side_data, 0,
               src->side_data_elems * sizeof(*src->side_data));
        for (i = 0; i < src->side_data_elems; i++) {
            DUP_DATA(pkt->side_data[i].data, src->side_data[i].data,
                     src->side_data[i].size, 1, ALLOC_MALLOC);
            pkt->side_data[i].size = src->side_data[i].size;
            pkt->side_data[i].type = src->side_data[i].type;
        }
    }
    return 0;

failed_alloc:
    av_destruct_packet(pkt);
    return AVERROR(ENOMEM);
}
/* Makes duplicates of data, side_data, but does not copy any other fields.
 * Assumes pkt's scalar fields (size, buf, side_data_elems, ...) were copied
 * from src by the caller (presumably via *pkt = *src — TODO confirm).
 * dup != 0 requests a shallow side-data copy (pointers shared with src).
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure.
 * NOTE: DUP_DATA is a macro defined elsewhere; failed_alloc is only
 * reachable if that macro jumps there on alloc failure. */
static int copy_packet_data(AVPacket *pkt, AVPacket *src, int dup)
{
    pkt->data      = NULL;
    pkt->side_data = NULL;
    if (pkt->buf) {
        /* Reference-counted payload: take a new reference instead of
         * copying the bytes. */
        AVBufferRef *ref = av_buffer_ref(src->buf);
        if (!ref)
            return AVERROR(ENOMEM);
        pkt->buf  = ref;
        pkt->data = ref->data;
    } else {
        /* Legacy flat payload: deep-copy the bytes. */
        DUP_DATA(pkt->data, src->data, pkt->size, 1, ALLOC_BUF);
    }
#if FF_API_DESTRUCT_PACKET
    /* Compatibility stub for the deprecated destruct callback. */
    pkt->destruct = dummy_destruct_packet;
#endif
    if (pkt->side_data_elems && dup)
        /* Shallow copy: share src's side-data array and buffers. */
        pkt->side_data = src->side_data;
    if (pkt->side_data_elems && !dup) {
        int i;

        DUP_DATA(pkt->side_data, src->side_data,
                 pkt->side_data_elems * sizeof(*pkt->side_data), 0, ALLOC_MALLOC);
        /* Zero the element array before per-entry duplication so a
         * mid-loop failure never leaves src's pointers in pkt for
         * av_destruct_packet() to free. */
        memset(pkt->side_data, 0,
               pkt->side_data_elems * sizeof(*pkt->side_data));
        for (i = 0; i < pkt->side_data_elems; i++) {
            DUP_DATA(pkt->side_data[i].data, src->side_data[i].data,
                     src->side_data[i].size, 1, ALLOC_MALLOC);
            pkt->side_data[i].size = src->side_data[i].size;
            pkt->side_data[i].type = src->side_data[i].type;
        }
    }
    return 0;

failed_alloc:
    av_destruct_packet(pkt);
    return AVERROR(ENOMEM);
}
/* Tear down a DirectShow capture session: stop the media control, strip and
 * release every filter from the graph, release pins/filters for both video
 * and audio, free device-name strings and Win32 handles, and drain the
 * buffered packet list. Teardown order mirrors construction in reverse and
 * should not be reordered. Always returns 0. */
static int dshow_read_close(AVFormatContext *s)
{
    struct dshow_ctx *ctx = s->priv_data;
    AVPacketList *pktl;

    if (ctx->control) {
        /* Stop streaming before releasing anything else. */
        IMediaControl_Stop(ctx->control);
        IMediaControl_Release(ctx->control);
    }

    if (ctx->graph) {
        IEnumFilters *fenum;
        int r;
        r = IGraphBuilder_EnumFilters(ctx->graph, &fenum);
        if (r == S_OK) {
            IBaseFilter *f;
            IEnumFilters_Reset(fenum);
            while (IEnumFilters_Next(fenum, 1, &f, NULL) == S_OK) {
                if (IGraphBuilder_RemoveFilter(ctx->graph, f) == S_OK)
                    IEnumFilters_Reset(fenum); /* When a filter is removed,
                                                * the list must be reset. */
                IBaseFilter_Release(f);
            }
            IEnumFilters_Release(fenum);
        }
        IGraphBuilder_Release(ctx->graph);
    }

    /* Release capture pins and filters for both device types. */
    if (ctx->capture_pin[VideoDevice])
        libAVPin_Release(ctx->capture_pin[VideoDevice]);
    if (ctx->capture_pin[AudioDevice])
        libAVPin_Release(ctx->capture_pin[AudioDevice]);
    if (ctx->capture_filter[VideoDevice])
        libAVFilter_Release(ctx->capture_filter[VideoDevice]);
    if (ctx->capture_filter[AudioDevice])
        libAVFilter_Release(ctx->capture_filter[AudioDevice]);

    if (ctx->device_pin[VideoDevice])
        IPin_Release(ctx->device_pin[VideoDevice]);
    if (ctx->device_pin[AudioDevice])
        IPin_Release(ctx->device_pin[AudioDevice]);
    if (ctx->device_filter[VideoDevice])
        IBaseFilter_Release(ctx->device_filter[VideoDevice]);
    if (ctx->device_filter[AudioDevice])
        IBaseFilter_Release(ctx->device_filter[AudioDevice]);

    /* av_free(NULL) is a no-op, so these guards are redundant but kept. */
    if (ctx->device_name[0])
        av_free(ctx->device_name[0]);
    if (ctx->device_name[1])
        av_free(ctx->device_name[1]);

    if(ctx->mutex)
        CloseHandle(ctx->mutex);
    if(ctx->event)
        CloseHandle(ctx->event);

    /* Free every packet still buffered in the list. */
    pktl = ctx->pktl;
    while (pktl) {
        AVPacketList *next = pktl->next;
        av_destruct_packet(&pktl->pkt);
        av_free(pktl);
        pktl = next;
    }

    return 0;
}
/* Destructor: flush the encoder's delayed frames to the output file, write
 * the container trailer, and release every FFmpeg/scaler resource and the
 * locally allocated buffers. Does nothing if initialization never succeeded. */
recorder::~recorder()
{
    if (!init_failed_ && avcontext_)
    {
        // record delayed frames.
        // unsigned frame_size = 1;
#ifdef FFMPEG_54
        /* New API: call avcodec_encode_video2 with a NULL frame until the
         * encoder reports no more packets. */
        int got_packet_ptr = 1;
        while (got_packet_ptr)
        {
            AVPacket pkt;
            av_init_packet(&pkt);
            pkt.data = NULL;  /* let the encoder allocate the payload */
            pkt.size = 0;
            if (avcodec_encode_video2(avcontext_, &pkt, 0, &got_packet_ptr))
                std::cout << "avcodec_encode_video2 fail" << std::endl;
            std::cout << "got_packet_ptr: " << got_packet_ptr << std::endl;
            /* NOTE(review): coded_frame is consulted even when no packet was
             * produced — its key_frame flag may be stale here; verify. */
            if(avcontext_->coded_frame->key_frame)
                pkt.flags |= AV_PKT_FLAG_KEY;
            pkt.stream_index = video_st_->index;
            if (got_packet_ptr)
            {
                int ret = av_write_frame(fmtcontext_, &pkt);
                assert(ret >= 0);
            }
            /* Free the encoder-allocated payload. */
            av_destruct_packet(&pkt);
        }
#else
        /* Old API: drain with a NULL input frame until no bytes come out.
         * NOTE(review): frame_size is unsigned, so a negative error return
         * from avcodec_encode_video would wrap to a huge value — verify. */
        unsigned frame_size = 1;
        while (frame_size)
        {
            frame_size = avcodec_encode_video(avcontext_, video_buffer_,
                                              video_buffer_size_, NULL);
            if (frame_size > 0)
            {
                AVPacket pkt;
                av_init_packet(&pkt);
                assert(avcontext_ && avcontext_->coded_frame);
                if(avcontext_->coded_frame->key_frame)
                    pkt.flags |= AV_PKT_FLAG_KEY;
                pkt.stream_index = video_st_->index;
                pkt.data = video_buffer_;  /* points into our own buffer */
                pkt.size = frame_size;
                int ret = av_write_frame(fmtcontext_, &pkt);
                assert(ret >= 0);
            }
        }
#endif
        /* Finalize the container, then release everything we own. */
        av_write_trailer (fmtcontext_);
        avio_close (fmtcontext_->pb);
        avcodec_close(avcontext_);
        av_free(avcontext_);
        sws_freeContext(swcontext_);
        av_free(yuvframe_);
        av_free(rgbframe_);
        delete [] video_buffer_;
        delete [] window_capture_;
        /* NOTE(review): fmtcontext_ itself is never freed here — possible
         * leak; confirm whether another owner releases it. */
    }
}
/* Grab the current contents of an image_view widget, convert them to the
 * encoder's YUV format (flipping vertically, since the capture is
 * bottom-up relative to the video), encode one frame and write it to the
 * output container. Lazily initializes the encoding context on first use
 * and becomes a no-op if that initialization failed. */
void recorder::operator<<=(widgets::image_view& w)
{
    if (!avcontext_ && !init_failed_)
        init_context(w.width(), w.height());
    if (init_failed_)
        return;
    w.set_unresizable();

    unsigned old_width  = window_capture_width_;
    unsigned old_height = window_capture_height_;
    w.dump_rgb_frame_buffer(window_capture_, window_capture_size_,
                            window_capture_width_, window_capture_height_);

    if (old_width != window_capture_width_ ||
        old_height != window_capture_height_)
    {
        std::cout << "new context" << std::endl;
        // FIX: sws_getCachedContext() may free the context passed in and
        // return a newly allocated one; the previous code discarded the
        // return value, so after a resize scaling kept using a stale
        // (possibly freed) context. Store the result back.
        swcontext_ = sws_getCachedContext(swcontext_,
                                          window_capture_width_,
                                          window_capture_height_,
                                          PIX_FMT_RGB24,
                                          avcontext_->width,
                                          avcontext_->height,
                                          FRAME_FORMAT,
                                          SWS_BILINEAR, 0, 0, 0);
        rgbframe_->data[0] = (uint8_t*) window_capture_;
        rgbframe_->linesize[0] =
            window_capture_width_ * 3 * sizeof(unsigned char);
    }

    // Flip and convert the image to YUV for video encoding: start at the
    // last row and walk upward with a negative stride.
    uint8_t* data = rgbframe_->data[0]
        + window_capture_width_ * 3 * (window_capture_height_ - 1);
    // FIX: the stride must be a signed (negative) int; computing it in an
    // unsigned and relying on the wrap-around conversion was fragile.
    int s = -(int)(window_capture_width_ * 3);
    uint8_t* tmp[1]  = { data };
    int      stride[1] = { s };
    int r = sws_scale(swcontext_, tmp, stride, 0, window_capture_height_,
                      yuvframe_->data, yuvframe_->linesize);
    (void)r;  // sws_scale result (output slice height) intentionally unused

#ifdef FFMPEG_54
    {
        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.data = NULL;  // let the encoder allocate the payload
        pkt.size = 0;
        int got_packet_ptr = 0;
        if (avcodec_encode_video2(avcontext_, &pkt, yuvframe_,
                                  &got_packet_ptr))
            std::cout << "avcodec_encode_video2 fail" << std::endl;
        if(avcontext_->coded_frame->key_frame)
            pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index = video_st_->index;
        if (got_packet_ptr)
        {
            int ret = av_write_frame(fmtcontext_, &pkt);
            assert(ret >= 0);
        }
        av_destruct_packet(&pkt);  // free the encoder-allocated payload
    }
#else
    int encode_size = avcodec_encode_video(avcontext_, video_buffer_,
                                           video_buffer_size_, yuvframe_);
    if (encode_size > 0)
    {
        AVPacket pkt;
        av_init_packet(&pkt);
        if(avcontext_->coded_frame->key_frame)
            pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index = video_st_->index;
        pkt.data = video_buffer_;  // points into our own buffer
        pkt.size = encode_size;
        int ret = av_write_frame(fmtcontext_, &pkt);
        assert(ret >= 0);
    }
#endif
}
/* Set up the output format context and audio stream for the requested codec
 * (MP3, AAC/M4A, or the OGG/Vorbis fallback) at the given bitrate (kbit/s)
 * and sample rate. The #ifdef maze selects between the old
 * (avformat_alloc_context + av_guess_format) and new
 * (avformat_alloc_output_context2) FFmpeg initialization APIs, and between
 * pre/post-3544932 codec-ID names. Always returns 0; allocation failures
 * are not checked — NOTE(review): the avformat_* results and
 * m_pEncoderFormat should be NULL-checked. */
int EncoderFfmpegCore::initEncoder(int bitrate, int samplerate) {
#ifndef avformat_alloc_output_context2
    qDebug() << "EncoderFfmpegCore::initEncoder: Old Style initialization";
    m_pEncodeFormatCtx = avformat_alloc_context();
#endif
    m_lBitrate = bitrate * 1000;
    m_lSampleRate = samplerate;
#if LIBAVCODEC_VERSION_INT > 3544932
    if (m_SCcodecId == AV_CODEC_ID_MP3) {
#else
    if (m_SCcodecId == CODEC_ID_MP3) {
#endif // LIBAVCODEC_VERSION_INT > 3544932
        qDebug() << "EncoderFfmpegCore::initEncoder: Codec MP3";
#ifdef avformat_alloc_output_context2
        avformat_alloc_output_context2(&m_pEncodeFormatCtx, NULL, NULL,
                                       "output.mp3");
#else
        m_pEncoderFormat = av_guess_format(NULL, "output.mp3", NULL);
#endif // avformat_alloc_output_context2
#if LIBAVCODEC_VERSION_INT > 3544932
    } else if (m_SCcodecId == AV_CODEC_ID_AAC) {
#else
    } else if (m_SCcodecId == CODEC_ID_AAC) {
#endif // LIBAVCODEC_VERSION_INT > 3544932
        qDebug() << "EncoderFfmpegCore::initEncoder: Codec M4A";
#ifdef avformat_alloc_output_context2
        avformat_alloc_output_context2(&m_pEncodeFormatCtx, NULL, NULL,
                                       "output.m4a");
#else
        m_pEncoderFormat = av_guess_format(NULL, "output.m4a", NULL);
#endif // avformat_alloc_output_context2
    } else {
        qDebug() << "EncoderFfmpegCore::initEncoder: Codec OGG/Vorbis";
#ifdef avformat_alloc_output_context2
        avformat_alloc_output_context2(&m_pEncodeFormatCtx, NULL, NULL,
                                       "output.ogg");
        m_pEncodeFormatCtx->oformat->audio_codec=AV_CODEC_ID_VORBIS;
#else
        m_pEncoderFormat = av_guess_format(NULL, "output.ogg", NULL);
#if LIBAVCODEC_VERSION_INT > 3544932
        m_pEncoderFormat->audio_codec=AV_CODEC_ID_VORBIS;
#else
        m_pEncoderFormat->audio_codec=CODEC_ID_VORBIS;
#endif // LIBAVCODEC_VERSION_INT > 3544932
#endif // avformat_alloc_output_context2
    }
    /* Keep the context's oformat and our cached format pointer in sync,
     * whichever API populated which side. */
#ifdef avformat_alloc_output_context2
    m_pEncoderFormat = m_pEncodeFormatCtx->oformat;
#else
    m_pEncodeFormatCtx->oformat = m_pEncoderFormat;
#endif // avformat_alloc_output_context2
    m_pEncoderAudioStream = addStream(m_pEncodeFormatCtx, &m_pEncoderAudioCodec,
                                      m_pEncoderFormat->audio_codec);
    openAudio(m_pEncoderAudioCodec, m_pEncoderAudioStream);
    // qDebug() << "jepusti";
    return 0;
}

// Private methods
/* Encode one audio frame from the float sample buffer (m_pFltSamples) and
 * write it, interleaved, to the output container. Resamples first when the
 * codec wants something other than AV_SAMPLE_FMT_FLT. Returns 0 on success,
 * -1 on any failure.
 * NOTE(review): every early return leaks l_SFrame (and any encoder-allocated
 * packet payload); consider goto-style cleanup. */
int EncoderFfmpegCore::writeAudioFrame(AVFormatContext *formatctx,
                                       AVStream *stream) {
    AVCodecContext *l_SCodecCtx = NULL;;
    AVPacket l_SPacket;
    AVFrame *l_SFrame = avcodec_alloc_frame();
    int l_iGotPacket;
    int l_iRet;
#ifdef av_make_error_string
    char l_strErrorBuff[256];
#endif // av_make_error_string

    av_init_packet(&l_SPacket);
    l_SPacket.size = 0;
    l_SPacket.data = NULL;

    // Calculate correct DTS for FFMPEG
    // (recorded bytes -> seconds at 44100 Hz stereo -> stream time base)
    m_lDts = round(((double)m_lRecordedBytes / (double)44100 / (double)2. *
                    (double)m_pEncoderAudioStream->time_base.den));
    m_lPts = m_lDts;

    l_SCodecCtx = stream->codec;
#ifdef av_make_error_string
    memset(l_strErrorBuff, 0x00, 256);
#endif // av_make_error_string

    l_SFrame->nb_samples = m_iAudioInputFrameSize;
    // Mixxx uses float (32 bit) samples..
    l_SFrame->format = AV_SAMPLE_FMT_FLT;
#ifndef __FFMPEGOLDAPI__
    l_SFrame->channel_layout = l_SCodecCtx->channel_layout;
#endif // __FFMPEGOLDAPI__

    /* Point the frame at the raw float sample buffer (no copy). */
    l_iRet = avcodec_fill_audio_frame(l_SFrame, l_SCodecCtx->channels,
                                      AV_SAMPLE_FMT_FLT,
                                      (const uint8_t *)m_pFltSamples,
                                      m_iFltAudioCpyLen, 1);
    if (l_iRet != 0) {
#ifdef av_make_error_string
        qDebug() << "Can't fill FFMPEG frame: error " << l_iRet << "String '" <<
                 av_make_error_string(l_strErrorBuff, 256, l_iRet) << "'" <<
                 m_iFltAudioCpyLen;
#endif // av_make_error_string
        qDebug() << "Can't refill 1st FFMPEG frame!";
        return -1;
    }

    // If we have something else than AV_SAMPLE_FMT_FLT we have to convert it
    // to something that fits..
    if (l_SCodecCtx->sample_fmt != AV_SAMPLE_FMT_FLT) {
        reSample(l_SFrame);
        // After we have turned our samples to destination
        // Format we must re-alloc l_SFrame.. it easier like this..
#if LIBAVCODEC_VERSION_INT > 3544932
        avcodec_free_frame(&l_SFrame);
#else
        av_free(l_SFrame);
#endif // LIBAVCODEC_VERSION_INT > 3544932
        l_SFrame = NULL;
        l_SFrame = avcodec_alloc_frame();
        l_SFrame->nb_samples = m_iAudioInputFrameSize;
        l_SFrame->format = l_SCodecCtx->sample_fmt;
#ifndef __FFMPEGOLDAPI__
        l_SFrame->channel_layout = m_pEncoderAudioStream->codec->channel_layout;
#endif // __FFMPEGOLDAPI__

        /* Re-point the frame at the resampler's output buffer. */
        l_iRet = avcodec_fill_audio_frame(l_SFrame, l_SCodecCtx->channels,
                                          l_SCodecCtx->sample_fmt,
                                          (const uint8_t *)m_pResample->getBuffer(),
                                          m_iAudioCpyLen, 1);
        if (l_iRet != 0) {
#ifdef av_make_error_string
            qDebug() << "Can't refill FFMPEG frame: error " << l_iRet << "String '" <<
                     av_make_error_string(l_strErrorBuff, 256, l_iRet) << "'" <<
                     m_iAudioCpyLen << " " <<
                     av_samples_get_buffer_size(
                         NULL, 2,
                         m_iAudioInputFrameSize,
                         m_pEncoderAudioStream->codec->sample_fmt,
                         1) << " " << m_pOutSize;
#endif // av_make_error_string
            qDebug() << "Can't refill 2nd FFMPEG frame!";
            return -1;
        }
    }

    //qDebug() << "!!" << l_iRet;
    l_iRet = avcodec_encode_audio2(l_SCodecCtx, &l_SPacket, l_SFrame,
                                   &l_iGotPacket);
    if (l_iRet < 0) {
        qDebug() << "Error encoding audio frame";
        return -1;
    }
    if (!l_iGotPacket) {
        // qDebug() << "No packet! Can't encode audio!!";
        return -1;
    }

    l_SPacket.stream_index = stream->index;

    // Let's calculate DTS/PTS and give it to FFMPEG..
    // THEN codecs like OGG/Voris works ok!!
    l_SPacket.dts = m_lDts;
    l_SPacket.pts = m_lDts;

    // Some times den is zero.. so 0 dived by 0 is
    // Something?
    if (m_pEncoderAudioStream->pts.den == 0) {
        qDebug() << "Time hack!";
        m_pEncoderAudioStream->pts.den = 1;
    }

    // Write the compressed frame to the media file. */
    l_iRet = av_interleaved_write_frame(formatctx, &l_SPacket);
    if (l_iRet != 0) {
        qDebug() << "Error while writing audio frame";
        return -1;
    }

    av_free_packet(&l_SPacket);
    /* NOTE(review): av_destruct_packet() right after av_free_packet() is
     * redundant at best (av_free_packet already released the payload);
     * whether it is harmless depends on the FFmpeg version — verify. */
    av_destruct_packet(&l_SPacket);
    av_free(l_SFrame);

    return 0;
}