/*
 * AudioToolbox decode entry point -- NOTE: this excerpt is truncated; the
 * function body continues past the end of this chunk.
 *
 * Visible behavior: when the input is raw ADTS AAC (sync word 0xFFFx in the
 * first two bytes), an "aac_adtstoasc" bitstream filter is lazily created and
 * the packet is converted; the filter's output extradata is adopted into the
 * decoder context.  The converter is then created on first use, and a
 * single-buffer AudioBufferList is prepared sized for one full frame of
 * interleaved samples.
 *
 * NOTE(review): `ret` is declared OSStatus but also receives negative AVERROR
 * codes from the BSF helpers -- presumably intentional in this wrapper; verify.
 */
static int ffat_decode(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { ATDecodeContext *at = avctx->priv_data; AVFrame *frame = data; int pkt_size = avpkt->size; AVPacket filtered_packet = {0}; OSStatus ret; AudioBufferList out_buffers; if (avctx->codec_id == AV_CODEC_ID_AAC && avpkt->size > 2 && (AV_RB16(avpkt->data) & 0xfff0) == 0xfff0) { AVPacket filter_pkt = {0}; if (!at->bsf) { const AVBitStreamFilter *bsf = av_bsf_get_by_name("aac_adtstoasc"); if(!bsf) return AVERROR_BSF_NOT_FOUND; if ((ret = av_bsf_alloc(bsf, &at->bsf))) return ret; if (((ret = avcodec_parameters_from_context(at->bsf->par_in, avctx)) < 0) || ((ret = av_bsf_init(at->bsf)) < 0)) { av_bsf_free(&at->bsf); return ret; } } if ((ret = av_packet_ref(&filter_pkt, avpkt)) < 0) return ret; if ((ret = av_bsf_send_packet(at->bsf, &filter_pkt)) < 0) { av_packet_unref(&filter_pkt); return ret; } if ((ret = av_bsf_receive_packet(at->bsf, &filtered_packet)) < 0) return ret; at->extradata = at->bsf->par_out->extradata; at->extradata_size = at->bsf->par_out->extradata_size; avpkt = &filtered_packet; } if (!at->converter) { if ((ret = ffat_create_decoder(avctx, avpkt)) < 0) { av_packet_unref(&filtered_packet); return ret; } } out_buffers = (AudioBufferList){ .mNumberBuffers = 1, .mBuffers = { { .mNumberChannels = avctx->channels, .mDataByteSize = av_get_bytes_per_sample(avctx->sample_fmt) * avctx->frame_size * avctx->channels, } } };
/*
 * Pull one filtered packet out of a chain of bitstream filters.
 *
 * The chain is drained back-to-front: `idx` points one past the filter we
 * are currently receiving from, while `flushed_idx` marks how far EOF
 * propagation (flushing) has progressed.  With an empty chain this
 * degenerates to a plain passthrough of the next input packet.
 */
static int bsf_list_filter(AVBSFContext *bsf, AVPacket *out)
{
    BSFListContext *ctx = bsf->priv_data;
    int err;

    /* No filters configured: hand the caller the next input packet as-is. */
    if (!ctx->nb_bsfs)
        return ff_bsf_get_packet_ref(bsf, out);

    for (;;) {
        if (ctx->idx > ctx->flushed_idx) {
            /* Try to receive from the filter just below the current one. */
            err = av_bsf_receive_packet(ctx->bsfs[ctx->idx - 1], out);
            if (err == AVERROR(EAGAIN)) {
                /* Nothing buffered there; step further up the chain. */
                err = 0;
                ctx->idx--;
                continue;
            }
            if (err == AVERROR_EOF) {
                /* That filter is fully drained; resume with idx..nb_bsfs. */
                ctx->flushed_idx = ctx->idx;
                continue;
            }
            if (err < 0)
                break; /* hard filtering error */
        } else {
            /* Everything below idx is exhausted; fetch fresh input. */
            err = ff_bsf_get_packet_ref(bsf, out);
            if (err == AVERROR_EOF)
                ctx->idx = ctx->flushed_idx;
            else if (err < 0)
                break;
        }

        if (ctx->idx >= ctx->nb_bsfs)
            break; /* end of the chain: `out`/`err` is the final result */

        /* EOF at the first not-yet-flushed filter is signalled with NULL. */
        AVPacket *in = (err == AVERROR_EOF && ctx->idx == ctx->flushed_idx)
                           ? NULL : out;
        err = av_bsf_send_packet(ctx->bsfs[ctx->idx], in);
        if (err < 0)
            break;
        ctx->idx++;
    }

    if (err < 0)
        av_packet_unref(out);
    return err;
}
/**
 * Read the next packet from the demuxer into `frame`.
 *
 * Frees the previous frame's payload buffers, pulls one packet via
 * av_read_frame(), optionally runs a video packet through the configured
 * bitstream filter, and deep-copies the payload with
 * AV_INPUT_BUFFER_PADDING_SIZE bytes of zeroed padding (as libav decoders
 * require).
 *
 * Fixes vs. previous revision:
 *  - av_malloc() results were passed to memcpy() unchecked, crashing on
 *    allocation failure; both call sites now fail cleanly with S_FAIL.
 *  - log messages named the wrong function ("getNextFrame").
 *
 * @param frame  out: shallow copy of the internal StreamFrame (buffers owned
 *               by this object and freed on the next call).
 * @return S_OK on success; S_FAIL on EOF, read error, filter error or OOM.
 */
STATUS DemuxerLibAV::readNextFrame(StreamFrame *frame) {
	if (!_initialized) {
		log->printf("DemuxerLibAV::readNextFrame(): demuxer not opened!\n");
		return S_FAIL;
	}

	// Release buffers handed out on the previous call.
	if (_streamFrame.videoFrame.data) {
		av_free(_streamFrame.videoFrame.data);
	}
	if (_streamFrame.audioFrame.data) {
		av_free(_streamFrame.audioFrame.data);
	}
	memset(&_streamFrame, 0, sizeof(StreamFrame));

	if (av_read_frame(_afc, &_packedFrame) == 0) {
		if (_packedFrame.stream_index == _videoStream->index) {
			if (_bsf) {
				// av_bsf_send_packet() takes ownership of _packedFrame's
				// reference; the receive call refills it with filtered data.
				if (av_bsf_send_packet(_bsf, &_packedFrame) < 0) {
					log->printf("DemuxerLibAV::readNextFrame(): av_bsf_send_packet failed!\n");
					av_packet_unref(&_packedFrame);
					return S_FAIL;
				}
				if (av_bsf_receive_packet(_bsf, &_packedFrame) < 0) {
					log->printf("DemuxerLibAV::readNextFrame(): av_bsf_receive_packet failed!\n");
					av_packet_unref(&_packedFrame);
					return S_FAIL;
				}
			}
			_streamFrame.videoFrame.pts = _packedFrame.pts * av_q2d(_videoStream->time_base);
			_streamFrame.videoFrame.keyFrame = (_packedFrame.flags & AV_PKT_FLAG_KEY) != 0;
			_streamFrame.videoFrame.dataSize = _packedFrame.size;
			_streamFrame.videoFrame.data = (U8 *)av_malloc(_packedFrame.size + AV_INPUT_BUFFER_PADDING_SIZE);
			if (!_streamFrame.videoFrame.data) {
				// Previously memcpy() ran on a NULL buffer when allocation failed.
				log->printf("DemuxerLibAV::readNextFrame(): out of memory!\n");
				av_packet_unref(&_packedFrame);
				return S_FAIL;
			}
			memcpy(_streamFrame.videoFrame.data, _packedFrame.data, _packedFrame.size);
			memset(_streamFrame.videoFrame.data + _packedFrame.size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
			_streamFrame.priv = &_packedFrame;
		} else if (_packedFrame.stream_index == _audioStream->index) {
			_streamFrame.audioFrame.dataSize = _packedFrame.size;
			_streamFrame.audioFrame.data = (U8 *)av_malloc(_packedFrame.size + AV_INPUT_BUFFER_PADDING_SIZE);
			if (!_streamFrame.audioFrame.data) {
				log->printf("DemuxerLibAV::readNextFrame(): out of memory!\n");
				av_packet_unref(&_packedFrame);
				return S_FAIL;
			}
			memcpy(_streamFrame.audioFrame.data, _packedFrame.data, _packedFrame.size);
			memset(_streamFrame.audioFrame.data + _packedFrame.size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
			_streamFrame.priv = &_packedFrame;
		}
		memcpy(frame, &_streamFrame, sizeof(StreamFrame));
		return S_OK;
	}
	return S_FAIL;
}
/*
 * Try to obtain one output packet from the decode-side bitstream filter
 * chain.  Receiving starts at the last filter; on EAGAIN we walk up the
 * chain, and any packet (or EOF) produced by an inner filter is fed back
 * into the next filter down until the final one yields a result.
 */
static int bsfs_poll(AVCodecContext *avctx, AVPacket *pkt)
{
    DecodeFilterContext *chain = &avctx->internal->filter;
    int pos = chain->nb_bsfs - 1;
    int err;

    while (pos >= 0) {
        /* Ask the currently selected filter for a packet. */
        err = av_bsf_receive_packet(chain->bsfs[pos], pkt);
        if (err == AVERROR(EAGAIN)) {
            /* Starved: move one filter up the chain and feed it instead. */
            err = 0;
            pos--;
            continue;
        }
        if (err < 0 && err != AVERROR_EOF)
            return err;

        if (pos == chain->nb_bsfs - 1) {
            /* Last filter: hand the packet (or EOF) to the caller. */
            return err;
        }

        /* Forward the packet -- or a NULL flush on EOF -- downstream. */
        pos++;
        err = av_bsf_send_packet(chain->bsfs[pos], err < 0 ? NULL : pkt);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error pre-processing a packet before decoding\n");
            av_packet_unref(pkt);
            return err;
        }
    }

    return AVERROR(EAGAIN);
}
void _ffmpegPostAudioFrame(struct mAVStream* stream, int16_t left, int16_t right) { struct FFmpegEncoder* encoder = (struct FFmpegEncoder*) stream; if (!encoder->context || !encoder->audioCodec) { return; } if (encoder->absf && !left) { // XXX: AVBSF doesn't like silence. Figure out why. left = 1; } encoder->audioBuffer[encoder->currentAudioSample * 2] = left; encoder->audioBuffer[encoder->currentAudioSample * 2 + 1] = right; ++encoder->currentAudioSample; if (encoder->currentAudioSample * 4 < encoder->audioBufferSize) { return; } int channelSize = 2 * av_get_bytes_per_sample(encoder->audio->sample_fmt); encoder->currentAudioSample = 0; #ifdef USE_LIBAVRESAMPLE avresample_convert(encoder->resampleContext, 0, 0, 0, (uint8_t**) &encoder->audioBuffer, 0, encoder->audioBufferSize / 4); if (avresample_available(encoder->resampleContext) < encoder->audioFrame->nb_samples) { return; } #if LIBAVCODEC_VERSION_MAJOR >= 55 av_frame_make_writable(encoder->audioFrame); #endif int samples = avresample_read(encoder->resampleContext, encoder->audioFrame->data, encoder->postaudioBufferSize / channelSize); #else #if LIBAVCODEC_VERSION_MAJOR >= 55 av_frame_make_writable(encoder->audioFrame); #endif if (swr_get_out_samples(encoder->resampleContext, encoder->audioBufferSize / 4) < encoder->audioFrame->nb_samples) { swr_convert(encoder->resampleContext, NULL, 0, (const uint8_t**) &encoder->audioBuffer, encoder->audioBufferSize / 4); return; } int samples = swr_convert(encoder->resampleContext, encoder->audioFrame->data, encoder->postaudioBufferSize / channelSize, (const uint8_t**) &encoder->audioBuffer, encoder->audioBufferSize / 4); #endif encoder->audioFrame->pts = av_rescale_q(encoder->currentAudioFrame, encoder->audio->time_base, encoder->audioStream->time_base); encoder->currentAudioFrame += samples; AVPacket packet; av_init_packet(&packet); packet.data = 0; packet.size = 0; packet.pts = encoder->audioFrame->pts; int gotData; #ifdef FFMPEG_USE_PACKETS 
avcodec_send_frame(encoder->audio, encoder->audioFrame); gotData = avcodec_receive_packet(encoder->audio, &packet); gotData = (gotData == 0) && packet.size; #else avcodec_encode_audio2(encoder->audio, &packet, encoder->audioFrame, &gotData); #endif if (gotData) { if (encoder->absf) { AVPacket tempPacket; #ifdef FFMPEG_USE_NEW_BSF int success = av_bsf_send_packet(encoder->absf, &packet); if (success >= 0) { success = av_bsf_receive_packet(encoder->absf, &tempPacket); } #else int success = av_bitstream_filter_filter(encoder->absf, encoder->audio, 0, &tempPacket.data, &tempPacket.size, packet.data, packet.size, 0); #endif if (success >= 0) { #if LIBAVUTIL_VERSION_MAJOR >= 53 tempPacket.buf = av_buffer_create(tempPacket.data, tempPacket.size, av_buffer_default_free, 0, 0); #endif #ifdef FFMPEG_USE_PACKET_UNREF av_packet_move_ref(&packet, &tempPacket); #else av_free_packet(&packet); packet = tempPacket; #endif packet.stream_index = encoder->audioStream->index; av_interleaved_write_frame(encoder->context, &packet); } } else { packet.stream_index = encoder->audioStream->index; av_interleaved_write_frame(encoder->context, &packet); } } #ifdef FFMPEG_USE_PACKET_UNREF av_packet_unref(&packet); #else av_free_packet(&packet); #endif }
/*
 * Write one packet (or a NULL flush) to every slave muxer.
 *
 * For each slave, the packet is re-referenced, remapped to the slave's
 * stream, pushed through the slave's bitstream-filter chain and finally
 * interleaved-written.  Per-slave failures are routed through
 * tee_process_slave_failure(); the first sticky error is what gets returned.
 *
 * Fix: the error branch after av_packet_ref() was missing braces, so when
 * the ref failed *after* an earlier slave had already recorded an error
 * (ret_all != 0), execution fell through and a blank packet was sent to the
 * bitstream filter instead of skipping this slave.  The branch is now fully
 * braced: record the first error and always `continue`.
 */
static int tee_write_packet(AVFormatContext *avf, AVPacket *pkt)
{
    TeeContext *tee = avf->priv_data;
    AVFormatContext *avf2;
    AVBSFContext *bsfs;
    AVPacket pkt2;
    int ret_all = 0, ret;
    unsigned i, s;
    int s2;

    for (i = 0; i < tee->nb_slaves; i++) {
        if (!(avf2 = tee->slaves[i].avf))
            continue;

        /* Flush slave if pkt is NULL */
        if (!pkt) {
            ret = av_interleaved_write_frame(avf2, NULL);
            if (ret < 0) {
                ret = tee_process_slave_failure(avf, i, ret);
                if (!ret_all && ret < 0)
                    ret_all = ret;
            }
            continue;
        }

        s = pkt->stream_index;
        s2 = tee->slaves[i].stream_map[s];
        if (s2 < 0)
            continue;

        memset(&pkt2, 0, sizeof(AVPacket));
        if ((ret = av_packet_ref(&pkt2, pkt)) < 0) {
            if (!ret_all)
                ret_all = ret;
            continue;
        }

        bsfs = tee->slaves[i].bsfs[s2];
        pkt2.stream_index = s2;

        ret = av_bsf_send_packet(bsfs, &pkt2);
        if (ret < 0) {
            av_log(avf, AV_LOG_ERROR,
                   "Error while sending packet to bitstream filter: %s\n",
                   av_err2str(ret));
            ret = tee_process_slave_failure(avf, i, ret);
            if (!ret_all && ret < 0)
                ret_all = ret;
        }

        /* Drain every packet the filter produced for this input. */
        while (1) {
            ret = av_bsf_receive_packet(bsfs, &pkt2);
            if (ret == AVERROR(EAGAIN)) {
                ret = 0;
                break;
            } else if (ret < 0) {
                break;
            }

            av_packet_rescale_ts(&pkt2, bsfs->time_base_out,
                                 avf2->streams[s2]->time_base);
            ret = av_interleaved_write_frame(avf2, &pkt2);
            if (ret < 0)
                break;
        }

        if (ret < 0) {
            ret = tee_process_slave_failure(avf, i, ret);
            if (!ret_all && ret < 0)
                ret_all = ret;
        }
    }
    return ret_all;
}
/*
 * Feed one compressed packet (or an end-of-stream flush) to the CUVID parser.
 *
 * The packet is optionally run through the attached bitstream filter first
 * (e.g. Annex-B conversion).  An empty/NULL packet switches the decoder into
 * flushing mode by sending CUVID_PKT_ENDOFSTREAM.
 */
static int cuvid_decode_packet(AVCodecContext *avctx, const AVPacket *avpkt)
{
    CuvidContext *ctx = avctx->priv_data;
    AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data;
    AVCUDADeviceContext *device_hwctx = device_ctx->hwctx;
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
    CUVIDSOURCEDATAPACKET parse_pkt;
    AVPacket bsf_in = { 0 };
    AVPacket bsf_out = { 0 };
    int err = 0, pop_err = 0;
    int flushing = ctx->decoder_flushing;

    av_log(avctx, AV_LOG_TRACE, "cuvid_decode_packet\n");

    /* Once flushing has started, no further data packets are accepted. */
    if (flushing && avpkt && avpkt->size)
        return AVERROR_EOF;

    /* Apply back-pressure while the output frame queue is nearly full. */
    if (av_fifo_size(ctx->frame_queue) / sizeof(CuvidParsedFrame) > MAX_FRAME_COUNT - 2 && avpkt && avpkt->size)
        return AVERROR(EAGAIN);

    if (ctx->bsf && avpkt && avpkt->size) {
        if ((err = av_packet_ref(&bsf_in, avpkt)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_packet_ref failed\n");
            return err;
        }
        /* send takes ownership of bsf_in's reference on success */
        if ((err = av_bsf_send_packet(ctx->bsf, &bsf_in)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_bsf_send_packet failed\n");
            av_packet_unref(&bsf_in);
            return err;
        }
        if ((err = av_bsf_receive_packet(ctx->bsf, &bsf_out)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_bsf_receive_packet failed\n");
            return err;
        }
        avpkt = &bsf_out;
    }

    err = CHECK_CU(cuCtxPushCurrent(cuda_ctx));
    if (err < 0) {
        av_packet_unref(&bsf_out);
        return err;
    }

    memset(&parse_pkt, 0, sizeof(parse_pkt));

    if (avpkt && avpkt->size) {
        parse_pkt.payload_size = avpkt->size;
        parse_pkt.payload = avpkt->data;

        if (avpkt->pts != AV_NOPTS_VALUE) {
            parse_pkt.flags = CUVID_PKT_TIMESTAMP;
            if (avctx->pkt_timebase.num && avctx->pkt_timebase.den)
                parse_pkt.timestamp = av_rescale_q(avpkt->pts, avctx->pkt_timebase, (AVRational){1, 10000000});
            else
                parse_pkt.timestamp = avpkt->pts;
        }
    } else {
        /* Empty packet: begin flushing the hardware decoder. */
        parse_pkt.flags = CUVID_PKT_ENDOFSTREAM;
        ctx->decoder_flushing = 1;
    }

    err = CHECK_CU(cuvidParseVideoData(ctx->cuparser, &parse_pkt));

    av_packet_unref(&bsf_out);

    if (err < 0)
        goto error;

    /* cuvidParseVideoData doesn't return an error just because stuff failed... */
    if (ctx->internal_error) {
        av_log(avctx, AV_LOG_ERROR, "cuvid decode callback error\n");
        err = ctx->internal_error;
        goto error;
    }

error:
    pop_err = CHECK_CU(cuCtxPopCurrent(&dummy));

    if (pop_err < 0)
        return pop_err;
    else if (err < 0)
        return err;
    else if (flushing)
        return AVERROR_EOF;
    else
        return 0;
}
void _ffmpegPostAudioFrame(struct mAVStream* stream, int16_t left, int16_t right) { struct FFmpegEncoder* encoder = (struct FFmpegEncoder*) stream; if (!encoder->context || !encoder->audioCodec) { return; } encoder->audioBuffer[encoder->currentAudioSample * 2] = left; encoder->audioBuffer[encoder->currentAudioSample * 2 + 1] = right; ++encoder->currentAudioFrame; ++encoder->currentAudioSample; if ((encoder->currentAudioSample * 4) < encoder->audioBufferSize) { return; } encoder->currentAudioSample = 0; int channelSize = 2 * av_get_bytes_per_sample(encoder->audio->sample_fmt); avresample_convert(encoder->resampleContext, 0, 0, 0, (uint8_t**) &encoder->audioBuffer, 0, encoder->audioBufferSize / 4); if (avresample_available(encoder->resampleContext) < encoder->audioFrame->nb_samples) { return; } #if LIBAVCODEC_VERSION_MAJOR >= 55 av_frame_make_writable(encoder->audioFrame); #endif avresample_read(encoder->resampleContext, encoder->audioFrame->data, encoder->postaudioBufferSize / channelSize); AVRational timeBase = { 1, PREFERRED_SAMPLE_RATE }; encoder->audioFrame->pts = encoder->nextAudioPts; encoder->nextAudioPts = av_rescale_q(encoder->currentAudioFrame, timeBase, encoder->audioStream->time_base); AVPacket packet; av_init_packet(&packet); packet.data = 0; packet.size = 0; int gotData; #ifdef FFMPEG_USE_PACKETS avcodec_send_frame(encoder->audio, encoder->audioFrame); gotData = avcodec_receive_packet(encoder->audio, &packet) == 0; #else avcodec_encode_audio2(encoder->audio, &packet, encoder->audioFrame, &gotData); #endif if (gotData) { if (encoder->absf) { AVPacket tempPacket = packet; #ifdef FFMPEG_USE_NEW_BSF int success = av_bsf_send_packet(encoder->absf, &packet) && av_bsf_receive_packet(encoder->absf, &packet); #else int success = av_bitstream_filter_filter(encoder->absf, encoder->audio, 0, &tempPacket.data, &tempPacket.size, packet.data, packet.size, 0); #endif if (success > 0) { #if LIBAVUTIL_VERSION_MAJOR >= 53 tempPacket.buf = 
av_buffer_create(tempPacket.data, tempPacket.size, av_buffer_default_free, 0, 0); #endif #ifdef FFMPEG_USE_PACKET_UNREF av_packet_unref(&packet); #else av_free_packet(&packet); #endif } packet = tempPacket; } packet.stream_index = encoder->audioStream->index; av_interleaved_write_frame(encoder->context, &packet); } #ifdef FFMPEG_USE_PACKET_UNREF av_packet_unref(&packet); #else av_free_packet(&packet); #endif }
/*
 * Decode one packet via MediaCodec.  Incoming packets are queued in a FIFO,
 * converted to Annex-B through the h264_mp4toannexb bitstream filter, and
 * drained into the MediaCodec wrapper until it produces a frame (or the
 * buffered data runs out).
 */
static int mediacodec_decode_frame(AVCodecContext *avctx, void *data,
                                   int *got_frame, AVPacket *avpkt)
{
    MediaCodecH264DecContext *dec = avctx->priv_data;
    AVFrame *frame = data;
    int err;

    /* Stash a reference to the incoming packet in the FIFO. */
    if (avpkt->size) {
        AVPacket queued = { 0 };

        if (av_fifo_space(dec->fifo) < sizeof(queued)) {
            err = av_fifo_realloc2(dec->fifo, av_fifo_size(dec->fifo) + sizeof(queued));
            if (err < 0)
                return err;
        }

        err = av_packet_ref(&queued, avpkt);
        if (err < 0)
            return err;

        av_fifo_generic_write(dec->fifo, &queued, sizeof(queued), NULL);
    }

    /* Keep feeding buffered data until a frame pops out. */
    while (!*got_frame) {
        /* Refill the filtered packet once it has been fully consumed. */
        if (dec->filtered_pkt.size <= 0) {
            AVPacket queued = { 0 };

            av_packet_unref(&dec->filtered_pkt);

            /* FIFO empty: either ack the input or try to drain a frame. */
            if (av_fifo_size(dec->fifo) < sizeof(AVPacket)) {
                return avpkt->size ? avpkt->size
                                   : ff_mediacodec_dec_decode(avctx, &dec->ctx, frame, got_frame, avpkt);
            }

            av_fifo_generic_read(dec->fifo, &queued, sizeof(queued), NULL);

            err = av_bsf_send_packet(dec->bsf, &queued);
            if (err < 0)
                return err;

            err = av_bsf_receive_packet(dec->bsf, &dec->filtered_pkt);
            if (err == AVERROR(EAGAIN))
                goto done;

            /* h264_mp4toannexb is used here and does not requires flushing */
            av_assert0(err != AVERROR_EOF);

            if (err < 0)
                return err;
        }

        err = mediacodec_process_data(avctx, frame, got_frame, &dec->filtered_pkt);
        if (err < 0)
            return err;

        /* Advance past however many bytes MediaCodec consumed. */
        dec->filtered_pkt.size -= err;
        dec->filtered_pkt.data += err;
    }
done:
    return avpkt->size;
}
/*
 * Compatibility shim: implements the deprecated av_bitstream_filter_filter()
 * API on top of the modern AVBSFContext send/receive interface.
 *
 * On first use it lazily allocates and initializes the wrapped AVBSFContext,
 * copying codec parameters from avctx and applying `bsfc->args` as the
 * filter's first shorthand option.  Each call sends one input buffer and
 * returns at most one filtered packet through a freshly malloc'ed, padded
 * *poutbuf; any additional packets the filter produces in the same call are
 * drained and DISCARDED.  After the first output, extradata produced by the
 * filter is copied back into avctx (unless the caller passed the
 * "private_spspps_buf" argument).
 *
 * Returns 1 when output was produced, 0 when the filter buffered the input
 * (EAGAIN/EOF from the receive side), or a negative AVERROR on failure.
 *
 * NOTE(review): `pkt.data = buf` drops the const qualifier of `buf` --
 * presumably safe because av_bsf_send_packet() does not write through it,
 * but verify against the AVBSF contract.
 * NOTE(review): error returns inside the lazy-init path leave priv->ctx
 * allocated but not initialized -- presumably cleaned up by the caller via
 * the corresponding close function; verify.
 */
int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe) { BSFCompatContext *priv = bsfc->priv_data; AVPacket pkt = { 0 }; int ret; if (!priv->ctx) { ret = av_bsf_alloc(bsfc->filter, &priv->ctx); if (ret < 0) return ret; ret = avcodec_parameters_from_context(priv->ctx->par_in, avctx); if (ret < 0) return ret; priv->ctx->time_base_in = avctx->time_base; if (bsfc->args && bsfc->filter->priv_class) { const AVOption *opt = av_opt_next(priv->ctx->priv_data, NULL); const char * shorthand[2] = {NULL}; if (opt) shorthand[0] = opt->name; ret = av_opt_set_from_string(priv->ctx->priv_data, bsfc->args, shorthand, "=", ":"); } ret = av_bsf_init(priv->ctx); if (ret < 0) return ret; } pkt.data = buf; pkt.size = buf_size; ret = av_bsf_send_packet(priv->ctx, &pkt); if (ret < 0) return ret; *poutbuf = NULL; *poutbuf_size = 0; ret = av_bsf_receive_packet(priv->ctx, &pkt); if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) return 0; else if (ret < 0) return ret; *poutbuf = av_malloc(pkt.size + AV_INPUT_BUFFER_PADDING_SIZE); if (!*poutbuf) { av_packet_unref(&pkt); return AVERROR(ENOMEM); } *poutbuf_size = pkt.size; memcpy(*poutbuf, pkt.data, pkt.size); av_packet_unref(&pkt); /* drain all the remaining packets we cannot return */ while (ret >= 0) { ret = av_bsf_receive_packet(priv->ctx, &pkt); av_packet_unref(&pkt); } if (!priv->extradata_updated) { /* update extradata in avctx from the output codec parameters */ if (priv->ctx->par_out->extradata_size && (!args || !strstr(args, "private_spspps_buf"))) { av_freep(&avctx->extradata); avctx->extradata_size = 0; avctx->extradata = av_mallocz(priv->ctx->par_out->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!avctx->extradata) return AVERROR(ENOMEM); memcpy(avctx->extradata, priv->ctx->par_out->extradata, priv->ctx->par_out->extradata_size); avctx->extradata_size = priv->ctx->par_out->extradata_size; } 
priv->extradata_updated = 1; } return 1; }
/*
 * cuvid decode path -- NOTE: this excerpt is truncated; the function
 * continues past the end of this chunk (it ends mid else-if).
 *
 * Visible behavior: the packet is optionally run through the attached
 * bitstream filter, submitted to the CUVID parser (an empty packet sends
 * CUVID_PKT_ENDOFSTREAM), then, if a decoded surface is queued, it is
 * mapped and its two planes copied device-to-device into an
 * AV_PIX_FMT_CUDA hardware frame.
 *
 * NOTE(review): the token `¶ms` below (two occurrences) appears to be
 * mojibake for `&params` introduced by a bad text conversion -- confirm
 * against the original source before building.
 */
static int cuvid_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { CuvidContext *ctx = avctx->priv_data; AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data; AVCUDADeviceContext *device_hwctx = device_ctx->hwctx; CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx; AVFrame *frame = data; CUVIDSOURCEDATAPACKET cupkt; AVPacket filter_packet = { 0 }; AVPacket filtered_packet = { 0 }; CUdeviceptr mapped_frame = 0; int ret = 0, eret = 0; if (ctx->bsf && avpkt->size) { if ((ret = av_packet_ref(&filter_packet, avpkt)) < 0) { av_log(avctx, AV_LOG_ERROR, "av_packet_ref failed\n"); return ret; } if ((ret = av_bsf_send_packet(ctx->bsf, &filter_packet)) < 0) { av_log(avctx, AV_LOG_ERROR, "av_bsf_send_packet failed\n"); av_packet_unref(&filter_packet); return ret; } if ((ret = av_bsf_receive_packet(ctx->bsf, &filtered_packet)) < 0) { av_log(avctx, AV_LOG_ERROR, "av_bsf_receive_packet failed\n"); return ret; } avpkt = &filtered_packet; } ret = CHECK_CU(cuCtxPushCurrent(cuda_ctx)); if (ret < 0) { av_packet_unref(&filtered_packet); return ret; } memset(&cupkt, 0, sizeof(cupkt)); if (avpkt->size) { cupkt.payload_size = avpkt->size; cupkt.payload = avpkt->data; if (avpkt->pts != AV_NOPTS_VALUE) { cupkt.flags = CUVID_PKT_TIMESTAMP; if (avctx->pkt_timebase.num && avctx->pkt_timebase.den) cupkt.timestamp = av_rescale_q(avpkt->pts, avctx->pkt_timebase, (AVRational){1, 10000000}); else cupkt.timestamp = avpkt->pts; } } else { cupkt.flags = CUVID_PKT_ENDOFSTREAM; } ret = CHECK_CU(cuvidParseVideoData(ctx->cuparser, &cupkt)); av_packet_unref(&filtered_packet); if (ret < 0) { goto error; } // cuvidParseVideoData doesn't return an error just because stuff failed... 
if (ctx->internal_error) { av_log(avctx, AV_LOG_ERROR, "cuvid decode callback error\n"); ret = ctx->internal_error; goto error; } if (av_fifo_size(ctx->frame_queue)) { CUVIDPARSERDISPINFO dispinfo; CUVIDPROCPARAMS params; unsigned int pitch = 0; int offset = 0; int i; av_fifo_generic_read(ctx->frame_queue, &dispinfo, sizeof(CUVIDPARSERDISPINFO), NULL); memset(¶ms, 0, sizeof(params)); params.progressive_frame = dispinfo.progressive_frame; params.second_field = 0; params.top_field_first = dispinfo.top_field_first; ret = CHECK_CU(cuvidMapVideoFrame(ctx->cudecoder, dispinfo.picture_index, &mapped_frame, &pitch, ¶ms)); if (ret < 0) goto error; if (avctx->pix_fmt == AV_PIX_FMT_CUDA) { ret = av_hwframe_get_buffer(ctx->hwframe, frame, 0); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "av_hwframe_get_buffer failed\n"); goto error; } ret = ff_decode_frame_props(avctx, frame); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "ff_decode_frame_props failed\n"); goto error; } for (i = 0; i < 2; i++) { CUDA_MEMCPY2D cpy = { .srcMemoryType = CU_MEMORYTYPE_DEVICE, .dstMemoryType = CU_MEMORYTYPE_DEVICE, .srcDevice = mapped_frame, .dstDevice = (CUdeviceptr)frame->data[i], .srcPitch = pitch, .dstPitch = frame->linesize[i], .srcY = offset, .WidthInBytes = FFMIN(pitch, frame->linesize[i]), .Height = avctx->coded_height >> (i ? 1 : 0), }; ret = CHECK_CU(cuMemcpy2D(&cpy)); if (ret < 0) goto error; offset += avctx->coded_height; } } else if (avctx->pix_fmt == AV_PIX_FMT_NV12) {