static int libx265_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                const AVFrame *pic, int *got_packet)
{
    libx265Context *ctx = avctx->priv_data;
    x265_picture x265pic;
    x265_picture x265pic_out = { { 0 } };
    x265_nal *nal;
    uint8_t *dst;
    int payload = 0;
    int nnal;
    int ret;
    int i;

    x265_picture_init(ctx->params, &x265pic);

    if (pic) {
        /* wrap the input frame's planes; no copy is made */
        for (i = 0; i < 3; i++) {
            x265pic.planes[i] = pic->data[i];
            x265pic.stride[i] = pic->linesize[i];
        }

        x265pic.pts      = pic->pts;
        x265pic.bitDepth = av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].depth_minus1 + 1;
    }

    /* pic == NULL flushes delayed frames at end of stream */
    ret = x265_encoder_encode(ctx->encoder, &nal, &nnal,
                              pic ? &x265pic : NULL, &x265pic_out);
    if (ret < 0)
        return AVERROR_UNKNOWN;

    if (!nnal)
        return 0;

    for (i = 0; i < nnal; i++)
        payload += nal[i].sizeBytes;

    ret = ff_alloc_packet(pkt, payload);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
        return ret;
    }
    dst = pkt->data;

    /* concatenate all NAL units into a single output packet */
    for (i = 0; i < nnal; i++) {
        memcpy(dst, nal[i].payload, nal[i].sizeBytes);
        dst += nal[i].sizeBytes;

        if (is_keyframe(nal[i].type))
            pkt->flags |= AV_PKT_FLAG_KEY;
    }

    pkt->pts = x265pic_out.pts;
    pkt->dts = x265pic_out.dts;

    *got_packet = 1;
    return 0;
}
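Both libx265 encode variants in this listing gate AV_PKT_FLAG_KEY on an is_keyframe() helper that the excerpt does not include. Below is a minimal sketch of such a predicate, assuming the public NalUnitType enum from x265.h, where BLA/IDR/CRA slice NAL unit types mark HEVC random-access points; the real helper in the source file may differ.

/* Sketch under the assumption above: treat any IRAP (BLA/IDR/CRA) slice
 * NAL unit as a keyframe, everything else as non-key. */
static int is_keyframe(NalUnitType naltype)
{
    switch (naltype) {
    case NAL_UNIT_CODED_SLICE_BLA_W_LP:
    case NAL_UNIT_CODED_SLICE_BLA_W_RADL:
    case NAL_UNIT_CODED_SLICE_BLA_N_LP:
    case NAL_UNIT_CODED_SLICE_IDR_W_RADL:
    case NAL_UNIT_CODED_SLICE_IDR_N_LP:
    case NAL_UNIT_CODED_SLICE_CRA:
        return 1;
    default:
        return 0;
    }
}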
static GstFlowReturn
theora_parse_drain_queue (GstTheoraParse * parse, gint64 granulepos)
{
  GstFlowReturn ret = GST_FLOW_OK;
  gint64 keyframe, prev_frame, frame;

  parse_granulepos (parse, granulepos, &keyframe, &frame);

  GST_DEBUG ("draining queue of length %d",
      g_queue_get_length (parse->buffer_queue));

  GST_LOG_OBJECT (parse, "gp %" G_GINT64_FORMAT ", kf %" G_GINT64_FORMAT
      ", frame %" G_GINT64_FORMAT, granulepos, keyframe, frame);

  prev_frame = frame - g_queue_get_length (parse->buffer_queue);

  GST_LOG_OBJECT (parse,
      "new prev %" G_GINT64_FORMAT ", prev %" G_GINT64_FORMAT,
      prev_frame, parse->prev_frame);

  if (prev_frame < parse->prev_frame) {
    GST_WARNING ("jumped %" G_GINT64_FORMAT
        " frames backwards! not sure what to do here",
        parse->prev_frame - prev_frame);
    parse->prev_frame = prev_frame;
  } else if (prev_frame > parse->prev_frame) {
    GST_INFO ("discontinuity detected (%" G_GINT64_FORMAT " frames)",
        prev_frame - parse->prev_frame);
    if (keyframe <= prev_frame && keyframe > parse->prev_keyframe)
      parse->prev_keyframe = keyframe;
    parse->prev_frame = prev_frame;
  }

  while (!g_queue_is_empty (parse->buffer_queue)) {
    GstBuffer *buf;

    parse->prev_frame++;
    g_assert (parse->prev_frame >= 0);

    buf = GST_BUFFER_CAST (g_queue_pop_head (parse->buffer_queue));

    if (is_keyframe (buf))
      /* we have a keyframe */
      parse->prev_keyframe = parse->prev_frame;
    else
      GST_BUFFER_FLAGS (buf) |= GST_BUFFER_FLAG_DELTA_UNIT;

    ret = theora_parse_push_buffer (parse, buf, parse->prev_keyframe,
        parse->prev_frame);

    if (ret != GST_FLOW_OK)
      goto done;
  }

done:
  return ret;
}
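The Theora parser, by contrast, decides keyframe status from the buffer contents. In a Theora bitstream, data packets keep bit 0x80 of the first byte clear (it marks header packets), and bit 0x40 distinguishes inter frames (set) from intra frames (clear). A sketch of the is_keyframe() predicate, assuming the GStreamer 0.10 buffer accessors this code appears to target:

/* Sketch: a Theora data packet is an intra (key) frame when bit 0x40 of its
 * first byte is clear. Empty packets are duplicate frames, hence not
 * keyframes. Assumes 0.10-style GST_BUFFER_DATA / GST_BUFFER_SIZE. */
static gboolean
is_keyframe (GstBuffer * buf)
{
  if (GST_BUFFER_DATA (buf) == NULL || GST_BUFFER_SIZE (buf) == 0)
    return FALSE;
  return (GST_BUFFER_DATA (buf)[0] & 0x40) == 0;
}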
static GstFlowReturn
theora_parse_drain_queue_prematurely (GstTheoraParse * parse)
{
  GstFlowReturn ret = GST_FLOW_OK;

  /* got an EOS event, make sure to push out any buffers that were in the queue
   * -- won't normally be the case, but this catches the
   * didn't-get-a-granulepos-on-the-last-packet case. Assuming a continuous
   * stream. */

  GST_DEBUG_OBJECT (parse, "got EOS, draining queue");

  /* if we get an eos before pushing the streamheaders, drain our events before
   * eos */
  theora_parse_drain_event_queue (parse);

  while (!g_queue_is_empty (parse->buffer_queue)) {
    GstBuffer *buf;

    buf = GST_BUFFER_CAST (g_queue_pop_head (parse->buffer_queue));

    parse->prev_frame++;

    if (is_keyframe (buf))
      /* we have a keyframe */
      parse->prev_keyframe = parse->prev_frame;
    else
      GST_BUFFER_FLAGS (buf) |= GST_BUFFER_FLAG_DELTA_UNIT;

    if (parse->prev_keyframe < 0) {
      if (GST_BUFFER_OFFSET_END_IS_VALID (buf)) {
        parse_granulepos (parse, GST_BUFFER_OFFSET_END (buf),
            &parse->prev_keyframe, NULL);
      } else {
        /* No previous keyframe known; can't extract one from this frame. That
         * means we can't do any valid output for this frame, just continue to
         * the next frame. */
        gst_buffer_unref (buf);
        continue;
      }
    }

    ret = theora_parse_push_buffer (parse, buf, parse->prev_keyframe,
        parse->prev_frame);

    if (ret != GST_FLOW_OK)
      goto done;
  }

done:
  return ret;
}
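Both drain paths also rely on parse_granulepos() to split an Ogg/Theora granulepos into a keyframe number and an absolute frame number: the upper bits count the last keyframe, and the low keyframe_granule_shift bits count frames since that keyframe. A sketch, assuming a hypothetical parse->shift field holding the stream's granule shift:

/* Sketch: granulepos = (keyframe_number << shift) | frames_since_keyframe.
 * parse->shift is assumed to hold keyframe_granule_shift from the stream
 * headers; the real struct layout may differ. */
static void
parse_granulepos (GstTheoraParse * parse, gint64 granulepos,
    gint64 * keyframe, gint64 * frame)
{
  gint64 kf = granulepos >> parse->shift;

  if (keyframe)
    *keyframe = kf;
  if (frame)
    *frame = kf + (granulepos & (((gint64) 1 << parse->shift) - 1));
}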
static int libx265_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                const AVFrame *pic, int *got_packet)
{
    libx265Context *ctx = avctx->priv_data;
    x265_picture x265pic;
    x265_picture x265pic_out = { 0 };
    x265_nal *nal;
    uint8_t *dst;
    int payload = 0;
    int nnal;
    int ret;
    int i;

    ctx->api->picture_init(ctx->params, &x265pic);

    if (pic) {
        /* wrap the input frame's planes; no copy is made */
        for (i = 0; i < 3; i++) {
            x265pic.planes[i] = pic->data[i];
            x265pic.stride[i] = pic->linesize[i];
        }

        x265pic.pts      = pic->pts;
        x265pic.bitDepth = av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].depth;

        /* honor a caller-forced picture type, optionally promoting forced
         * I-frames to IDR */
        x265pic.sliceType = pic->pict_type == AV_PICTURE_TYPE_I ?
                            (ctx->forced_idr ? X265_TYPE_IDR : X265_TYPE_I) :
                            pic->pict_type == AV_PICTURE_TYPE_P ? X265_TYPE_P :
                            pic->pict_type == AV_PICTURE_TYPE_B ? X265_TYPE_B :
                            X265_TYPE_AUTO;
    }

    /* pic == NULL flushes delayed frames at end of stream */
    ret = ctx->api->encoder_encode(ctx->encoder, &nal, &nnal,
                                   pic ? &x265pic : NULL, &x265pic_out);
    if (ret < 0)
        return AVERROR_EXTERNAL;

    if (!nnal)
        return 0;

    for (i = 0; i < nnal; i++)
        payload += nal[i].sizeBytes;

    ret = ff_alloc_packet2(avctx, pkt, payload, payload);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
        return ret;
    }
    dst = pkt->data;

    /* concatenate all NAL units into a single output packet */
    for (i = 0; i < nnal; i++) {
        memcpy(dst, nal[i].payload, nal[i].sizeBytes);
        dst += nal[i].sizeBytes;

        if (is_keyframe(nal[i].type))
            pkt->flags |= AV_PKT_FLAG_KEY;
    }

    pkt->pts = x265pic_out.pts;
    pkt->dts = x265pic_out.dts;

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    switch (x265pic_out.sliceType) {
    case X265_TYPE_IDR:
    case X265_TYPE_I:
        avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
        break;
    case X265_TYPE_P:
        avctx->coded_frame->pict_type = AV_PICTURE_TYPE_P;
        break;
    case X265_TYPE_B:
        avctx->coded_frame->pict_type = AV_PICTURE_TYPE_B;
        break;
    }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    /* X265_TYPE_B frames are not used as references, so their packets can
     * be dropped without breaking decode */
#if X265_BUILD >= 130
    if (x265pic_out.sliceType == X265_TYPE_B)
#else
    if (x265pic_out.frameData.sliceType == 'b')
#endif
        pkt->flags |= AV_PKT_FLAG_DISPOSABLE;

    *got_packet = 1;
    return 0;
}
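For context on how an encode callback like the one above is driven at end of stream: passing pic == NULL drains x265's lookahead, and the caller keeps invoking the encoder until got_packet stays zero. An illustrative loop; flush_encoder() is hypothetical and not part of the source file:

/* Hypothetical flush loop over the encode callback above. */
static int flush_encoder(AVCodecContext *avctx, AVPacket *pkt)
{
    int got_packet;

    do {
        /* pic == NULL asks the encoder to emit any delayed frames */
        int ret = libx265_encode_frame(avctx, pkt, NULL, &got_packet);
        if (ret < 0)
            return ret;
        if (got_packet)
            av_packet_unref(pkt);   /* a real caller would mux pkt first */
    } while (got_packet);

    return 0;
}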