int PrivateDecoderCrystalHD::GetFrame(AVStream *stream, AVFrame *picture, int *got_picture_ptr, AVPacket *pkt) { int result = -1; if (!stream || !m_device || !picture) return result; AVCodecContext *avctx = stream->codec; if (!avctx || !StartFetcherThread()) return result; if (pkt && pkt->size) { result = ProcessPacket(stream, pkt); if (result < 0) return result; } m_decoded_frames_lock.lock(); int available = m_decoded_frames.size(); m_decoded_frames_lock.unlock(); if (!available) return result; if (avctx->get_buffer(avctx, picture) < 0) { LOG(VB_GENERAL, LOG_ERR, LOC + QString("%1 decoded frames available but no video buffers.") .arg(available)); return -1; } m_decoded_frames_lock.lock(); VideoFrame *frame = m_decoded_frames.takeLast(); m_decoded_frames_lock.unlock(); *got_picture_ptr = 1; picture->reordered_opaque = (int64_t)(frame->timecode / av_q2d(stream->time_base) / 10000000); LOG(VB_TIMESTAMP, LOG_DEBUG, LOC + QString("decoder output timecode %1 ms (pts %2)") .arg(frame->timecode / 10000).arg(picture->reordered_opaque)); picture->interlaced_frame = frame->interlaced_frame; picture->top_field_first = frame->top_field_first; picture->repeat_pict = frame->repeat_pict; copy((VideoFrame*)picture->opaque, frame); if (frame->priv[0] && frame->qstride) { memcpy(picture->atsc_cc_buf, frame->priv[0], frame->qstride); picture->atsc_cc_len = frame->qstride; } free_frame(frame); return result; }
/// Allocate an AVFrame sized for @a video_stream's codec context and back it
/// with storage from the codec's get_buffer() callback.
/// Returns NULL if either the frame or its buffer cannot be obtained.
static AVFrame *alloc_picture(AVStream *video_stream, enum AVPixelFormat pix_fmt)
{
    AVCodecContext *ctx = video_stream->codec;

    AVFrame *frame = avcodec_alloc_frame();
    if (!frame)
        return NULL;

    // Reset every field to its default before filling in the geometry,
    // pixel format, and starting timestamp we need.
    avcodec_get_frame_defaults(frame);
    frame->format = pix_fmt;
    frame->width  = ctx->width;
    frame->height = ctx->height;
    frame->pts    = 0;

    // Attach pixel storage via the codec context; on failure release the
    // frame shell so the caller sees a single NULL result.
    if (ctx->get_buffer(ctx, frame) < 0)
    {
        avcodec_free_frame(&frame);
        return NULL;
    }

    return frame;
}
int PrivateDecoderVDA::GetFrame(AVStream *stream, AVFrame *picture, int *got_picture_ptr, AVPacket *pkt) { if (!pkt) CocoaAutoReleasePool pool; int result = -1; if (!m_lib || !stream) return result; AVCodecContext *avctx = stream->codec; if (!avctx) return result; if (pkt) { CFDataRef avc_demux; CFDictionaryRef params; if (m_annexb) { // convert demuxer packet from bytestream (AnnexB) to bitstream AVIOContext *pb; int demuxer_bytes; uint8_t *demuxer_content; if(avio_open_dyn_buf(&pb) < 0) { return result; } demuxer_bytes = avc_parse_nal_units(pb, pkt->data, pkt->size); demuxer_bytes = avio_close_dyn_buf(pb, &demuxer_content); avc_demux = CFDataCreate(kCFAllocatorDefault, demuxer_content, demuxer_bytes); av_free(demuxer_content); } else if (m_convert_3byteTo4byteNALSize) { // convert demuxer packet from 3 byte NAL sizes to 4 byte AVIOContext *pb; if (avio_open_dyn_buf(&pb) < 0) { return result; } uint32_t nal_size; uint8_t *end = pkt->data + pkt->size; uint8_t *nal_start = pkt->data; while (nal_start < end) { nal_size = VDA_RB24(nal_start); avio_wb32(pb, nal_size); nal_start += 3; avio_write(pb, nal_start, nal_size); nal_start += nal_size; } uint8_t *demuxer_content; int demuxer_bytes = avio_close_dyn_buf(pb, &demuxer_content); avc_demux = CFDataCreate(kCFAllocatorDefault, demuxer_content, demuxer_bytes); av_free(demuxer_content); } else { avc_demux = CFDataCreate(kCFAllocatorDefault, pkt->data, pkt->size); } CFStringRef keys[4] = { CFSTR("FRAME_PTS"), CFSTR("FRAME_INTERLACED"), CFSTR("FRAME_TFF"), CFSTR("FRAME_REPEAT") }; CFNumberRef values[5]; values[0] = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &pkt->pts); values[1] = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt8Type, &picture->interlaced_frame); values[2] = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt8Type, &picture->top_field_first); values[3] = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt8Type, &picture->repeat_pict); params = CFDictionaryCreate(kCFAllocatorDefault, (const void 
**)&keys, (const void **)&values, 4, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks); INIT_ST; vda_st = m_lib->decoderDecode((VDADecoder)m_decoder, 0, avc_demux, params); CHECK_ST; if (vda_st == kVDADecoderNoErr) result = pkt->size; CFRelease(avc_demux); CFRelease(params); } if (m_decoded_frames.size() < m_max_ref_frames) return result; *got_picture_ptr = 1; m_frame_lock.lock(); VDAFrame vdaframe = m_decoded_frames.takeLast(); m_frame_lock.unlock(); if (avctx->get_buffer(avctx, picture) < 0) return -1; picture->reordered_opaque = vdaframe.pts; picture->interlaced_frame = vdaframe.interlaced_frame; picture->top_field_first = vdaframe.top_field_first; picture->repeat_pict = vdaframe.repeat_pict; VideoFrame *frame = (VideoFrame*)picture->opaque; PixelFormat in_fmt = PIX_FMT_NONE; PixelFormat out_fmt = PIX_FMT_NONE; if (vdaframe.format == 'BGRA') in_fmt = PIX_FMT_BGRA; else if (vdaframe.format == '2vuy') in_fmt = PIX_FMT_UYVY422; if (frame->codec == FMT_YV12) out_fmt = PIX_FMT_YUV420P; if (out_fmt != PIX_FMT_NONE && in_fmt != PIX_FMT_NONE && frame->buf) { CVPixelBufferLockBaseAddress(vdaframe.buffer, 0); uint8_t* base = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(vdaframe.buffer, 0); AVPicture img_in, img_out; avpicture_fill(&img_out, (uint8_t *)frame->buf, out_fmt, frame->width, frame->height); avpicture_fill(&img_in, base, in_fmt, frame->width, frame->height); myth_sws_img_convert(&img_out, out_fmt, &img_in, in_fmt, frame->width, frame->height); CVPixelBufferUnlockBaseAddress(vdaframe.buffer, 0); } else { LOG(VB_GENERAL, LOG_ERR, LOC + "Failed to convert decoded frame."); } CVPixelBufferRelease(vdaframe.buffer); return result; }
/** \brief Feed one packet to libmpeg2 and drive its parse state machine.
 *
 *  Runs mpeg2_parse() until the packet's data is exhausted (STATE_BUFFER)
 *  or an error forces a Reset().  Decoded pictures are staged in the
 *  partialFrames queue and handed back one per call via @a picture.
 *
 *  \param stream          source stream; its codec context supplies
 *                         get_buffer()/release_buffer()
 *  \param picture         output AVFrame, overwritten from partialFrames
 *  \param got_picture_ptr set to 1 only when a frame was dequeued, else 0
 *  \param pkt             compressed input packet
 *  \return pkt->size on success, -1 on decoder error
 */
int PrivateDecoderMPEG2::GetFrame(AVStream *stream, AVFrame *picture,
                                  int *got_picture_ptr, AVPacket *pkt)
{
    AVCodecContext *avctx = stream->codec;
    // No picture until one is actually dequeued below.
    *got_picture_ptr = 0;
    const mpeg2_info_t *info = mpeg2_info(mpeg2dec);
    // Hand the whole packet to libmpeg2's internal bitstream buffer.
    mpeg2_buffer(mpeg2dec, pkt->data, pkt->data + pkt->size);
    while (1)
    {
        switch (mpeg2_parse(mpeg2dec))
        {
            case STATE_SEQUENCE:
                // libmpeg2 needs three buffers to do its work.
                // We set up two prediction buffers here, from
                // the set of available video frames.
                mpeg2_custom_fbuf(mpeg2dec, 1);
                for (int i = 0; i < 2; i++)
                {
                    avctx->get_buffer(avctx, picture);
                    mpeg2_set_buf(mpeg2dec, picture->data, picture->opaque);
                }
                break;
            case STATE_PICTURE:
                // This sets up the third buffer for libmpeg2.
                // We use up one of the three buffers for each
                // frame shown. The frames get released once
                // they are drawn (outside this routine).
                avctx->get_buffer(avctx, picture);
                mpeg2_set_buf(mpeg2dec, picture->data, picture->opaque);
                break;
            case STATE_BUFFER:
                // We're finished with the buffer...
                // Return the oldest staged frame, if any, and report the
                // packet as fully consumed.
                if (partialFrames.size())
                {
                    AVFrame *frm = partialFrames.dequeue();
                    *got_picture_ptr = 1;
                    *picture = *frm;
                    delete frm;
#if 0
                    QString msg("");
                    AvFormatDecoder *nd = (AvFormatDecoder *)(avctx->opaque);
                    if (nd && nd->GetNVP() && nd->GetNVP()->getVideoOutput())
                        msg = nd->GetNVP()->getVideoOutput()->GetFrameStatus();
                    VERBOSE(VB_IMPORTANT, "ret frame: "<<picture->opaque <<" "<<msg);
#endif
                }
                return pkt->size;
            case STATE_INVALID:
                // This is the error state. The decoder must be
                // reset on an error.
                Reset();
                return -1;
            case STATE_SLICE:
            case STATE_END:
            case STATE_INVALID_END:
                if (info->display_fbuf)
                {
                    // Stage the displayable frame, keyed by the fbuf id,
                    // unless it is already queued.
                    bool exists = false;
                    avframe_q::iterator it = partialFrames.begin();
                    for (; it != partialFrames.end(); ++it)
                        if ((*it)->opaque == info->display_fbuf->id)
                            exists = true;
                    if (!exists)
                    {
                        // Wrap libmpeg2's planes in a user-owned AVFrame;
                        // the id travels in opaque for later matching.
                        AVFrame *frm = new AVFrame();
                        frm->data[0] = info->display_fbuf->buf[0];
                        frm->data[1] = info->display_fbuf->buf[1];
                        frm->data[2] = info->display_fbuf->buf[2];
                        frm->data[3] = NULL;
                        frm->opaque  = info->display_fbuf->id;
                        frm->type    = FF_BUFFER_TYPE_USER;
                        frm->top_field_first =
                            !!(info->display_picture->flags &
                               PIC_FLAG_TOP_FIELD_FIRST);
                        frm->interlaced_frame =
                            !(info->display_picture->flags &
                              PIC_FLAG_PROGRESSIVE_FRAME);
                        // Flag name differs between the external and the
                        // bundled libmpeg2 headers.
                        frm->repeat_pict =
                            !!(info->display_picture->flags &
#if CONFIG_LIBMPEG2EXTERNAL
                               PIC_FLAG_REPEAT_FIRST_FIELD);
#else
                               PIC_FLAG_REPEAT_FIELD);
#endif
                        partialFrames.enqueue(frm);
                    }
                }
                if (info->discard_fbuf)
                {
                    // libmpeg2 is done with this buffer.  If it is still
                    // staged, mark it (data[3] used as a discard flag);
                    // otherwise release it back to the codec context.
                    bool exists = false;
                    avframe_q::iterator it = partialFrames.begin();
                    for (; it != partialFrames.end(); ++it)
                    {
                        if ((*it)->opaque == info->discard_fbuf->id)
                        {
                            exists = true;
                            (*it)->data[3] = (unsigned char*) 1;
                        }
                    }
                    if (!exists)
                    {
                        AVFrame frame;
                        frame.opaque = info->discard_fbuf->id;
                        frame.type   = FF_BUFFER_TYPE_USER;
                        avctx->release_buffer(avctx, &frame);
                    }
                }
                break;
            default:
                break;
        }