// Convert an MPEG I-frame into a bitmap. This is used as the way of // sending still pictures. We convert the image to a QImage even // though that actually means converting it from YUV and eventually // converting it back again but we do this very infrequently so the // cost is outweighed by the simplification. void MHIBitmap::CreateFromMPEG(const unsigned char *data, int length) { AVCodecContext *c = NULL; AVFrame *picture = NULL; AVPacket pkt; uint8_t *buff = NULL; int gotPicture = 0, len; m_image = QImage(); // Find the mpeg2 video decoder. AVCodec *codec = avcodec_find_decoder(CODEC_ID_MPEG2VIDEO); if (!codec) return; c = avcodec_alloc_context3(NULL); picture = avcodec_alloc_frame(); if (avcodec_open2(c, codec, NULL) < 0) goto Close; // Copy the data into AVPacket if (av_new_packet(&pkt, length) < 0) goto Close; memcpy(pkt.data, data, length); buff = pkt.data; while (pkt.size > 0 && ! gotPicture) { len = avcodec_decode_video2(c, picture, &gotPicture, &pkt); if (len < 0) // Error goto Close; pkt.data += len; pkt.size -= len; } if (!gotPicture) { pkt.data = NULL; pkt.size = 0; // Process any buffered data if (avcodec_decode_video2(c, picture, &gotPicture, &pkt) < 0) goto Close; } if (gotPicture) { int nContentWidth = c->width; int nContentHeight = c->height; m_image = QImage(nContentWidth, nContentHeight, QImage::Format_ARGB32); m_opaque = true; // MPEG images are always opaque. AVPicture retbuf; memset(&retbuf, 0, sizeof(AVPicture)); int bufflen = nContentWidth * nContentHeight * 3; unsigned char *outputbuf = new unsigned char[bufflen]; avpicture_fill(&retbuf, outputbuf, PIX_FMT_RGB24, nContentWidth, nContentHeight); myth_sws_img_convert( &retbuf, PIX_FMT_RGB24, (AVPicture*)picture, c->pix_fmt, nContentWidth, nContentHeight); uint8_t * buf = outputbuf; // Copy the data a pixel at a time. // This should handle endianness correctly. 
for (int i = 0; i < nContentHeight; i++) { for (int j = 0; j < nContentWidth; j++) { int red = *buf++; int green = *buf++; int blue = *buf++; m_image.setPixel(j, i, qRgb(red, green, blue)); } } delete [] outputbuf; } Close: pkt.data = buff; av_free_packet(&pkt); avcodec_close(c); av_free(c); av_free(picture); }
int PrivateDecoderVDA::GetFrame(AVStream *stream, AVFrame *picture, int *got_picture_ptr, AVPacket *pkt) { if (!pkt) CocoaAutoReleasePool pool; int result = -1; if (!m_lib || !stream) return result; AVCodecContext *avctx = stream->codec; if (!avctx) return result; if (pkt) { CFDataRef avc_demux; CFDictionaryRef params; if (m_annexb) { // convert demuxer packet from bytestream (AnnexB) to bitstream AVIOContext *pb; int demuxer_bytes; uint8_t *demuxer_content; if(avio_open_dyn_buf(&pb) < 0) { return result; } demuxer_bytes = avc_parse_nal_units(pb, pkt->data, pkt->size); demuxer_bytes = avio_close_dyn_buf(pb, &demuxer_content); avc_demux = CFDataCreate(kCFAllocatorDefault, demuxer_content, demuxer_bytes); av_free(demuxer_content); } else if (m_convert_3byteTo4byteNALSize) { // convert demuxer packet from 3 byte NAL sizes to 4 byte AVIOContext *pb; if (avio_open_dyn_buf(&pb) < 0) { return result; } uint32_t nal_size; uint8_t *end = pkt->data + pkt->size; uint8_t *nal_start = pkt->data; while (nal_start < end) { nal_size = VDA_RB24(nal_start); avio_wb32(pb, nal_size); nal_start += 3; avio_write(pb, nal_start, nal_size); nal_start += nal_size; } uint8_t *demuxer_content; int demuxer_bytes = avio_close_dyn_buf(pb, &demuxer_content); avc_demux = CFDataCreate(kCFAllocatorDefault, demuxer_content, demuxer_bytes); av_free(demuxer_content); } else { avc_demux = CFDataCreate(kCFAllocatorDefault, pkt->data, pkt->size); } CFStringRef keys[4] = { CFSTR("FRAME_PTS"), CFSTR("FRAME_INTERLACED"), CFSTR("FRAME_TFF"), CFSTR("FRAME_REPEAT") }; CFNumberRef values[5]; values[0] = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &pkt->pts); values[1] = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt8Type, &picture->interlaced_frame); values[2] = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt8Type, &picture->top_field_first); values[3] = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt8Type, &picture->repeat_pict); params = CFDictionaryCreate(kCFAllocatorDefault, (const void 
**)&keys, (const void **)&values, 4, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks); INIT_ST; vda_st = m_lib->decoderDecode((VDADecoder)m_decoder, 0, avc_demux, params); CHECK_ST; if (vda_st == kVDADecoderNoErr) result = pkt->size; CFRelease(avc_demux); CFRelease(params); } if (m_decoded_frames.size() < m_max_ref_frames) return result; *got_picture_ptr = 1; m_frame_lock.lock(); VDAFrame vdaframe = m_decoded_frames.takeLast(); m_frame_lock.unlock(); if (avctx->get_buffer(avctx, picture) < 0) return -1; picture->reordered_opaque = vdaframe.pts; picture->interlaced_frame = vdaframe.interlaced_frame; picture->top_field_first = vdaframe.top_field_first; picture->repeat_pict = vdaframe.repeat_pict; VideoFrame *frame = (VideoFrame*)picture->opaque; PixelFormat in_fmt = PIX_FMT_NONE; PixelFormat out_fmt = PIX_FMT_NONE; if (vdaframe.format == 'BGRA') in_fmt = PIX_FMT_BGRA; else if (vdaframe.format == '2vuy') in_fmt = PIX_FMT_UYVY422; if (frame->codec == FMT_YV12) out_fmt = PIX_FMT_YUV420P; if (out_fmt != PIX_FMT_NONE && in_fmt != PIX_FMT_NONE && frame->buf) { CVPixelBufferLockBaseAddress(vdaframe.buffer, 0); uint8_t* base = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(vdaframe.buffer, 0); AVPicture img_in, img_out; avpicture_fill(&img_out, (uint8_t *)frame->buf, out_fmt, frame->width, frame->height); avpicture_fill(&img_in, base, in_fmt, frame->width, frame->height); myth_sws_img_convert(&img_out, out_fmt, &img_in, in_fmt, frame->width, frame->height); CVPixelBufferUnlockBaseAddress(vdaframe.buffer, 0); } else { LOG(VB_GENERAL, LOG_ERR, LOC + "Failed to convert decoded frame."); } CVPixelBufferRelease(vdaframe.buffer); return result; }
bool ThumbFinder::getFrameImage(bool needKeyFrame, int64_t requiredPTS) { AVPacket pkt; AVPicture orig; AVPicture retbuf; bzero(&orig, sizeof(AVPicture)); bzero(&retbuf, sizeof(AVPicture)); av_init_packet(&pkt); int frameFinished = 0; int keyFrame; int frameCount = 0; bool gotKeyFrame = false; while (av_read_frame(m_inputFC, &pkt) >= 0 && !frameFinished) { if (pkt.stream_index == m_videostream) { frameCount++; keyFrame = pkt.flags & PKT_FLAG_KEY; if (m_startPTS == -1 && pkt.dts != (int64_t)AV_NOPTS_VALUE) { m_startPTS = pkt.dts; m_frameTime = pkt.duration; } if (keyFrame) gotKeyFrame = true; if (!gotKeyFrame && needKeyFrame) { av_free_packet(&pkt); continue; } if (m_firstIFramePTS == -1) m_firstIFramePTS = pkt.dts; avcodec_decode_video2(m_codecCtx, m_frame, &frameFinished, &pkt); if (requiredPTS != -1 && pkt.dts != (int64_t)AV_NOPTS_VALUE && pkt.dts < requiredPTS) frameFinished = false; m_currentPTS = pkt.dts; } av_free_packet(&pkt); } if (frameFinished) { avpicture_fill(&retbuf, m_outputbuf, PIX_FMT_RGB32, m_frameWidth, m_frameHeight); avpicture_deinterlace((AVPicture*)m_frame, (AVPicture*)m_frame, m_codecCtx->pix_fmt, m_frameWidth, m_frameHeight); myth_sws_img_convert( &retbuf, PIX_FMT_RGB32, (AVPicture*) m_frame, m_codecCtx->pix_fmt, m_frameWidth, m_frameHeight); QImage img(m_outputbuf, m_frameWidth, m_frameHeight, QImage::Format_RGB32); QByteArray ffile = m_frameFile.toLocal8Bit(); if (!img.save(ffile.constData(), "JPEG")) { VERBOSE(VB_IMPORTANT, "Failed to save thumb: " + m_frameFile); } if (m_updateFrame) { if (m_image) { m_image->DownRef(); m_image = NULL; } m_image = GetMythMainWindow()->GetCurrentPainter()->GetFormatImage(); m_image->Assign(img); m_image->UpRef(); m_frameImage->SetImage(m_image); } updateCurrentPos(); } return true; }
// Copy a decoded picture (or one field of an interlaced picture) from the
// CrystalHD driver output into m_frame, converting to YV12, and queue the
// frame once it is complete.
void PrivateDecoderCrystalHD::FillFrame(BC_DTS_PROC_OUT *out)
{
    bool second_field = false;
    if (m_frame)
    {
        // A pending frame exists: either this is its second field, or the
        // second field of the previous picture never arrived.
        if (out->PicInfo.picture_number != m_frame->frameNumber)
        {
            LOG(VB_PLAYBACK, LOG_WARNING, LOC + "Missing second field");
            AddFrameToQueue();
        }
        else
        {
            second_field = true;
        }
    }

    int in_width   = out->PicInfo.width;
    int in_height  = out->PicInfo.height;
    // Round the output width up to a multiple of 16 for the YV12 buffer.
    int out_width  = (in_width + 15) & (~0xf);
    int out_height = in_height;
    int size       = ((out_width * (out_height + 1)) * 3) / 2;
    uint8_t* src   = out->Ybuff;

    if (!m_frame)
    {
        unsigned char* buf = new unsigned char[size];
        m_frame = new VideoFrame();
        init(m_frame, FMT_YV12, buf, out_width, out_height, size);
        m_frame->timecode    = (int64_t)out->PicInfo.timeStamp;
        m_frame->frameNumber = out->PicInfo.picture_number;
    }

    if (!m_frame)
        return;

    // line 21 data (608/708 captions)
    // this appears to be unimplemented in the driver
    if (out->UserData && out->UserDataSz)
    {
        // Renamed from 'size': the original shadowed the frame-size
        // variable above, which is error-prone.
        // NOTE(review): if both fields carry user data, the first
        // priv[0] allocation is overwritten here without being freed —
        // confirm whether init()/the queue consumer owns it.
        int udsize = out->UserDataSz > 1024 ? 1024 : out->UserDataSz;
        m_frame->priv[0] = new unsigned char[udsize];
        memcpy(m_frame->priv[0], out->UserData, udsize);
        m_frame->qstride = udsize; // don't try this at home
    }

    PixelFormat out_fmt = PIX_FMT_YUV420P;
    PixelFormat in_fmt  = bcmpixfmt_to_pixfmt(m_pix_fmt);
    AVPicture img_in, img_out;

    avpicture_fill(&img_out, (uint8_t *)m_frame->buf, out_fmt,
                   out_width, out_height);
    avpicture_fill(&img_in, src, in_fmt, in_width, in_height);

    if (!(out->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC))
    {
        // Progressive: one conversion fills the whole frame.
        myth_sws_img_convert(&img_out, out_fmt, &img_in, in_fmt,
                             in_width, in_height);
        m_frame->interlaced_frame = 0;
        AddFrameToQueue();
    }
    else
    {
        // Interlaced: double the output strides so the conversion writes
        // every other line, offset into the bottom field when needed.
        img_out.linesize[0] *= 2;
        img_out.linesize[1] *= 2;
        img_out.linesize[2] *= 2;
        m_frame->top_field_first = out->PicInfo.pulldown == vdecTopBottom;
        int field = out->PoutFlags & BC_POUT_FLAGS_FLD_BOT;
        if (field)
        {
            img_out.data[0] += out_width;
            img_out.data[1] += out_width >> 1;
            img_out.data[2] += out_width >> 1;
        }
        myth_sws_img_convert(&img_out, out_fmt, &img_in, in_fmt,
                             in_width, in_height / 2);
        // Only queue once both fields have been written.
        if (second_field)
            AddFrameToQueue();
    }
}