/// @brief Encode one raw video frame and mux the resulting packet.
///
/// Frames are pushed into the encoder every call, but the encoder may
/// buffer several frames before emitting a packet; the parallel
/// m_bufferedVideoFrameTimes / m_bufferedVideoFrameTypes queues keep the
/// original timecode and requested picture type for each in-flight frame
/// so they can be re-attached to the packet when it finally comes out.
///
/// @param frame  decoded frame to encode; frame->timecode is rewritten to
///               the stream-relative value actually muxed.
/// @return 1 on success (packet written or frame buffered returns the
///         encoder's non-negative result), or the negative libav error.
int AVFormatWriter::WriteVideoFrame(VideoFrame *frame)
{
    // Count frames already handed to the encoder, including those it has
    // buffered internally and not yet emitted.
    int framesEncoded = m_framesWritten + m_bufferedVideoFrameTimes.size();

    av_frame_unref(m_picture);
    AVPictureFill(reinterpret_cast<AVPicture*>(m_picture), frame);
    m_picture->pts = framesEncoded + 1;

    // Force a keyframe at the configured interval; otherwise let the
    // encoder pick the picture type.
    if ((framesEncoded % m_keyFrameDist) == 0)
        m_picture->pict_type = AV_PICTURE_TYPE_I;
    else
        m_picture->pict_type = AV_PICTURE_TYPE_NONE;

    int got_pkt = 0;
    int ret = 0;

    // Remember this frame's timecode and picture type so they can be
    // matched up with the (possibly delayed) output packet.
    m_bufferedVideoFrameTimes.push_back(frame->timecode);
    m_bufferedVideoFrameTypes.push_back(m_picture->pict_type);

    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    {
        // libavcodec is not thread-safe across contexts; serialize access.
        QMutexLocker locker(avcodeclock);
        ret = avcodec_encode_video2(m_videoStream->codec, &pkt, m_picture,
                                    &got_pkt);
    }

    if (ret < 0)
    {
        // NOTE(review): added LOC prefix for consistency with the
        // av_interleaved_write_frame error log below.
        LOG(VB_RECORD, LOG_ERR, LOC + "avcodec_encode_video2() failed");
        return ret;
    }

    if (!got_pkt)
    {
        // Encoder buffered the frame; nothing to mux yet.
        //LOG(VB_RECORD, LOG_DEBUG, QString("WriteVideoFrame(): Frame Buffered: cs: %1, mfw: %2, f->tc: %3, fn: %4, pt: %5").arg(pkt.size).arg(m_framesWritten).arg(frame->timecode).arg(frame->frameNumber).arg(m_picture->pict_type));
        return ret;
    }

    // Pull the oldest buffered timecode/type — that is the frame this
    // packet corresponds to, not necessarily the one passed in now.
    long long tc = frame->timecode;
    if (!m_bufferedVideoFrameTimes.isEmpty())
        tc = m_bufferedVideoFrameTimes.takeFirst();
    if (!m_bufferedVideoFrameTypes.isEmpty())
    {
        int pict_type = m_bufferedVideoFrameTypes.takeFirst();
        if (pict_type == AV_PICTURE_TYPE_I)
            pkt.flags |= AV_PKT_FLAG_KEY;
    }

    // First packet establishes the timecode origin for the stream.
    if (m_startingTimecodeOffset == -1)
        m_startingTimecodeOffset = tc - 1;
    tc -= m_startingTimecodeOffset;

    // tc is in milliseconds; rescale to the stream time base.
    pkt.pts = tc * m_videoStream->time_base.den
                 / m_videoStream->time_base.num / 1000;
    pkt.dts = AV_NOPTS_VALUE;
    pkt.stream_index = m_videoStream->index;

    //LOG(VB_RECORD, LOG_DEBUG, QString("WriteVideoFrame(): cs: %1, mfw: %2, pkt->pts: %3, tc: %4, fn: %5, pic->pts: %6, f->tc: %7, pt: %8").arg(pkt.size).arg(m_framesWritten).arg(pkt.pts).arg(tc).arg(frame->frameNumber).arg(m_picture->pts).arg(frame->timecode).arg(m_picture->pict_type));

    ret = av_interleaved_write_frame(m_ctx, &pkt);
    if (ret != 0)
        LOG(VB_RECORD, LOG_ERR, LOC + "WriteVideoFrame(): "
            "av_interleaved_write_frame couldn't write Video");

    // Report back the absolute timecode that was actually muxed.
    frame->timecode = tc + m_startingTimecodeOffset;
    m_framesWritten++;
    av_packet_unref(&pkt);

    return 1;
}
/// @brief Copy one decoded picture (or one field of an interlaced pair)
///        from the CrystalHD driver buffer into m_frame.
///
/// Progressive pictures are copied whole and queued immediately.
/// Interlaced sources arrive one field per call: the first field
/// allocates m_frame, the second is woven in and the completed frame is
/// queued. A mismatched picture number means the partner field never
/// arrived, so the partial frame is flushed first.
///
/// @param out  driver output descriptor (pixels, geometry, flags).
void PrivateDecoderCrystalHD::FillFrame(BC_DTS_PROC_OUT *out)
{
    // Detect whether this call delivers the second field of the frame
    // currently being assembled.
    bool second_field = false;
    if (m_frame)
    {
        second_field =
            (out->PicInfo.picture_number == m_frame->frameNumber);
        if (!second_field)
        {
            // Partner field was dropped — send what we have and start over.
            LOG(VB_PLAYBACK, LOG_WARNING, LOC + "Missing second field");
            AddFrameToQueue();
        }
    }

    int in_width   = out->PicInfo.width;
    int in_height  = out->PicInfo.height;
    // Destination pitch: source width rounded up to a multiple of 16.
    int out_width  = (in_width + 15) & (~0xf);
    int out_height = in_height;
    uint8_t* src   = out->Ybuff;

    if (!m_frame)
    {
        // First (or only) field: allocate a fresh YV12 frame buffer.
        int size = buffersize(FMT_YV12, out_width, out_height);
        unsigned char* buf = (unsigned char*)av_malloc(size);
        m_frame = new VideoFrame();
        init(m_frame, FMT_YV12, buf, out_width, out_height, size);
        m_frame->timecode    = (int64_t)out->PicInfo.timeStamp;
        m_frame->frameNumber = out->PicInfo.picture_number;
    }

    // line 21 data (608/708 captions)
    // this appears to be unimplemented in the driver
    if (out->UserData && out->UserDataSz)
    {
        int size = (out->UserDataSz > 1024) ? 1024 : out->UserDataSz;
        m_frame->priv[0] = (unsigned char*)av_malloc(size);
        memcpy(m_frame->priv[0], out->UserData, size);
        m_frame->qstride = size; // don't try this at home
    }

    AVPixelFormat out_fmt = AV_PIX_FMT_YUV420P;
    AVPixelFormat in_fmt  = bcmpixfmt_to_pixfmt(m_pix_fmt);
    AVPicture img_in;
    avpicture_fill(&img_in, src, in_fmt, in_width, in_height);

    if (!(out->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC))
    {
        // Progressive: one copy, one queue.
        m_copyCtx.Copy(m_frame, &img_in, in_fmt);
        m_frame->interlaced_frame = 0;
        AddFrameToQueue();
        return;
    }

    // Interlaced: weave this field into the destination by doubling the
    // line strides, offsetting by one line for the bottom field.
    AVPicture img_out;
    AVPictureFill(&img_out, m_frame);
    img_out.linesize[0] *= 2;
    img_out.linesize[1] *= 2;
    img_out.linesize[2] *= 2;
    m_frame->top_field_first = out->PicInfo.pulldown == vdecTopBottom;

    if (out->PoutFlags & BC_POUT_FLAGS_FLD_BOT)
    {
        img_out.data[0] += out_width;
        img_out.data[1] += out_width >> 1;
        img_out.data[2] += out_width >> 1;
    }
    m_copyCtx.Copy(&img_out, out_fmt, &img_in, in_fmt,
                   in_width, in_height / 2);

    // Frame is complete only once both fields are in.
    if (second_field)
        AddFrameToQueue();
}