bool MediaEngine::setVideoDim(int width, int height) {
	if (!m_pCodecCtx)
		return false;
#ifdef USE_FFMPEG
	if (width == 0 && height == 0) {
		// Use the original video size.
		m_desWidth = m_pCodecCtx->width;
		m_desHeight = m_pCodecCtx->height;
	} else {
		m_desWidth = width;
		m_desHeight = height;
	}

	// Allocate video frame
	m_pFrame = avcodec_alloc_frame();

	// Free any previous context before resetting, so it isn't leaked.
	sws_freeContext(m_sws_ctx);
	m_sws_ctx = NULL;
	m_sws_fmt = -1;
	updateSwsFormat(TPSM_PIXEL_STORAGE_MODE_32BIT_ABGR8888);

	// Allocate video frame for RGB24
	m_pFrameRGB = avcodec_alloc_frame();
	int numBytes = avpicture_get_size((AVPixelFormat)m_sws_fmt, m_desWidth, m_desHeight);
	m_buffer = (u8*)av_malloc(numBytes * sizeof(uint8_t));

	// Assign appropriate parts of buffer to image planes in m_pFrameRGB
	avpicture_fill((AVPicture *)m_pFrameRGB, m_buffer, (AVPixelFormat)m_sws_fmt, m_desWidth, m_desHeight);
#endif // USE_FFMPEG
	return true;
}
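
// Not shown in this excerpt: updateSwsFormat() lazily (re)creates the swscale
// context whenever the requested output pixel mode changes. A minimal sketch,
// written against this version's members and assuming a getSwsFormat() helper
// that maps the PSP pixel mode to the matching AVPixelFormat (e.g. 32-bit
// ABGR8888 -> AV_PIX_FMT_RGBA):
void MediaEngine::updateSwsFormat(int videoPixelMode) {
	AVPixelFormat swsDesired = getSwsFormat(videoPixelMode);  // assumed helper
	if ((int)swsDesired != m_sws_fmt && m_pFrame && m_pCodecCtx) {
		m_sws_fmt = swsDesired;
		// sws_getCachedContext() reuses the old context when the parameters
		// match, and frees + reallocates it when they don't.
		m_sws_ctx = sws_getCachedContext(m_sws_ctx,
			m_pCodecCtx->width, m_pCodecCtx->height, m_pCodecCtx->pix_fmt,
			m_desWidth, m_desHeight, (AVPixelFormat)m_sws_fmt,
			SWS_BILINEAR, NULL, NULL, NULL);
	}
}
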
bool MediaEngine::stepVideo(int videoPixelMode) {
#ifdef USE_FFMPEG
	auto codecIter = m_pCodecCtxs.find(m_videoStream);
	AVCodecContext *m_pCodecCtx = codecIter == m_pCodecCtxs.end() ? 0 : codecIter->second;

	if (!m_pFormatCtx)
		return false;
	if (!m_pCodecCtx)
		return false;
	if (!m_pFrame || !m_pFrameRGB)
		return false;

	updateSwsFormat(videoPixelMode);
	// TODO: Technically we could set this to frameWidth instead of m_desWidth for better perf.
	// Update the linesize for the new format too.  We started with the largest size, so it should fit.
	m_pFrameRGB->linesize[0] = getPixelFormatBytes(videoPixelMode) * m_desWidth;

	AVPacket packet;
	int frameFinished;
	bool bGetFrame = false;
	while (!bGetFrame) {
		bool dataEnd = av_read_frame(m_pFormatCtx, &packet) < 0;
		// Even if we've read all frames, some may have been re-ordered frames at the end.
		// Still need to decode those, so keep calling avcodec_decode_video2().
		if (dataEnd || packet.stream_index == m_videoStream) {
			// avcodec_decode_video2() gives us the re-ordered frames with a NULL packet.
			if (dataEnd)
				av_free_packet(&packet);

			int result = avcodec_decode_video2(m_pCodecCtx, m_pFrame, &frameFinished, &packet);
			if (frameFinished) {
				sws_scale(m_sws_ctx, m_pFrame->data, m_pFrame->linesize, 0,
					m_pCodecCtx->height, m_pFrameRGB->data, m_pFrameRGB->linesize);

				if (av_frame_get_best_effort_timestamp(m_pFrame) != AV_NOPTS_VALUE)
					m_videopts = av_frame_get_best_effort_timestamp(m_pFrame) + av_frame_get_pkt_duration(m_pFrame) - m_firstTimeStamp;
				else
					m_videopts += av_frame_get_pkt_duration(m_pFrame);
				bGetFrame = true;
			}
			if (result <= 0 && dataEnd) {
				// Sometimes, m_readSize is less than m_streamSize at the end, but not by much.
				// This is kinda a hack, but the ringbuffer would have to be prematurely empty too.
				m_isVideoEnd = !bGetFrame && (m_pdata->getQueueSize() == 0);
				if (m_isVideoEnd)
					m_decodingsize = 0;
				break;
			}
		}
		av_free_packet(&packet);
	}
	return bGetFrame;
#else
	// If video engine is not available, just add to the timestamp at least.
	m_videopts += 3003;
	return true;
#endif // USE_FFMPEG
}
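
// Not part of this excerpt: a minimal sketch of what getPixelFormatBytes(),
// used for the linesize above, is assumed to return. It is based on the
// standard PSP pixel modes (the GE_CMODE_* names match the constant used in
// setVideoDim() below): the three 16-bit modes are 2 bytes per pixel, 8888 is 4.
static int getPixelFormatBytes(int pspFormat) {
	switch (pspFormat) {
	case GE_CMODE_16BIT_BGR5650:
	case GE_CMODE_16BIT_ABGR5551:
	case GE_CMODE_16BIT_ABGR4444:
		return 2;
	case GE_CMODE_32BIT_ABGR8888:
		return 4;
	default:
		return 4;  // unknown mode; fall back to the largest size
	}
}
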
bool MediaEngine::stepVideo(int videoPixelMode) {
	// If the video engine is broken, at least force the timestamp forward.
	m_videopts += 3003;
#ifdef USE_FFMPEG
	updateSwsFormat(videoPixelMode);

	// The members are stored as opaque pointers, so cast them back before use.
	AVFormatContext *pFormatCtx = (AVFormatContext *)m_pFormatCtx;
	AVCodecContext *pCodecCtx = (AVCodecContext *)m_pCodecCtx;
	AVFrame *pFrame = (AVFrame *)m_pFrame;
	AVFrame *pFrameRGB = (AVFrame *)m_pFrameRGB;
	if (!pFrame || !pFrameRGB)
		return false;

	// TODO: Technically we could set this to frameWidth instead of m_desWidth for better perf.
	// Update the linesize for the new format too.  We started with the largest size, so it should fit.
	pFrameRGB->linesize[0] = getPixelFormatBytes(videoPixelMode) * m_desWidth;

	AVPacket packet;
	int frameFinished;
	bool bGetFrame = false;
	while (av_read_frame(pFormatCtx, &packet) >= 0) {
		if (packet.stream_index == m_videoStream) {
			// Decode video frame (through the casted pFrame, not the opaque m_pFrame).
			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
			if (frameFinished) {
				// Only convert once a complete frame has actually been decoded.
				sws_scale(m_sws_ctx, pFrame->data, pFrame->linesize, 0,
					pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
				int firstTimeStamp = bswap32(*(int *)(m_pdata + 86));
				m_videopts = pFrame->pkt_dts + pFrame->pkt_duration - firstTimeStamp;
				bGetFrame = true;
			}
		}
		av_free_packet(&packet);
		if (bGetFrame)
			break;
	}
	if (!bGetFrame && m_readSize >= m_streamSize)
		m_isVideoEnd = true;
	return bGetFrame;
#else
	return true;
#endif // USE_FFMPEG
}
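
// The bswap32() helper above is assumed to be a plain 32-bit byte swap; the
// stream header stores its first timestamp big-endian (read here from a fixed
// offset of 86 bytes into m_pdata). A minimal sketch:
static inline u32 bswap32(u32 x) {
	return (x >> 24) | ((x >> 8) & 0x0000FF00) |
	       ((x << 8) & 0x00FF0000) | (x << 24);
}
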
bool MediaEngine::setVideoDim(int width, int height) {
#ifdef USE_FFMPEG
	auto codecIter = m_pCodecCtxs.find(m_videoStream);
	if (codecIter == m_pCodecCtxs.end())
		return false;
	AVCodecContext *m_pCodecCtx = codecIter->second;

	if (width == 0 && height == 0) {
		// Use the original video size.
		m_desWidth = m_pCodecCtx->width;
		m_desHeight = m_pCodecCtx->height;
	} else {
		m_desWidth = width;
		m_desHeight = height;
	}

	// Allocate video frame
	m_pFrame = av_frame_alloc();

	sws_freeContext(m_sws_ctx);
	m_sws_ctx = NULL;
	m_sws_fmt = -1;
	updateSwsFormat(GE_CMODE_32BIT_ABGR8888);

	// Allocate video frame for RGB24
	m_pFrameRGB = av_frame_alloc();
	int numBytes = avpicture_get_size((AVPixelFormat)m_sws_fmt, m_desWidth, m_desHeight);
	m_buffer = (u8*)av_malloc(numBytes * sizeof(uint8_t));

	// Assign appropriate parts of buffer to image planes in m_pFrameRGB
	avpicture_fill((AVPicture *)m_pFrameRGB, m_buffer, (AVPixelFormat)m_sws_fmt, m_desWidth, m_desHeight);
#endif // USE_FFMPEG
	return true;
}
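
// A hedged usage sketch, not from the source: roughly how a caller would be
// expected to drive the two functions above. writeVideoImage() is an assumed
// companion member that copies m_pFrameRGB's pixels out, and decodeOneFrame(),
// dest, and frameWidth are hypothetical names for illustration only.
static void decodeOneFrame(MediaEngine *engine, u8 *dest, int frameWidth) {
	engine->setVideoDim(0, 0);  // 0x0 means "use the stream's own dimensions"
	if (engine->stepVideo(GE_CMODE_32BIT_ABGR8888)) {
		// Assumed signature: (output buffer, output stride in pixels, pixel mode).
		engine->writeVideoImage(dest, frameWidth, GE_CMODE_32BIT_ABGR8888);
	}
}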