bool QTDecompressionSession::canDecompress(QTPixelBuffer inBuffer)
{
    return m_session
        && inBuffer.pixelFormatType() == m_pixelFormat
        && inBuffer.width() == m_width
        && inBuffer.height() == m_height;
}
void MediaPlayerPrivateQuickTimeVisualContext::paint(GraphicsContext* p, const IntRect& r)
{
    MediaRenderingMode currentMode = currentRenderingMode();

    if (currentMode == MediaRenderingNone)
        return;

    if (currentMode == MediaRenderingSoftwareRenderer && !m_visualContext)
        return;

    QTPixelBuffer buffer = m_visualContext->imageForTime(0);
    if (buffer.pixelBufferRef()) {
#if USE(ACCELERATED_COMPOSITING)
        if (m_qtVideoLayer) {
            // We are probably being asked to render the video into a canvas, but
            // there's a good chance the QTPixelBuffer is not ARGB and thus can't be
            // drawn using CG. If so, fire up an ICMDecompressionSession and convert
            // the current frame into something which can be rendered by CG.
            if (!buffer.pixelFormatIs32ARGB() && !buffer.pixelFormatIs32BGRA()) {
                // The decompression session will only decompress a specific pixelFormat
                // at a specific width and height; if these differ, the session must be
                // recreated with the new parameters.
                if (!m_decompressionSession || !m_decompressionSession->canDecompress(buffer))
                    m_decompressionSession = QTDecompressionSession::create(buffer.pixelFormatType(), buffer.width(), buffer.height());
                buffer = m_decompressionSession->decompress(buffer);
            }
        }
#endif
        CGImageRef image = CreateCGImageFromPixelBuffer(buffer);

        CGContextRef context = p->platformContext();
        CGContextSaveGState(context);
        CGContextTranslateCTM(context, r.x(), r.y());
        CGContextTranslateCTM(context, 0, r.height());
        CGContextScaleCTM(context, 1, -1);
        CGContextDrawImage(context, CGRectMake(0, 0, r.width(), r.height()), image);
        CGContextRestoreGState(context);

        CGImageRelease(image);
    }
    paintCompleted(*p, r);
}
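// The CTM adjustments in paint() above are the standard CoreGraphics idiom for drawing
// into a top-left-origin rect: CG's default origin is the bottom-left, so the context is
// translated down to the rect's bottom edge and the y-axis flipped before
// CGContextDrawImage, otherwise the frame would render upside down. A minimal,
// self-contained sketch of that idiom (drawImageTopDown is a hypothetical helper name,
// not part of this class):
static void drawImageTopDown(CGContextRef context, CGImageRef image, const CGRect& destRect)
{
    CGContextSaveGState(context);
    CGContextTranslateCTM(context, destRect.origin.x, destRect.origin.y); // move to the rect's top-left corner
    CGContextTranslateCTM(context, 0, destRect.size.height); // move down to the rect's bottom edge
    CGContextScaleCTM(context, 1, -1); // flip y so the image reads top-down
    CGContextDrawImage(context, CGRectMake(0, 0, destRect.size.width, destRect.size.height), image);
    CGContextRestoreGState(context);
}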
void MediaPlayerPrivateQuickTimeVisualContext::retrieveCurrentImage()
{
    if (!m_visualContext)
        return;

#if USE(ACCELERATED_COMPOSITING)
    if (m_qtVideoLayer) {
        QTPixelBuffer buffer = m_visualContext->imageForTime(0);
        if (!buffer.pixelBufferRef())
            return;

        WKCACFLayer* layer = static_cast<WKCACFLayer*>(m_qtVideoLayer->platformLayer());

        if (!buffer.lockBaseAddress()) {
            if (requiredDllsAvailable()) {
                if (!m_imageQueue) {
                    m_imageQueue = new WKCAImageQueue(buffer.width(), buffer.height(), 30);
                    m_imageQueue->setFlags(WKCAImageQueue::Fill, WKCAImageQueue::Fill);
                    layer->setContents(m_imageQueue->get());
                }

                // Debug QuickTime links against a non-Debug version of CoreFoundation, so the
                // CFDictionary attached to the CVPixelBuffer cannot be directly passed on into the
                // CAImageQueue without being converted to a non-Debug CFDictionary. Additionally,
                // old versions of QuickTime used a non-AAS CoreFoundation, so the types are not
                // interchangeable even in the release case.
                RetainPtr<CFDictionaryRef> attachments(AdoptCF, QTCFDictionaryCreateCopyWithDataCallback(kCFAllocatorDefault, buffer.attachments(), &QTCFDictionaryCreateWithDataCallback));
                CFTimeInterval imageTime = QTMovieVisualContext::currentHostTime();

                m_imageQueue->collect();

                uint64_t imageId = m_imageQueue->registerPixelBuffer(buffer.baseAddress(), buffer.dataSize(), buffer.bytesPerRow(), buffer.width(), buffer.height(), buffer.pixelFormatType(), attachments.get(), 0);

                if (m_imageQueue->insertImage(imageTime, WKCAImageQueue::Buffer, imageId, WKCAImageQueue::Opaque | WKCAImageQueue::Flush, &QTPixelBuffer::imageQueueReleaseCallback, buffer.pixelBufferRef())) {
                    // Retain the buffer one extra time so it doesn't disappear before
                    // CAImageQueue decides to release it:
                    QTPixelBuffer::retainCallback(buffer.pixelBufferRef());
                }
            } else {
                CGImageRef image = CreateCGImageFromPixelBuffer(buffer);
                layer->setContents(image);
                CGImageRelease(image);
            }

            buffer.unlockBaseAddress();
            layer->rootLayer()->setNeedsRender();
        }
    } else
#endif
        m_player->repaint();

    m_visualContext->task();
}
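// Sketch of the ownership handoff used with WKCAImageQueue above. These two functions
// are assumptions about what QTPixelBuffer's retainCallback/imageQueueReleaseCallback
// plausibly do, not necessarily the shipping implementations: the image queue drops one
// reference through its release callback once it is finished with the frame, so
// retrieveCurrentImage() balances that by retaining once more after insertImage()
// succeeds. CVPixelBufferRetain/CVPixelBufferRelease are the standard CoreVideo
// refcounting calls such callbacks would wrap; the names below are hypothetical.
static void retainPixelBufferForImageQueue(void* refcon)
{
    CVPixelBufferRetain(static_cast<CVPixelBufferRef>(refcon));
}

static void releasePixelBufferFromImageQueue(unsigned /*type*/, uint64_t /*id*/, void* refcon)
{
    CVPixelBufferRelease(static_cast<CVPixelBufferRef>(refcon));
}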