Example no. 1
0
// Draws the current video frame into the GraphicsContext at rect |r| for the
// software rendering path, then notifies paintCompleted(). Does nothing when
// rendering is disabled, when the software renderer has no visual context, or
// when a compositing video layer is handling presentation instead.
void MediaPlayerPrivateQuickTimeVisualContext::paint(GraphicsContext* p, const IntRect& r)
{
    MediaRenderingMode mode = currentRenderingMode();

    if (mode == MediaRenderingNone)
        return;

    // Software rendering needs a visual context to pull frames from.
    if (mode == MediaRenderingSoftwareRenderer && !m_visualContext)
        return;

#if USE(ACCELERATED_COMPOSITING)
    // A live video layer means the compositor draws the video for us.
    if (m_qtVideoLayer)
        return;
#endif
    QTPixelBuffer frame = m_visualContext->imageForTime(0);
    if (frame.pixelBufferRef()) {
        CGImageRef frameImage = CreateCGImageFromPixelBuffer(frame);
        CGContextRef cgContext = p->platformContext();

        // Flip the coordinate system vertically (translate to the rect's
        // bottom edge, then mirror) so the image is drawn upright in CG's
        // bottom-left-origin space.
        CGContextSaveGState(cgContext);
        CGContextTranslateCTM(cgContext, r.x(), r.y() + r.height());
        CGContextScaleCTM(cgContext, 1, -1);
        CGContextDrawImage(cgContext, CGRectMake(0, 0, r.width(), r.height()), frameImage);
        CGContextRestoreGState(cgContext);

        CGImageRelease(frameImage);
    }
    paintCompleted(*p, r);
}
// Returns true only when this session was created for exactly the pixel
// format and dimensions of |inBuffer|; a session decompresses one fixed
// format/size combination.
bool QTDecompressionSession::canDecompress(QTPixelBuffer inBuffer)
{
    if (!m_session)
        return false;
    if (inBuffer.pixelFormatType() != m_pixelFormat)
        return false;
    return inBuffer.width() == m_width && inBuffer.height() == m_height;
}
// Copies the image for |timeStamp| out of the QuickTime visual context.
// Returns an empty QTPixelBuffer when there is no visual context or when
// QuickTime reports an error.
QTPixelBuffer QTMovieVisualContextPriv::imageForTime(const QTCVTimeStamp* timeStamp)
{
    QTPixelBuffer result;
    if (!m_visualContext)
        return result;

    CVImageBufferRef image = 0;
    OSStatus status = QTVisualContextCopyImageForTime(m_visualContext, kCFAllocatorDefault, reinterpret_cast<const CVTimeStamp*>(timeStamp), &image);
    if (status == noErr) {
        // adopt() presumably assumes ownership of the +1 reference returned
        // by the Copy call -- consistent with its use here, but verify.
        result.adopt(image);
    }
    return result;
}
Example no. 4
0
// Pushes the newest video frame from the visual context toward the screen:
// when a compositing video layer exists, the frame is handed to it either via
// a CAImageQueue (zero-copy path) or as a CGImage layer-contents fallback;
// otherwise the player is asked to repaint (software path). Always gives the
// visual context a chance to do housekeeping via task() at the end.
void MediaPlayerPrivateQuickTimeVisualContext::retrieveCurrentImage()
{
    if (!m_visualContext)
        return;

#if USE(ACCELERATED_COMPOSITING)
    if (m_qtVideoLayer) {

        QTPixelBuffer buffer = m_visualContext->imageForTime(0);
        if (!buffer.pixelBufferRef())
            return;

        WKCACFLayer* layer = static_cast<WKCACFLayer*>(m_qtVideoLayer->platformLayer());

        // NOTE(review): a zero return from lockBaseAddress() is treated as
        // success here -- confirm against QTPixelBuffer. The lock is balanced
        // by unlockBaseAddress() below.
        if (!buffer.lockBaseAddress()) {
            if (requiredDllsAvailable()) {
                // Preferred path: feed the raw pixels into a CAImageQueue so
                // the compositor can consume them without a CGImage copy.
                // The queue is created lazily for this buffer's dimensions.
                if (!m_imageQueue) {
                    m_imageQueue = new WKCAImageQueue(buffer.width(), buffer.height(), 30);
                    m_imageQueue->setFlags(WKCAImageQueue::Fill, WKCAImageQueue::Fill);
                    layer->setContents(m_imageQueue->get());
                }

                // Debug QuickTime links against a non-Debug version of CoreFoundation, so the
                // CFDictionary attached to the CVPixelBuffer cannot be directly passed on into the
                // CAImageQueue without being converted to a non-Debug CFDictionary.  Additionally,
                // old versions of QuickTime used a non-AAS CoreFoundation, so the types are not
                // interchangeable even in the release case.
                RetainPtr<CFDictionaryRef> attachments(AdoptCF, QTCFDictionaryCreateCopyWithDataCallback(kCFAllocatorDefault, buffer.attachments(), &QTCFDictionaryCreateWithDataCallback));
                CFTimeInterval imageTime = QTMovieVisualContext::currentHostTime();

                // Let the queue reclaim slots for images it has finished with.
                m_imageQueue->collect();

                uint64_t imageId = m_imageQueue->registerPixelBuffer(buffer.baseAddress(), buffer.dataSize(), buffer.bytesPerRow(), buffer.width(), buffer.height(), buffer.pixelFormatType(), attachments.get(), 0);

                if (m_imageQueue->insertImage(imageTime, WKCAImageQueue::Buffer, imageId, WKCAImageQueue::Opaque | WKCAImageQueue::Flush, &QTPixelBuffer::imageQueueReleaseCallback, buffer.pixelBufferRef())) {
                    // Retain the buffer one extra time so it doesn't disappear before CAImageQueue decides to release it:
                    QTPixelBuffer::retainCallback(buffer.pixelBufferRef());
                }

            } else {
                // Fallback when the image-queue DLLs are unavailable: wrap the
                // frame in a CGImage and set it as the layer's contents.
                CGImageRef image = CreateCGImageFromPixelBuffer(buffer);
                layer->setContents(image);
                CGImageRelease(image);
            }

            buffer.unlockBaseAddress();
            layer->rootLayer()->setNeedsRender();
        }
    } else
#endif
        m_player->repaint();

    m_visualContext->task();
}
// Decompresses |inBuffer| through this session, returning the resulting
// frame, or an empty QTPixelBuffer when the session cannot handle the
// buffer's pixel format or dimensions (see canDecompress()).
QTPixelBuffer QTDecompressionSession::decompress(QTPixelBuffer inBuffer)
{
    if (!canDecompress(inBuffer))
        return QTPixelBuffer();
    
    inBuffer.lockBaseAddress();
    ICMDecompressionSessionDecodeFrame(m_session,
        static_cast<UInt8*>(inBuffer.baseAddress()),
        inBuffer.dataSize(),
        0, // frameOptions
        0, // frameTime
        0); // sourceFrameRefCon

    // Balance the lockBaseAddress() above; the original leaked the lock.
    // retrieveCurrentImage() pairs these two calls the same way.
    inBuffer.unlockBaseAddress();

    // Because we passed in 0 for frameTime, the above function
    // is synchronous, and the client callback will have been
    // called before the function returns, and m_latestFrame
    // will contain the newly decompressed frame.
    return m_latestFrame;
}
Example no. 6
0
// Draws the current video frame into the GraphicsContext at rect |r|, then
// notifies paintCompleted(). When the frame's pixel format is not CG-drawable
// (neither 32ARGB nor 32BGRA) and a video layer is active, the frame is first
// run through an ICMDecompressionSession to convert it. No-op when rendering
// is disabled or the software renderer has no visual context.
void MediaPlayerPrivateQuickTimeVisualContext::paint(GraphicsContext* p, const IntRect& r)
{
    MediaRenderingMode currentMode = currentRenderingMode();

    if (currentMode == MediaRenderingNone)
        return;

    if (currentMode == MediaRenderingSoftwareRenderer && !m_visualContext)
        return;

    QTPixelBuffer buffer = m_visualContext->imageForTime(0);
    if (buffer.pixelBufferRef()) {
#if USE(ACCELERATED_COMPOSITING)
        if (m_qtVideoLayer) {
            // We are probably being asked to render the video into a canvas, but
            // there's a good chance the QTPixelBuffer is not ARGB and thus can't be
            // drawn using CG.  If so, fire up an ICMDecompressionSession and convert
            // the current frame into something which can be rendered by CG.
            if (!buffer.pixelFormatIs32ARGB() && !buffer.pixelFormatIs32BGRA()) {
                // The decompression session will only decompress a specific pixelFormat
                // at a specific width and height; if these differ, the session must be
                // recreated with the new parameters.
                if (!m_decompressionSession || !m_decompressionSession->canDecompress(buffer))
                    m_decompressionSession = QTDecompressionSession::create(buffer.pixelFormatType(), buffer.width(), buffer.height());
                buffer = m_decompressionSession->decompress(buffer);
            }
        }
#endif
        CGImageRef image = CreateCGImageFromPixelBuffer(buffer);

        CGContextRef context = p->platformContext();
        CGContextSaveGState(context);
        // Flip the CTM vertically so the top-left-origin frame draws upright
        // in CG's bottom-left-origin coordinate space.
        CGContextTranslateCTM(context, r.x(), r.y());
        CGContextTranslateCTM(context, 0, r.height());
        CGContextScaleCTM(context, 1, -1);
        CGContextDrawImage(context, CGRectMake(0, 0, r.width(), r.height()), image);
        CGContextRestoreGState(context);

        CGImageRelease(image);
    }
    paintCompleted(*p, r);
}
Example no. 7
0
// Wraps the pixels of |buffer| in a CGImage without copying them (the data
// provider reads straight out of the pixel buffer, which is retained for the
// provider's lifetime). Only 32-bit BGRA and ARGB buffers are supported; any
// other format trips an assertion. Returns 0 on failure. The caller owns the
// returned image and must CGImageRelease() it.
static CGImageRef CreateCGImageFromPixelBuffer(QTPixelBuffer buffer)
{
#if USE(ACCELERATED_COMPOSITING)
    CGDataProviderRef provider = 0;
    CGColorSpaceRef colorSpace = 0;
    CGImageRef image = 0;

    size_t bitsPerComponent = 0;
    size_t bitsPerPixel = 0;
    CGImageAlphaInfo alphaInfo = kCGImageAlphaNone;
        
    if (buffer.pixelFormatIs32BGRA()) {
        bitsPerComponent = 8;
        bitsPerPixel = 32;
        // B,G,R,A bytes read as a little-endian word are X,R,G,B with the
        // (ignored) alpha in the first word position: SkipFirst | 32Little.
        alphaInfo = (CGImageAlphaInfo)(kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Little);
    } else if (buffer.pixelFormatIs32ARGB()) {
        bitsPerComponent = 8;
        bitsPerPixel = 32;
        // A,R,G,B in big-endian order has the (ignored) alpha byte FIRST,
        // so this must be SkipFirst, mirroring the BGRA branch above.
        // The previous SkipLast | 32Big described RGBX and shifted every
        // channel by one byte.
        alphaInfo = (CGImageAlphaInfo)(kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Big);
    } else {
        // All other pixel formats are currently unsupported:
        ASSERT_NOT_REACHED();
    }

    CGDataProviderDirectAccessCallbacks callbacks = {
        &QTPixelBuffer::dataProviderGetBytePointerCallback,
        &QTPixelBuffer::dataProviderReleaseBytePointerCallback,
        &QTPixelBuffer::dataProviderGetBytesAtPositionCallback,
        &QTPixelBuffer::dataProviderReleaseInfoCallback,
    };
    
    // Colorspace should be device, so that Quartz does not have to do an extra render.
    colorSpace = CGColorSpaceCreateDeviceRGB();
    require(colorSpace, Bail);
            
    provider = CGDataProviderCreateDirectAccess(buffer.pixelBufferRef(), buffer.dataSize(), &callbacks);
    require(provider, Bail);

    // CGDataProvider does not retain the buffer, but it will release it later, so do an extra retain here:
    QTPixelBuffer::retainCallback(buffer.pixelBufferRef());
        
    image = CGImageCreate(buffer.width(), buffer.height(), bitsPerComponent, bitsPerPixel, buffer.bytesPerRow(), colorSpace, alphaInfo, provider, 0, false, kCGRenderingIntentDefault);

Bail:
    // Once the image is created we can release our reference to the provider and the colorspace, they are retained by the image
    if (provider)
        CGDataProviderRelease(provider);
    if (colorSpace)
        CGColorSpaceRelease(colorSpace);

    return image;
#else
    return 0;
#endif
}