void QTMovieVisualContextPriv::setMovie(PassRefPtr<QTMovie> movie)
{
    if (movie == m_movie)
        return;

    if (m_movie) {
        // Detach the visual context from the previous movie before switching.
        SetMovieVisualContext(m_movie->getMovieHandle(), 0);
        m_movie = 0;
    }

    if (movie) {
        OSStatus status = SetMovieVisualContext(movie->getMovieHandle(), m_visualContext);
        (void)status; // Return code is not acted upon here.
    }

    m_movie = movie;
}
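/*
 * Illustrative sketch (not part of the WebKit source above): once a movie has been
 * attached to a QTVisualContextRef via SetMovieVisualContext, the rendering side
 * typically polls the context for frames. Only the QuickTime/CoreVideo calls below
 * are real API; the function name pullFrameFromVisualContext and the polling
 * structure are assumptions for illustration.
 */
#include <QuickTime/QuickTime.h>

static CVImageBufferRef pullFrameFromVisualContext(Movie movie, QTVisualContextRef visualContext)
{
    CVImageBufferRef image = 0;

    /* Give QuickTime time to decode; without this no new frames become available. */
    MoviesTask(movie, 0);

    /* A NULL timestamp asks whether a frame is available for the current time. */
    if (QTVisualContextIsNewImageAvailable(visualContext, 0)) {
        OSStatus status = QTVisualContextCopyImageForTime(visualContext, kCFAllocatorDefault, 0, &image);
        if (status != noErr)
            image = 0;
    }

    /* Let the context reclaim resources for frames that are no longer needed. */
    QTVisualContextTask(visualContext);

    /* The caller owns the returned buffer and must CVBufferRelease() it. */
    return image;
}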
void MovieGlHap::allocateVisualContext()
{
	// Load HAP Movie
	if( HapQTQuickTimeMovieHasHapTrackPlayable( getObj()->mMovie ) ) {
		// QT Visual Context attributes
		OSStatus err = noErr;
		QTVisualContextRef * visualContext = (QTVisualContextRef*)&getObj()->mVisualContext;
		CFDictionaryRef pixelBufferOptions = HapQTCreateCVPixelBufferOptionsDictionary();
		const CFStringRef keys[] = { kQTVisualContextPixelBufferAttributesKey };
		CFDictionaryRef visualContextOptions = ::CFDictionaryCreate( kCFAllocatorDefault,
		                                                             (const void**)&keys,
		                                                             (const void**)&pixelBufferOptions,
		                                                             sizeof(keys) / sizeof(keys[0]),
		                                                             &kCFTypeDictionaryKeyCallBacks,
		                                                             &kCFTypeDictionaryValueCallBacks );
		err = QTPixelBufferContextCreate( kCFAllocatorDefault, visualContextOptions, visualContext );
		::CFRelease( pixelBufferOptions );
		::CFRelease( visualContextOptions );
		if( err != noErr ) {
			CI_LOG_E( "HAP ERROR :: " << err << " couldn't create visual context." );
			return;
		}
		// Set the movie's visual context
		err = SetMovieVisualContext( getObj()->mMovie, *visualContext );
		if( err != noErr ) {
			CI_LOG_E( "HAP ERROR :: " << err << " SetMovieVisualContext." );
			return;
		}
	}

	// Get codec name
	for( long i = 1; i <= GetMovieTrackCount( getObj()->mMovie ); i++ ) {
		Track track = GetMovieIndTrack( getObj()->mMovie, i );
		Media media = GetTrackMedia( track );
		OSType mediaType;
		GetMediaHandlerDescription( media, &mediaType, NULL, NULL );
		if( mediaType == VideoMediaType ) {
			// Get the codec-type of this track
			ImageDescriptionHandle imageDescription = (ImageDescriptionHandle)NewHandle( 0 ); // GetMediaSampleDescription will resize it
			GetMediaSampleDescription( media, 1, (SampleDescriptionHandle)imageDescription );
			OSType codecType = (*imageDescription)->cType;
			DisposeHandle( (Handle)imageDescription );

			switch( codecType ) {
				case 'Hap1': mCodec = Codec::HAP;         break;
				case 'Hap5': mCodec = Codec::HAP_A;       break;
				case 'HapY': mCodec = Codec::HAP_Q;       break;
				default:     mCodec = Codec::UNSUPPORTED; break;
			}
		}
	}

	// Set framerate callback
	this->setNewFrameCallback( updateMovieFPS, (void*)this );
}
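/*
 * Illustrative sketch (not part of the Cinder-Hap source above): frames copied out
 * of a pixel-buffer context configured with HapQTCreateCVPixelBufferOptionsDictionary()
 * arrive as DXT-compressed CVPixelBuffers rather than RGBA. The FourCC values below
 * are written out literally as an assumption; the canonical symbolic constants live
 * in the Hap QuickTime headers. describeHapPixelFormat is a hypothetical helper.
 */
#include <CoreVideo/CoreVideo.h>

static const char* describeHapPixelFormat(CVPixelBufferRef frame)
{
    OSType format = CVPixelBufferGetPixelFormatType(frame);
    switch (format) {
    case 'DXt1': return "Hap (RGB DXT1)";
    case 'DXt5': return "Hap Alpha (RGBA DXT5)";
    case 'DXtY': return "Hap Q (scaled YCoCg DXT5)";
    default:     return "not a Hap DXT buffer";
    }
}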
static HRESULT QT_Process_Video_Track(QTSplitter* filter, Track trk)
{
    AM_MEDIA_TYPE amt;
    VIDEOINFOHEADER * pvi;
    PIN_INFO piOutput;
    HRESULT hr = S_OK;
    OSErr err;
    static const WCHAR szwVideoOut[] = {'V','i','d','e','o',0};
    CFMutableDictionaryRef pixelBufferOptions = NULL;
    CFMutableDictionaryRef visualContextOptions = NULL;
    CFNumberRef n = NULL;
    int t;
    DWORD outputWidth, outputHeight, outputDepth;
    Fixed trackWidth, trackHeight;
    Media videoMedia;
    long sampleCount;
    TimeValue64 duration;
    TimeScale timeScale;

    ZeroMemory(&amt, sizeof(amt));
    amt.formattype = FORMAT_VideoInfo;
    amt.majortype = MEDIATYPE_Video;
    amt.subtype = MEDIASUBTYPE_RGB24;

    GetTrackDimensions(trk, &trackWidth, &trackHeight);
    outputDepth = 3;
    outputWidth = Fix2Long(trackWidth);
    outputHeight = Fix2Long(trackHeight);
    TRACE("Width %i Height %i\n", outputWidth, outputHeight);

    amt.cbFormat = sizeof(VIDEOINFOHEADER);
    amt.pbFormat = CoTaskMemAlloc(amt.cbFormat);
    ZeroMemory(amt.pbFormat, amt.cbFormat);
    pvi = (VIDEOINFOHEADER *)amt.pbFormat;
    pvi->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
    pvi->bmiHeader.biWidth = outputWidth;
    pvi->bmiHeader.biHeight = outputHeight;
    pvi->bmiHeader.biPlanes = 1;
    pvi->bmiHeader.biBitCount = 24;
    pvi->bmiHeader.biCompression = BI_RGB;
    pvi->bmiHeader.biSizeImage = outputWidth * outputHeight * outputDepth;

    filter->outputSize = pvi->bmiHeader.biSizeImage;
    amt.lSampleSize = 0;

    pixelBufferOptions = CFDictionaryCreateMutable(NULL, 0, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);

    t = k32ARGBPixelFormat;
    n = CFNumberCreate(NULL, kCFNumberIntType, &t);
    CFDictionaryAddValue(pixelBufferOptions, kCVPixelBufferPixelFormatTypeKey, n);
    CFRelease(n);

    n = CFNumberCreate(NULL, kCFNumberIntType, &outputWidth);
    CFDictionaryAddValue(pixelBufferOptions, kCVPixelBufferWidthKey, n);
    CFRelease(n);

    n = CFNumberCreate(NULL, kCFNumberIntType, &outputHeight);
    CFDictionaryAddValue(pixelBufferOptions, kCVPixelBufferHeightKey, n);
    CFRelease(n);

    t = 16;
    n = CFNumberCreate(NULL, kCFNumberIntType, &t);
    CFDictionaryAddValue(pixelBufferOptions, kCVPixelBufferBytesPerRowAlignmentKey, n);
    CFRelease(n);

    visualContextOptions = CFDictionaryCreateMutable(NULL, 0, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
    CFDictionarySetValue(visualContextOptions, kQTVisualContextPixelBufferAttributesKey, pixelBufferOptions);

    err = QTPixelBufferContextCreate(NULL, visualContextOptions, &filter->vContext);
    CFRelease(pixelBufferOptions);
    CFRelease(visualContextOptions);
    if (err != noErr)
    {
        ERR("Failed to create Visual Context\n");
        return E_FAIL;
    }

    err = SetMovieVisualContext(filter->pQTMovie, filter->vContext);
    if (err != noErr)
    {
        ERR("Failed to set Visual Context\n");
        return E_FAIL;
    }

    videoMedia = GetTrackMedia(trk);
    sampleCount = GetMediaSampleCount(videoMedia);
    timeScale = GetMediaTimeScale(videoMedia);
    duration = GetMediaDisplayDuration(videoMedia);
    pvi->AvgTimePerFrame = (100000.0 * sampleCount * timeScale) / duration;

    piOutput.dir = PINDIR_OUTPUT;
    piOutput.pFilter = &filter->filter.IBaseFilter_iface;
    lstrcpyW(piOutput.achName, szwVideoOut);

    hr = QT_AddPin(filter, &piOutput, &amt, TRUE);
    if (FAILED(hr))
        ERR("Failed to add Video Track\n");
    else
        TRACE("Video Pin %p\n", filter->pVideo_Pin);

    return hr;
}
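/*
 * Illustrative sketch (not part of the Wine source above): the pin created in
 * QT_Process_Video_Track advertises MEDIASUBTYPE_RGB24, while the visual context it
 * configures delivers k32ARGBPixelFormat buffers with 16-byte-aligned rows. A delivery
 * path therefore has to lock each CVPixelBuffer, honour its bytes-per-row stride, and
 * convert top-down ARGB into the bottom-up BGR24 layout of the output sample.
 * QT_Copy_ARGB_To_RGB24 is a hypothetical helper, not a Wine function; it assumes the
 * same headers as the file above and that the destination stride matches biSizeImage
 * (width * 3 bytes per row, no padding).
 */
static void QT_Copy_ARGB_To_RGB24(CVPixelBufferRef frame, BYTE *dst, DWORD width, DWORD height)
{
    CVPixelBufferLockBaseAddress(frame, kCVPixelBufferLock_ReadOnly);
    {
        const BYTE *src = CVPixelBufferGetBaseAddress(frame);
        size_t srcStride = CVPixelBufferGetBytesPerRow(frame);
        DWORD dstStride = width * 3;
        DWORD x, y;

        for (y = 0; y < height; y++)
        {
            /* Source rows are top-down ARGB; the RGB24 DIB expects bottom-up BGR. */
            const BYTE *s = src + y * srcStride;
            BYTE *d = dst + (height - 1 - y) * dstStride;
            for (x = 0; x < width; x++)
            {
                d[3 * x + 0] = s[4 * x + 3]; /* B */
                d[3 * x + 1] = s[4 * x + 2]; /* G */
                d[3 * x + 2] = s[4 * x + 1]; /* R */
            }
        }
    }
    CVPixelBufferUnlockBaseAddress(frame, kCVPixelBufferLock_ReadOnly);
}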