Example #1: finish_video()
static OSStatus finish_video(void)
{
    video_ready = 0;
    
    // ----- PixelBuffer -----
    CVPixelBufferRelease(pixelBuffer);

    // ----- Codec -----
    
    OSStatus theError = ICMCompressionSessionCompleteFrames(videoCompressionSession, true, 0, 0);
    if (theError)
        log_debug("quicktime_video: error completing frames!");
        
    ICMCompressionSessionRelease(videoCompressionSession);

    // ----- Movie -----

    //End media editing
    theError = EndMediaEdits(videoMedia);
    if (theError)
        log_debug("quicktime_video: error ending media edits");

    theError = ExtendMediaDecodeDurationToDisplayEndTime(videoMedia, NULL);
    if (theError)
        log_debug("quicktime_video: error setting decode duration!");

    //Add media to track
    theError = InsertMediaIntoTrack(videoTrack, 0, 0, GetMediaDisplayDuration(videoMedia), fixed1);
    if (theError)
        log_debug("quicktime_video: error inserting media into track!");

    videoTrack = NULL;
    videoMedia = NULL;
    return theError;
}
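For context, the teardown above assumes a compression session and video media that were created earlier. The sketch below shows, with the QuickTime 7 ICM API, one way that setup and the per-frame encode path might look; the codec type, the time-scale handling, and the helper names setup_video_session(), push_frame() and encoded_frame_output() are illustrative assumptions, not code from the original project.

/* Minimal sketch, assuming a macOS build against the QuickTime 7 ICM API.
   Only videoCompressionSession and videoMedia correspond to the globals used
   in finish_video() above; everything else is illustrative. */
#include <QuickTime/QuickTime.h>

static ICMCompressionSessionRef videoCompressionSession;
static Media videoMedia;

/* Called by the ICM whenever it finishes encoding a frame. */
static OSStatus encoded_frame_output(void *refCon, ICMCompressionSessionRef session,
                                     OSStatus error, ICMEncodedFrameRef frame, void *reserved)
{
    if (error)
        return error;
    /* Append the encoded frame to the movie's video media. */
    return AddMediaSampleFromEncodedFrame(videoMedia, frame, NULL);
}

static OSStatus setup_video_session(int width, int height, TimeScale timeScale)
{
    ICMEncodedFrameOutputRecord output = { encoded_frame_output, NULL, NULL };

    return ICMCompressionSessionCreate(kCFAllocatorDefault, width, height,
                                       kH264CodecType, timeScale,
                                       NULL,      /* default compression options */
                                       NULL,      /* no pixel-buffer attributes */
                                       &output, &videoCompressionSession);
}

/* One frame per call; finish_video() above later flushes the session with
   ICMCompressionSessionCompleteFrames(). */
static OSStatus push_frame(CVPixelBufferRef frame, TimeValue64 displayTime, TimeValue64 frameDuration)
{
    return ICMCompressionSessionEncodeFrame(videoCompressionSession, frame,
                                            displayTime, frameDuration,
                                            kICMValidTime_DisplayTimeStampIsValid |
                                            kICMValidTime_DisplayDurationIsValid,
                                            NULL, NULL, NULL);
}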
Example #2: finish_audio()
void finish_audio(void)
{
    OSStatus theError;
    
    // flush buffer
    if (audioBuffer.used > 0)
        encode_audio(&audioBuffer);
    
    //End media editing
    theError = EndMediaEdits(audioMedia);
    if (theError)
        log_debug("quicktime_audio: error ending media edits");

    theError = ExtendMediaDecodeDurationToDisplayEndTime(audioMedia, NULL);
    if (theError)
        log_debug("quicktime_audio: error setting decode duration!");

    //Add media to track
    theError = InsertMediaIntoTrack(audioTrack, 0, 0, GetMediaDisplayDuration(audioMedia), fixed1);
    if (theError)
        log_debug("quicktime_audio: error inserting media into track!");

    audioTrack = NULL;
    audioMedia = NULL;
    
    DisposeHandle((Handle)soundDescriptionHandle);
    soundDescriptionHandle = NULL;

    // free buffer
    if (audioBuffer.buffer != NULL) {
        free(audioBuffer.buffer);
        audioBuffer.buffer = NULL;
    }

    audio_ready = 0;
}
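The audio teardown mirrors a setup that must have created the sound track, its media, and an edit session. Below is a minimal sketch of that setup with the Movie Toolbox; theMovie, the sampleRate parameter, and the helper name setup_audio_track() are assumptions for illustration, not code from the original project.

/* Minimal sketch, assuming a movie already open for writing. audioTrack and
   audioMedia correspond to the globals used in finish_audio() above. */
#include <QuickTime/QuickTime.h>

static Track audioTrack;
static Media audioMedia;

static OSStatus setup_audio_track(Movie theMovie, TimeScale sampleRate)
{
    /* Sound-only track: zero width and height, full volume. */
    audioTrack = NewMovieTrack(theMovie, 0, 0, kFullVolume);
    if (audioTrack == NULL)
        return GetMoviesError();

    /* Using the sample rate as the media time scale makes one media unit equal one sample. */
    audioMedia = NewTrackMedia(audioTrack, SoundMediaType, sampleRate, NULL, 0);
    if (audioMedia == NULL)
        return GetMoviesError();

    /* finish_audio() above closes this edit session with EndMediaEdits(). */
    return BeginMediaEdits(audioMedia);
}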
Example #3: QT_Process_Video_Track()
static HRESULT QT_Process_Video_Track(QTSplitter* filter, Track trk)
{
    AM_MEDIA_TYPE amt;
    VIDEOINFOHEADER * pvi;
    PIN_INFO piOutput;
    HRESULT hr = S_OK;
    OSErr err;
    static const WCHAR szwVideoOut[] = {'V','i','d','e','o',0};
    CFMutableDictionaryRef  pixelBufferOptions = NULL;
    CFMutableDictionaryRef  visualContextOptions = NULL;
    CFNumberRef n = NULL;
    int t;
    DWORD outputWidth, outputHeight, outputDepth;
    Fixed trackWidth, trackHeight;
    Media videoMedia;
    long sampleCount;
    TimeValue64 duration;
    TimeScale timeScale;

    ZeroMemory(&amt, sizeof(amt));
    amt.formattype = FORMAT_VideoInfo;
    amt.majortype = MEDIATYPE_Video;
    amt.subtype = MEDIASUBTYPE_RGB24;

    GetTrackDimensions(trk, &trackWidth, &trackHeight);

    outputDepth = 3;
    outputWidth = Fix2Long(trackWidth);
    outputHeight = Fix2Long(trackHeight);
    TRACE("Width %i  Height %i\n",outputWidth, outputHeight);

    amt.cbFormat = sizeof(VIDEOINFOHEADER);
    amt.pbFormat = CoTaskMemAlloc(amt.cbFormat);
    if (!amt.pbFormat)
        return E_OUTOFMEMORY;
    ZeroMemory(amt.pbFormat, amt.cbFormat);
    pvi = (VIDEOINFOHEADER *)amt.pbFormat;
    pvi->bmiHeader.biSize = sizeof (BITMAPINFOHEADER);
    pvi->bmiHeader.biWidth = outputWidth;
    pvi->bmiHeader.biHeight = outputHeight;
    pvi->bmiHeader.biPlanes = 1;
    pvi->bmiHeader.biBitCount = 24;
    pvi->bmiHeader.biCompression = BI_RGB;
    pvi->bmiHeader.biSizeImage = outputWidth * outputHeight * outputDepth;

    filter->outputSize = pvi->bmiHeader.biSizeImage;
    amt.lSampleSize = 0;

    pixelBufferOptions = CFDictionaryCreateMutable(NULL, 0, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);

    t = k32ARGBPixelFormat;
    n = CFNumberCreate(NULL, kCFNumberIntType, &t);
    CFDictionaryAddValue(pixelBufferOptions, kCVPixelBufferPixelFormatTypeKey, n);
    CFRelease(n);

    n = CFNumberCreate(NULL, kCFNumberIntType, &outputWidth);
    CFDictionaryAddValue(pixelBufferOptions, kCVPixelBufferWidthKey, n);
    CFRelease(n);

    n = CFNumberCreate(NULL, kCFNumberIntType, &outputHeight);
    CFDictionaryAddValue(pixelBufferOptions, kCVPixelBufferHeightKey, n);
    CFRelease(n);

    t = 16;
    n = CFNumberCreate(NULL, kCFNumberIntType, &t);
    CFDictionaryAddValue(pixelBufferOptions, kCVPixelBufferBytesPerRowAlignmentKey, n);
    CFRelease(n);

    visualContextOptions = CFDictionaryCreateMutable(NULL, 0, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);

    CFDictionarySetValue(visualContextOptions, kQTVisualContextPixelBufferAttributesKey, pixelBufferOptions);

    err = QTPixelBufferContextCreate(NULL, visualContextOptions, &filter->vContext);
    CFRelease(pixelBufferOptions);
    CFRelease(visualContextOptions);
    if (err != noErr)
    {
        ERR("Failed to create Visual Context\n");
        return E_FAIL;
    }

    err = SetMovieVisualContext(filter->pQTMovie, filter->vContext);
    if (err != noErr)
    {
        ERR("Failed to set Visual Context\n");
        return E_FAIL;
    }

    videoMedia = GetTrackMedia(trk);
    sampleCount = GetMediaSampleCount(videoMedia);
    timeScale = GetMediaTimeScale(videoMedia);
    duration = GetMediaDisplayDuration(videoMedia);
    /* AvgTimePerFrame is in 100-nanosecond units: total display time divided by the frame count. */
    pvi->AvgTimePerFrame = (REFERENCE_TIME)((10000000.0 * duration) / ((double)timeScale * sampleCount));

    piOutput.dir = PINDIR_OUTPUT;
    piOutput.pFilter = &filter->filter.IBaseFilter_iface;
    lstrcpyW(piOutput.achName,szwVideoOut);

    hr = QT_AddPin(filter, &piOutput, &amt, TRUE);
    if (FAILED(hr))
        ERR("Failed to add Video Track\n");
    else
        TRACE("Video Pin %p\n",filter->pVideo_Pin);

    return hr;
}
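Once the pixel-buffer visual context has been attached to the movie as above, decoded frames are pulled out of it rather than pushed by QuickTime. The loop below is not part of the Wine filter; it is a minimal sketch of how such a context is typically drained, with the pump_frames() helper and its frame_callback parameter as illustrative assumptions.

/* Minimal sketch of draining a QTVisualContext created with
   QTPixelBufferContextCreate(), as in QT_Process_Video_Track() above. */
#include <QuickTime/QuickTime.h>

typedef void (*frame_callback)(CVPixelBufferRef frame);

static void pump_frames(Movie movie, QTVisualContextRef vContext, frame_callback deliver)
{
    CVPixelBufferRef pixelBuffer = NULL;
    OSStatus err;

    /* Give QuickTime time to decode, then service the visual context. */
    MoviesTask(movie, 0);
    QTVisualContextTask(vContext);

    /* A NULL time stamp asks for the frame that should be displayed "now". */
    if (QTVisualContextIsNewImageAvailable(vContext, NULL))
    {
        err = QTVisualContextCopyImageForTime(vContext, NULL, NULL, &pixelBuffer);
        if (err == noErr && pixelBuffer)
        {
            deliver(pixelBuffer);           /* hand the frame downstream */
            CVPixelBufferRelease(pixelBuffer);
        }
    }
}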