Example #1
OSErr QTInfo_ClearPreview (Movie theMovie, MovieController theMC)
{
    long				myCount = 0L;
    long				myIndex = 0L;
    Track				myTrack = NULL;
    long				myUsage = 0L;
    ComponentResult		myErr = noErr;

    // set the movie preview start time and duration to 0
    SetMoviePreviewTime(theMovie, 0, 0);

    // remove all tracks that are used *only* in the movie preview
    myCount = GetMovieTrackCount(theMovie);
    for (myIndex = myCount; myIndex >= 1; myIndex--) {
        myTrack = GetMovieIndTrack(theMovie, myIndex);
        if (myTrack == NULL)
            continue;

        myUsage = GetTrackUsage(myTrack);
        myUsage &= trackUsageInMovie | trackUsageInPreview | trackUsageInPoster;
        if (myUsage == trackUsageInPreview)
            DisposeMovieTrack(myTrack);
    }

    // add trackUsageInPreview to any remaining tracks that are in the movie
    // (so that subsequently setting the preview to a selection will include
    // these tracks)
    myCount = GetMovieTrackCount(theMovie);
    for (myIndex = 1; myIndex <= myCount; myIndex++) {
        myTrack = GetMovieIndTrack(theMovie, myIndex);
        if (myTrack == NULL)
            continue;

        myUsage = GetTrackUsage(myTrack);
        if (myUsage & trackUsageInMovie)
            SetTrackUsage(myTrack, myUsage | trackUsageInPreview);
    }

    myErr = MCMovieChanged(theMC, theMovie);

    return((OSErr)myErr);
}
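A hypothetical call site for the routine above might look like the sketch below. The wrapper name and both parameters are assumptions; the movie and controller would come from the host application (for example via NewMovieFromFile and NewMovieController) and are not part of the original example.

// Hypothetical usage sketch: clear the preview of a movie that is already
// attached to a movie controller, then check the result.
static void MyApp_ClearPreviewExample (Movie myMovie, MovieController myMC)
{
    OSErr myErr = QTInfo_ClearPreview(myMovie, myMC);
    if (myErr != noErr) {
        // handle or report the error as appropriate for the host application
    }
}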
Example #2
File: track.c  Project: one-k/rmov
/*
  call-seq: delete()
  
  Removes the track from its movie and deletes it from memory.
*/
static VALUE track_delete(VALUE obj)
{
  DisposeMovieTrack(TRACK(obj));
  return Qnil;
}
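For context, a binding like this would normally be registered through Ruby's C API so that it shows up as a delete method on the track class. The following is a minimal sketch only; the class name, the cTrack variable, and the init function name are assumptions, not code taken from the rmov project.

#include <ruby.h>

/* Hypothetical registration sketch (assumed names, not from the rmov sources):
 * expose track_delete as an instance method Track#delete taking no arguments. */
void Init_track_binding(void)
{
  VALUE cTrack = rb_define_class("Track", rb_cObject);
  rb_define_method(cTrack, "delete", track_delete, 0);
}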
Example #3
/* Import function for movies that lack an index.
 * Supports progressive importing, but will not idle if maxFrames == 0.
 */
ComponentResult import_with_idle(ff_global_ptr storage, long inFlags, long *outFlags, int minFrames, int maxFrames, bool addSamples) {
    SampleReference64Record sampleRec;
    AVFormatContext *formatContext;
    AVCodecContext *codecContext;
    AVStream *stream;
    AVPacket packet;
    NCStream *ncstream;
    ComponentResult dataResult; //used for data handler operations that can fail.
    ComponentResult result;
    TimeValue minLoadedTime;
    TimeValue movieTimeScale = GetMovieTimeScale(storage->movie);
    int64_t availableSize, margin;
    long idling;
    int readResult, framesProcessed, i;
    int firstPts[storage->map_count];
    short flags;

    formatContext = storage->format_context;
    result = noErr;
    minLoadedTime = -1;
    availableSize = 0;
    idling = (inFlags & movieImportWithIdle);
    framesProcessed = 0;

    if(idling) {
        //get the size of immediately available data
        if(storage->dataHandlerSupportsWideOffsets) {
            wide wideSize;

            dataResult = DataHGetAvailableFileSize64(storage->dataHandler, &wideSize);
            if(dataResult == noErr) availableSize = ((int64_t)wideSize.hi << 32) + wideSize.lo;
        } else {
            long longSize;

            dataResult = DataHGetAvailableFileSize(storage->dataHandler, &longSize);
            if(dataResult == noErr) availableSize = longSize;
        }
    }

    for(i = 0; i < storage->map_count; i++) {
        ncstream = &storage->stream_map[i];
        Media media = ncstream->media;

        firstPts[i] = -1;
        if(media && ncstream->duration == -1)
            ncstream->duration = GetMediaDuration(media);
    }

    while((readResult = av_read_frame(formatContext, &packet)) == 0) {
        bool trustPacketDuration = true;
        int64_t dts = packet.dts;
        ncstream = &storage->stream_map[packet.stream_index];
        stream = ncstream->str;
        codecContext = stream->codec;
        flags = 0;

        // skip streams we can't import; free the packet so it isn't leaked
        if (!ncstream->valid) {
            av_free_packet(&packet);
            continue;
        }

        if((packet.flags & AV_PKT_FLAG_KEY) == 0)
            flags |= mediaSampleNotSync;

        if(IS_NUV(storage->componentType) && codecContext->codec_id == CODEC_ID_MP3) trustPacketDuration = false;
        if(IS_FLV(storage->componentType)) trustPacketDuration = false;

        memset(&sampleRec, 0, sizeof(sampleRec));
        sampleRec.dataOffset.hi = packet.pos >> 32;
        sampleRec.dataOffset.lo = (uint32_t)packet.pos;
        sampleRec.dataSize = packet.size;
        sampleRec.sampleFlags = flags;

        // skip packets without a valid file position; free the packet so it isn't leaked
        if (packet.pos <= 0) {
            av_free_packet(&packet);
            continue;
        }

        if(firstPts[packet.stream_index] < 0)
            firstPts[packet.stream_index] = packet.pts;

        if(packet.size > storage->largestPacketSize)
            storage->largestPacketSize = packet.size;

        // skip empty packets; free the packet so it isn't leaked
        if(sampleRec.dataSize <= 0) {
            av_free_packet(&packet);
            continue;
        }

        if(codecContext->codec_type == AVMEDIA_TYPE_AUDIO && !ncstream->vbr)
            sampleRec.numberOfSamples = (packet.size * ncstream->asbd.mFramesPerPacket) / ncstream->asbd.mBytesPerPacket;
        else
            sampleRec.numberOfSamples = 1; //packet.duration;

        //add any samples waiting to be added
        if(ncstream->lastSample.numberOfSamples > 0) {
            //calculate the duration of the sample before adding it
            ncstream->lastSample.durationPerSample = (dts - ncstream->lastdts) * ncstream->base.num;

            AddMediaSampleReferences64(ncstream->media, ncstream->sampleHdl, 1, &ncstream->lastSample, NULL);
        }

#if 0
        if (0) {
            Codecprintf(NULL, "Stream:%d Pts:%lld Dts:%lld DtsUsed:%lld Pos:%lld Size:%d\n", packet.stream_index, packet.pts, packet.dts, dts, packet.pos, packet.size);
            Codecprintf(NULL, "Stream:%d Nsamples:%ld RealDuration:%d CalcDuration:%ld TimeDts:%lld TimeDurations:%lld FrameDts:%d FrameGuess:%lld\n",
                        packet.stream_index, sampleRec.numberOfSamples, packet.duration, ncstream->lastSample.durationPerSample,
                        packet.dts, ncstream->timeByDurations, (int)((packet.dts * stream->time_base.num * ncstream->asbd.mSampleRate) / stream->time_base.den),
                        ncstream->timeByFrames);

            ncstream->timeByDurations += packet.duration;
            ncstream->timeByFrames += ncstream->asbd.mFramesPerPacket;
        }
#endif

        ncstream->lastSample = sampleRec;
        ncstream->lastdts = packet.dts;

        // If this is a nuv file, then we want to set the duration to zero.
        // This is because the nuv container doesn't have the framesize info
        // for audio.
        if(packet.duration == 0 || !trustPacketDuration) {
            //no duration, we'll have to wait for the next packet to calculate it
            // keep the duration of the last sample, so we can use it if it's the last frame
            sampleRec.durationPerSample = ncstream->lastSample.durationPerSample;
        } else {
            ncstream->lastSample.numberOfSamples = 0;

            if(codecContext->codec_type == AVMEDIA_TYPE_AUDIO && !ncstream->vbr)
                sampleRec.durationPerSample = 1;
            else
                sampleRec.durationPerSample = ncstream->base.num * packet.duration;

            AddMediaSampleReferences64(ncstream->media, ncstream->sampleHdl, 1, &sampleRec, NULL);
        }

        framesProcessed++;

        //if we're idling, try not to read past the end of the available data,
        //otherwise we will cause blocking i/o.
        if(idling && framesProcessed >= minFrames && availableSize > 0 && availableSize < storage->dataSize) {
            margin = availableSize - (packet.pos + packet.size);
            if(margin < (storage->largestPacketSize * 8)) { // 8x fudge factor for comfortable margin, could be tweaked.
                av_free_packet(&packet);
                break;
            }
        }

        av_free_packet(&packet);

        //stop processing if we've hit the max frame limit
        if(maxFrames > 0 && framesProcessed >= maxFrames)
            break;
    }

    if(readResult != 0) {
        //if readResult != 0, we've hit the end of the stream.
        //add any pending last frames.
        for(i = 0; i < formatContext->nb_streams; i++) {
            ncstream = &storage->stream_map[i];
            if(ncstream->lastSample.numberOfSamples > 0)
                AddMediaSampleReferences64(ncstream->media, ncstream->sampleHdl, 1, &ncstream->lastSample, NULL);
        }
    }

    for(i = 0; i < storage->map_count && result == noErr; i++) {
        ncstream = &storage->stream_map[i];
        Media media = ncstream->media;

        if(ncstream->valid && (addSamples || readResult != 0)) {
            Track track = GetMediaTrack(media);
            TimeScale mediaTimeScale = GetMediaTimeScale(media);
            TimeValue prevDuration = ncstream->duration;
            TimeValue mediaDuration = GetMediaDuration(media);
            TimeValue addedDuration = mediaDuration - prevDuration;
            TimeValue mediaLoadedTime = movieTimeScale * mediaDuration / mediaTimeScale;

            if(minLoadedTime == -1 || mediaLoadedTime < minLoadedTime)
                minLoadedTime = mediaLoadedTime;

            if(addedDuration > 0) {
                result = InsertMediaIntoTrack(track, -1, prevDuration, addedDuration, fixed1);
            }

            if (!prevDuration && firstPts[i] > 0) {
                TimeRecord startTimeRec;
                startTimeRec.value.hi = 0;
                startTimeRec.value.lo = firstPts[i] * formatContext->streams[i]->time_base.num;
                startTimeRec.scale = formatContext->streams[i]->time_base.den;
                startTimeRec.base = NULL;
                ConvertTimeScale(&startTimeRec, movieTimeScale);
                SetTrackOffset(track, startTimeRec.value.lo);
            }
            ncstream->duration = -1;
        }
    }

    //set the loaded time to the length of the shortest track.
    if(minLoadedTime > 0)
        storage->loadedTime = minLoadedTime;

    if(readResult != 0) {
        //remove the placeholder track
        if(storage->placeholderTrack != NULL) {
            DisposeMovieTrack(storage->placeholderTrack);
            storage->placeholderTrack = NULL;
        }

        //set the movie load state to complete, as well as mark the import output flag.
        storage->movieLoadState = kMovieLoadStateComplete;
        *outFlags |= movieImportResultComplete;
    } else {
        //if we're not yet done with the import, calculate the movie load state.
        int64_t timeToCompleteFile; //time until the file should be completely available, in terms of AV_TIME_BASE
        long dataRate = 0;

        dataResult = DataHGetDataRate(storage->dataHandler, 0, &dataRate);
        if(dataResult == noErr && dataRate > 0) {
            timeToCompleteFile = (AV_TIME_BASE * (storage->dataSize - availableSize)) / dataRate;

            if(storage->loadedTime > (10 * GetMovieTimeScale(storage->movie)) && timeToCompleteFile < (storage->format_context->duration * .85))
                storage->movieLoadState = kMovieLoadStatePlaythroughOK;
            else
                storage->movieLoadState = kMovieLoadStatePlayable;

        } else {
            storage->movieLoadState = kMovieLoadStatePlayable;
        }

        *outFlags |= movieImportResultNeedIdles;
    }

    send_movie_changed_notification(storage->movie);

    //tell the idle manager to idle us again in 500ms.
    if(idling && storage->idleManager && storage->isStreamed)
        QTIdleManagerSetNextIdleTimeDelta(storage->idleManager, 1, 2);

    return(result);
} /* import_with_idle() */
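When movieImportWithIdle is set, a function like this is presumably invoked repeatedly from the importer's idle handler until it reports completion. A rough driver sketch follows; the wrapper name and the frame limits are illustrative assumptions, and only import_with_idle() itself comes from the example above.

/* Hypothetical idle-step sketch: process a bounded batch of frames per idle
 * call and let import_with_idle() signal completion through outFlags. The
 * 0/64 frame limits are arbitrary illustrative values. */
static ComponentResult my_import_idle_step(ff_global_ptr storage, long inFlags, long *outFlags)
{
    return import_with_idle(storage, inFlags | movieImportWithIdle, outFlags,
                            0,    /* minFrames */
                            64,   /* maxFrames; per the header comment, 0 would mean no idling */
                            true  /* addSamples: insert the new media into the tracks */);
}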
Example #4
/* This function prepares the movie to receive the movie data.
 * On success, storage->stream_map points to a valid stream mapping.
 * Return values:
 *	  0: ok
 */
OSStatus prepare_movie(ff_global_ptr storage, Movie theMovie, Handle dataRef, OSType dataRefType)
{
    int j;
    AVStream *st;
    NCStream *map;
    Track track = NULL;
    Track first_audio_track = NULL;
    AVFormatContext *ic = storage->format_context;
    OSStatus err = noErr;

    /* make the stream map structure */
    map = av_mallocz(ic->nb_streams * sizeof(NCStream));

    for(j = 0; j < ic->nb_streams; j++) {

        st = ic->streams[j];
        map[j].index = st->index;
        map[j].str = st;
        map[j].duration = -1;

        if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            Fixed width, height;

            get_track_dimensions_for_codec(st, &width, &height);
            track = NewMovieTrack(theMovie, width, height, kNoVolume);

            // Support for 'old' NUV files, that didn't put the codec_tag in the file.
            if( st->codec->codec_id == CODEC_ID_NUV && st->codec->codec_tag == 0 ) {
                st->codec->codec_tag = MKTAG( 'N', 'U', 'V', '1' );
            }

            initialize_video_map(&map[j], track, dataRef, dataRefType, storage->firstFrames + j);
            set_track_clean_aperture_ext((ImageDescriptionHandle)map[j].sampleHdl, width, height, IntToFixed(st->codec->width), IntToFixed(st->codec->height));
            set_track_colorspace_ext((ImageDescriptionHandle)map[j].sampleHdl, width, height);
        } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (st->codec->sample_rate > 0) {
                track = NewMovieTrack(theMovie, 0, 0, kFullVolume);
                err = initialize_audio_map(&map[j], track, dataRef, dataRefType, storage->firstFrames + j);

                if (first_audio_track == NULL)
                    first_audio_track = track;
                else
                    SetTrackAlternate(track, first_audio_track);
            }
        } else
            continue;

        // can't import samples unless both of these were created.
        map[j].valid = map[j].media && map[j].sampleHdl;

        if (err) {
            if (track)
                DisposeMovieTrack(track);
            return err;
        }
    }

    add_metadata(ic, theMovie);

    storage->stream_map = map;

    return 0;
} /* prepare_movie() */
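Taken together with example #3, the expected call order appears to be: build the stream map with prepare_movie(), then read and add samples with import_with_idle(). The condensed sketch below illustrates that flow; the wrapper name, flag values, and surrounding setup are assumptions, not taken from the original sources.

/* Hypothetical end-to-end flow sketch combining examples #3 and #4. The
 * parameters are assumed to have been prepared by the importer component
 * before this point. */
static ComponentResult my_import_all(ff_global_ptr storage, Movie theMovie, Handle dataRef, OSType dataRefType)
{
    long outFlags = 0;
    ComponentResult err = prepare_movie(storage, theMovie, dataRef, dataRefType);
    if (err == noErr)
        err = import_with_idle(storage, 0 /* inFlags: no idling */, &outFlags,
                               0 /* minFrames */, 0 /* maxFrames: import everything */, true);
    return err;
}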