Example 1
/*----------------------------------------------------------------------+
|    ATX_RingBuffer_Read
+----------------------------------------------------------------------*/
ATX_Result
ATX_RingBuffer_Read(ATX_RingBuffer* ring,
                    ATX_ByteBuffer  buffer, 
                    ATX_Size        byte_count)
{
    /* NOTE: the caller is expected to have checked that 'byte_count' bytes
       are available; a NULL 'buffer' skips the bytes without copying them */
    if (!byte_count) return ATX_SUCCESS;

    if (ring->in > ring->out) {
        /* the available data is contiguous between 'out' and 'in' */
        if (buffer) {
            ATX_CopyMemory(buffer, ring->out, byte_count);
        }
        ring->out += byte_count;
        if (ring->out == ring->data.end) {
            ring->out = ring->data.start;
        }
    } else {
        /* the data wraps around the end of the buffer: read up to the end
           first, then continue from the start if more remains */
        unsigned int chunk = ring->data.end - ring->out;
        if (chunk >= byte_count) {
            chunk = byte_count;
        }

        if (buffer) {
            ATX_CopyMemory(buffer, ring->out, chunk);
        }
        ring->out += chunk;
        if (ring->out == ring->data.end) {
            ring->out = ring->data.start;
        }
        if (chunk != byte_count) {
            if (buffer) {
                ATX_CopyMemory(buffer+chunk, ring->out, byte_count-chunk);
            }
            ring->out += byte_count-chunk;
            if (ring->out == ring->data.end) {
                ring->out = ring->data.start;
            }
        }
    }

    return ATX_SUCCESS;
}
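
A note on the contract above: the function never verifies that byte_count bytes are actually available, and a NULL buffer simply advances the read pointer. Below is a minimal usage sketch; DrainRingBuffer is a hypothetical helper, and it assumes that companion functions such as ATX_RingBuffer_GetAvailable exist in the same module (they are not shown here).

/* sketch: drain whatever is currently available into a caller-supplied buffer */
static ATX_Result
DrainRingBuffer(ATX_RingBuffer* ring, unsigned char* scratch, ATX_Size scratch_size)
{
    /* ATX_RingBuffer_GetAvailable is assumed; clamp to the scratch size */
    ATX_Size available = ATX_RingBuffer_GetAvailable(ring);
    if (available > scratch_size) available = scratch_size;
    if (available == 0) return ATX_SUCCESS;
    return ATX_RingBuffer_Read(ring, scratch, available);
}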
/*----------------------------------------------------------------------
|   BLT_DecoderServer_PropertyValueWrapper::BLT_DecoderServer_PropertyValueWrapper
+---------------------------------------------------------------------*/
BLT_DecoderServer_PropertyValueWrapper::BLT_DecoderServer_PropertyValueWrapper(
    const ATX_PropertyValue* value)
{
    if (value == NULL) {
        m_Value = NULL;
        return;
    }
    
    m_Value = new ATX_PropertyValue();
    m_Value->type = value->type;
    switch (value->type) {
        case ATX_PROPERTY_VALUE_TYPE_BOOLEAN:
        case ATX_PROPERTY_VALUE_TYPE_FLOAT:
        case ATX_PROPERTY_VALUE_TYPE_INTEGER:
        case ATX_PROPERTY_VALUE_TYPE_LARGE_INTEGER:
        case ATX_PROPERTY_VALUE_TYPE_POINTER:
            m_Value->data = value->data;
            break;

        case ATX_PROPERTY_VALUE_TYPE_STRING:
            if (value->data.string) {
                char* copy = new char[ATX_StringLength(value->data.string)+1];
                ATX_CopyString(copy, value->data.string);
                m_Value->data.string = copy;
            } else {
                m_Value->data.string = NULL;
            }
            break;

        case ATX_PROPERTY_VALUE_TYPE_RAW_DATA:
            if (value->data.raw_data.data &&
                value->data.raw_data.size) {
                m_Value->data.raw_data.size = value->data.raw_data.size;
                m_Value->data.raw_data.data = new unsigned char[value->data.raw_data.size];
                ATX_CopyMemory(m_Value->data.raw_data.data, value->data.raw_data.data, value->data.raw_data.size);
            } else {
                m_Value->data.raw_data.size = 0;
                m_Value->data.raw_data.data = NULL;
            }
            break;
    }
}
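
Since the STRING and RAW_DATA cases deep-copy their payloads with new[], the wrapper needs a matching destructor that releases them. A sketch of that cleanup, assuming the member layout used above (the real class presumably defines this alongside the constructor):

BLT_DecoderServer_PropertyValueWrapper::~BLT_DecoderServer_PropertyValueWrapper()
{
    if (m_Value == NULL) return;
    switch (m_Value->type) {
        case ATX_PROPERTY_VALUE_TYPE_STRING:
            // release the string copied with new char[]
            delete[] (char*)m_Value->data.string;
            break;
        case ATX_PROPERTY_VALUE_TYPE_RAW_DATA:
            // release the buffer copied with new unsigned char[]
            delete[] (unsigned char*)m_Value->data.raw_data.data;
            break;
        default:
            // scalar types do not own any heap data
            break;
    }
    delete m_Value;
}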
/*----------------------------------------------------------------------
|   Mp4ParserOutput_GetPacket
+---------------------------------------------------------------------*/
BLT_METHOD
Mp4ParserOutput_GetPacket(BLT_PacketProducer* _self,
                          BLT_MediaPacket**   packet)
{
    Mp4ParserOutput* self = ATX_SELF(Mp4ParserOutput, BLT_PacketProducer);

    *packet = NULL;
     
    // if we don't have an input yet, we can't produce packets
    //if (self->parser->input.mp4_file == NULL) {
    //    return BLT_ERROR_PORT_HAS_NO_DATA;
    //}
    
    if (self->track == NULL) {
        return BLT_ERROR_EOS;
    } else {
        // check for end-of-stream
        if (self->sample >= self->track->GetSampleCount()) {
            return BLT_ERROR_EOS;
        }

        // read one sample
        AP4_Sample sample;
        AP4_DataBuffer* sample_buffer = self->sample_buffer;
        AP4_Result result;
        if (self->parser->input.reader) {
            // linear reader mode
            result = self->parser->input.reader->ReadNextSample(self->track->GetId(), sample, *sample_buffer);
            if (AP4_SUCCEEDED(result)) self->sample++;
        } else {
            // normal mode
            result = self->track->ReadSample(self->sample++, sample, *sample_buffer);
        }
        if (AP4_FAILED(result)) {
            ATX_LOG_WARNING_1("ReadSample failed (%d)", result);
            if (result == AP4_ERROR_EOS || result == ATX_ERROR_OUT_OF_RANGE) {
                ATX_LOG_WARNING("incomplete media");
                return BLT_ERROR_INCOMPLETE_MEDIA;
            } else {
                return BLT_ERROR_PORT_HAS_NO_DATA;
            }
        }

        // update the sample description if it has changed
        if (sample.GetDescriptionIndex() != self->sample_description_index) {
            result = Mp4ParserOutput_SetSampleDescription(self, sample.GetDescriptionIndex());
            if (BLT_FAILED(result)) return result;
        }
        
        // decrypt the sample if needed
        if (self->sample_decrypter) {
            self->sample_decrypter->DecryptSampleData(*sample_buffer, *self->sample_decrypted_buffer);
            sample_buffer = self->sample_decrypted_buffer;
        }

        AP4_Size packet_size = sample_buffer->GetDataSize();
        result = BLT_Core_CreateMediaPacket(ATX_BASE(self->parser, BLT_BaseMediaNode).core,
                                            packet_size,
                                            (const BLT_MediaType*)self->media_type,
                                            packet);
        if (BLT_FAILED(result)) return result;
        BLT_MediaPacket_SetPayloadSize(*packet, packet_size);
        void* buffer = BLT_MediaPacket_GetPayloadBuffer(*packet);
        ATX_CopyMemory(buffer, sample_buffer->GetData(), packet_size);

        // set the timestamp
        AP4_UI32 media_timescale = self->track->GetMediaTimeScale();
        if (media_timescale) {
            AP4_UI64 ts = ((AP4_UI64)sample.GetCts())*1000000;
            ts /= media_timescale;
            BLT_TimeStamp bt_ts = {
                (BLT_Int32)(ts / 1000000),
                (BLT_Int32)((ts % 1000000)*1000)
            };
            BLT_MediaPacket_SetTimeStamp(*packet, bt_ts);
        }

        // set packet flags
        if (self->sample == 1) {
            BLT_MediaPacket_SetFlags(*packet, BLT_MEDIA_PACKET_FLAG_START_OF_STREAM);
        }

        return BLT_SUCCESS;
    }
}
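
The timestamp computation above first scales the sample's CTS to microseconds using 64-bit arithmetic (to avoid overflow with large timescales), then splits the result into the seconds/nanoseconds pair of a BLT_TimeStamp. The same conversion as a standalone helper; this is a sketch, the helper name is hypothetical, and the field names follow the initializer order used above:

// sketch: convert a composition timestamp expressed in 'timescale' units
// into a BLT_TimeStamp (seconds + nanoseconds)
static BLT_TimeStamp
Mp4Parser_CtsToTimeStamp(AP4_UI64 cts, AP4_UI32 timescale)
{
    BLT_TimeStamp ts = {0, 0};
    if (timescale == 0) return ts;            // no timescale -> no conversion
    AP4_UI64 us = (cts*1000000)/timescale;    // microseconds
    ts.seconds     = (BLT_Int32)(us/1000000);
    ts.nanoseconds = (BLT_Int32)((us%1000000)*1000);
    return ts;
}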
/*----------------------------------------------------------------------
|   Mp4ParserOutput_SetSampleDescription
+---------------------------------------------------------------------*/
static BLT_Result
Mp4ParserOutput_SetSampleDescription(Mp4ParserOutput* self, 
                                     unsigned int     indx)
{
    // if we had a decrypter before, release it now
    delete self->sample_decrypter;
    self->sample_decrypter = NULL;
    
    // check that the audio track is of the right type
    AP4_SampleDescription* sample_desc = self->track->GetSampleDescription(indx);
    if (sample_desc == NULL) {
        ATX_LOG_FINE("no sample description for track");
        return BLT_ERROR_INVALID_MEDIA_FORMAT;
    }
    // handle encrypted tracks
    BLT_Result result = Mp4ParserOutput_ProcessCryptoInfo(self, sample_desc);
    if (BLT_FAILED(result)) return result;
    
    // update the generic part of the stream info
    BLT_StreamInfo stream_info;
    stream_info.id            = self->track->GetId();
    stream_info.duration      = self->track->GetDurationMs();
    stream_info.mask = BLT_STREAM_INFO_MASK_ID |
                       BLT_STREAM_INFO_MASK_DURATION;
    
    // deal with audio details, if this is an audio track
    AP4_AudioSampleDescription* audio_desc = dynamic_cast<AP4_AudioSampleDescription*>(sample_desc);
    if (audio_desc) {
        ATX_LOG_FINE("sample description is audio");
        stream_info.type          = BLT_STREAM_TYPE_AUDIO;
        stream_info.channel_count = audio_desc->GetChannelCount();
        stream_info.sample_rate   = audio_desc->GetSampleRate();
        stream_info.mask |= BLT_STREAM_INFO_MASK_TYPE          |
                            BLT_STREAM_INFO_MASK_CHANNEL_COUNT |
                            BLT_STREAM_INFO_MASK_SAMPLE_RATE;
    } else if (self == &self->parser->audio_output) {
        ATX_LOG_FINE("expected audio sample description, but did not get one");
        return BLT_ERROR_INVALID_MEDIA_FORMAT;
    }

    AP4_VideoSampleDescription* video_desc = dynamic_cast<AP4_VideoSampleDescription*>(sample_desc);
    if (video_desc) {
        ATX_LOG_FINE("sample description is video");
        stream_info.type     = BLT_STREAM_TYPE_VIDEO;
        stream_info.width    = video_desc->GetWidth();
        stream_info.height   = video_desc->GetHeight();
        stream_info.mask |= BLT_STREAM_INFO_MASK_TYPE     |
                            BLT_STREAM_INFO_MASK_WIDTH    |
                            BLT_STREAM_INFO_MASK_HEIGHT;
    } else if (self == &self->parser->video_output) {
        ATX_LOG_FINE("expected video sample descriton, but did not get one");
        return BLT_ERROR_INVALID_MEDIA_FORMAT;
    }
    
    AP4_MpegSampleDescription* mpeg_desc = NULL;
    if (sample_desc->GetType() == AP4_SampleDescription::TYPE_MPEG) {
        ATX_LOG_FINE("sample description is of type MPEG");
        mpeg_desc = dynamic_cast<AP4_MpegSampleDescription*>(sample_desc);
    }
    if (mpeg_desc) {
        stream_info.data_type       = mpeg_desc->GetObjectTypeString(mpeg_desc->GetObjectTypeId());
        stream_info.average_bitrate = mpeg_desc->GetAvgBitrate();
        stream_info.nominal_bitrate = mpeg_desc->GetAvgBitrate();
        stream_info.mask |= BLT_STREAM_INFO_MASK_AVERAGE_BITRATE |
                            BLT_STREAM_INFO_MASK_NOMINAL_BITRATE |
                            BLT_STREAM_INFO_MASK_DATA_TYPE;
    }
    
    // setup the output media type
    AP4_DataBuffer  decoder_info;
    BLT_MediaTypeId media_type_id = BLT_MEDIA_TYPE_ID_NONE;
    AP4_UI32        format_or_object_type_id = 0;
    if (mpeg_desc) {
        decoder_info.SetData(mpeg_desc->GetDecoderInfo().GetData(),
                             mpeg_desc->GetDecoderInfo().GetDataSize());
        media_type_id = self->mp4_es_type_id;
        format_or_object_type_id = mpeg_desc->GetObjectTypeId();
    } else {
        // here we have to be format-specific for the decoder info
        stream_info.data_type = AP4_GetFormatName(sample_desc->GetFormat());
        stream_info.mask |= BLT_STREAM_INFO_MASK_DATA_TYPE;
        format_or_object_type_id = sample_desc->GetFormat();
        if (sample_desc->GetFormat() == AP4_SAMPLE_FORMAT_AVC1) {
            // look for an 'avcC' atom
            AP4_AvccAtom* avcc = static_cast<AP4_AvccAtom*>(sample_desc->GetDetails().GetChild(AP4_ATOM_TYPE_AVCC));
            if (avcc) {
                // pass the avcc payload as the decoder info
                decoder_info.SetData(avcc->GetRawBytes().GetData(),
                                     avcc->GetRawBytes().GetDataSize());
            } 
        } else if (sample_desc->GetFormat() == AP4_SAMPLE_FORMAT_ALAC) {
            // look for an 'alac' atom (either top-level or inside a 'wave') 
            AP4_Atom* alac = sample_desc->GetDetails().GetChild(AP4_SAMPLE_FORMAT_ALAC);
            if (alac == NULL) {
                AP4_ContainerAtom* wave = dynamic_cast<AP4_ContainerAtom*>(sample_desc->GetDetails().GetChild(AP4_ATOM_TYPE_WAVE));
                if (wave) {
                    alac = wave->GetChild(AP4_SAMPLE_FORMAT_ALAC);
                }
            }
            if (alac) {
                // pass the alac payload as the decoder info
                AP4_MemoryByteStream* mbs = new AP4_MemoryByteStream((AP4_Size)alac->GetSize());
                alac->WriteFields(*mbs);
                decoder_info.SetData(mbs->GetData(), mbs->GetDataSize());                
                mbs->Release();
            } 
        }
        
        media_type_id = self->iso_base_es_type_id;
    }
    BLT_Mp4MediaType* media_type = NULL;
    unsigned int struct_size = decoder_info.GetDataSize()?decoder_info.GetDataSize()-1:0;
    if (audio_desc) {
        struct_size += sizeof(BLT_Mp4AudioMediaType);
        BLT_Mp4AudioMediaType* audio_type = (BLT_Mp4AudioMediaType*)ATX_AllocateZeroMemory(struct_size);
        audio_type->base.stream_type    = BLT_MP4_STREAM_TYPE_AUDIO;
        audio_type->channel_count       = audio_desc->GetChannelCount();
        audio_type->sample_rate         = audio_desc->GetSampleRate();
        audio_type->decoder_info_length = decoder_info.GetDataSize();
        if (decoder_info.GetDataSize()) {
            ATX_CopyMemory(&audio_type->decoder_info[0], decoder_info.GetData(), decoder_info.GetDataSize());
        }
        media_type = &audio_type->base;
    } else {
        struct_size += sizeof(BLT_Mp4VideoMediaType);
        BLT_Mp4VideoMediaType* video_type = (BLT_Mp4VideoMediaType*)ATX_AllocateZeroMemory(struct_size);
        video_type->base.stream_type    = BLT_MP4_STREAM_TYPE_VIDEO;
        video_type->width               = video_desc->GetWidth();
        video_type->height              = video_desc->GetHeight();
        video_type->decoder_info_length = decoder_info.GetDataSize();
        if (decoder_info.GetDataSize()) {
            ATX_CopyMemory(&video_type->decoder_info[0], decoder_info.GetData(), decoder_info.GetDataSize());
        }
        media_type = &video_type->base;
    }
    media_type->base.id                  = media_type_id;
    media_type->base.extension_size      = struct_size-sizeof(BLT_MediaType); 
    media_type->format_or_object_type_id = format_or_object_type_id;
    self->media_type = &media_type->base;
    self->sample_description_index = indx;
    
    // final update to the stream info
    BLT_Stream_SetInfo(ATX_BASE(self->parser, BLT_BaseMediaNode).context, &stream_info);
    
    // enable the track in the linear reader if we have one
    if (self->parser->input.reader) {
        self->parser->input.reader->EnableTrack(self->track->GetId());
    }
    
    return BLT_SUCCESS;    
}
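
One detail worth calling out in the allocation above: BLT_Mp4AudioMediaType and BLT_Mp4VideoMediaType appear to end with a one-byte decoder_info[] placeholder, which is why decoder_info.GetDataSize()-1 (rather than the full size) is added to sizeof() before allocating. A reduced sketch of that variable-size struct pattern, using a hypothetical type so the arithmetic is easier to see:

/* sketch: the variable-size struct pattern used above ('DecoderInfoBlob'
   is a hypothetical type, not part of BlueTune) */
typedef struct {
    unsigned int  decoder_info_length;
    unsigned char decoder_info[1];   /* actually decoder_info_length bytes */
} DecoderInfoBlob;

static DecoderInfoBlob*
DecoderInfoBlob_Create(const unsigned char* info, unsigned int info_size)
{
    /* the trailing [1] already accounts for one byte, hence the -1 */
    unsigned int size = sizeof(DecoderInfoBlob) + (info_size ? info_size-1 : 0);
    DecoderInfoBlob* blob = (DecoderInfoBlob*)ATX_AllocateZeroMemory(size);
    if (blob == NULL) return NULL;
    blob->decoder_info_length = info_size;
    if (info_size) ATX_CopyMemory(blob->decoder_info, info, info_size);
    return blob;
}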
Example 5
/*----------------------------------------------------------------------
|    SdlVideoOutput_PutPacket
+---------------------------------------------------------------------*/
BLT_METHOD
SdlVideoOutput_PutPacket(BLT_PacketConsumer* _self,
                         BLT_MediaPacket*    packet)
{
    SdlVideoOutput*              self = ATX_SELF(SdlVideoOutput, BLT_PacketConsumer);
    unsigned char*               pixel_data = (unsigned char*)BLT_MediaPacket_GetPayloadBuffer(packet);
    const BLT_RawVideoMediaType* media_type;
    unsigned int                 plane;
    SDL_Rect                     rect;
    
    /* check the media type */
    BLT_MediaPacket_GetMediaType(packet, (const BLT_MediaType**)&media_type);
    if (media_type->base.id != BLT_MEDIA_TYPE_ID_VIDEO_RAW) {
        ATX_LOG_FINE_1("rejecting media type id %d", media_type->base.id);
        return BLT_ERROR_INVALID_MEDIA_TYPE;
    }
    if (media_type->format != BLT_PIXEL_FORMAT_YV12) {
        ATX_LOG_FINE_1("rejecting pixel format %d", media_type->format);
        return BLT_ERROR_INVALID_MEDIA_TYPE;
    }

    /* resize/create the window and overlay if needed */
    if (self->yuv_overlay == NULL                 || 
        self->yuv_overlay->w != media_type->width ||
        self->yuv_overlay->h != media_type->height) {
        self->screen = SDL_SetVideoMode(media_type->width, media_type->height, 24, SDL_HWSURFACE | SDL_RESIZABLE);
        if (self->screen == NULL) {
            ATX_LOG_WARNING("SDL_SetVideoMode() failed");
            return BLT_FAILURE;
        }
        self->yuv_overlay = SDL_CreateYUVOverlay(media_type->width, media_type->height, SDL_YV12_OVERLAY, self->screen);
        if (self->yuv_overlay == NULL) {
            ATX_LOG_WARNING("SDL_CreateYUVOverlay() failed");
            return BLT_FAILURE;
        }
        
    }
    
    /* transfer the pixels */
    SDL_LockYUVOverlay(self->yuv_overlay);
    for (plane=0; plane<3; plane++) {
        unsigned int   plane_width  = (plane==0?media_type->width:(media_type->width/2));
        unsigned int   plane_height = (plane==0?media_type->height:(media_type->height/2));
        unsigned char* src          = pixel_data+media_type->planes[plane].offset;
        unsigned int   src_pitch    = media_type->planes[plane].bytes_per_line;
        /* chroma planes 1 and 2 are swapped: the packet carries Y,U,V
           while the YV12 overlay stores Y,V,U */
        unsigned char* dst          = self->yuv_overlay->pixels[plane==0?0:3-plane];
        unsigned int   dst_pitch    = self->yuv_overlay->pitches[plane==0?0:3-plane];
        while (plane_height--) {
            ATX_CopyMemory(dst, src,  plane_width);
            src += src_pitch;
            dst += dst_pitch;
        }
    }
    SDL_UnlockYUVOverlay(self->yuv_overlay);

    rect.x = 0;
    rect.y = 0;
    rect.w = self->screen->w;
    rect.h = self->screen->h;
    SDL_DisplayYUVOverlay(self->yuv_overlay, &rect);
    
    {
        ATX_TimeStamp now;
        ATX_System_GetCurrentTimeStamp(&now);
        if (ATX_TimeStamp_IsLaterOrEqual(self->next_display_time, now)) { 
            ATX_TimeInterval delta;
            ATX_TimeStamp_Sub(delta, self->next_display_time, now);
            /* sanity check */
            if (delta.seconds == 0 && delta.nanoseconds > 1000000) {
                ATX_System_Sleep(&delta);
            }
        } else {
            self->next_display_time = now;
        }
        {
            ATX_TimeStamp frame_duration = {0, 41708000};
            ATX_TimeStamp_Add(self->next_display_time, self->next_display_time, frame_duration);
        }
    }
    
    return BLT_SUCCESS;
}
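
Two details in the pixel transfer above are easy to miss: the chroma planes are half the luma plane's width and height (4:2:0 subsampling), and the U/V index swap noted in the comment inside the loop. The per-plane transfer itself is just a pitch-respecting row copy, shown here as a standalone sketch (CopyPlane is a hypothetical helper, not part of the module):

/* sketch: copy one plane row by row, honoring both the source and
   destination pitches (same logic as the inner loop above) */
static void
CopyPlane(unsigned char* dst, unsigned int dst_pitch,
          const unsigned char* src, unsigned int src_pitch,
          unsigned int width, unsigned int height)
{
    while (height--) {
        ATX_CopyMemory(dst, src, width);
        src += src_pitch;
        dst += dst_pitch;
    }
}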
/*----------------------------------------------------------------------
|    OsxAudioUnitsOutput_RenderCallback
+---------------------------------------------------------------------*/
static OSStatus     
OsxAudioUnitsOutput_RenderCallback(void*                       inRefCon,
                                   AudioUnitRenderActionFlags* ioActionFlags,
                                   const AudioTimeStamp*       inTimeStamp,
                                   UInt32                      inBusNumber,
                                   UInt32                      inNumberFrames,
                                   AudioBufferList*            ioData)
{
    OsxAudioUnitsOutput* self = (OsxAudioUnitsOutput*)inRefCon;
    ATX_ListItem*        item;
    unsigned int         requested;
    unsigned char*       out;
    ATX_Boolean          timestamp_measured = ATX_FALSE;
    
    BLT_COMPILER_UNUSED(ioActionFlags);
    BLT_COMPILER_UNUSED(inTimeStamp);
    BLT_COMPILER_UNUSED(inBusNumber);
    BLT_COMPILER_UNUSED(inNumberFrames);
                
    /* sanity check on the parameters */
    if (ioData == NULL || ioData->mNumberBuffers == 0) return 0;
    
    /* in case we have a strange request with more than one buffer, just return silence */
    if (ioData->mNumberBuffers != 1) {
        unsigned int i;
        ATX_LOG_FINEST_1("strange request with %d buffers", 
                         (int)ioData->mNumberBuffers);
        for (i=0; i<ioData->mNumberBuffers; i++) {
            ATX_SetMemory(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
        }
        return 0;
    }
        
    /* init local variables */
    requested = ioData->mBuffers[0].mDataByteSize;
    out = (unsigned char*)ioData->mBuffers[0].mData;
    ATX_LOG_FINEST_2("request for %d bytes, %d frames", (int)requested, (int)inNumberFrames);

    /* lock the packet queue */
    pthread_mutex_lock(&self->lock);
    
    /* return now if we're paused */
    //if (self->paused) goto end;
    
    /* abort early if we have no packets */
    if (ATX_List_GetItemCount(self->packet_queue) == 0) goto end;
        
    /* fill as much as we can */
    while (requested && (item = ATX_List_GetFirstItem(self->packet_queue))) {
        BLT_MediaPacket*        packet = ATX_ListItem_GetData(item);
        const BLT_PcmMediaType* media_type;
        BLT_Size                payload_size;
        BLT_Size                chunk_size;
        BLT_TimeStamp           chunk_duration;
        BLT_TimeStamp           packet_ts;
        unsigned int            bytes_per_frame;
        unsigned int            sample_rate;
        
        /* get the packet info */
        BLT_MediaPacket_GetMediaType(packet, (const BLT_MediaType**)&media_type);
        packet_ts = BLT_MediaPacket_GetTimeStamp(packet);
        bytes_per_frame = media_type->channel_count*media_type->bits_per_sample/8;
        sample_rate = media_type->sample_rate;
        
        /* record the timestamp if we have not already done so */
        if (!timestamp_measured) {
            self->media_time_snapshot.rendered_packet_ts = packet_ts;
            self->media_time_snapshot.rendered_host_time = 
                AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
            BLT_TimeStamp_Set(self->media_time_snapshot.rendered_duration, 0, 0);
            timestamp_measured = ATX_TRUE;
            
            ATX_LOG_FINEST_2("rendered TS: packet ts=%lld, host ts=%lld",
                             BLT_TimeStamp_ToNanos(packet_ts),
                             self->media_time_snapshot.rendered_host_time);
        }
         
        /* compute how much to copy from this packet */
        payload_size = BLT_MediaPacket_GetPayloadSize(packet);
        if (payload_size <= requested) {
            /* copy the entire payload and remove the packet from the queue */
            chunk_size = payload_size;
            ATX_CopyMemory(out, BLT_MediaPacket_GetPayloadBuffer(packet), chunk_size);
            ATX_List_RemoveItem(self->packet_queue, item);
            packet = NULL;
            media_type = NULL;
            ATX_LOG_FINER_1("media packet fully consumed, %d left in queue",
                            ATX_List_GetItemCount(self->packet_queue));
        } else {
            /* only copy a portion of the payload */
            chunk_size = requested;
            ATX_CopyMemory(out, BLT_MediaPacket_GetPayloadBuffer(packet), chunk_size);            
        }
        
        /* update the counters */
        requested -= chunk_size;
        out       += chunk_size;
        
        /* update the media time snapshot */
        if (bytes_per_frame) {
            unsigned int frames_in_chunk = chunk_size/bytes_per_frame;
            chunk_duration = BLT_TimeStamp_FromSamples(frames_in_chunk, sample_rate);
        } else {
            BLT_TimeStamp_Set(chunk_duration, 0, 0);
        }
        self->media_time_snapshot.rendered_duration = 
            BLT_TimeStamp_Add(self->media_time_snapshot.rendered_duration, chunk_duration);
        
        /* update the packet unless we're done with it */
        if (packet) {
            /* update the packet offset and timestamp */
            BLT_MediaPacket_SetPayloadOffset(packet, BLT_MediaPacket_GetPayloadOffset(packet)+chunk_size);
            BLT_MediaPacket_SetTimeStamp(packet, BLT_TimeStamp_Add(packet_ts, chunk_duration));
        }
    }
   
end:
    /* fill whatever is left with silence */    
    if (requested) {
        ATX_LOG_FINEST_1("filling with %d bytes of silence", requested);
        ATX_SetMemory(out, 0, requested);
    }
    
    pthread_mutex_unlock(&self->lock);
        
    return 0;
}
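
The callback above only consumes from self->packet_queue; some producer must append packets under the same self->lock for the queue accesses to stay consistent. A sketch of what that enqueue path could look like, assuming ATX_List_AddData and BLT_MediaPacket_AddReference behave as their names suggest; the module's actual BLT_PacketConsumer PutPacket method presumably also handles media-type checks and queue back-pressure, which this sketch omits:

/* sketch: the producer side of the queue drained by the render callback */
static ATX_Result
OsxAudioUnitsOutput_QueuePacket(OsxAudioUnitsOutput* self, BLT_MediaPacket* packet)
{
    ATX_Result result;

    pthread_mutex_lock(&self->lock);
    result = ATX_List_AddData(self->packet_queue, packet);
    if (ATX_SUCCEEDED(result)) {
        /* the queue keeps its own reference to the packet */
        BLT_MediaPacket_AddReference(packet);
    }
    pthread_mutex_unlock(&self->lock);

    return result;
}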