Example #1
/*----------------------------------------------------------------------
|    BtPlayerServer::BtPlayerServer
+---------------------------------------------------------------------*/
BtPlayerServer::BtPlayerServer(const char* web_root, unsigned int port) :
    m_WebRoot(web_root),
    m_DecoderState(BLT_DecoderServer::STATE_STOPPED)
{
    // initialize status fields
    ATX_SetMemory(&m_StreamInfo, 0, sizeof(m_StreamInfo));
    ATX_Properties_Create(&m_CoreProperties);
    ATX_Properties_Create(&m_StreamProperties);
    
    // set ourselves as the player event listener
    m_Player.SetEventListener(this);
    
    // create the http server
    m_HttpServer = new NPT_HttpServer(port);
    
    // attach a file handler for the ajax player
    m_HttpServer->AddRequestHandler(new NPT_HttpFileRequestHandler("/control/ajax", web_root, true, "index.html"),
                                    "/control/ajax",
                                    true);
    
    // attach ourselves as a dynamic handler for the form control
    m_HttpServer->AddRequestHandler(this, "/control/form", false);
    
    // attach ourselves as a dynamic handler for commands
    m_HttpServer->AddRequestHandler(this, "/player", true);

    // attach ourselves as the top level handler
    m_HttpServer->AddRequestHandler(this, "/", false);
}
Example #2
/*----------------------------------------------------------------------
|   BLT_RawVideoMediaType_Init
+---------------------------------------------------------------------*/
void
BLT_RawVideoMediaType_Init(BLT_RawVideoMediaType* media_type)
{
    ATX_SetMemory(media_type, 0, sizeof(*media_type));
    media_type->base.id = BLT_MEDIA_TYPE_ID_VIDEO_RAW;
    media_type->base.extension_size = sizeof(BLT_RawVideoMediaType)-sizeof(BLT_MediaType);
}
Example #3
/*----------------------------------------------------------------------
|    Test
+---------------------------------------------------------------------*/
static void
Test(int buffer_size, int source_size)
{
    ATX_MemoryStream* memory;
    ATX_InputStream*  source;
    ATX_OutputStream* source_buffer;
    ATX_InputStream*  stream;
    ATX_Offset        offset = 0;
    int               i; 
    unsigned char     scratch[4096];
    ATX_Boolean       expect_eos = ATX_FALSE;
    ATX_Result        result;

    /* create and setup the source */
    ATX_MemoryStream_Create(source_size, &memory);
    ATX_MemoryStream_GetOutputStream(memory, &source_buffer);
    ATX_MemoryStream_GetInputStream(memory, &source);
    for (i=0; i<source_size; i++) {
        unsigned char x = (unsigned char)i;
        ATX_OutputStream_Write(source_buffer, &x, 1, NULL);        
    }
    ATX_OutputStream_Seek(source_buffer, 0);
    ATX_RELEASE_OBJECT(source_buffer);
    ATX_MemoryStream_Destroy(memory);

    /* create the stream */
    BLT_NetworkStream_Create(buffer_size, source, &stream);
    offset = 0;
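    /* read the data back in random-sized chunks, with occasional random seeks, and verify the contents */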
    for (;;) {
        ATX_Size bytes_read = 0;
        ATX_Size chunk;
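        /* pick a chunk size: usually small, occasionally large (possibly past the end of the source) */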
        if ((ATX_System_GetRandomInteger()%7) == 0) {
            chunk = ATX_System_GetRandomInteger()%(10+source_size);
        } else {
            chunk = ATX_System_GetRandomInteger()%7;
        }
        if (chunk > sizeof(scratch)) chunk = sizeof(scratch); /* keep the read within the scratch buffer */
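        /* about one time in five, seek to a random position; if the seek fails, seek back to the current offset */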
        if ((ATX_System_GetRandomInteger()%5) == 0) {
            ATX_Position position = ATX_System_GetRandomInteger()%(10+source_size);
            ATX_Position new_position;
            ATX_Result   result = ATX_InputStream_Seek(stream, position);
            if (ATX_SUCCEEDED(result)) {
                ATX_InputStream_Tell(stream, &new_position);
                CHECK(new_position == position);
                expect_eos = ATX_FALSE;
            } else {
                result = ATX_InputStream_Seek(stream, offset);
                CHECK(result == ATX_SUCCESS);
                ATX_InputStream_Tell(stream, &new_position);
                CHECK(new_position == (ATX_Position)offset);
            }
            offset = new_position;
        }
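        /* clear the scratch buffer and read the next chunk */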
        ATX_SetMemory(scratch, 0, chunk);
        result = ATX_InputStream_Read(stream, scratch, chunk, &bytes_read);
        if (ATX_FAILED(result)) {
            CHECK(result == ATX_ERROR_EOS);
            CHECK(offset == source_size);
            break;
        }
        CHECK(chunk == 0 || expect_eos == ATX_FALSE);
        CHECK(bytes_read <= chunk);
        if (bytes_read != chunk) {
            expect_eos = ATX_TRUE;
        }
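        /* check that the bytes read match the pattern written to the source */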
        {
            unsigned int j;
            for (j=0; j<bytes_read; j++) {
                CHECK(scratch[j] == (unsigned char)(offset+j));
            }
        }
        offset += bytes_read;
    }

    ATX_RELEASE_OBJECT(stream);
    ATX_RELEASE_OBJECT(source);
}
Example #4
/*----------------------------------------------------------------------
|   AacDecoderInput_PutPacket
+---------------------------------------------------------------------*/
BLT_METHOD
AacDecoderInput_PutPacket(BLT_PacketConsumer* _self,
                          BLT_MediaPacket*    packet)
{
    AacDecoder* self = ATX_SELF_M(input, AacDecoder, BLT_PacketConsumer);
    ATX_Result  result;

    /* check to see if this is the end of a stream */
    if (BLT_MediaPacket_GetFlags(packet) & 
        BLT_MEDIA_PACKET_FLAG_END_OF_STREAM) {
        self->input.eos = BLT_TRUE;
    }

    /* check to see if we need to create a decoder for this */
    if (self->helix_decoder == NULL) {
        AacDecoderConfig             decoder_config;
        AACFrameInfo                 aac_frame_info;
        const BLT_MediaType*         media_type;
        const BLT_Mp4AudioMediaType* mp4_media_type;

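        /* the packet must carry the expected MPEG-4 audio elementary stream media type */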
        BLT_MediaPacket_GetMediaType(packet, &media_type);
        if (media_type == NULL || media_type->id != self->mp4es_type_id) {
            return BLT_ERROR_INVALID_MEDIA_TYPE;
        }
        mp4_media_type = (const BLT_Mp4AudioMediaType*)media_type;
        if (mp4_media_type->base.stream_type != BLT_MP4_STREAM_TYPE_AUDIO) {
            return BLT_ERROR_INVALID_MEDIA_TYPE;
        }
        if (BLT_FAILED(AacDecoderConfig_Parse(mp4_media_type->decoder_info, mp4_media_type->decoder_info_length, &decoder_config))) {
            return BLT_ERROR_INVALID_MEDIA_FORMAT;
        }
        if (decoder_config.object_type != BLT_AAC_OBJECT_TYPE_AAC_LC &&
            decoder_config.object_type != BLT_AAC_OBJECT_TYPE_SBR) {
            return BLT_ERROR_UNSUPPORTED_CODEC;
        }
        
        /* create the decoder */
        self->helix_decoder = AACInitDecoder();
        if (self->helix_decoder == NULL) return BLT_ERROR_OUT_OF_MEMORY;

        /* configure the decoder */
        ATX_SetMemory(&aac_frame_info, 0, sizeof(aac_frame_info));
        aac_frame_info.nChans       = AacDecoderConfig_GetChannelCount(&decoder_config);
        aac_frame_info.sampRateCore = AacDecoderConfig_GetSampleRate(&decoder_config);
        if (decoder_config.object_type == BLT_AAC_OBJECT_TYPE_AAC_LC) {
            aac_frame_info.profile = AAC_PROFILE_LC;
        }
        self->sample_buffer_size = BLT_AAC_FRAME_SIZE*2*aac_frame_info.nChans*2; /* the last *2 is for SBR support */
        AACSetRawBlockParams(self->helix_decoder, 0, &aac_frame_info);        
    }

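    /* decode the packet into a newly allocated PCM output packet */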
    {
        unsigned char*   in_buffer;
        int              in_size;
        short*           out_buffer;
        BLT_MediaPacket* out_packet;
        AACFrameInfo     aac_frame_info;
        
        /* create a PCM packet for the output */
        result = BLT_Core_CreateMediaPacket(ATX_BASE(self, BLT_BaseMediaNode).core,
                                            self->sample_buffer_size,
                                            (BLT_MediaType*)&self->output.media_type,
                                            &out_packet);
        if (BLT_FAILED(result)) return result;

        /* copy the timestamp */
        BLT_MediaPacket_SetTimeStamp(out_packet, BLT_MediaPacket_GetTimeStamp(packet));

        /* decode the packet as a frame */
        in_buffer  = BLT_MediaPacket_GetPayloadBuffer(packet);
        in_size    = BLT_MediaPacket_GetPayloadSize(packet);
        out_buffer = (short*)BLT_MediaPacket_GetPayloadBuffer(out_packet);
        result = AACDecode(self->helix_decoder, &in_buffer, &in_size, out_buffer); 
        if (result != 0) {
            BLT_MediaPacket_Release(out_packet);
            return BLT_ERROR_INVALID_MEDIA_FORMAT;
        }

        /* check that the sample buffer matches our current media type */
        AACGetLastFrameInfo(self->helix_decoder, &aac_frame_info);
        if (self->output.media_type.channel_count == 0) {
            /* first time, setup our media type */
            self->output.media_type.channel_count   = aac_frame_info.nChans;
            self->output.media_type.sample_rate     = aac_frame_info.sampRateOut;
            self->output.media_type.bits_per_sample = 16;
            self->output.media_type.sample_format   = BLT_PCM_SAMPLE_FORMAT_SIGNED_INT_NE;
            self->output.media_type.channel_mask    = 0;

            /* update the stream info */
            if (ATX_BASE(self, BLT_BaseMediaNode).context) {
                BLT_StreamInfo stream_info;
                stream_info.data_type     = "MPEG-4 AAC";
                stream_info.sample_rate   = aac_frame_info.sampRateOut;
                stream_info.channel_count = aac_frame_info.nChans;
                stream_info.mask = BLT_STREAM_INFO_MASK_DATA_TYPE    |
                                   BLT_STREAM_INFO_MASK_SAMPLE_RATE  |
                                   BLT_STREAM_INFO_MASK_CHANNEL_COUNT;
                BLT_Stream_SetInfo(ATX_BASE(self, BLT_BaseMediaNode).context, &stream_info);
            }

            /* update the packet media type */
            BLT_MediaPacket_SetMediaType(out_packet, (BLT_MediaType*)&self->output.media_type);
        } else {
            /* we've already setup a media type, check that this is the same */
            if (self->output.media_type.sample_rate   != (unsigned int)aac_frame_info.sampRateOut || 
                self->output.media_type.channel_count != aac_frame_info.nChans) {
                BLT_MediaPacket_Release(out_packet);
                return BLT_ERROR_INVALID_MEDIA_FORMAT;
            }
        }

        /* add to the output packet list */
        BLT_MediaPacket_SetPayloadSize(out_packet, aac_frame_info.outputSamps*2);
        ATX_List_AddData(self->output.packets, out_packet);
    }

    return BLT_SUCCESS;
}
Example #5
/*----------------------------------------------------------------------
|   AacDecoderConfig_Parse
+---------------------------------------------------------------------*/
static BLT_Result
AacDecoderConfig_Parse(const unsigned char* encoded, 
                       BLT_Size             encoded_size, 
                       AacDecoderConfig*    config)
{
    BLT_Result   result;
    AacBitStream bits;
    AacBitStream_Construct(&bits, encoded, encoded_size);

    /* default config */
    ATX_SetMemory(config, 0, sizeof(*config));

    result = AacGetAudioObjectType(&bits, &config->object_type);
    if (BLT_FAILED(result)) return result;

    result = AacGetSamplingFrequency(&bits, 
                                     &config->sampling_frequency_index, 
                                     &config->sampling_frequency);
    if (BLT_FAILED(result)) return result;

    if (AacBitStream_GetBitsLeft(&bits) < 4) {
        return BLT_ERROR_INVALID_MEDIA_FORMAT;
    }
    config->channel_configuration = AacBitStream_ReadBits(&bits, 4);

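    /* explicit SBR signaling: read the extension sampling frequency, then the actual object type */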
    if (config->object_type == BLT_AAC_OBJECT_TYPE_SBR) {
        config->extension.object_type = config->object_type;
        config->extension.sbr_present = BLT_TRUE;
        result = AacGetSamplingFrequency(&bits,
                                         &config->extension.sampling_frequency_index,
                                         &config->extension.sampling_frequency);
        if (BLT_FAILED(result)) return result;
        result = AacGetAudioObjectType(&bits, &config->object_type);
        if (BLT_FAILED(result)) return result;
    }
    
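    /* the GA (General Audio) object types are followed by a GASpecificConfig */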
    switch (config->object_type) {
        case BLT_AAC_OBJECT_TYPE_AAC_MAIN:
        case BLT_AAC_OBJECT_TYPE_AAC_LC:
        case BLT_AAC_OBJECT_TYPE_AAC_SSR:
        case BLT_AAC_OBJECT_TYPE_AAC_LTP:
        case BLT_AAC_OBJECT_TYPE_AAC_SCALABLE:
        case BLT_AAC_OBJECT_TYPE_ER_AAC_LC:
        case BLT_AAC_OBJECT_TYPE_ER_AAC_LTP:
        case BLT_AAC_OBJECT_TYPE_ER_AAC_SCALABLE:
        case BLT_AAC_OBJECT_TYPE_ER_AAC_LD:
            result = AacGetGASpecificInfo(&bits, config);
            break;

        default:
            break;
    }

    /* extension (only supported for non-ER AAC types here) */
    if ((config->object_type == BLT_AAC_OBJECT_TYPE_AAC_MAIN ||
         config->object_type == BLT_AAC_OBJECT_TYPE_AAC_LC   ||
         config->object_type == BLT_AAC_OBJECT_TYPE_AAC_SSR  ||
         config->object_type == BLT_AAC_OBJECT_TYPE_AAC_LTP  ||
         config->object_type == BLT_AAC_OBJECT_TYPE_AAC_SCALABLE) &&
        AacBitStream_GetBitsLeft(&bits) >= 16) {
        unsigned int sync_extension_type = AacBitStream_ReadBits(&bits, 11);
        if (sync_extension_type == 0x2b7) {
            result = AacGetAudioObjectType(&bits, &config->extension.object_type);
            if (BLT_FAILED(result)) return result;
            if (config->extension.object_type == BLT_AAC_OBJECT_TYPE_SBR) {
                config->extension.sbr_present = AacBitStream_ReadBits(&bits, 1);
                if (config->extension.sbr_present) {
                    result = AacGetSamplingFrequency(&bits, 
                                      &config->extension.sampling_frequency_index, 
                                      &config->extension.sampling_frequency);
                    if (BLT_FAILED(result)) return result;
                }
            }
        }
    }

    return BLT_SUCCESS;
}
Example #6
/*----------------------------------------------------------------------
|    BLT_Decoder_ClearStatus
+---------------------------------------------------------------------*/
static BLT_Result
BLT_Decoder_ClearStatus(BLT_Decoder* decoder) 
{
    ATX_SetMemory(&decoder->status, 0, sizeof(decoder->status));
    return BLT_SUCCESS;
}
Example #7
/*----------------------------------------------------------------------
|    OsxAudioUnitsOutput_Create
+---------------------------------------------------------------------*/
static BLT_Result
OsxAudioUnitsOutput_Create(BLT_Module*              module,
                           BLT_Core*                core, 
                           BLT_ModuleParametersType parameters_type,
                           const void*              parameters, 
                           BLT_MediaNode**          object)
{
    OsxAudioUnitsOutput*      self;
    BLT_MediaNodeConstructor* constructor = (BLT_MediaNodeConstructor*)parameters;
    AudioDeviceID             audio_device_id = 0;
    AudioUnit                 audio_unit = NULL;
    Component                 component;
    ComponentDescription      component_desc;
    ComponentResult           result;
    BLT_Result                blt_result;
    
    /* check parameters */
    if (parameters == NULL || 
        parameters_type != BLT_MODULE_PARAMETERS_TYPE_MEDIA_NODE_CONSTRUCTOR) {
        return BLT_ERROR_INVALID_PARAMETERS;
    }

    /* parse the name */
    if (!ATX_StringsEqualN(constructor->name, "osxau:", 6)) {
        return BLT_ERROR_INTERNAL;
    }

    /* map the name into a device ID */
    blt_result = OsxAudioUnitsOutput_MapDeviceName(constructor->name+6, &audio_device_id);
    if (BLT_FAILED(blt_result)) return blt_result;
    
    /* get the default output audio unit */
    ATX_SetMemory(&component_desc, 0, sizeof(component_desc));
    component_desc.componentType         = kAudioUnitType_Output;
    component_desc.componentSubType      = audio_device_id?kAudioUnitSubType_HALOutput:kAudioUnitSubType_DefaultOutput;
    component_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    component_desc.componentFlags        = 0;
    component_desc.componentFlagsMask    = 0;
    component = FindNextComponent(NULL, &component_desc);
    if (component == NULL) {
        ATX_LOG_WARNING("FindNextComponent failed");
        return BLT_FAILURE;
    }
    
    /* open the audio unit (we will initialize it later) */
    result = OpenAComponent(component, &audio_unit);
    if (result != noErr) {
        ATX_LOG_WARNING_1("OpenAComponent failed (%d)", (int)result);
        return BLT_FAILURE;
    }

    /* Since SnowLeopard, kAudioHardwarePropertyRunLoop points at the process's main thread.
       Since not all apps service a run loop on the main thread (like command-line apps), we
       need to set the run loop to NULL to tell the HAL to run its own thread for this.
       This is important in order for the HAL to receive events when, for example, the headphones
       are plugged in or out.
       NOTE: this was the default before SnowLeopard (10.6), so we only do this if we're 10.6 or
       after */
    {
        SInt32 major = 0, minor = 0;

        Gestalt(gestaltSystemVersionMajor, &major);
        Gestalt(gestaltSystemVersionMinor, &minor);
        if (major > 10 || (major == 10 && minor >= 6)) {
            CFRunLoopRef null_loop = NULL;
            AudioObjectPropertyAddress address = { kAudioHardwarePropertyRunLoop,
                                                   kAudioObjectPropertyScopeGlobal,
                                                   kAudioObjectPropertyElementMaster };
            ATX_LOG_INFO("configuring the HAL to use its own thread for the run loop");
            AudioObjectSetPropertyData(kAudioObjectSystemObject, &address, 0, NULL, sizeof(CFRunLoopRef), &null_loop);
        }
    }
    
    /* allocate memory for the object */
    self = ATX_AllocateZeroMemory(sizeof(OsxAudioUnitsOutput));
    if (self == NULL) {
        *object = NULL;
        return BLT_ERROR_OUT_OF_MEMORY;
    }

    /* construct the inherited object */
    BLT_BaseMediaNode_Construct(&ATX_BASE(self, BLT_BaseMediaNode), module, core);

    /* construct the object */
    self->audio_device_id            = audio_device_id;
    self->audio_unit                 = audio_unit;
    self->media_type.sample_rate     = 0;
    self->media_type.channel_count   = 0;
    self->media_type.bits_per_sample = 0;

    /* create a lock */
    pthread_mutex_init(&self->lock, NULL);
    
    /* create the packet queue */
    {
        ATX_ListDataDestructor destructor = { NULL, OsxAudioUnitsOutput_QueueItemDestructor };
        ATX_List_CreateEx(&destructor, &self->packet_queue);
        self->max_packets_in_queue = BLT_OSX_AUDIO_UNITS_OUTPUT_DEFAULT_PACKET_QUEUE_SIZE;
    }
    
    /* setup the expected media type */
    BLT_PcmMediaType_Init(&self->expected_media_type);
    self->expected_media_type.sample_format = BLT_PCM_SAMPLE_FORMAT_SIGNED_INT_NE;

    /* setup interfaces */
    ATX_SET_INTERFACE_EX(self, OsxAudioUnitsOutput, BLT_BaseMediaNode, BLT_MediaNode);
    ATX_SET_INTERFACE_EX(self, OsxAudioUnitsOutput, BLT_BaseMediaNode, ATX_Referenceable);
    ATX_SET_INTERFACE   (self, OsxAudioUnitsOutput, BLT_PacketConsumer);
    ATX_SET_INTERFACE   (self, OsxAudioUnitsOutput, BLT_OutputNode);
    ATX_SET_INTERFACE   (self, OsxAudioUnitsOutput, BLT_MediaPort);
    ATX_SET_INTERFACE   (self, OsxAudioUnitsOutput, BLT_VolumeControl);
    *object = &ATX_BASE_EX(self, BLT_BaseMediaNode, BLT_MediaNode);

    return BLT_SUCCESS;
}
Example #8
/*----------------------------------------------------------------------
|    OsxAudioUnitsOutput_RenderCallback
+---------------------------------------------------------------------*/
static OSStatus
OsxAudioUnitsOutput_RenderCallback(void*                       inRefCon,
                                   AudioUnitRenderActionFlags* ioActionFlags,
                                   const AudioTimeStamp*       inTimeStamp,
                                   UInt32                      inBusNumber,
                                   UInt32                      inNumberFrames,
                                   AudioBufferList*            ioData)
{
    OsxAudioUnitsOutput* self = (OsxAudioUnitsOutput*)inRefCon;
    ATX_ListItem*        item;
    unsigned int         requested;
    unsigned char*       out;
    ATX_Boolean          timestamp_measured = ATX_FALSE;
    
    BLT_COMPILER_UNUSED(ioActionFlags);
    BLT_COMPILER_UNUSED(inTimeStamp);
    BLT_COMPILER_UNUSED(inBusNumber);
    BLT_COMPILER_UNUSED(inNumberFrames);
                
    /* sanity check on the parameters */
    if (ioData == NULL || ioData->mNumberBuffers == 0) return 0;
    
    /* in case we have a strange request with more than one buffer, just return silence */
    if (ioData->mNumberBuffers != 1) {
        unsigned int i;
        ATX_LOG_FINEST_1("strange request with %d buffers", 
                         (int)ioData->mNumberBuffers);
        for (i=0; i<ioData->mNumberBuffers; i++) {
            ATX_SetMemory(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
        }
        return 0;
    }
        
    /* init local variables */
    requested = ioData->mBuffers[0].mDataByteSize;
    out = (unsigned char*)ioData->mBuffers[0].mData;
    ATX_LOG_FINEST_2("request for %d bytes, %d frames", (int)requested, (int)inNumberFrames);

    /* lock the packet queue */
    pthread_mutex_lock(&self->lock);
    
    /* return now if we're paused */
    //if (self->paused) goto end;
    
    /* abort early if we have no packets */
    if (ATX_List_GetItemCount(self->packet_queue) == 0) goto end;
        
    /* fill as much as we can */
    while (requested && (item = ATX_List_GetFirstItem(self->packet_queue))) {
        BLT_MediaPacket*        packet = ATX_ListItem_GetData(item);
        const BLT_PcmMediaType* media_type;
        BLT_Size                payload_size;
        BLT_Size                chunk_size;
        BLT_TimeStamp           chunk_duration;
        BLT_TimeStamp           packet_ts;
        unsigned int            bytes_per_frame;
        unsigned int            sample_rate;
        
        /* get the packet info */
        BLT_MediaPacket_GetMediaType(packet, (const BLT_MediaType**)&media_type);
        packet_ts = BLT_MediaPacket_GetTimeStamp(packet);
        bytes_per_frame = media_type->channel_count*media_type->bits_per_sample/8;
        sample_rate = media_type->sample_rate;
        
        /* record the timestamp if we have not already done so */
        if (!timestamp_measured) {
            self->media_time_snapshot.rendered_packet_ts = packet_ts;
            self->media_time_snapshot.rendered_host_time = 
                AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
            BLT_TimeStamp_Set(self->media_time_snapshot.rendered_duration, 0, 0);
            timestamp_measured = ATX_TRUE;
            
            ATX_LOG_FINEST_2("rendered TS: packet ts=%lld, host ts=%lld",
                             BLT_TimeStamp_ToNanos(packet_ts),
                             self->media_time_snapshot.rendered_host_time);
        }
         
        /* compute how much to copy from this packet */
        payload_size = BLT_MediaPacket_GetPayloadSize(packet);
        if (payload_size <= requested) {
            /* copy the entire payload and remove the packet from the queue */
            chunk_size = payload_size;
            ATX_CopyMemory(out, BLT_MediaPacket_GetPayloadBuffer(packet), chunk_size);
            ATX_List_RemoveItem(self->packet_queue, item);
            packet = NULL;
            media_type = NULL;
            ATX_LOG_FINER_1("media packet fully consumed, %d left in queue",
                            ATX_List_GetItemCount(self->packet_queue));
        } else {
            /* only copy a portion of the payload */
            chunk_size = requested;
            ATX_CopyMemory(out, BLT_MediaPacket_GetPayloadBuffer(packet), chunk_size);            
        }
        
        /* update the counters */
        requested -= chunk_size;
        out       += chunk_size;
        
        /* update the media time snapshot */
        if (bytes_per_frame) {
            unsigned int frames_in_chunk = chunk_size/bytes_per_frame;
            chunk_duration = BLT_TimeStamp_FromSamples(frames_in_chunk, sample_rate);
        } else {
            BLT_TimeStamp_Set(chunk_duration, 0, 0);
        }
        self->media_time_snapshot.rendered_duration = 
            BLT_TimeStamp_Add(self->media_time_snapshot.rendered_duration, chunk_duration);
        
        /* update the packet unless we're done with it */
        if (packet) {
            /* update the packet offset and timestamp */
            BLT_MediaPacket_SetPayloadOffset(packet, BLT_MediaPacket_GetPayloadOffset(packet)+chunk_size);
            BLT_MediaPacket_SetTimeStamp(packet, BLT_TimeStamp_Add(packet_ts, chunk_duration));
        }
    }
   
end:
    /* fill whatever is left with silence */    
    if (requested) {
        ATX_LOG_FINEST_1("filling with %d bytes of silence", requested);
        ATX_SetMemory(out, 0, requested);
    }
    
    pthread_mutex_unlock(&self->lock);
        
    return 0;
}
Example #9
/*----------------------------------------------------------------------
|   OsxAudioUnitsOutput_Activate
+---------------------------------------------------------------------*/
BLT_METHOD
OsxAudioUnitsOutput_Activate(BLT_MediaNode* _self, BLT_Stream* stream)
{
    OsxAudioUnitsOutput*   self = ATX_SELF_EX(OsxAudioUnitsOutput, BLT_BaseMediaNode, BLT_MediaNode);
    ComponentResult        result;
    AURenderCallbackStruct callback;
    
    BLT_COMPILER_UNUSED(stream);
        
    ATX_LOG_FINER("start");

    /* select the device */
    if (self->audio_device_id) {
        result = AudioUnitSetProperty(self->audio_unit,
                                      kAudioOutputUnitProperty_CurrentDevice,
                                      kAudioUnitScope_Global,
                                      0,
                                      &self->audio_device_id,
                                      sizeof(self->audio_device_id));
        if (result != noErr) {
            ATX_LOG_WARNING_1("AudioUnitSetProperty (kAudioOutputUnitProperty_CurrentDevice) failed (%d)", (int)result);
        }
    }

    /* initialize the output */
    if (self->audio_unit) {
        result = AudioUnitInitialize(self->audio_unit);
        if (result != noErr) {
            ATX_LOG_WARNING_1("AudioUnitInitialize failed (%d)", (int)result);
            return BLT_FAILURE;
        }
    }

    /* set some default audio format */
    {
        AudioStreamBasicDescription audio_desc;
        ATX_SetMemory(&audio_desc, 0, sizeof(audio_desc));
        
        /* setup the audio description */
        audio_desc.mFormatID         = kAudioFormatLinearPCM;
        audio_desc.mFormatFlags      = kAudioFormatFlagsNativeEndian | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        audio_desc.mFramesPerPacket  = 1;
        audio_desc.mSampleRate       = 44100;
        audio_desc.mChannelsPerFrame = 2;
        audio_desc.mBitsPerChannel   = 16;
        audio_desc.mBytesPerFrame    = (audio_desc.mBitsPerChannel * audio_desc.mChannelsPerFrame) / 8;
        audio_desc.mBytesPerPacket   = audio_desc.mBytesPerFrame * audio_desc.mFramesPerPacket;
        audio_desc.mReserved         = 0;
        result = AudioUnitSetProperty(self->audio_unit,
                                      kAudioUnitProperty_StreamFormat,
                                      kAudioUnitScope_Input,
                                      0,
                                      &audio_desc,
                                      sizeof(audio_desc));
        if (result != noErr) {
            ATX_LOG_WARNING_1("AudioUnitSetProperty failed (%d)", (int)result);
            return BLT_FAILURE;
        }
    }

    /* check for downmix based on the number of supported channels */
    OsxAudioUnitsOutput_CheckDownmix(self);
    
    /* setup the callback */
    callback.inputProc = OsxAudioUnitsOutput_RenderCallback;
    callback.inputProcRefCon = _self;
    result = AudioUnitSetProperty(self->audio_unit, 
                                  kAudioUnitProperty_SetRenderCallback, 
                                  kAudioUnitScope_Input, 
                                  0,
                                  &callback, 
                                  sizeof(callback));
    if (result != noErr) {
        ATX_LOG_SEVERE_1("AudioUnitSetProperty failed when setting callback (%d)", (int)result);
        return BLT_FAILURE;
    }

    return BLT_SUCCESS;
}