/*----------------------------------------------------------------------
|    AlsaOutput_PutPacket
+---------------------------------------------------------------------*/
BLT_METHOD
AlsaOutput_PutPacket(BLT_PacketConsumer* _self,
                     BLT_MediaPacket*    packet)
{
    AlsaOutput*             self = ATX_SELF(AlsaOutput, BLT_PacketConsumer);
    const BLT_PcmMediaType* pcm_type;
    BLT_ByteBuffer          payload;
    BLT_Size                payload_size;
    BLT_Result              result;

    /* reject NULL packets */
    if (packet == NULL) return BLT_ERROR_INVALID_PARAMETERS;

    /* nothing to do for an empty payload */
    payload      = BLT_MediaPacket_GetPayloadBuffer(packet);
    payload_size = BLT_MediaPacket_GetPayloadSize(packet);
    if (payload_size == 0) return BLT_SUCCESS;

    /* the packet must carry PCM audio */
    result = BLT_MediaPacket_GetMediaType(packet, (const BLT_MediaType**)(const void*)&pcm_type);
    if (BLT_FAILED(result)) return result;
    if (pcm_type->base.id != BLT_MEDIA_TYPE_ID_AUDIO_PCM) {
        return BLT_ERROR_INVALID_MEDIA_TYPE;
    }

    /* (re)configure the device for this format */
    result = AlsaOutput_Configure(self, pcm_type);
    if (BLT_FAILED(result)) return result;

    /* update the media time */
    {
        BLT_TimeStamp ts              = BLT_MediaPacket_GetTimeStamp(packet);
        ATX_UInt64    ts_nanos        = BLT_TimeStamp_ToNanos(ts);
        BLT_TimeStamp packet_duration = BLT_TimeStamp_FromSeconds(0);

        /* derive the packet duration from the frame count when the
           format is fully specified, otherwise leave it at zero */
        if (pcm_type->sample_rate     &&
            pcm_type->channel_count   &&
            pcm_type->bits_per_sample) {
            unsigned int bytes_per_frame = pcm_type->channel_count*pcm_type->bits_per_sample/8;
            unsigned int frame_count     = BLT_MediaPacket_GetPayloadSize(packet)/bytes_per_frame;
            packet_duration = BLT_TimeStamp_FromSamples(frame_count, pcm_type->sample_rate);
        }

        /* a zero timestamp means "continue from where the last packet ended" */
        self->media_time      = ts_nanos ? ts_nanos : self->next_media_time;
        self->next_media_time = self->media_time + BLT_TimeStamp_ToNanos(packet_duration);
    }

    /* write the audio samples */
    return AlsaOutput_Write(self, payload, payload_size);
}
/*----------------------------------------------------------------------
|    SilenceRemover_TrimPending
|
|    Remove trailing silence from the pending packet, if there is one.
|    Frames are 16-bit stereo (4 bytes per frame); a frame is "silent"
|    when both channels are within +/-BLT_SILENCE_REMOVER_THRESHOLD.
+---------------------------------------------------------------------*/
static void
SilenceRemover_TrimPending(SilenceRemover* self)
{
    BLT_MediaPacket* packet = self->input.pending;
    short*           pcm;
    BLT_Cardinal     sample_count;
    BLT_Cardinal     skip = 0;
    int              sample;

    /* quick check */
    if (!packet) return;

    ATX_LOG_FINER("SilenceRemover: trimming pending packet");

    /* count the trailing run of silent frames, scanning backwards */
    pcm = (short*)BLT_MediaPacket_GetPayloadBuffer(packet);
    sample_count = BLT_MediaPacket_GetPayloadSize(packet)/4;
    if (sample_count) {
        /* BUGFIX: point at the *last* frame; the previous code added
           sample_count*2, which made the first iteration read one frame
           past the end of the payload buffer */
        pcm += (sample_count-1)*2;
        for (sample = sample_count-1; sample >= 0; sample--, pcm -= 2) {
            if (pcm[0] > -BLT_SILENCE_REMOVER_THRESHOLD &&
                pcm[0] <  BLT_SILENCE_REMOVER_THRESHOLD &&
                pcm[1] > -BLT_SILENCE_REMOVER_THRESHOLD &&
                pcm[1] <  BLT_SILENCE_REMOVER_THRESHOLD) {
                skip++;
            } else {
                /* BUGFIX: stop at the first non-silent frame; without this
                   break the loop counted every silent frame in the packet,
                   not just the trailing run, and trimmed audible audio */
                break;
            }
        }
    }

    /* drop the trailing silence */
    BLT_MediaPacket_SetPayloadSize(packet, (sample_count-skip)*4);
}
/*----------------------------------------------------------------------
|    ScanPacket
|
|    Measure the leading and trailing silence in a packet, reported in
|    bytes through zero_head and zero_tail. Only 16-bit stereo PCM is
|    scanned; other formats report zero silence and succeed.
+---------------------------------------------------------------------*/
static BLT_Result
ScanPacket(BLT_MediaPacket* packet, BLT_Cardinal* zero_head, BLT_Cardinal* zero_tail)
{
    BLT_PcmMediaType* media_type;
    short*            pcm;
    BLT_Cardinal      sample_count;
    BLT_Ordinal       sample;
    BLT_Cardinal      leading_zeros = 0;
    BLT_Cardinal      last_loud_end = 0;

    /* default values */
    *zero_head = 0;
    *zero_tail = 0;

    /* get the media type */
    BLT_MediaPacket_GetMediaType(packet, (const BLT_MediaType**)(const void*)&media_type);

    /* only PCM audio can be scanned */
    if (media_type->base.id != BLT_MEDIA_TYPE_ID_AUDIO_PCM) {
        return BLT_ERROR_INVALID_MEDIA_TYPE;
    }

    /* for now, we only support 16-bit, stereo, PCM */
    if (media_type->bits_per_sample != 16 || media_type->channel_count != 2) {
        return BLT_SUCCESS;
    }

    /* walk over the stereo frames (4 bytes each) looking for silence */
    sample_count = BLT_MediaPacket_GetPayloadSize(packet)/4;
    if (sample_count == 0) return BLT_SUCCESS;
    pcm = BLT_MediaPacket_GetPayloadBuffer(packet);
    for (sample = 0; sample < sample_count; sample++, pcm += 2) {
        int is_silent = pcm[0] > -BLT_SILENCE_REMOVER_THRESHOLD &&
                        pcm[0] <  BLT_SILENCE_REMOVER_THRESHOLD &&
                        pcm[1] > -BLT_SILENCE_REMOVER_THRESHOLD &&
                        pcm[1] <  BLT_SILENCE_REMOVER_THRESHOLD;
        if (is_silent) {
            /* extend the leading run only while it is still unbroken */
            if (sample == leading_zeros) leading_zeros++;
        } else {
            /* remember where the last non-silent frame ends */
            last_loud_end = sample+1;
        }
    }

    /* report the results in bytes */
    *zero_head = leading_zeros*4;
    if (last_loud_end > 0) {
        *zero_tail = (sample_count-last_loud_end)*4;
    }

    return BLT_SUCCESS;
}
/*----------------------------------------------------------------------
|    CrossFader_BufferPacket
|
|    Append a packet's payload to the input ring buffer, promoting
|    already-buffered data downstream first if there is not enough room.
+---------------------------------------------------------------------*/
static BLT_Result
CrossFader_BufferPacket(CrossFader* fader, BLT_MediaPacket* packet)
{
    BLT_Size free_space;
    BLT_Size data_size;
    BLT_Any  data;

    /* nothing to do if the ring buffer has not been created */
    if (fader->input.buffer == NULL) return BLT_SUCCESS;

    /* see how much data we have */
    data_size = BLT_MediaPacket_GetPayloadSize(packet);
    data      = BLT_MediaPacket_GetPayloadBuffer(packet);

    /* shortcut for empty payloads */
    if (data_size == 0) return BLT_SUCCESS;

    /* make room in the buffer if needed */
    free_space = ATX_RingBuffer_GetSpace(fader->input.buffer);
    if (free_space < data_size) {
        /* promote buffered data downstream to free up space */
        BLT_Size buffered = ATX_RingBuffer_GetAvailable(fader->input.buffer);
        CrossFader_PromotePacket(fader, buffered >= data_size ? data_size : buffered);
        free_space = ATX_RingBuffer_GetSpace(fader->input.buffer);
        if (free_space < data_size) {
            /* we can't handle it */
            return BLT_FAILURE;
        }
    }

    /* copy the data to the input buffer */
    ATX_LOG_FINER_1("CrossFader::BufferPacket - buffering %d", data_size);
    ATX_RingBuffer_Write(fader->input.buffer, data, data_size);

    return BLT_SUCCESS;
}
/*----------------------------------------------------------------------
|    CrossFaderInputPort_PutPacket
|
|    Receive a PCM packet: reconfigure on media-type changes, apply a
|    fade-in gain ramp while in the START state, then hand the packet
|    to the ring buffer.
+---------------------------------------------------------------------*/
BLT_METHOD
CrossFaderInputPort_PutPacket(BLT_PacketConsumerInstance* instance,
                              BLT_MediaPacket*            packet)
{
    CrossFader*       fader = (CrossFader*)instance;
    BLT_PcmMediaType* media_type;
    ATX_Result        result;

    ATX_LOG_FINER_1("CrossFaderInputPort::PutPacket - state = %s",
                    fader->state == CROSS_FADER_STATE_IN_START ? "START" :
                    fader->state == CROSS_FADER_STATE_IN_MAIN  ? "MAIN"  :
                    "???");

    /* get the media type */
    result = BLT_MediaPacket_GetMediaType(packet, (const BLT_MediaType**)&media_type);
    if (BLT_FAILED(result)) return result;

    /* check the if media type is PCM */
    if (media_type->base.id != BLT_MEDIA_TYPE_ID_AUDIO_PCM) {
        return BLT_ERROR_INVALID_MEDIA_TYPE;
    }

    /* check if the media type has changed */
    if (media_type->sample_rate     != fader->input.media_type.sample_rate     ||
        media_type->channel_count   != fader->input.media_type.channel_count   ||
        media_type->bits_per_sample != fader->input.media_type.bits_per_sample ||
        media_type->sample_format   != fader->input.media_type.sample_format) {
        /* media type has changed, flush and reconfigure */
        ATX_LOG_FINER("CrossFaderInputPort::PutPacket - new media type");
        CrossFader_Flush(fader);
        result = CrossFader_SetupInput(fader, media_type);
        if (BLT_FAILED(result)) return result;
    }

    /* decide what to do with the packet */
    switch (fader->state) {
      case CROSS_FADER_STATE_IN_START: {
          /* fade-in phase: scale the samples by a gain that ramps from
             -30dB up to 0dB over 44100*4*10 bytes of input.
             NOTE(review): the ramp length assumes 44.1kHz 16-bit stereo
             (4 bytes/frame, 10 seconds) — confirm against the input format */
          unsigned int sample;
          BLT_Size     size    = BLT_MediaPacket_GetPayloadSize(packet);
          short*       samples = (short*)BLT_MediaPacket_GetPayloadBuffer(packet);
          float        pos     = (float)fader->input.position/(float)(44100*4*10);
          float        factor  = (float)pow(10.0f, -(30.0f-pos*30.0f)/20.0f);

          /* BUGFIX: was ATX_LOG_FINDER_1, a typo for ATX_LOG_FINER_1
             (no ATX_LOG_FINDER_* macro exists) */
          ATX_LOG_FINER_1("CrossFaderInputPort::PutPacket - factor = %f", factor);

          /* apply the gain to every 16-bit sample in the payload */
          for (sample = 0; sample < size/2; sample++) {
              *samples = (short)(((float)*samples)*factor);
              samples++;
          }
          fader->input.position += size;
          if (fader->input.position >= 44100*4*10) {
              /* fade-in complete, switch to the main state */
              fader->input.position = 0;
              fader->state = CROSS_FADER_STATE_IN_MAIN;
          }
          CrossFader_BufferPacket(fader, packet);
          break;
      }

      case CROSS_FADER_STATE_IN_MAIN:
        CrossFader_BufferPacket(fader, packet);
        break;
    }

    return BLT_SUCCESS;
}
/*----------------------------------------------------------------------
|    SilenceRemoverInput_PutPacket
|
|    Receive a packet and remove silence at stream boundaries.
|    In START_OF_STREAM state, leading silence is stripped (all-silent
|    packets are dropped, or emptied if they carry flags that must be
|    propagated). In IN_STREAM state, packets with trailing silence are
|    held back (via SilenceRemover_HoldPacket) so the tail can be
|    trimmed later if the stream ends there; zero_head/zero_tail are
|    byte counts produced by ScanPacket.
+---------------------------------------------------------------------*/
BLT_METHOD
SilenceRemoverInput_PutPacket(BLT_PacketConsumer* _self,
                              BLT_MediaPacket*    packet)
{
    SilenceRemover* self = ATX_SELF_M(input, SilenceRemover, BLT_PacketConsumer);
    BLT_Flags       packet_flags;
    BLT_Cardinal    zero_head = 0;
    BLT_Cardinal    zero_tail = 0;
    BLT_Offset      payload_offset;
    BLT_Size        payload_size;
    ATX_Result      result;

    ATX_LOG_FINER("SilenceRemoverInput::PutPacket");

    /* get the packet info */
    packet_flags   = BLT_MediaPacket_GetFlags(packet);
    payload_offset = BLT_MediaPacket_GetPayloadOffset(packet);
    payload_size   = BLT_MediaPacket_GetPayloadSize(packet);

    /* scan the packet for zeros (silence at the head and tail, in bytes) */
    if (payload_size != 0) {
        result = ScanPacket(packet, &zero_head, &zero_tail);
        if (BLT_FAILED(result)) return result;
        if (zero_head || zero_tail) {
            ATX_LOG_FINER_2("SilenceRemoverInput::PutPacket zero_head=%d, zero_tail=%d",
                            (int)zero_head, (int)zero_tail);
        }
    }

    /* decide how to process the packet */
    if (self->state == SILENCE_REMOVER_STATE_START_OF_STREAM) {
        /* note: an empty packet also lands here (zero_head == payload_size == 0) */
        if (zero_head == payload_size) {
            /* packet is all silence */
            if (packet_flags != 0) {
                /* packet has flags, don't discard it, just empty it */
                ATX_LOG_FINER("SilenceRemover: emptying packet");
                BLT_MediaPacket_SetPayloadSize(packet, 0);
                SilenceRemover_AcceptPacket(self, packet);
            } else {
                ATX_LOG_FINER("SilenceRemover: dropping packet");
            }
        } else {
            /* remove silence at the start of the packet by advancing the
               payload offset past the silent bytes */
            BLT_MediaPacket_SetPayloadOffset(packet, payload_offset+zero_head);
            SilenceRemover_AcceptPacket(self, packet);

            /* we're now in the stream unless this is also the end */
            if (!(packet_flags & BLT_MEDIA_PACKET_FLAG_END_OF_STREAM)) {
                ATX_LOG_FINER("SilenceRemover: new state = IN_STREAM");
                self->state = SILENCE_REMOVER_STATE_IN_STREAM;
            }
        }
    } else {
        /* in stream */
        if (zero_head == payload_size) {
            /* packet is all silence */
            ATX_LOG_FINER("SilenceRemover: packet is all silence");
            if (packet_flags) {
                /* packet has flags, don't discard it, just empty it;
                   trim the pending packet first since the stream is
                   (at least momentarily) ending in silence */
                SilenceRemover_TrimPending(self);
                BLT_MediaPacket_SetPayloadSize(packet, 0);
                SilenceRemover_AcceptPacket(self, packet);
            } else {
                ATX_LOG_FINER("SilenceRemover: dropping packet");
            }
        } else {
            /* accept the pending packet */
            SilenceRemover_AcceptPending(self);

            if (zero_tail) {
                /* packet has some silence at the end: hold it back in
                   case the stream ends here and the tail must be trimmed */
                ATX_LOG_FINER("SilenceRemover: packet has silence at end");
                SilenceRemover_HoldPacket(self, packet);
            } else {
                /* packet has no silence at the end */
                ATX_LOG_FINER("SilenceRemover: packet has no silence at end");
                SilenceRemover_AcceptPacket(self, packet);
            }
        }

        /* a stream boundary resets us to the START_OF_STREAM state */
        if (packet_flags & BLT_MEDIA_PACKET_FLAG_END_OF_STREAM ||
            packet_flags & BLT_MEDIA_PACKET_FLAG_START_OF_STREAM) {
            ATX_LOG_FINER("SilenceRemover: new state = START_OF_STREAM");
            self->state = SILENCE_REMOVER_STATE_START_OF_STREAM;
        }
    }

    return BLT_SUCCESS;
}
/*----------------------------------------------------------------------
|    AacDecoderInput_PutPacket
|
|    Receive an MPEG-4 AAC elementary-stream packet, lazily create and
|    configure the Helix decoder on the first packet, decode one frame
|    into a new PCM packet, and append it to the output packet list.
|    On the first decoded frame the output media type and stream info
|    are set from the decoder's reported format.
+---------------------------------------------------------------------*/
BLT_METHOD
AacDecoderInput_PutPacket(BLT_PacketConsumer* _self,
                          BLT_MediaPacket*    packet)
{
    AacDecoder* self = ATX_SELF_M(input, AacDecoder, BLT_PacketConsumer);
    ATX_Result  result;

    /* check to see if this is the end of a stream */
    if (BLT_MediaPacket_GetFlags(packet) & BLT_MEDIA_PACKET_FLAG_END_OF_STREAM) {
        self->input.eos = BLT_TRUE;
    }

    /* check to see if we need to create a decoder for this */
    if (self->helix_decoder == NULL) {
        AacDecoderConfig             decoder_config;
        AACFrameInfo                 aac_frame_info;
        const BLT_MediaType*         media_type;
        const BLT_Mp4AudioMediaType* mp4_media_type;

        /* the packet must carry the MP4 elementary-stream type we registered */
        BLT_MediaPacket_GetMediaType(packet, &media_type);
        if (media_type == NULL || media_type->id != self->mp4es_type_id) {
            return BLT_ERROR_INVALID_MEDIA_TYPE;
        }
        mp4_media_type = (const BLT_Mp4AudioMediaType*)media_type;
        if (mp4_media_type->base.stream_type != BLT_MP4_STREAM_TYPE_AUDIO) {
            return BLT_ERROR_INVALID_MEDIA_TYPE;
        }

        /* parse the decoder-specific info (AudioSpecificConfig) */
        if (BLT_FAILED(AacDecoderConfig_Parse(mp4_media_type->decoder_info,
                                              mp4_media_type->decoder_info_length,
                                              &decoder_config))) {
            return BLT_ERROR_INVALID_MEDIA_FORMAT;
        }

        /* only AAC-LC and SBR object types are supported */
        if (decoder_config.object_type != BLT_AAC_OBJECT_TYPE_AAC_LC &&
            decoder_config.object_type != BLT_AAC_OBJECT_TYPE_SBR) {
            return BLT_ERROR_UNSUPPORTED_CODEC;
        }

        /* create the decoder */
        self->helix_decoder = AACInitDecoder();
        if (self->helix_decoder == NULL) return BLT_ERROR_OUT_OF_MEMORY;

        /* configure the decoder for raw (headerless) AAC blocks */
        ATX_SetMemory(&aac_frame_info, 0, sizeof(aac_frame_info));
        aac_frame_info.nChans       = AacDecoderConfig_GetChannelCount(&decoder_config);
        aac_frame_info.sampRateCore = AacDecoderConfig_GetSampleRate(&decoder_config);
        if (decoder_config.object_type == BLT_AAC_OBJECT_TYPE_AAC_LC) {
            aac_frame_info.profile = AAC_PROFILE_LC;
        }
        /* frame samples * 2 bytes per 16-bit sample * channels */
        self->sample_buffer_size = BLT_AAC_FRAME_SIZE*2*aac_frame_info.nChans*2; /* the last *2 is for SBR support */
        AACSetRawBlockParams(self->helix_decoder, 0, &aac_frame_info);
    }

    {
        unsigned char*   in_buffer;
        int              in_size;
        short*           out_buffer;
        BLT_MediaPacket* out_packet;
        AACFrameInfo     aac_frame_info;

        /* create a PCM packet for the output */
        result = BLT_Core_CreateMediaPacket(ATX_BASE(self, BLT_BaseMediaNode).core,
                                            self->sample_buffer_size,
                                            (BLT_MediaType*)&self->output.media_type,
                                            &out_packet);
        if (BLT_FAILED(result)) return result;

        /* copy the timestamp */
        BLT_MediaPacket_SetTimeStamp(out_packet, BLT_MediaPacket_GetTimeStamp(packet));

        /* decode the packet as a frame
           NOTE(review): only one frame is decoded per packet; any bytes
           left in in_size after AACDecode are not revisited — this assumes
           each input packet carries exactly one AAC frame. Confirm. */
        in_buffer  = BLT_MediaPacket_GetPayloadBuffer(packet);
        in_size    = BLT_MediaPacket_GetPayloadSize(packet);
        out_buffer = (short*)BLT_MediaPacket_GetPayloadBuffer(out_packet);
        result = AACDecode(self->helix_decoder, &in_buffer, &in_size, out_buffer);
        if (result != 0) {
            /* release the output packet on decode failure to avoid a leak */
            BLT_MediaPacket_Release(out_packet);
            return BLT_ERROR_INVALID_MEDIA_FORMAT;
        }

        /* check that the sample buffer matches our current media type */
        AACGetLastFrameInfo(self->helix_decoder, &aac_frame_info);
        if (self->output.media_type.channel_count == 0) {
            /* first time, setup our media type from the decoded frame */
            self->output.media_type.channel_count   = aac_frame_info.nChans;
            self->output.media_type.sample_rate     = aac_frame_info.sampRateOut;
            self->output.media_type.bits_per_sample = 16;
            self->output.media_type.sample_format   = BLT_PCM_SAMPLE_FORMAT_SIGNED_INT_NE;
            self->output.media_type.channel_mask    = 0;

            /* update the stream info */
            if (ATX_BASE(self, BLT_BaseMediaNode).context) {
                BLT_StreamInfo stream_info;
                stream_info.data_type     = "MPEG-4 AAC";
                stream_info.sample_rate   = aac_frame_info.sampRateOut;
                stream_info.channel_count = aac_frame_info.nChans;
                stream_info.mask = BLT_STREAM_INFO_MASK_DATA_TYPE   |
                                   BLT_STREAM_INFO_MASK_SAMPLE_RATE |
                                   BLT_STREAM_INFO_MASK_CHANNEL_COUNT;
                BLT_Stream_SetInfo(ATX_BASE(self, BLT_BaseMediaNode).context, &stream_info);
            }

            /* update the packet media type (it was created with the
               not-yet-filled-in media type) */
            BLT_MediaPacket_SetMediaType(out_packet, (BLT_MediaType*)&self->output.media_type);
        } else {
            /* we've already setup a media type, check that this is the same */
            if (self->output.media_type.sample_rate   != (unsigned int)aac_frame_info.sampRateOut ||
                self->output.media_type.channel_count != aac_frame_info.nChans) {
                BLT_MediaPacket_Release(out_packet);
                return BLT_ERROR_INVALID_MEDIA_FORMAT;
            }
        }

        /* add to the output packet list (outputSamps is a sample count,
           2 bytes per 16-bit sample) */
        BLT_MediaPacket_SetPayloadSize(out_packet, aac_frame_info.outputSamps*2);
        ATX_List_AddData(self->output.packets, out_packet);
    }

    return BLT_SUCCESS;
}
/*----------------------------------------------------------------------
|    OsxAudioUnitsOutput_RenderCallback
|
|    AudioUnit render callback: fill the requested output buffer from
|    the queued media packets, padding any shortfall with silence.
|    Also maintains the media-time snapshot (packet timestamp, host
|    time, and rendered duration) used for media-time reporting.
|    Runs on the CoreAudio render thread; the packet queue and snapshot
|    are protected by self->lock.
+---------------------------------------------------------------------*/
static OSStatus
OsxAudioUnitsOutput_RenderCallback(void*                       inRefCon,
                                   AudioUnitRenderActionFlags* ioActionFlags,
                                   const AudioTimeStamp*       inTimeStamp,
                                   UInt32                      inBusNumber,
                                   UInt32                      inNumberFrames,
                                   AudioBufferList*            ioData)
{
    OsxAudioUnitsOutput* self = (OsxAudioUnitsOutput*)inRefCon;
    ATX_ListItem*        item;
    unsigned int         requested;
    unsigned char*       out;
    ATX_Boolean          timestamp_measured = ATX_FALSE;

    BLT_COMPILER_UNUSED(ioActionFlags);
    BLT_COMPILER_UNUSED(inTimeStamp);
    BLT_COMPILER_UNUSED(inBusNumber);
    BLT_COMPILER_UNUSED(inNumberFrames);

    /* sanity check on the parameters */
    if (ioData == NULL || ioData->mNumberBuffers == 0) return 0;

    /* in case we have a strange request with more than one buffer, just return silence */
    if (ioData->mNumberBuffers != 1) {
        unsigned int i;
        ATX_LOG_FINEST_1("strange request with %d buffers", (int)ioData->mNumberBuffers);
        for (i=0; i<ioData->mNumberBuffers; i++) {
            ATX_SetMemory(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
        }
        return 0;
    }

    /* init local variables: bytes still to produce and the write cursor */
    requested = ioData->mBuffers[0].mDataByteSize;
    out = (unsigned char*)ioData->mBuffers[0].mData;
    ATX_LOG_FINEST_2("request for %d bytes, %d frames", (int)requested, (int)inNumberFrames);

    /* lock the packet queue */
    pthread_mutex_lock(&self->lock);

    /* return now if we're paused */
    //if (self->paused) goto end;

    /* abort early if we have no packets */
    if (ATX_List_GetItemCount(self->packet_queue) == 0) goto end;

    /* fill as much as we can, consuming packets from the front of the queue */
    while (requested && (item = ATX_List_GetFirstItem(self->packet_queue))) {
        BLT_MediaPacket*        packet = ATX_ListItem_GetData(item);
        const BLT_PcmMediaType* media_type;
        BLT_Size                payload_size;
        BLT_Size                chunk_size;
        BLT_TimeStamp           chunk_duration;
        BLT_TimeStamp           packet_ts;
        unsigned int            bytes_per_frame;
        unsigned int            sample_rate;

        /* get the packet info */
        BLT_MediaPacket_GetMediaType(packet, (const BLT_MediaType**)&media_type);
        packet_ts = BLT_MediaPacket_GetTimeStamp(packet);
        bytes_per_frame = media_type->channel_count*media_type->bits_per_sample/8;
        sample_rate = media_type->sample_rate;

        /* record the timestamp if we have not already done so
           (only once per render: the snapshot anchors the first packet's
           timestamp to the current host time) */
        if (!timestamp_measured) {
            self->media_time_snapshot.rendered_packet_ts = packet_ts;
            self->media_time_snapshot.rendered_host_time =
                AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
            BLT_TimeStamp_Set(self->media_time_snapshot.rendered_duration, 0, 0);
            timestamp_measured = ATX_TRUE;

            ATX_LOG_FINEST_2("rendered TS: packet ts=%lld, host ts=%lld",
                             BLT_TimeStamp_ToNanos(packet_ts),
                             self->media_time_snapshot.rendered_host_time);
        }

        /* compute how much to copy from this packet */
        payload_size = BLT_MediaPacket_GetPayloadSize(packet);
        if (payload_size <= requested) {
            /* copy the entire payload and remove the packet from the queue */
            chunk_size = payload_size;
            ATX_CopyMemory(out, BLT_MediaPacket_GetPayloadBuffer(packet), chunk_size);
            ATX_List_RemoveItem(self->packet_queue, item);
            packet = NULL;      /* mark consumed so we don't touch it below */
            media_type = NULL;
            ATX_LOG_FINER_1("media packet fully consumed, %d left in queue",
                            ATX_List_GetItemCount(self->packet_queue));
        } else {
            /* only copy a portion of the payload */
            chunk_size = requested;
            ATX_CopyMemory(out, BLT_MediaPacket_GetPayloadBuffer(packet), chunk_size);
        }

        /* update the counters */
        requested -= chunk_size;
        out += chunk_size;

        /* update the media time snapshot with this chunk's duration */
        if (bytes_per_frame) {
            unsigned int frames_in_chunk = chunk_size/bytes_per_frame;
            chunk_duration = BLT_TimeStamp_FromSamples(frames_in_chunk, sample_rate);
        } else {
            BLT_TimeStamp_Set(chunk_duration, 0, 0);
        }
        self->media_time_snapshot.rendered_duration =
            BLT_TimeStamp_Add(self->media_time_snapshot.rendered_duration, chunk_duration);

        /* update the packet unless we're done with it */
        if (packet) {
            /* advance the packet's offset and timestamp past the consumed chunk */
            BLT_MediaPacket_SetPayloadOffset(packet,
                                             BLT_MediaPacket_GetPayloadOffset(packet)+chunk_size);
            BLT_MediaPacket_SetTimeStamp(packet,
                                         BLT_TimeStamp_Add(packet_ts, chunk_duration));
        }
    }

end:
    /* fill whatever is left with silence */
    if (requested) {
        ATX_LOG_FINEST_1("filling with %d bytes of silence", requested);
        ATX_SetMemory(out, 0, requested);
    }

    pthread_mutex_unlock(&self->lock);

    return 0;
}