// Report whether the wave file's currently-configured audio format can
// be played by this stream, by mapping the WAVE format tag plus its
// parameters onto the corresponding RDWaveFile format code and asking
// the parameterized overload.
bool RDHPIPlayStream::formatSupported()
{
  switch(getFormatTag()) {
  case WAVE_FORMAT_PCM:
    // Only 8- and 16-bit linear PCM are candidates.
    if(getBitsPerSample()==8) {
      return formatSupported(RDWaveFile::Pcm8);
    }
    if(getBitsPerSample()==16) {
      return formatSupported(RDWaveFile::Pcm16);
    }
    return false;

  case WAVE_FORMAT_MPEG:
    // MPEG-1 audio: map the layer number onto the format code.
    if(getHeadLayer()==1) {
      return formatSupported(RDWaveFile::MpegL1);
    }
    if(getHeadLayer()==2) {
      return formatSupported(RDWaveFile::MpegL2);
    }
    if(getHeadLayer()==3) {
      return formatSupported(RDWaveFile::MpegL3);
    }
    return false;

  default:
    // Any other format tag is unsupported.
    return false;
  }
}
// Convert one channel's sample at 'ubuf' from the file's on-disk
// little-endian representation to a normalized float.  The sample
// width is taken from getBitsPerSample(); an unsupported width yields
// 0.0f.  'ubuf' must point at at least getBitsPerSample()/8 readable
// bytes and need not be aligned.
float RIFFAudioFile::convertBytesToSample(const unsigned char *ubuf)
{
    switch (getBitsPerSample()) {

    case 8: {
        // WAV stores 8-bit samples unsigned, other sizes signed.
        return (float)(ubuf[0] - 128.0) / 128.0;
    }

    case 16: {
        // Two's complement little-endian 16-bit integer.
        // We convert endianness (if necessary) but assume 16-bit short.
        unsigned char b2 = ubuf[0];
        unsigned char b1 = ubuf[1];
        unsigned int bits = (b1 << 8) + b2;
        return (float)(short(bits)) / 32768.0;
    }

    case 24: {
        // Two's complement little-endian 24-bit integer.
        // Again, convert endianness but assume 32-bit int.
        unsigned char b3 = ubuf[0];
        unsigned char b2 = ubuf[1];
        unsigned char b1 = ubuf[2];
        // Rotate 8 bits too far in order to get the sign bit
        // in the right place; this gives us a 32-bit value,
        // hence the larger float divisor
        unsigned int bits = (b1 << 24) + (b2 << 16) + (b3 << 8);
        return (float)(int(bits)) / 2147483648.0;
    }

    case 32: {
        // IEEE floating point.
        // FIXED: the previous "*(float *)ubuf" violated strict aliasing
        // and assumed ubuf was suitably aligned for float (undefined
        // behavior, and a real fault on strict-alignment targets).
        // Copying the bytes through an unsigned char pointer is
        // well-defined and yields the identical value.
        float sample;
        unsigned char *dst = reinterpret_cast<unsigned char *>(&sample);
        for (unsigned k = 0; k < sizeof(sample); ++k) {
            dst[k] = ubuf[k];
        }
        return sample;
    }

    default:
        return 0.0f;
    }
}
/*------------------------------------------------------------------------------ * Initialize the object *----------------------------------------------------------------------------*/ void JackDspSource :: init ( const char* name ) throw ( Exception ) { // Set defaults ports[0] = NULL; // Left Port ports[1] = NULL; // Right Port rb[0] = NULL; // Left Ring Buffer rb[1] = NULL; // Right Ring Buffer client = NULL; auto_connect = false; // Default is to not auto connect the JACK ports tmp_buffer = NULL; // Buffer big enough for one 'read' of audio // Auto connect the ports ? if ( Util::strEq( name, "jack_auto", 9) ) { auto_connect = true; } // Check the sample size if (getBitsPerSample() != 16) { throw Exception( __FILE__, __LINE__, "JackDspSource doesn't support non 16-bit samples"); } }
void PlayStream::pause() { #ifdef RPLAYSTREAM_SHOW_SLOTS printf("pause() -- Card: %d Stream: %d\n",card_number,stream_number); #endif // RPLAYSTREAM_SHOW_SLOTS uint16_t state; uint32_t buffer_size; uint32_t data_to_play; uint32_t reserved; if(!is_open) { return; } if(playing) { HPICall(HPI_OutStreamStop(NULL,hpi_stream)); clock->stop(); HPICall(HPI_OutStreamGetInfoEx(NULL,hpi_stream,&state,&buffer_size, &data_to_play,&samples_played,&reserved)); switch(getFormatTag()) { case WAVE_FORMAT_PCM: samples_pending=data_to_play/(getChannels()*getBitsPerSample()/8); break; case WAVE_FORMAT_MPEG: samples_pending= 1152*data_to_play/(144*getHeadBitRate()/getSamplesPerSec()); break; } playing=false; is_paused=true; stream_state=PlayStream::Paused; if(!restart_transport) { emit paused(); emit stateChanged(card_number,stream_number,(int)stream_state); } } }
bool PlayStream::play() { #ifdef RPLAYSTREAM_SHOW_SLOTS printf("play() -- Card: %d Stream: %d\n",card_number,stream_number); #endif // RPLAYSTREAM_SHOW_SLOTS if(!is_open) { printf("FAIL1\n"); return false; } if((!playing)&&(!is_paused)) { HPICall(HPI_OutStreamSetTimeScale(NULL,hpi_stream, (uint16_t)((1000.0/(double)play_speed)* HPI_OSTREAM_TIMESCALE_UNITS))); if(!HPICall(HPI_OutStreamGetInfoEx(NULL,hpi_stream, &state,&buffer_size,&data_to_play, &samples_played,&reserved))) { printf("FAIL3\n"); return false; } fragment_size=buffer_size/4; if(fragment_size>MAX_FRAGMENT_SIZE) { fragment_size=MAX_FRAGMENT_SIZE; } if(pdata!=NULL) { delete pdata; } pdata=(uint8_t *)malloc(fragment_size); if(pdata==NULL) { printf("FAIL4\n"); return false; } switch(getFormatTag()) { case WAVE_FORMAT_PCM: case WAVE_FORMAT_VORBIS: switch(getBitsPerSample()) { case 8: HPICall(HPI_FormatCreate(&format,getChannels(), HPI_FORMAT_PCM8_UNSIGNED, getSamplesPerSec(),0,0)); break; case 16: HPICall(HPI_FormatCreate(&format,getChannels(), HPI_FORMAT_PCM16_SIGNED, getSamplesPerSec(),0,0)); break; case 32: HPICall(HPI_FormatCreate(&format,getChannels(), HPI_FORMAT_PCM32_SIGNED, getSamplesPerSec(),0,0)); break; default: HPICall(HPI_AdapterClose(NULL,card_number)); return false; break; } break; case WAVE_FORMAT_MPEG: switch(getHeadLayer()) { case 1: HPICall(HPI_FormatCreate(&format,getChannels(), HPI_FORMAT_MPEG_L1,getSamplesPerSec(), getHeadBitRate(),getHeadFlags())); break; case 2: HPICall(HPI_FormatCreate(&format,getChannels(), HPI_FORMAT_MPEG_L2,getSamplesPerSec(), getHeadBitRate(),getHeadFlags())); break; case 3: HPICall(HPI_FormatCreate(&format,getChannels(), HPI_FORMAT_MPEG_L3,getSamplesPerSec(), getHeadBitRate(),getHeadFlags())); break; default: HPI_AdapterClose(NULL,card_number); return false; } break; default: return false; } #if HPI_VER < 0x00030500 if(HPI_DataCreate(&hpi_data,&format,pdata,fragment_size)!=0) { return false; } #endif } if(!is_paused) { memset(pdata,0,fragment_size); 
left_to_write=getDataLength()-seekWave(0,SEEK_CUR); if(left_to_write<fragment_size) { read_bytes = left_to_write; left_to_write=0; stopping=true; } else { read_bytes=fragment_size; left_to_write-=fragment_size; } readWave(pdata,read_bytes); #if HPI_VER > 0x00030500 HPICall(HPI_OutStreamWriteBuf(NULL,hpi_stream,pdata,read_bytes,&format)); #else HPICall(HPI_DataCreate(&hpi_data,&format,pdata,read_bytes)); HPICall(HPI_OutStreamWrite(NULL,hpi_stream,&hpi_data)); #endif if(HPI_OutStreamStart(NULL,hpi_stream)!=0) { printf("FAIL11\n"); return false; } clock->start(50); clock->start(FRAGMENT_TIME); playing=true; is_paused=false; stopping=false; if(play_length>0) { play_timer->start(play_length,true); start_time=QTime::currentTime(); } stream_state=PlayStream::Playing; if(!restart_transport) { emit isStopped(false); emit played(); emit stateChanged(card_number,stream_number,(int)stream_state); } } if((!playing)&(is_paused|repositioned)) { HPICall(HPI_OutStreamStart(NULL,hpi_stream)); clock->start(FRAGMENT_TIME); playing=true; stopping=false; is_paused=false; stream_state=PlayStream::Playing; if(!restart_transport) { emit isStopped(false); emit played(); emit stateChanged(card_number,stream_number,(int)stream_state); } } return true; }
/*------------------------------------------------------------------------------
 *  Open the audio source.
 *
 *  Opens the ALSA capture PCM named by pcmName and configures its
 *  hardware parameters from this object's settings: interleaved access,
 *  sample format (signed 8- or 16-bit only), sample rate, channel
 *  count, period count (4) and buffer time.  The hw-params calls must
 *  happen in this order on a single params structure, which is freed on
 *  every exit path.
 *
 *  Returns false if already open, the device can't be opened, or the
 *  bit depth is unsupported; throws Exception on any parameter-setting
 *  failure (after freeing the params structure and closing the handle).
 *----------------------------------------------------------------------------*/
bool
AlsaDspSource :: open ( void )                          throw ( Exception )
{
    unsigned int        u;
    snd_pcm_format_t    format;
    snd_pcm_hw_params_t *hwParams;

    // Already open: simply refuse, no error.
    if ( isOpen() ) {
        return false;
    }

    // Only signed 8- and 16-bit capture formats are supported.
    switch ( getBitsPerSample() ) {
        case 8:
            format = SND_PCM_FORMAT_S8;
            break;

        case 16:
            format = SND_PCM_FORMAT_S16;
            break;

        default:
            return false;
    }

    // Open the named PCM device for capture (blocking mode).
    if (snd_pcm_open(&captureHandle, pcmName, SND_PCM_STREAM_CAPTURE, 0) < 0) {
        captureHandle = 0;
        return false;
    }

    if (snd_pcm_hw_params_malloc(&hwParams) < 0) {
        close();
        throw Exception( __FILE__, __LINE__, "can't alloc hardware "\
                                             "parameter structure");
    }

    // Start from the device's full configuration space.
    if (snd_pcm_hw_params_any(captureHandle, hwParams) < 0) {
        snd_pcm_hw_params_free(hwParams);
        close();
        throw Exception( __FILE__, __LINE__, "can't initialize hardware "\
                                             "parameter structure");
    }

    // Interleaved read/write transfers.
    if (snd_pcm_hw_params_set_access(captureHandle, hwParams,
                                     SND_PCM_ACCESS_RW_INTERLEAVED) < 0) {
        snd_pcm_hw_params_free(hwParams);
        close();
        throw Exception( __FILE__, __LINE__, "can't set access type");
    }

    if (snd_pcm_hw_params_set_format(captureHandle, hwParams, format) < 0) {
        snd_pcm_hw_params_free(hwParams);
        close();
        throw Exception( __FILE__, __LINE__, "can't set sample format");
    }

    // Request the configured rate; "_near" may adjust u to what the
    // hardware actually supports.
    u = getSampleRate();
    if (snd_pcm_hw_params_set_rate_near(captureHandle, hwParams, &u, 0) < 0) {
        snd_pcm_hw_params_free(hwParams);
        close();
        throw Exception( __FILE__, __LINE__, "can't set sample rate", u);
    }

    u = getChannel();
    if (snd_pcm_hw_params_set_channels(captureHandle, hwParams, u) < 0) {
        snd_pcm_hw_params_free(hwParams);
        close();
        throw Exception( __FILE__, __LINE__, "can't set channels", u);
    }

    // Four periods per buffer, i.e. an interrupt every quarter buffer.
    u = 4;
    if (snd_pcm_hw_params_set_periods_near(captureHandle, hwParams, &u, 0)
                                                                        < 0) {
        snd_pcm_hw_params_free(hwParams);
        close();
        throw Exception( __FILE__, __LINE__, "can't set interrupt frequency");
    }

    u = getBufferTime();
    if (snd_pcm_hw_params_set_buffer_time_near(captureHandle, hwParams, &u, 0)
                                                                        < 0) {
        snd_pcm_hw_params_free(hwParams);
        close();
        throw Exception( __FILE__, __LINE__, "can't set buffer size");
    }

    // Commit all accumulated parameters to the device.
    if (snd_pcm_hw_params(captureHandle, hwParams) < 0) {
        snd_pcm_hw_params_free(hwParams);
        close();
        throw Exception( __FILE__, __LINE__, "can't set hardware parameters");
    }

    snd_pcm_hw_params_free(hwParams);

    if (snd_pcm_prepare(captureHandle) < 0) {
        close();
        throw Exception( __FILE__, __LINE__, "can't prepare audio interface "\
                                             "for use");
    }

    // Bytes per sampling frame (all channels together).
    bytesPerFrame = getChannel() * getBitsPerSample() / 8;

    return true;
}
// Create and initialize the libFLAC stream decoder, parse all metadata,
// validate the STREAMINFO fields (channel count, bit depth, sample
// rate) against what the rest of the pipeline supports, select the
// matching sample-copy routine, and publish track/file metadata.
// Returns OK on success, NO_INIT on any failure.
status_t FLACParser::init()
{
    // setup libFLAC parser
    mDecoder = FLAC__stream_decoder_new();
    if (mDecoder == NULL) {
        // The new should succeed, since probably all it does is a malloc
        // that always succeeds in Android.  But to avoid dependence on the
        // libFLAC internals, we check and log here.
        LOGE("new failed");
        return NO_INIT;
    }
    FLAC__stream_decoder_set_md5_checking(mDecoder, false);
    // Ignore all metadata blocks except the three kinds we consume.
    FLAC__stream_decoder_set_metadata_ignore_all(mDecoder);
    FLAC__stream_decoder_set_metadata_respond(
            mDecoder, FLAC__METADATA_TYPE_STREAMINFO);
    FLAC__stream_decoder_set_metadata_respond(
            mDecoder, FLAC__METADATA_TYPE_PICTURE);
    FLAC__stream_decoder_set_metadata_respond(
            mDecoder, FLAC__METADATA_TYPE_VORBIS_COMMENT);
    FLAC__StreamDecoderInitStatus initStatus;
    initStatus = FLAC__stream_decoder_init_stream(
            mDecoder,
            read_callback, seek_callback, tell_callback,
            length_callback, eof_callback, write_callback,
            metadata_callback, error_callback, (void *) this);
    if (initStatus != FLAC__STREAM_DECODER_INIT_STATUS_OK) {
        // A failure here probably indicates a programming error and so is
        // unlikely to happen. But we check and log here similarly to above.
        LOGE("init_stream failed %d", initStatus);
        return NO_INIT;
    }
    // parse all metadata
    if (!FLAC__stream_decoder_process_until_end_of_metadata(mDecoder)) {
        LOGE("end_of_metadata failed");
        return NO_INIT;
    }
    if (mStreamInfoValid) {
        // check channel count
        switch (getChannels()) {
        case 1:
        case 2:
            break;
        default:
            LOGE("unsupported channel count %u", getChannels());
            return NO_INIT;
        }
        // check bit depth
        switch (getBitsPerSample()) {
        case 8:
        case 16:
        case 24:
            break;
        default:
            LOGE("unsupported bits per sample %u", getBitsPerSample());
            return NO_INIT;
        }
        // check sample rate
        switch (getSampleRate()) {
        case 8000:
        case 11025:
        case 12000:
        case 16000:
        case 22050:
        case 24000:
        case 32000:
        case 44100:
        case 48000:
            break;
        default:
            // 96000 would require a proper downsampler in AudioFlinger
            LOGE("unsupported sample rate %u", getSampleRate());
            return NO_INIT;
        }
        // configure the appropriate copy function, defaulting to trespass
        // (i.e. mCopy keeps whatever default was set elsewhere if no
        // table entry matches -- the checks above make a match expected).
        static const struct {
            unsigned mChannels;
            unsigned mBitsPerSample;
            void (*mCopy)(short *dst, const int *const *src, unsigned nSamples);
        } table[] = {
            { 1,  8, copyMono8    },
            { 2,  8, copyStereo8  },
            { 1, 16, copyMono16   },
            { 2, 16, copyStereo16 },
            { 1, 24, copyMono24   },
            { 2, 24, copyStereo24 },
        };
        for (unsigned i = 0; i < sizeof(table)/sizeof(table[0]); ++i) {
            if (table[i].mChannels == getChannels() &&
                    table[i].mBitsPerSample == getBitsPerSample()) {
                mCopy = table[i].mCopy;
                break;
            }
        }
        // populate track metadata
        if (mTrackMetadata != 0) {
            // the decoded output is raw PCM, regardless of source coding
            mTrackMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
            mTrackMetadata->setInt32(kKeyChannelCount, getChannels());
            mTrackMetadata->setInt32(kKeySampleRate, getSampleRate());
            // sample rate is non-zero, so division by zero not possible
            mTrackMetadata->setInt64(kKeyDuration,
                    (getTotalSamples() * 1000000LL) / getSampleRate());
        }
    } else {
        LOGE("missing STREAMINFO");
        return NO_INIT;
    }
    if (mFileMetadata != 0) {
        mFileMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_FLAC);
    }
    return OK;
}
bool RDHPIRecordStream::recordReady() { HW16 hpi_error=0; char hpi_text[200]; if(debug) { printf("RDHPIRecordStream: received recordReady()\n"); } if(!is_open) { return false; } if((!is_recording)&&(!is_paused)) { resetWave(); if(HPI_InStreamGetInfoEx(hpi_subsys,hpi_stream, &state,&buffer_size,&data_recorded, &samples_recorded,&reserved)!=0) { if(debug) { printf("RDHPIRecordStream: HPI_InStreamGetInfoEx() failed\n"); } return false; } fragment_size=buffer_size/4; if(fragment_size>192000) { // ALSA Compatibility Limitation fragment_size=192000; } fragment_time=(1000*fragment_size)/(getAvgBytesPerSec()); if(pdata!=NULL) { delete pdata; } pdata=(HW8 *)malloc(fragment_size); if(pdata==NULL) { if(debug) { printf("RDHPIRecordStream: couldn't allocate buffer\n"); } return false; } switch(getFormatTag()) { case WAVE_FORMAT_PCM: if(debug) { printf("RDHPIRecordStream: using PCM%d format\n", getBitsPerSample()); } switch(getBitsPerSample()) { case 8: HPI_FormatCreate(&format,getChannels(), HPI_FORMAT_PCM8_UNSIGNED,getSamplesPerSec(), 0,0); break; case 16: HPI_FormatCreate(&format,getChannels(), HPI_FORMAT_PCM16_SIGNED,getSamplesPerSec(), 0,0); break; case 32: HPI_FormatCreate(&format,getChannels(), HPI_FORMAT_PCM32_SIGNED,getSamplesPerSec(), 0,0); break; default: if(debug) { printf("RDHPIRecordStream: unsupported sample size\n"); } return false; } break; case WAVE_FORMAT_MPEG: if(debug) { printf("RDHPIRecordStream: using MPEG-1 Layer %d\n",getHeadLayer()); } switch(getHeadLayer()) { case 1: HPI_FormatCreate(&format,getChannels(), HPI_FORMAT_MPEG_L1,getSamplesPerSec(), getHeadBitRate(),getHeadFlags()); break; case 2: HPI_FormatCreate(&format,getChannels(), HPI_FORMAT_MPEG_L2,getSamplesPerSec(), getHeadBitRate(),getHeadFlags()); break; case 3: HPI_FormatCreate(&format,getChannels(), HPI_FORMAT_MPEG_L3,getSamplesPerSec(), getHeadBitRate(),getHeadFlags()); break; default: HPI_AdapterClose(hpi_subsys,card_number); if(debug) { printf("RDHPIRecordStream: invalid MPEG-1 layer\n"); } 
return false; } if(getMextChunk()) { setMextHomogenous(true); setMextPaddingUsed(false); setMextHackedBitRate(true); setMextFreeFormat(false); setMextFrameSize(144*getHeadBitRate()/getSamplesPerSec()); setMextAncillaryLength(5); setMextLeftEnergyPresent(true); if(getChannels()>1) { setMextRightEnergyPresent(true); } else { setMextRightEnergyPresent(false); } setMextPrivateDataPresent(false); } break; case WAVE_FORMAT_VORBIS: if(debug) { printf("RDHPIRecordStream: using OggVorbis\n"); } HPI_FormatCreate(&format,getChannels(), HPI_FORMAT_PCM16_SIGNED,getSamplesPerSec(), 0,0); break; default: if(debug) { printf("RDHPIRecordStream: invalid format tag\n"); } return false; break; } if((hpi_error=HPI_InStreamQueryFormat(hpi_subsys,hpi_stream, &format))!=0) { if(debug) { HPI_GetErrorText(hpi_error,hpi_text); printf("Num: %d\n",hpi_error); printf("RDHPIRecordStream: %s\n",hpi_text); } return false; } } #if HPI_VER < 0x00030500 HPI_DataCreate(&hpi_data,&format,pdata,fragment_size); #endif HPI_InStreamSetFormat(hpi_subsys,hpi_stream,&format); HPI_InStreamStart(hpi_subsys,hpi_stream); // clock->start(2*fragment_time/3); clock->start(100); is_ready=true; is_recording=false; is_paused=false; stopping=false; emit isStopped(false); emit ready(); emit stateChanged(card_number,stream_number,1); // RecordReady if(debug) { printf("RDHPIRecordStream: emitted isStopped(false)\n"); printf("RDHPIRecordStream: emitted ready()\n"); printf("RDHPIRecordStream: emitted stateChanged(%d,%d,RDHPIRecordStream::RecordReady)\n",card_number,stream_number); } return true; }
bool WAVAudioFile::decode(const unsigned char *ubuf, size_t sourceBytes, size_t targetSampleRate, size_t targetChannels, size_t nframes, std::vector<float *> &target, bool adding) { size_t sourceChannels = getChannels(); size_t sourceSampleRate = getSampleRate(); size_t fileFrames = sourceBytes / getBytesPerFrame(); int bitsPerSample = getBitsPerSample(); if (bitsPerSample != 8 && bitsPerSample != 16 && bitsPerSample != 24 && bitsPerSample != 32) { // 32-bit is IEEE-float (enforced in RIFFAudioFile) RG_WARNING << "WAVAudioFile::decode: unsupported " << bitsPerSample << "-bit sample size"; return false; } #ifdef DEBUG_DECODE RG_DEBUG << "WAVAudioFile::decode: " << sourceBytes << " bytes -> " << nframes << " frames, SSR " << getSampleRate() << ", TSR " << targetSampleRate << ", sch " << getChannels() << ", tch " << targetChannels; #endif // If we're reading a stereo file onto a mono target, we mix the // two channels. If we're reading mono to stereo, we duplicate // the mono channel. Otherwise if the numbers of channels differ, // we just copy across the ones that do match and zero the rest. 
bool reduceToMono = (targetChannels == 1 && sourceChannels == 2); for (size_t ch = 0; ch < sourceChannels; ++ch) { if (!reduceToMono || ch == 0) { if (ch >= targetChannels) break; if (!adding) memset(target[ch], 0, nframes * sizeof(float)); } int tch = ch; // target channel for this data if (reduceToMono && ch == 1) { tch = 0; } float ratio = 1.0; if (sourceSampleRate != targetSampleRate) { ratio = float(sourceSampleRate) / float(targetSampleRate); } for (size_t i = 0; i < nframes; ++i) { size_t j = i; if (sourceSampleRate != targetSampleRate) { j = size_t(i * ratio); } if (j >= fileFrames) j = fileFrames - 1; float sample = convertBytesToSample (&ubuf[(bitsPerSample / 8) * (ch + j * sourceChannels)]); target[tch][i] += sample; } } // Now deal with any excess target channels for (size_t ch = sourceChannels; ch < targetChannels; ++ch) { if (ch == 1 && targetChannels == 2) { // copy mono to stereo if (!adding) { memcpy(target[ch], target[ch - 1], nframes * sizeof(float)); } else { for (size_t i = 0; i < nframes; ++i) { target[ch][i] += target[ch - 1][i]; } } } else { if (!adding) { memset(target[ch], 0, nframes * sizeof(float)); } } } return true; }