/*
 * Negotiate a sample format the rest of the pipeline can handle.
 *
 * Reads the file's native sample width; if it does not map to a valid
 * sample_format, falls back to 16 bit.  The chosen width is installed
 * as the virtual (conversion) format, then read back so the returned
 * enum reflects what libaudiofile will actually deliver.
 */
static enum sample_format
audiofile_setup_sample_format(AFfilehandle af_fp)
{
	int format, width;

	afGetSampleFormat(af_fp, AF_DEFAULT_TRACK, &format, &width);
	if (!audio_valid_sample_format(audiofile_bits_to_sample_format(width))) {
		g_debug("input file has %d bit samples, converting to 16",
			width);
		width = 16;
	}

	afSetVirtualSampleFormat(af_fp, AF_DEFAULT_TRACK,
				 AF_SAMPFMT_TWOSCOMP, width);
	/* re-query: libaudiofile may have adjusted the virtual format */
	afGetVirtualSampleFormat(af_fp, AF_DEFAULT_TRACK, &format, &width);
	return audiofile_bits_to_sample_format(width);
}
/*
 * Fill a CoreAudio AudioStreamBasicDescription from the virtual
 * (post-conversion) format of the given libaudiofile track.
 *
 * file:  open libaudiofile handle
 * track: track id (normally AF_DEFAULT_TRACK)
 * asbd:  out parameter; all format fields are written
 */
void getASBDForFile (AFfilehandle file, int track,
	AudioStreamBasicDescription *asbd)
{
	int sampleFormat, sampleWidth, channelCount;
	double rate;

	afGetVirtualSampleFormat(file, track, &sampleFormat, &sampleWidth);
	/* NOTE(review): uses the file's channel count, not the virtual
	   one — confirm no virtual channel mapping is in effect */
	channelCount = afGetChannels(file, track);
	rate = afGetRate(file, track);

	asbd->mSampleRate = rate;
	asbd->mFormatID = kAudioFormatLinearPCM;

	switch (sampleFormat)
	{
		case AF_SAMPFMT_TWOSCOMP:
			asbd->mFormatFlags = kAudioFormatFlagIsSignedInteger;
			asbd->mBitsPerChannel = sampleWidth;
			break;
		case AF_SAMPFMT_UNSIGNED:
			asbd->mFormatFlags = 0;
			asbd->mBitsPerChannel = sampleWidth;
			break;
		case AF_SAMPFMT_FLOAT:
			asbd->mFormatFlags = kAudioFormatFlagIsFloat;
			asbd->mBitsPerChannel = 32;
			break;
		case AF_SAMPFMT_DOUBLE:
			asbd->mFormatFlags = kAudioFormatFlagIsFloat;
			asbd->mBitsPerChannel = 64;
			break;
		default:
			/* BUG FIX: the original switch had no default,
			   leaving mFormatFlags/mBitsPerChannel
			   uninitialized for an unknown sample format */
			asbd->mFormatFlags = 0;
			asbd->mBitsPerChannel = 0;
			break;
	}

	asbd->mChannelsPerFrame = channelCount;
	asbd->mFramesPerPacket = 1;
	/* frame size can be fractional for packed formats; round up */
	asbd->mBytesPerFrame = ceilf(afGetVirtualFrameSize(file, track, 1));
	asbd->mBytesPerPacket = asbd->mBytesPerFrame;

	if (afGetVirtualByteOrder(file, track) == AF_BYTEORDER_BIGENDIAN)
		asbd->mFormatFlags |= kAudioFormatFlagIsBigEndian;
}
main (int argc, char **argv) { AFfilehandle file; AFframecount count, frameCount; int channelCount, sampleFormat, sampleWidth; float frameSize; void *buffer; double sampleRate; ALport outport; ALconfig outportconfig; if (argc < 2) usage(); file = afOpenFile(argv[1], "r", NULL); if (file == AF_NULL_FILEHANDLE) { fprintf(stderr, "Could not open file %s.\n", argv[1]); exit(EXIT_FAILURE); } frameCount = afGetFrameCount(file, AF_DEFAULT_TRACK); frameSize = afGetVirtualFrameSize(file, AF_DEFAULT_TRACK, 1); channelCount = afGetVirtualChannels(file, AF_DEFAULT_TRACK); sampleRate = afGetRate(file, AF_DEFAULT_TRACK); afGetVirtualSampleFormat(file, AF_DEFAULT_TRACK, &sampleFormat, &sampleWidth); if (sampleFormat == AF_SAMPFMT_UNSIGNED) { afSetVirtualSampleFormat(file, AF_DEFAULT_TRACK, AF_SAMPFMT_TWOSCOMP, sampleWidth); } printf("frame count: %lld\n", frameCount); printf("frame size: %d bytes\n", (int) frameSize); printf("channel count: %d\n", channelCount); printf("sample rate: %.2f Hz\n", sampleRate); buffer = malloc(BUFFERED_FRAME_COUNT * frameSize); outportconfig = alNewConfig(); setwidth(outportconfig, sampleWidth); setsampleformat(outportconfig, sampleFormat); alSetChannels(outportconfig, channelCount); count = afReadFrames(file, AF_DEFAULT_TRACK, buffer, BUFFERED_FRAME_COUNT); outport = alOpenPort("irixread", "w", outportconfig); setrate(outport, sampleRate); do { printf("count = %lld\n", count); alWriteFrames(outport, buffer, count); count = afReadFrames(file, AF_DEFAULT_TRACK, buffer, BUFFERED_FRAME_COUNT); } while (count > 0); waitport(outport); alClosePort(outport); alFreeConfig(outportconfig); afCloseFile(file); }
//*************************************************************************** bool Kwave::AudiofileDecoder::open(QWidget *widget, QIODevice &src) { metaData().clear(); Q_ASSERT(!m_source); if (m_source) qWarning("AudiofileDecoder::open(), already open !"); // try to open the source if (!src.open(QIODevice::ReadOnly)) { qWarning("AudiofileDecoder::open(), failed to open source !"); return false; } // source successfully opened m_source = &src; m_src_adapter = new Kwave::VirtualAudioFile(*m_source); Q_ASSERT(m_src_adapter); if (!m_src_adapter) return false; m_src_adapter->open(m_src_adapter, 0); AFfilehandle fh = m_src_adapter->handle(); if (!fh || (m_src_adapter->lastError() >= 0)) { QString reason; switch (m_src_adapter->lastError()) { case AF_BAD_NOT_IMPLEMENTED: reason = i18n("Format or function is not implemented"); break; case AF_BAD_MALLOC: reason = i18n("Out of memory"); break; case AF_BAD_HEADER: reason = i18n("File header is damaged"); break; case AF_BAD_CODEC_TYPE: reason = i18n("Invalid codec type"); break; case AF_BAD_OPEN: reason = i18n("Opening the file failed"); break; case AF_BAD_READ: reason = i18n("Read access failed"); break; case AF_BAD_SAMPFMT: reason = i18n("Invalid sample format"); break; default: reason = reason.number(m_src_adapter->lastError()); } QString text= i18n("An error occurred while opening the "\ "file:\n'%1'", reason); Kwave::MessageBox::error(widget, text); return false; } AFframecount length = afGetFrameCount(fh, AF_DEFAULT_TRACK); unsigned int tracks = qMax(afGetVirtualChannels(fh, AF_DEFAULT_TRACK), 0); unsigned int bits = 0; double rate = 0.0; int af_sample_format; afGetVirtualSampleFormat(fh, AF_DEFAULT_TRACK, &af_sample_format, reinterpret_cast<int *>(&bits)); Kwave::SampleFormat::Format fmt; switch (af_sample_format) { case AF_SAMPFMT_TWOSCOMP: fmt = Kwave::SampleFormat::Signed; break; case AF_SAMPFMT_UNSIGNED: fmt = Kwave::SampleFormat::Unsigned; break; case AF_SAMPFMT_FLOAT: fmt = Kwave::SampleFormat::Float; break; case 
AF_SAMPFMT_DOUBLE: fmt = Kwave::SampleFormat::Double; break; default: fmt = Kwave::SampleFormat::Unknown; break; } // get sample rate, with fallback to 8kHz rate = afGetRate(fh, AF_DEFAULT_TRACK); if (rate < 1.0) { qWarning("\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"\ "WARNING: file has no sample rate!\n"\ " => using 8000 samples/sec as fallback\n"\ "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"); rate = 8000.0; } Kwave::SampleFormat::Map sf; QString sample_format_name = sf.description(Kwave::SampleFormat(fmt), true); if (static_cast<signed int>(bits) < 0) bits = 0; int af_compression = afGetCompression(fh, AF_DEFAULT_TRACK); const Kwave::Compression compression( Kwave::Compression::fromAudiofile(af_compression) ); Kwave::FileInfo info(metaData()); info.setRate(rate); info.setBits(bits); info.setTracks(tracks); info.setLength(length); info.set(INF_SAMPLE_FORMAT, Kwave::SampleFormat(fmt).toInt()); info.set(Kwave::INF_COMPRESSION, compression.toInt()); metaData().replace(Kwave::MetaDataList(info)); qDebug("-------------------------"); qDebug("info:"); qDebug("compression = %d", af_compression); qDebug("channels = %d", info.tracks()); qDebug("rate = %0.0f", info.rate()); qDebug("bits/sample = %d", info.bits()); qDebug("length = %lu samples", static_cast<unsigned long int>(info.length())); qDebug("format = %d (%s)", af_sample_format, DBG(sample_format_name)); qDebug("-------------------------"); // set up libaudiofile to produce Kwave's internal sample format #if Q_BYTE_ORDER == Q_BIG_ENDIAN afSetVirtualByteOrder(fh, AF_DEFAULT_TRACK, AF_BYTEORDER_BIGENDIAN); #else afSetVirtualByteOrder(fh, AF_DEFAULT_TRACK, AF_BYTEORDER_LITTLEENDIAN); #endif afSetVirtualSampleFormat(fh, AF_DEFAULT_TRACK, AF_SAMPFMT_TWOSCOMP, SAMPLE_STORAGE_BITS); return true; }
static void audiofile_stream_decode(struct decoder *decoder, struct input_stream *is) { AFvirtualfile *vf; int fs, frame_count; AFfilehandle af_fp; int bits; struct audio_format audio_format; float total_time; uint16_t bit_rate; int ret, current = 0; char chunk[CHUNK_SIZE]; enum decoder_command cmd; if (!is->seekable) { g_warning("not seekable"); return; } vf = setup_virtual_fops(is); af_fp = afOpenVirtualFile(vf, "r", NULL); if (af_fp == AF_NULL_FILEHANDLE) { g_warning("failed to input stream\n"); return; } afGetSampleFormat(af_fp, AF_DEFAULT_TRACK, &fs, &bits); if (!audio_valid_sample_format(bits)) { g_debug("input file has %d bit samples, converting to 16", bits); bits = 16; } afSetVirtualSampleFormat(af_fp, AF_DEFAULT_TRACK, AF_SAMPFMT_TWOSCOMP, bits); afGetVirtualSampleFormat(af_fp, AF_DEFAULT_TRACK, &fs, &bits); audio_format.bits = (uint8_t)bits; audio_format.sample_rate = (unsigned int)afGetRate(af_fp, AF_DEFAULT_TRACK); audio_format.channels = (uint8_t)afGetVirtualChannels(af_fp, AF_DEFAULT_TRACK); if (!audio_format_valid(&audio_format)) { g_warning("Invalid audio format: %u:%u:%u\n", audio_format.sample_rate, audio_format.bits, audio_format.channels); afCloseFile(af_fp); return; } frame_count = afGetFrameCount(af_fp, AF_DEFAULT_TRACK); total_time = ((float)frame_count / (float)audio_format.sample_rate); bit_rate = (uint16_t)(is->size * 8.0 / total_time / 1000.0 + 0.5); fs = (int)afGetVirtualFrameSize(af_fp, AF_DEFAULT_TRACK, 1); decoder_initialized(decoder, &audio_format, true, total_time); do { ret = afReadFrames(af_fp, AF_DEFAULT_TRACK, chunk, CHUNK_SIZE / fs); if (ret <= 0) break; current += ret; cmd = decoder_data(decoder, NULL, chunk, ret * fs, (float)current / (float)audio_format.sample_rate, bit_rate, NULL); if (cmd == DECODE_COMMAND_SEEK) { current = decoder_seek_where(decoder) * audio_format.sample_rate; afSeekFrame(af_fp, AF_DEFAULT_TRACK, current); decoder_command_finished(decoder); cmd = DECODE_COMMAND_NONE; } } while (cmd == 
DECODE_COMMAND_NONE); afCloseFile(af_fp); }