Example #1
SoundSample::SoundSample(std::string name, int rev)
{
    uMin = rev;
    samples = NULL;

#ifdef HAVE_AUDIOFILE

    lowPitchOffset = highPitchOffset = 1.0;
    lowVolumeOffset = highVolumeOffset = 1.0;
    lastDir = 0;
    lastValue = 0;
    file = afOpenFile(name.c_str(), "r", 0);
    if (file == AF_NULL_FILEHANDLE)
    {
        QMessageBox::critical(theWindow, "Error",
                              "SoundSample::afOpenFD() - failed!",
                              QMessageBox::Ok,
                              QMessageBox::Ok);
    }
    else
    {
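        // deliver 16-bit two's complement samples regardless of the file's native format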
        afSetVirtualSampleFormat(file, AF_DEFAULT_TRACK, AF_SAMPFMT_TWOSCOMP, 16);
        //afSetVirtualSampleFormat(file, AF_DEFAULT_TRACK, AF_SAMPFMT_FLOAT, 32);

        channels = afGetChannels(file, AF_DEFAULT_TRACK);
        samplesPerSec = (int)afGetRate(file, AF_DEFAULT_TRACK);
        numSamples = afGetFrameCount(file, AF_DEFAULT_TRACK);
        sampleBlocksize = (int)afGetVirtualFrameSize(file, AF_DEFAULT_TRACK, 1);
        bytesPerSample = (int)afGetVirtualFrameSize(file, AF_DEFAULT_TRACK, 1) / channels;
        if (channels != 2)
        {
            std::ostringstream buf;
            buf << "SoundSample:: not a stereo sound file: " << name;
            // use str() so the whole message is kept, not just the first whitespace-delimited token
            std::string s = buf.str();
            QMessageBox::critical(theWindow, "Error",
                                  s.c_str(),
                                  QMessageBox::Ok,
                                  QMessageBox::Ok);
        }
        if (samplesPerSec != 48000)
        {
            std::ostringstream buf;
            buf << "SoundSample:: 48 kHz required but " << name << " has " << samplesPerSec;
            std::string s = buf.str();
            QMessageBox::critical(theWindow, "Error",
                                  s.c_str(),
                                  QMessageBox::Ok,
                                  QMessageBox::Ok);
        }
Example #2
OSStatus fileRenderProc (void *inRefCon,
	AudioUnitRenderActionFlags *inActionFlags,
	const AudioTimeStamp *inTimeStamp,
	UInt32 inBusNumber,
	UInt32 inNumFrames,
	AudioBufferList *ioData)
{
	AFfilehandle	file = (AFfilehandle) inRefCon;
	AFframecount	framesToRead, framesRead;

	framesToRead = inNumFrames;
	if (framesToRead > BUFFER_FRAME_COUNT)
		framesToRead = BUFFER_FRAME_COUNT;

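	/* pull the next block of frames from the file into the shared playback buffer */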
	framesRead = afReadFrames(file, AF_DEFAULT_TRACK,
		buffer, framesToRead);
	if (framesRead > 0)
	{
		ioData->mBuffers[0].mData = buffer;
		ioData->mBuffers[0].mDataByteSize = framesRead *
			afGetVirtualFrameSize(file, AF_DEFAULT_TRACK, 1);
	}
	else
		isPlaying = 0;

	return noErr;
}
Example #3
/*
	Copy audio data from one file to another.  This function
	assumes that the virtual sample formats of the two files
	match.
*/
int copyaudiodata (AFfilehandle infile, AFfilehandle outfile, int trackid,
	AFframecount totalFrameCount)
{
	AFframecount totalFramesWritten = 0;
	void *buffer;
	int frameSize;
	bool ok = true, done = false;

	frameSize = afGetVirtualFrameSize(infile, trackid, 1);

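	/* transfer buffer: BUFFER_FRAME_COUNT frames at the virtual frame size */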
	buffer = malloc(BUFFER_FRAME_COUNT * frameSize);

	while (!done)
	{
		AFframecount	framesToRead = BUFFER_FRAME_COUNT;
		AFframecount	framesRead, framesWritten;

		framesRead = afReadFrames(infile, trackid, buffer,
			framesToRead);

		if (framesRead < 0)
		{
			fprintf(stderr, "Bad read of audio track data.\n");
			ok = false;
			done = true;
			continue;	/* don't write frames from a failed read */
		}

		framesWritten = afWriteFrames(outfile, trackid, buffer,
			framesRead);

		if (framesWritten < 0)
		{
			fprintf(stderr, "Bad write of audio track data.\n");
			ok = false;
			done = true;
		}
		else
		{
			totalFramesWritten += framesWritten;
		}

		if (totalFramesWritten == totalFrameCount)
			done = true;
	}

	free(buffer);

	return ok;
}
Example #4
static enum audiotap_status audiofile_read_init(struct audiotap **audiotap,
                                                const char *file,
                                                struct tapenc_params *params,
                                                uint8_t machine,
                                                uint8_t videotype,
                                                uint8_t *halfwaves){
  uint32_t freq;
  enum audiotap_status error = AUDIOTAP_LIBRARY_ERROR;
  AFfilehandle fh;

  if (status.audiofile_init_status != LIBRARY_OK
   || status.tapencoder_init_status != LIBRARY_OK)
    return AUDIOTAP_LIBRARY_UNAVAILABLE;
  fh=afOpenFile(file,"r", NULL);
  if (fh == AF_NULL_FILEHANDLE)
    return AUDIOTAP_LIBRARY_ERROR;
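  /* request mono, 32-bit two's complement samples: one virtual frame must then be 4 bytes */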
  do{
    if ( (freq=(uint32_t)afGetRate(fh, AF_DEFAULT_TRACK)) == -1)
      break;
    if (afSetVirtualChannels(fh, AF_DEFAULT_TRACK, 1) == -1)
      break;
    if (afSetVirtualSampleFormat(fh, AF_DEFAULT_TRACK, AF_SAMPFMT_TWOSCOMP, 32) == -1)
      break;
    if (afGetVirtualFrameSize(fh, AF_DEFAULT_TRACK, 0) != 4)
      break;
    error = AUDIOTAP_OK;
  }while(0);
  if(error != AUDIOTAP_OK){
    afCloseFile(fh);
    return error;
  }
  *halfwaves = 1;
  return audio2tap_audio_open_common(audiotap,
                                     freq,
                                     params,
                                     machine,
                                     videotype,
                                     &audiofile_read_functions,
                                     fh);
}
Example #5
void getASBDForFile (AFfilehandle file, int track,
	AudioStreamBasicDescription *asbd)
{
	int	sampleFormat, sampleWidth, channelCount;
	double	rate;

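	/* describe the track's sample format, channel count, and rate in a Core Audio ASBD */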
	afGetVirtualSampleFormat(file, track, &sampleFormat, &sampleWidth);
	channelCount = afGetChannels(file, track);
	rate = afGetRate(file, track);

	asbd->mSampleRate = rate;
	asbd->mFormatID = kAudioFormatLinearPCM;
	switch (sampleFormat)
	{
		case AF_SAMPFMT_TWOSCOMP:
			asbd->mFormatFlags = kAudioFormatFlagIsSignedInteger;
			asbd->mBitsPerChannel = sampleWidth;
			break;
		case AF_SAMPFMT_UNSIGNED:
			asbd->mFormatFlags = 0;
			asbd->mBitsPerChannel = sampleWidth;
			break;
		case AF_SAMPFMT_FLOAT:
			asbd->mFormatFlags = kAudioFormatFlagIsFloat;
			asbd->mBitsPerChannel = 32;
			break;
		case AF_SAMPFMT_DOUBLE:
			asbd->mFormatFlags = kAudioFormatFlagIsFloat;
			asbd->mBitsPerChannel = 64;
			break;
	}

	asbd->mChannelsPerFrame = channelCount;
	asbd->mFramesPerPacket = 1;
	asbd->mBytesPerFrame = ceilf(afGetVirtualFrameSize(file, track, 1));
	asbd->mBytesPerPacket = asbd->mBytesPerFrame;

	if (afGetVirtualByteOrder(file, track) == AF_BYTEORDER_BIGENDIAN)
		asbd->mFormatFlags |= kAudioFormatFlagIsBigEndian;
}
Example #6
int main (int argc, char **argv)
{
	AFfilehandle	file;
	AudioUnit	outputUnit;

	if (argc < 2)
	{
		fprintf(stderr, "usage: %s filename\n", argv[0]);
		exit(EXIT_FAILURE);
	}

	file = afOpenFile(argv[1], "r", AF_NULL_FILESETUP);
	if (file == AF_NULL_FILEHANDLE)
	{
		fprintf(stderr, "Could not open file '%s' for reading.\n", argv[1]);
		exit(EXIT_FAILURE);
	}

	openOutput(&outputUnit);
	setupOutput(&outputUnit, file);

	/* allocate the shared playback buffer before the render callback can run */
	buffer = malloc(BUFFER_FRAME_COUNT *
		afGetVirtualFrameSize(file, AF_DEFAULT_TRACK, 1));

	AudioOutputUnitStart(outputUnit);

	while (isPlaying)
		usleep(250000);

	AudioOutputUnitStop(outputUnit);
	AudioUnitUninitialize(outputUnit);
	CloseComponent(outputUnit);

	free(buffer);

	afCloseFile(file);
}
Example #7
int main (int argc, char **argv)
{
	AFfilehandle	file;
	AFframecount	count, frameCount;
	int		channelCount, sampleFormat, sampleWidth;
	float		frameSize;
	void		*buffer;
	double		sampleRate;

	ALport		outport;
	ALconfig	outportconfig;

	if (argc < 2)
		usage();

	file = afOpenFile(argv[1], "r", NULL);
	if (file == AF_NULL_FILEHANDLE)
	{
		fprintf(stderr, "Could not open file %s.\n", argv[1]);
		exit(EXIT_FAILURE);
	}

	frameCount = afGetFrameCount(file, AF_DEFAULT_TRACK);
	frameSize = afGetVirtualFrameSize(file, AF_DEFAULT_TRACK, 1);
	channelCount = afGetVirtualChannels(file, AF_DEFAULT_TRACK);
	sampleRate = afGetRate(file, AF_DEFAULT_TRACK);
	afGetVirtualSampleFormat(file, AF_DEFAULT_TRACK, &sampleFormat,
		&sampleWidth);

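	/* have libaudiofile convert unsigned sample data to two's complement for playback */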
	if (sampleFormat == AF_SAMPFMT_UNSIGNED)
	{
		afSetVirtualSampleFormat(file, AF_DEFAULT_TRACK,
			AF_SAMPFMT_TWOSCOMP, sampleWidth);
	}

	printf("frame count: %lld\n", frameCount);
	printf("frame size: %d bytes\n", (int) frameSize);
	printf("channel count: %d\n", channelCount);
	printf("sample rate: %.2f Hz\n", sampleRate);
	buffer = malloc(BUFFERED_FRAME_COUNT * frameSize);

	outportconfig = alNewConfig();
	setwidth(outportconfig, sampleWidth);
	setsampleformat(outportconfig, sampleFormat);
	alSetChannels(outportconfig, channelCount);

	count = afReadFrames(file, AF_DEFAULT_TRACK, buffer, BUFFERED_FRAME_COUNT);

	outport = alOpenPort("irixread", "w", outportconfig);
	setrate(outport, sampleRate);

	do
	{
		printf("count = %lld\n", count);
		alWriteFrames(outport, buffer, count);

		count = afReadFrames(file, AF_DEFAULT_TRACK, buffer,
			BUFFERED_FRAME_COUNT);
	} while (count > 0);

	waitport(outport);

	alClosePort(outport);
	alFreeConfig(outportconfig);

	afCloseFile(file);
}
Example #8
//***************************************************************************
bool Kwave::AudiofileDecoder::decode(QWidget */*widget*/,
                                     Kwave::MultiWriter &dst)
{
    Q_ASSERT(m_src_adapter);
    Q_ASSERT(m_source);
    if (!m_source) return false;
    if (!m_src_adapter) return false;

    AFfilehandle fh = m_src_adapter->handle();
    Q_ASSERT(fh);
    if (!fh) return false;

    unsigned int frame_size = Kwave::toUint(
	afGetVirtualFrameSize(fh, AF_DEFAULT_TRACK, 1));

    // allocate a buffer for input data
    const unsigned int buffer_frames = (8 * 1024);
    sample_storage_t *buffer =
	static_cast<sample_storage_t *>(malloc(buffer_frames * frame_size));
    Q_ASSERT(buffer);
    if (!buffer) return false;

    // read in from the audiofile source
    const unsigned int tracks = Kwave::FileInfo(metaData()).tracks();
    sample_index_t rest = Kwave::FileInfo(metaData()).length();
    while (rest) {
	unsigned int frames = buffer_frames;
	if (frames > rest) frames = Kwave::toUint(rest);
	int buffer_used = afReadFrames(fh,
	    AF_DEFAULT_TRACK, reinterpret_cast<char *>(buffer), frames);

	// break if eof reached
	if (buffer_used <= 0) break;
	rest -= buffer_used;

	// split into the tracks
	sample_storage_t *p = buffer;
	unsigned int count = buffer_used;
	while (count) {
	    for (unsigned int track = 0; track < tracks; track++) {
		sample_storage_t s = *p++;

		// adjust precision
		if (SAMPLE_STORAGE_BITS != SAMPLE_BITS) {
		    s /= (1 << (SAMPLE_STORAGE_BITS - SAMPLE_BITS));
		}

		// the following cast is only necessary if
		// sample_t is not equal to a quint32
		*(dst[track]) << static_cast<sample_t>(s);
	    }
	    --count;
	}

	// abort if the user pressed cancel
	if (dst.isCanceled()) break;
    }

    // return with a valid Signal, even if the user pressed cancel !
    if (buffer) free(buffer);
    return true;
}
Example #9
static void
audiofile_stream_decode(struct decoder *decoder, struct input_stream *is)
{
	AFvirtualfile *vf;
	int fs, frame_count;
	AFfilehandle af_fp;
	int bits;
	struct audio_format audio_format;
	float total_time;
	uint16_t bit_rate;
	int ret, current = 0;
	char chunk[CHUNK_SIZE];
	enum decoder_command cmd;

	if (!is->seekable) {
		g_warning("not seekable");
		return;
	}

	vf = setup_virtual_fops(is);

	af_fp = afOpenVirtualFile(vf, "r", NULL);
	if (af_fp == AF_NULL_FILEHANDLE) {
		g_warning("failed to input stream\n");
		return;
	}

	afGetSampleFormat(af_fp, AF_DEFAULT_TRACK, &fs, &bits);
	if (!audio_valid_sample_format(bits)) {
		g_debug("input file has %d bit samples, converting to 16",
			bits);
		bits = 16;
	}

	afSetVirtualSampleFormat(af_fp, AF_DEFAULT_TRACK,
	                         AF_SAMPFMT_TWOSCOMP, bits);
	afGetVirtualSampleFormat(af_fp, AF_DEFAULT_TRACK, &fs, &bits);
	audio_format.bits = (uint8_t)bits;
	audio_format.sample_rate =
	                      (unsigned int)afGetRate(af_fp, AF_DEFAULT_TRACK);
	audio_format.channels =
	              (uint8_t)afGetVirtualChannels(af_fp, AF_DEFAULT_TRACK);

	if (!audio_format_valid(&audio_format)) {
		g_warning("Invalid audio format: %u:%u:%u\n",
			  audio_format.sample_rate, audio_format.bits,
			  audio_format.channels);
		afCloseFile(af_fp);
		return;
	}

	frame_count = afGetFrameCount(af_fp, AF_DEFAULT_TRACK);

	total_time = ((float)frame_count / (float)audio_format.sample_rate);

	bit_rate = (uint16_t)(is->size * 8.0 / total_time / 1000.0 + 0.5);

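	/* bytes per virtual frame, used below to convert between frames and bytes */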
	fs = (int)afGetVirtualFrameSize(af_fp, AF_DEFAULT_TRACK, 1);

	decoder_initialized(decoder, &audio_format, true, total_time);

	do {
		ret = afReadFrames(af_fp, AF_DEFAULT_TRACK, chunk,
				   CHUNK_SIZE / fs);
		if (ret <= 0)
			break;

		current += ret;
		cmd = decoder_data(decoder, NULL,
				   chunk, ret * fs,
				   (float)current /
				   (float)audio_format.sample_rate,
				   bit_rate, NULL);

		if (cmd == DECODE_COMMAND_SEEK) {
			current = decoder_seek_where(decoder) *
				audio_format.sample_rate;
			afSeekFrame(af_fp, AF_DEFAULT_TRACK, current);

			decoder_command_finished(decoder);
			cmd = DECODE_COMMAND_NONE;
		}
	} while (cmd == DECODE_COMMAND_NONE);

	afCloseFile(af_fp);
}
Example #10
static void
audiofile_stream_decode(struct decoder *decoder, struct input_stream *is)
{
	GError *error = NULL;
	AFvirtualfile *vf;
	int fs, frame_count;
	AFfilehandle af_fp;
	struct audio_format audio_format;
	float total_time;
	uint16_t bit_rate;
	int ret;
	char chunk[CHUNK_SIZE];
	enum decoder_command cmd;

	if (!is->seekable) {
		g_warning("not seekable");
		return;
	}

	vf = setup_virtual_fops(is);

	af_fp = afOpenVirtualFile(vf, "r", NULL);
	if (af_fp == AF_NULL_FILEHANDLE) {
		g_warning("failed to input stream\n");
		return;
	}

	if (!audio_format_init_checked(&audio_format,
				       afGetRate(af_fp, AF_DEFAULT_TRACK),
				       audiofile_setup_sample_format(af_fp),
				       afGetVirtualChannels(af_fp, AF_DEFAULT_TRACK),
				       &error)) {
		g_warning("%s", error->message);
		g_error_free(error);
		afCloseFile(af_fp);
		return;
	}

	frame_count = afGetFrameCount(af_fp, AF_DEFAULT_TRACK);

	total_time = ((float)frame_count / (float)audio_format.sample_rate);

	bit_rate = (uint16_t)(is->size * 8.0 / total_time / 1000.0 + 0.5);

	fs = (int)afGetVirtualFrameSize(af_fp, AF_DEFAULT_TRACK, 1);

	decoder_initialized(decoder, &audio_format, true, total_time);

	do {
		ret = afReadFrames(af_fp, AF_DEFAULT_TRACK, chunk,
				   CHUNK_SIZE / fs);
		if (ret <= 0)
			break;

		cmd = decoder_data(decoder, NULL,
				   chunk, ret * fs,
				   bit_rate);

		if (cmd == DECODE_COMMAND_SEEK) {
			AFframecount frame = decoder_seek_where(decoder) *
				audio_format.sample_rate;
			afSeekFrame(af_fp, AF_DEFAULT_TRACK, frame);

			decoder_command_finished(decoder);
			cmd = DECODE_COMMAND_NONE;
		}
	} while (cmd == DECODE_COMMAND_NONE);

	afCloseFile(af_fp);
}
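
All of the examples above follow the same basic pattern: query the size of one virtual frame with afGetVirtualFrameSize(), allocate a buffer that holds a fixed number of frames, and pull frames through that buffer in a loop with afReadFrames(). The sketch below distills that pattern into a minimal, self-contained routine; it is an illustration only, and BUFFER_FRAME_COUNT as well as the process_frames() callback are placeholders rather than code from any of the projects above.

#include <audiofile.h>
#include <stdio.h>
#include <stdlib.h>

#define BUFFER_FRAME_COUNT 4096		/* arbitrary chunk size, as in the examples above */

/* placeholder consumer for the decoded frames */
static void process_frames(const void *frames, AFframecount count, int frameSize)
{
	(void) frames;
	(void) count;
	(void) frameSize;
}

int read_all_frames(const char *path)
{
	AFfilehandle	file;
	AFframecount	framesRead;
	int		frameSize;
	void		*buffer;

	file = afOpenFile(path, "r", AF_NULL_FILESETUP);
	if (file == AF_NULL_FILEHANDLE)
	{
		fprintf(stderr, "Could not open file '%s' for reading.\n", path);
		return -1;
	}

	/* bytes per frame in the virtual (client-side) format */
	frameSize = (int) afGetVirtualFrameSize(file, AF_DEFAULT_TRACK, 1);

	buffer = malloc(BUFFER_FRAME_COUNT * frameSize);
	if (buffer == NULL)
	{
		afCloseFile(file);
		return -1;
	}

	/* read until afReadFrames() reports end of file (0) or an error (< 0) */
	while ((framesRead = afReadFrames(file, AF_DEFAULT_TRACK, buffer,
			BUFFER_FRAME_COUNT)) > 0)
	{
		process_frames(buffer, framesRead, frameSize);
	}

	free(buffer);
	afCloseFile(file);
	return framesRead < 0 ? -1 : 0;
}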