Example #1
SoundSample::SoundSample(std::string name, int rev)
{
    uMin = rev;
    samples = NULL;

#ifdef HAVE_AUDIOFILE

    lowPitchOffset = highPitchOffset = 1.0;
    lowVolumeOffset = highVolumeOffset = 1.0;
    lastDir = 0;
    lastValue = 0;
    file = afOpenFile(name.c_str(), "r", 0);
    if (file == AF_NULL_FILEHANDLE)
    {
        QMessageBox::critical(theWindow, "Error",
                              "SoundSample::afOpenFD() - failed!",
                              QMessageBox::Ok,
                              QMessageBox::Ok);
    }
    else
    {
        afSetVirtualSampleFormat(file, AF_DEFAULT_TRACK, AF_SAMPFMT_TWOSCOMP, 16);
        //afSetVirtualSampleFormat(file, AF_DEFAULT_TRACK, AF_SAMPFMT_FLOAT, 32);

        channels = afGetChannels(file, AF_DEFAULT_TRACK);
        samplesPerSec = (int)afGetRate(file, AF_DEFAULT_TRACK);
        numSamples = afGetFrameCount(file, AF_DEFAULT_TRACK);
        sampleBlocksize = (int)afGetVirtualFrameSize(file, AF_DEFAULT_TRACK, 1);
        bytesPerSample = (int)afGetVirtualFrameSize(file, AF_DEFAULT_TRACK, 1) / channels;
        if (channels != 2)
        {
            std::ostringstream buf;
            buf << "SoundSample:: not a stereo sound file: " << name;
            std::string s = buf.str();
            QMessageBox::critical(theWindow, "Error",
                                  s.c_str(),
                                  QMessageBox::Ok,
                                  QMessageBox::Ok);
        }
        if (samplesPerSec != 48000)
        {
            std::ostringstream buf;
            buf << "SoundSample:: 48 kHz required but " << name << " has " << samplesPerSec;
            std::string s = buf.str();
            QMessageBox::critical(theWindow, "Error",
                                  s.c_str(),
                                  QMessageBox::Ok,
                                  QMessageBox::Ok);
        }
Example #2
static enum sample_format
audiofile_setup_sample_format(AFfilehandle af_fp)
{
	int fs, bits;

	afGetSampleFormat(af_fp, AF_DEFAULT_TRACK, &fs, &bits);
	if (!audio_valid_sample_format(audiofile_bits_to_sample_format(bits))) {
		g_debug("input file has %d bit samples, converting to 16",
			bits);
		bits = 16;
	}

	afSetVirtualSampleFormat(af_fp, AF_DEFAULT_TRACK,
	                         AF_SAMPFMT_TWOSCOMP, bits);
	afGetVirtualSampleFormat(af_fp, AF_DEFAULT_TRACK, &fs, &bits);

	return audiofile_bits_to_sample_format(bits);
}
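
As a rough usage sketch (not taken from any of the projects shown here), a helper like the one in Example #2 usually sits inside an open/read/close loop: request 16-bit two's complement output via afSetVirtualSampleFormat() and then read frames in that format. Only the libaudiofile calls are real API; the read_as_s16() name and the 4096-frame chunk size are illustrative.

/* Hypothetical sketch: force 16-bit signed output and read it back.
   Error handling is trimmed; 4096 is an arbitrary chunk size. */
#include <audiofile.h>
#include <stdint.h>
#include <stdlib.h>

static void read_as_s16(const char *path)
{
	AFfilehandle fh = afOpenFile(path, "r", AF_NULL_FILESETUP);
	if (fh == AF_NULL_FILEHANDLE)
		return;

	/* Deliver 16-bit two's complement samples regardless of what
	   the file actually stores. */
	afSetVirtualSampleFormat(fh, AF_DEFAULT_TRACK, AF_SAMPFMT_TWOSCOMP, 16);

	int frameSize = (int) afGetVirtualFrameSize(fh, AF_DEFAULT_TRACK, 1);
	int16_t *buffer = malloc(4096 * frameSize);

	int n;
	while ((n = afReadFrames(fh, AF_DEFAULT_TRACK, buffer, 4096)) > 0) {
		/* ... process n interleaved 16-bit frames here ... */
	}

	free(buffer);
	afCloseFile(fh);
}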
Example #3
static enum audiotap_status audiofile_read_init(struct audiotap **audiotap,
                                                const char *file,
                                                struct tapenc_params *params,
                                                uint8_t machine,
                                                uint8_t videotype,
                                                uint8_t *halfwaves){
  uint32_t freq;
  enum audiotap_status error = AUDIOTAP_LIBRARY_ERROR;
  AFfilehandle fh;

  if (status.audiofile_init_status != LIBRARY_OK
   || status.tapencoder_init_status != LIBRARY_OK)
    return AUDIOTAP_LIBRARY_UNAVAILABLE;
  fh=afOpenFile(file,"r", NULL);
  if (fh == AF_NULL_FILEHANDLE)
    return AUDIOTAP_LIBRARY_ERROR;
  do{
    if ( (freq=(uint32_t)afGetRate(fh, AF_DEFAULT_TRACK)) == -1)
      break;
    if (afSetVirtualChannels(fh, AF_DEFAULT_TRACK, 1) == -1)
      break;
    if (afSetVirtualSampleFormat(fh, AF_DEFAULT_TRACK, AF_SAMPFMT_TWOSCOMP, 32) == -1)
      break;
    if (afGetVirtualFrameSize(fh, AF_DEFAULT_TRACK, 0) != 4)
      break;
    error = AUDIOTAP_OK;
  }while(0);
  if(error != AUDIOTAP_OK){
    afCloseFile(fh);
    return error;
  }
  *halfwaves = 1;
  return audio2tap_audio_open_common(audiotap,
                                     freq,
                                     params,
                                     machine,
                                     videotype,
                                     &audiofile_read_functions,
                                     fh);
}
Example #4
int main (int argc, char **argv)
{
	AFfilehandle	file;
	AFframecount	count, frameCount;
	int		channelCount, sampleFormat, sampleWidth;
	float		frameSize;
	void		*buffer;
	double		sampleRate;

	ALport		outport;
	ALconfig	outportconfig;

	if (argc < 2)
		usage();

	file = afOpenFile(argv[1], "r", NULL);
	if (file == AF_NULL_FILEHANDLE)
	{
		fprintf(stderr, "Could not open file %s.\n", argv[1]);
		exit(EXIT_FAILURE);
	}

	frameCount = afGetFrameCount(file, AF_DEFAULT_TRACK);
	frameSize = afGetVirtualFrameSize(file, AF_DEFAULT_TRACK, 1);
	channelCount = afGetVirtualChannels(file, AF_DEFAULT_TRACK);
	sampleRate = afGetRate(file, AF_DEFAULT_TRACK);
	afGetVirtualSampleFormat(file, AF_DEFAULT_TRACK, &sampleFormat,
		&sampleWidth);

	if (sampleFormat == AF_SAMPFMT_UNSIGNED)
	{
		afSetVirtualSampleFormat(file, AF_DEFAULT_TRACK,
			AF_SAMPFMT_TWOSCOMP, sampleWidth);
	}

	printf("frame count: %lld\n", frameCount);
	printf("frame size: %d bytes\n", (int) frameSize);
	printf("channel count: %d\n", channelCount);
	printf("sample rate: %.2f Hz\n", sampleRate);
	buffer = malloc(BUFFERED_FRAME_COUNT * frameSize);

	outportconfig = alNewConfig();
	setwidth(outportconfig, sampleWidth);
	setsampleformat(outportconfig, sampleFormat);
	alSetChannels(outportconfig, channelCount);

	count = afReadFrames(file, AF_DEFAULT_TRACK, buffer, BUFFERED_FRAME_COUNT);

	outport = alOpenPort("irixread", "w", outportconfig);
	setrate(outport, sampleRate);

	do
	{
		printf("count = %lld\n", count);
		alWriteFrames(outport, buffer, count);

		count = afReadFrames(file, AF_DEFAULT_TRACK, buffer,
			BUFFERED_FRAME_COUNT);
	} while (count > 0);

	waitport(outport);

	alClosePort(outport);
	alFreeConfig(outportconfig);

	afCloseFile(file);
}
Example #5
//***************************************************************************
bool Kwave::AudiofileDecoder::open(QWidget *widget, QIODevice &src)
{
    metaData().clear();
    Q_ASSERT(!m_source);
    if (m_source) qWarning("AudiofileDecoder::open(), already open !");

    // try to open the source
    if (!src.open(QIODevice::ReadOnly)) {
	qWarning("AudiofileDecoder::open(), failed to open source !");
	return false;
    }

    // source successfully opened
    m_source = &src;
    m_src_adapter = new Kwave::VirtualAudioFile(*m_source);

    Q_ASSERT(m_src_adapter);
    if (!m_src_adapter) return false;

    m_src_adapter->open(m_src_adapter, 0);

    AFfilehandle fh = m_src_adapter->handle();
    if (!fh || (m_src_adapter->lastError() >= 0)) {
	QString reason;

	switch (m_src_adapter->lastError()) {
	    case AF_BAD_NOT_IMPLEMENTED:
	        reason = i18n("Format or function is not implemented");
	        break;
	    case AF_BAD_MALLOC:
	        reason = i18n("Out of memory");
	        break;
	    case AF_BAD_HEADER:
	        reason = i18n("File header is damaged");
	        break;
	    case AF_BAD_CODEC_TYPE:
	        reason = i18n("Invalid codec type");
	        break;
	    case AF_BAD_OPEN:
	        reason = i18n("Opening the file failed");
	        break;
	    case AF_BAD_READ:
	        reason = i18n("Read access failed");
	        break;
	    case AF_BAD_SAMPFMT:
	        reason = i18n("Invalid sample format");
	        break;
	    default:
		reason = reason.number(m_src_adapter->lastError());
	}

	QString text= i18n("An error occurred while opening the "\
	    "file:\n'%1'", reason);
	Kwave::MessageBox::error(widget, text);

	return false;
    }

    AFframecount length = afGetFrameCount(fh, AF_DEFAULT_TRACK);
    unsigned int tracks = qMax(afGetVirtualChannels(fh, AF_DEFAULT_TRACK), 0);
    unsigned int bits = 0;
    double       rate = 0.0;
    int af_sample_format;
    afGetVirtualSampleFormat(fh, AF_DEFAULT_TRACK, &af_sample_format,
	reinterpret_cast<int *>(&bits));
    Kwave::SampleFormat::Format fmt;
    switch (af_sample_format)
    {
	case AF_SAMPFMT_TWOSCOMP:
	    fmt = Kwave::SampleFormat::Signed;
	    break;
	case AF_SAMPFMT_UNSIGNED:
	    fmt = Kwave::SampleFormat::Unsigned;
	    break;
	case AF_SAMPFMT_FLOAT:
	    fmt = Kwave::SampleFormat::Float;
	    break;
	case AF_SAMPFMT_DOUBLE:
	    fmt = Kwave::SampleFormat::Double;
	    break;
	default:
	    fmt = Kwave::SampleFormat::Unknown;
	    break;
    }

    // get sample rate, with fallback to 8kHz
    rate = afGetRate(fh, AF_DEFAULT_TRACK);
    if (rate < 1.0) {
	qWarning("\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"\
	         "WARNING: file has no sample rate!\n"\
	         "         => using 8000 samples/sec as fallback\n"\
	         "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
	rate = 8000.0;
    }

    Kwave::SampleFormat::Map sf;
    QString sample_format_name = sf.description(Kwave::SampleFormat(fmt), true);

    if (static_cast<signed int>(bits) < 0) bits = 0;

    int af_compression = afGetCompression(fh, AF_DEFAULT_TRACK);
    const Kwave::Compression compression(
	Kwave::Compression::fromAudiofile(af_compression)
    );

    Kwave::FileInfo info(metaData());
    info.setRate(rate);
    info.setBits(bits);
    info.setTracks(tracks);
    info.setLength(length);
    info.set(INF_SAMPLE_FORMAT, Kwave::SampleFormat(fmt).toInt());
    info.set(Kwave::INF_COMPRESSION, compression.toInt());
    metaData().replace(Kwave::MetaDataList(info));
    qDebug("-------------------------");
    qDebug("info:");
    qDebug("compression = %d", af_compression);
    qDebug("channels    = %d", info.tracks());
    qDebug("rate        = %0.0f", info.rate());
    qDebug("bits/sample = %d", info.bits());
    qDebug("length      = %lu samples",
	   static_cast<unsigned long int>(info.length()));
    qDebug("format      = %d (%s)", af_sample_format,
                                    DBG(sample_format_name));
    qDebug("-------------------------");

    // set up libaudiofile to produce Kwave's internal sample format
#if Q_BYTE_ORDER == Q_BIG_ENDIAN
    afSetVirtualByteOrder(fh, AF_DEFAULT_TRACK, AF_BYTEORDER_BIGENDIAN);
#else
    afSetVirtualByteOrder(fh, AF_DEFAULT_TRACK, AF_BYTEORDER_LITTLEENDIAN);
#endif
    afSetVirtualSampleFormat(fh, AF_DEFAULT_TRACK,
	AF_SAMPFMT_TWOSCOMP, SAMPLE_STORAGE_BITS);

    return true;
}
Example #6
static void
audiofile_stream_decode(struct decoder *decoder, struct input_stream *is)
{
	AFvirtualfile *vf;
	int fs, frame_count;
	AFfilehandle af_fp;
	int bits;
	struct audio_format audio_format;
	float total_time;
	uint16_t bit_rate;
	int ret, current = 0;
	char chunk[CHUNK_SIZE];
	enum decoder_command cmd;

	if (!is->seekable) {
		g_warning("not seekable");
		return;
	}

	vf = setup_virtual_fops(is);

	af_fp = afOpenVirtualFile(vf, "r", NULL);
	if (af_fp == AF_NULL_FILEHANDLE) {
		g_warning("failed to input stream\n");
		return;
	}

	afGetSampleFormat(af_fp, AF_DEFAULT_TRACK, &fs, &bits);
	if (!audio_valid_sample_format(bits)) {
		g_debug("input file has %d bit samples, converting to 16",
			bits);
		bits = 16;
	}

	afSetVirtualSampleFormat(af_fp, AF_DEFAULT_TRACK,
	                         AF_SAMPFMT_TWOSCOMP, bits);
	afGetVirtualSampleFormat(af_fp, AF_DEFAULT_TRACK, &fs, &bits);
	audio_format.bits = (uint8_t)bits;
	audio_format.sample_rate =
	                      (unsigned int)afGetRate(af_fp, AF_DEFAULT_TRACK);
	audio_format.channels =
	              (uint8_t)afGetVirtualChannels(af_fp, AF_DEFAULT_TRACK);

	if (!audio_format_valid(&audio_format)) {
		g_warning("Invalid audio format: %u:%u:%u\n",
			  audio_format.sample_rate, audio_format.bits,
			  audio_format.channels);
		afCloseFile(af_fp);
		return;
	}

	frame_count = afGetFrameCount(af_fp, AF_DEFAULT_TRACK);

	total_time = ((float)frame_count / (float)audio_format.sample_rate);

	bit_rate = (uint16_t)(is->size * 8.0 / total_time / 1000.0 + 0.5);

	fs = (int)afGetVirtualFrameSize(af_fp, AF_DEFAULT_TRACK, 1);

	decoder_initialized(decoder, &audio_format, true, total_time);

	do {
		ret = afReadFrames(af_fp, AF_DEFAULT_TRACK, chunk,
				   CHUNK_SIZE / fs);
		if (ret <= 0)
			break;

		current += ret;
		cmd = decoder_data(decoder, NULL,
				   chunk, ret * fs,
				   (float)current /
				   (float)audio_format.sample_rate,
				   bit_rate, NULL);

		if (cmd == DECODE_COMMAND_SEEK) {
			current = decoder_seek_where(decoder) *
				audio_format.sample_rate;
			afSeekFrame(af_fp, AF_DEFAULT_TRACK, current);

			decoder_command_finished(decoder);
			cmd = DECODE_COMMAND_NONE;
		}
	} while (cmd == DECODE_COMMAND_NONE);

	afCloseFile(af_fp);
}
Example #7
int main (int argc, char **argv)
{
	AFfilesetup	setup;
	if ((setup = afNewFileSetup()) == AF_NULL_FILESETUP)
	{
		fprintf(stderr, "Could not allocate file setup.\n");
		exit(EXIT_FAILURE);
	}

	afInitFileFormat(setup, AF_FILE_IRCAM);
	afInitChannels(setup, AF_DEFAULT_TRACK, 1);
	afInitSampleFormat(setup, AF_DEFAULT_TRACK, AF_SAMPFMT_FLOAT, 32);

	char *testFileName;
	if (!createTemporaryFile("floatto24", &testFileName))
	{
		fprintf(stderr, "Could not create temporary file.\n");
		exit(EXIT_FAILURE);
	}

	AFfilehandle file = afOpenFile(testFileName, "w", setup);
	if (file == AF_NULL_FILEHANDLE)
	{
		printf("could not open file for writing\n");
		exit(EXIT_FAILURE);
	}

	afFreeFileSetup(setup);

	AFframecount framesWritten = afWriteFrames(file, AF_DEFAULT_TRACK, samples,
		frameCount);

	if (framesWritten != frameCount)
	{
		fprintf(stderr, "Wrong number of frames read.\n");
		exit(EXIT_FAILURE);
	}

	if (afCloseFile(file) != 0)
	{
		fprintf(stderr, "Closing file returned non-zero status.\n");
		exit(EXIT_FAILURE);
	}

	file = afOpenFile(testFileName, "r", AF_NULL_FILESETUP);
	if (file == AF_NULL_FILEHANDLE)
	{
		fprintf(stderr, "Could not open file for writing.\n");
		exit(EXIT_FAILURE);
	}

	if (afSetVirtualSampleFormat(file, AF_DEFAULT_TRACK,
		AF_SAMPFMT_TWOSCOMP, 24) != 0)
	{
		fprintf(stderr, "afSetVirtualSampleFormat returned non-zero status.\n");
		exit(EXIT_FAILURE);
	}

	int readSamples[frameCount];
	for (int i=0; i<frameCount; i++)
		readSamples[i] = -1000 - i;
	AFframecount framesRead = afReadFrames(file, AF_DEFAULT_TRACK, readSamples,
		frameCount);

	if (framesRead != frameCount)
	{
		fprintf(stderr, "Wrong number of frames read.\n");
		exit(EXIT_FAILURE);
	}

	for (int i=0; i<framesRead; i++)
	{
#ifdef DEBUG
		printf("[%d] = %d\n", i, readSamples[i]);
#endif

		if (readSamples[i] == -1000 - i)
		{
			fprintf(stderr, "Data in destination array untouched.\n");
			exit(EXIT_FAILURE);
		}

		/*
			Ensure that the high-order 8 bits represent
			sign extension: only 0x00 (+) or 0xff (-) is
			valid.
		*/
		if ((readSamples[i] & 0xff000000) != 0x000000 &&
			(readSamples[i] & 0xff000000) != 0xff000000)
		{
			fprintf(stderr, "Data is not within range of "
				"{-2^23, ..., 2^23-1}.\n");
			exit(EXIT_FAILURE);
		}

		if (readSamples[i] != referenceConvertedSamples[i])
		{
			fprintf(stderr, "Data doesn't match reference data.\n");
			exit(EXIT_FAILURE);
		}
	}

	if (afCloseFile(file) != 0)
	{
		fprintf(stderr, "Closing file returned non-zero status.\n");
		exit(EXIT_FAILURE);
	}

	unlink(testFileName);
	free(testFileName);

	exit(EXIT_SUCCESS);
}
Example #8
int main (int argc, char **argv)
{
	int	i = 1;
	char	*infilename, *outfilename;
	int	fileFormat, outFileFormat = AF_FILE_UNKNOWN;

	AFfilehandle	infile, outfile;
	AFfilesetup	outfilesetup;
	int		sampleFormat, sampleWidth, channelCount;
	double		sampleRate;
	int		outSampleFormat = -1, outSampleWidth = -1,
			outChannelCount = -1;
	double		outMaxAmp = 1.0;

	AFframecount	totalFrames;

	if (argc == 2)
	{
		if (!strcmp(argv[1], "--version") || !strcmp(argv[1], "-v"))
		{
			printversion();
			exit(EXIT_SUCCESS);
		}

		if (!strcmp(argv[1], "--help") || !strcmp(argv[1], "-h"))
		{
			printusage();
			exit(EXIT_SUCCESS);
		}
	}

	if (argc < 3)
		usageerror();

	infilename = argv[1];
	outfilename = argv[2];

	i = 3;

	while (i < argc)
	{
		if (!strcmp(argv[i], "format"))
		{
			if (i + 1 >= argc)
				usageerror();
			if (!strcmp(argv[i+1], "aiff"))
				outFileFormat = AF_FILE_AIFF;
			else if (!strcmp(argv[i+1], "aifc"))
				outFileFormat = AF_FILE_AIFFC;
			else if (!strcmp(argv[i+1], "wave"))
				outFileFormat = AF_FILE_WAVE;
			else if (!strcmp(argv[i+1], "next"))
				outFileFormat = AF_FILE_NEXTSND;
			else if (!strcmp(argv[i+1], "bics"))
				outFileFormat = AF_FILE_BICSF;
			else if (!strcmp(argv[i+1], "voc"))
				outFileFormat = AF_FILE_VOC;
			else if (!strcmp(argv[i+1], "nist"))
				outFileFormat = AF_FILE_NIST_SPHERE;
			else if (!strcmp(argv[i+1], "caf"))
				outFileFormat = AF_FILE_CAF;
			else
			{
				fprintf(stderr, "sfconvert: Unknown format %s.\n", argv[i+1]);
				exit(EXIT_FAILURE);
			}

			/* Increment for argument. */
			i++;
		}
		else if (!strcmp(argv[i], "channels"))
		{
			if (i + 1 >= argc)
				usageerror();

			outChannelCount = atoi(argv[i+1]);
			if (outChannelCount < 1)
				usageerror();

			/* Increment for argument. */
			i++;
		}
		else if (!strcmp(argv[i], "float"))
		{
			if (i + 1 >= argc)
				usageerror();

			outSampleFormat = AF_SAMPFMT_FLOAT;
			outSampleWidth = 32;
			outMaxAmp = atof(argv[i+1]);

			/* Increment for argument. */
			i++;
		}
		else if (!strcmp(argv[i], "integer"))
		{
			if (i + 2 >= argc)
				usageerror();

			outSampleWidth = atoi(argv[i+1]);
			if (outSampleWidth < 1 || outSampleWidth > 32)
				usageerror();

			if (!strcmp(argv[i+2], "2scomp"))
				outSampleFormat = AF_SAMPFMT_TWOSCOMP;
			else if (!strcmp(argv[i+2], "unsigned"))
				outSampleFormat = AF_SAMPFMT_UNSIGNED;
			else
				usageerror();

			/* Increment for arguments. */
			i += 2;
		}
		else
		{
			printf("Unrecognized command %s\n", argv[i]);
		}

		i++;
	}

	infile = afOpenFile(infilename, "r", AF_NULL_FILESETUP);
	if (infile == AF_NULL_FILEHANDLE)
	{
		printf("Could not open file '%s' for reading.\n", infilename);
		return 1;
	}

	/* Get audio format parameters from input file. */
	fileFormat = afGetFileFormat(infile, NULL);
	totalFrames = afGetFrameCount(infile, AF_DEFAULT_TRACK);
	channelCount = afGetChannels(infile, AF_DEFAULT_TRACK);
	sampleRate = afGetRate(infile, AF_DEFAULT_TRACK);
	afGetSampleFormat(infile, AF_DEFAULT_TRACK, &sampleFormat, &sampleWidth);

	/* Initialize output audio format parameters. */
	outfilesetup = afNewFileSetup();

	if (outFileFormat == -1)
		outFileFormat = fileFormat;

	if (outSampleFormat == -1 || outSampleWidth == -1)
	{
		outSampleFormat = sampleFormat;
		outSampleWidth = sampleWidth;
	}

	if (outChannelCount == -1)
		outChannelCount = channelCount;

	afInitFileFormat(outfilesetup, outFileFormat);
	afInitSampleFormat(outfilesetup, AF_DEFAULT_TRACK, outSampleFormat,
		outSampleWidth);
	afInitChannels(outfilesetup, AF_DEFAULT_TRACK, outChannelCount);
	afInitRate(outfilesetup, AF_DEFAULT_TRACK, sampleRate);

	outfile = afOpenFile(outfilename, "w", outfilesetup);
	if (outfile == AF_NULL_FILEHANDLE)
	{
		printf("Could not open file '%s' for writing.\n", outfilename);
		return 1;
	}

	/*
		Set the output file's virtual audio format parameters
		to match the audio format parameters of the input file.
	*/
	afSetVirtualChannels(outfile, AF_DEFAULT_TRACK, channelCount);
	afSetVirtualSampleFormat(outfile, AF_DEFAULT_TRACK, sampleFormat,
		sampleWidth);

	afFreeFileSetup(outfilesetup);

	copyaudiodata(infile, outfile, AF_DEFAULT_TRACK, totalFrames);

	afCloseFile(infile);
	afCloseFile(outfile);

	printfileinfo(infilename);
	putchar('\n');
	printfileinfo(outfilename);

	return EXIT_SUCCESS;
}
Example #9
void print_power (char *filename)
{
	AFfilehandle	file;
	double		*sums, *frames;
	int		channelCount, windowSize, frameCount;
	int		i, c;
	struct smooth	*powsmooth;
	int		winStart, winEnd;
	int		lastWindow = FALSE;
	double		pow, maxpow;

	double		level, peak, minSample = 1, maxSample = -1;

	file = afOpenFile(filename, "r", NULL);
	if (file == AF_NULL_FILEHANDLE)
	{
		fprintf(stderr, "Could not open file %s.\n", filename);
		return;
	}

	channelCount = afGetChannels(file, AF_DEFAULT_TRACK);
	windowSize = afGetRate(file, AF_DEFAULT_TRACK) / 100;
	frameCount = afGetFrameCount(file, AF_DEFAULT_TRACK);

	sums = calloc(channelCount, sizeof (double));
	for (c=0; c<channelCount; c++)
		sums[c] = 0;

	frames = calloc(channelCount * windowSize, sizeof (double));

	afSetVirtualSampleFormat(file, AF_DEFAULT_TRACK, AF_SAMPFMT_DOUBLE,
		sizeof (double));

	powsmooth = calloc(channelCount, sizeof (struct smooth));
	for (c=0; c<channelCount; c++)
	{
		/* Use a 100-element (1 second) window. */
		powsmooth[c].length = 100;
		powsmooth[c].buf = calloc(powsmooth[c].length, sizeof (double));
		powsmooth[c].start = 0;
		powsmooth[c].n = 0;
	}

	winStart = 0;
	winEnd = 0;
	lastWindow = FALSE;
	maxpow = 0;

	do
	{
		winEnd = winStart + windowSize;

		if (winEnd >= frameCount)
		{
			winEnd = frameCount;
			lastWindow = TRUE;
		}

		afReadFrames(file, AF_DEFAULT_TRACK, frames, windowSize);

		for (c=0; c<channelCount; c++)
		{
			sums[c] = 0;

			for (i=0; i < winEnd - winStart; i++)
			{
				double	sample;

				sample = frames[i*channelCount + c];
				sums[c] += sample*sample;

				if (sample > maxSample)
					maxSample = sample;
				if (sample < minSample)
					minSample = sample;
			}
		}

		/* Compute power for each channel. */
		for (c=0; c<channelCount; c++)
		{
			double pow;
			int end;

			pow = sums[c] / (winEnd - winStart);

			end = (powsmooth[c].start + powsmooth[c].n) %
				powsmooth[c].length;
			powsmooth[c].buf[end] = pow;

			if (powsmooth[c].n == powsmooth[c].length)
			{
				powsmooth[c].start = (powsmooth[c].start + 1) % powsmooth[c].length;
				pow = get_smoothed_data(&powsmooth[c]);
				if (pow > maxpow)
					maxpow = pow;
			}
			else
			{
				powsmooth[c].n++;
			}
		}

		winStart += windowSize;
	} while (!lastWindow);

	for (c = 0; c < channelCount; c++)
	{
		pow = get_smoothed_data(&powsmooth[c]);
		if (pow > maxpow)
			maxpow = pow;
	}

	free(sums);
	free(frames);
	for (c=0; c<channelCount; c++)
		free(powsmooth[c].buf);
	free(powsmooth);

	level = sqrt(maxpow);

	afCloseFile(file);

	printf("file: %s\n", filename);

	printf("level (dB): %f\n", 20 * log10(level));
	printf("peak-: %f\n", minSample);
	printf("peak+: %f\n", maxSample);

	peak = fabs(minSample);
	if (peak < fabs(maxSample))
		peak = fabs(maxSample);

	printf("peak (dB): %f\n", 20 * log10(peak));
}
Example #10
libspectrum_error
libspectrum_wav_read( libspectrum_tape *tape, const char *filename )
{
  libspectrum_byte *buffer; size_t length;
  libspectrum_byte *tape_buffer; size_t tape_length;
  size_t data_length;
  libspectrum_tape_block *block = NULL;
  int frames;

  /* Our filehandle from libaudiofile */
  AFfilehandle handle;

  /* The track we're using in the file */
  int track = AF_DEFAULT_TRACK; 

  if( !filename ) {
    libspectrum_print_error(
      LIBSPECTRUM_ERROR_LOGIC,
      "libspectrum_wav_read: no filename provided - wav files can only be loaded from a file"
    );
    return LIBSPECTRUM_ERROR_LOGIC;
  }

  handle = afOpenFile( filename, "r", NULL );
  if( handle == AF_NULL_FILEHANDLE ) {
    libspectrum_print_error(
      LIBSPECTRUM_ERROR_LOGIC,
      "libspectrum_wav_read: audiofile failed to open file:%s", filename
    );
    return LIBSPECTRUM_ERROR_LOGIC;
  }

  if( afSetVirtualSampleFormat( handle, track, AF_SAMPFMT_UNSIGNED, 8 ) ) {
    afCloseFile( handle );
    libspectrum_print_error(
      LIBSPECTRUM_ERROR_LOGIC,
      "libspectrum_wav_read: audiofile failed to set virtual sample format"
    );
    return LIBSPECTRUM_ERROR_LOGIC;
  }

  if( afSetVirtualChannels( handle, track, 1 ) ) {
    afCloseFile( handle );
    libspectrum_print_error(
      LIBSPECTRUM_ERROR_LOGIC,
      "libspectrum_wav_read: audiofile failed to set virtual channel count"
    );
    return LIBSPECTRUM_ERROR_LOGIC;
  }

  length = afGetFrameCount( handle, track );

  tape_length = length;
  if( tape_length%8 ) tape_length += 8 - (tape_length%8);

  buffer = libspectrum_new0( libspectrum_byte,
			     tape_length * afGetChannels(handle, track) );

  frames = afReadFrames( handle, track, buffer, length );
  if( frames == -1 ) {
    libspectrum_free( buffer );
    afCloseFile( handle );
    libspectrum_print_error(
      LIBSPECTRUM_ERROR_CORRUPT,
      "libspectrum_wav_read: can't calculate number of frames in audio file"
    );
    return LIBSPECTRUM_ERROR_CORRUPT;
  }

  if( !length ) {
    libspectrum_free( buffer );
    afCloseFile( handle );
    libspectrum_print_error(
      LIBSPECTRUM_ERROR_CORRUPT,
      "libspectrum_wav_read: empty audio file, nothing to load"
    );
    return LIBSPECTRUM_ERROR_CORRUPT;
  }

  if( frames != length ) {
    libspectrum_free( buffer );
    afCloseFile( handle );
    libspectrum_print_error(
      LIBSPECTRUM_ERROR_CORRUPT,
      "libspectrum_wav_read: read %d frames, but expected %lu\n", frames,
      (unsigned long)length
    );
    return LIBSPECTRUM_ERROR_CORRUPT;
  }

  block = libspectrum_tape_block_alloc( LIBSPECTRUM_TAPE_BLOCK_RAW_DATA );

  /* 44100 Hz 79 t-states 22050 Hz 158 t-states */
  libspectrum_tape_block_set_bit_length( block,
                                         3500000/afGetRate( handle, track ) );
  libspectrum_set_pause_ms( block, 0 );
  libspectrum_tape_block_set_bits_in_last_byte( block,
              length % LIBSPECTRUM_BITS_IN_BYTE ?
                length % LIBSPECTRUM_BITS_IN_BYTE : LIBSPECTRUM_BITS_IN_BYTE );
  data_length = tape_length / LIBSPECTRUM_BITS_IN_BYTE;
  libspectrum_tape_block_set_data_length( block, data_length );

  tape_buffer = libspectrum_new0( libspectrum_byte, data_length );

  libspectrum_byte *from = buffer;
  libspectrum_byte *to = tape_buffer;
  length = tape_length;
  do {
    libspectrum_byte val = 0;
    int i;
    for( i = 7; i >= 0; i-- ) {
      if( *from++ > 127 ) val |= 1 << i;
    }
    *to++ = val;
  } while ((length -= 8) > 0);

  libspectrum_tape_block_set_data( block, tape_buffer );

  libspectrum_tape_append_block( tape, block );

  if( afCloseFile( handle ) ) {
    libspectrum_free( buffer );
    libspectrum_print_error(
      LIBSPECTRUM_ERROR_UNKNOWN,
      "libspectrum_wav_read: failed to close audio file"
    );
    return LIBSPECTRUM_ERROR_UNKNOWN;
  }

  libspectrum_free( buffer );

  /* Successful completion */
  return LIBSPECTRUM_ERROR_NONE;
}
Example #11
	AFfilehandle openTestFile(int sampleWidth)
	{
		AFfilehandle file = afOpenFile(m_testFileName.c_str(), "r", AF_NULL_FILESETUP);
		afSetVirtualSampleFormat(file, AF_DEFAULT_TRACK, AF_SAMPFMT_UNSIGNED, sampleWidth);
		return file;
	}
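
afSetVirtualSampleFormat() returns 0 on success (Example #7 checks exactly this), so a slightly more defensive variant of a helper like the one in Example #11 could verify both the open and the format change before handing the handle back. This is only a sketch with a hypothetical free-function name and a path parameter; it is not part of the original test code.

/* Hypothetical defensive variant: fail early instead of reading
   samples in an unexpected format. */
static AFfilehandle openTestFileChecked(const char *path, int sampleWidth)
{
	AFfilehandle file = afOpenFile(path, "r", AF_NULL_FILESETUP);
	if (file == AF_NULL_FILEHANDLE)
		return AF_NULL_FILEHANDLE;
	if (afSetVirtualSampleFormat(file, AF_DEFAULT_TRACK,
			AF_SAMPFMT_UNSIGNED, sampleWidth) != 0) {
		afCloseFile(file);
		return AF_NULL_FILEHANDLE;
	}
	return file;
}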
Example #12
OSStatus setupOutput (AudioUnit *outputUnit, AFfilehandle file)
{
	OSStatus	status = noErr;
	UInt32		size;
	Boolean		outWritable;

	AudioStreamBasicDescription	fileASBD, inputASBD, outputASBD;
	AURenderCallbackStruct		renderCallback;

	/* Set virtual sample format to single-precision floating-point. */
	afSetVirtualSampleFormat(file, AF_DEFAULT_TRACK, AF_SAMPFMT_FLOAT, 32);

	/* Get ASBD for virtual sample format. */ 
	getASBDForFile(file, AF_DEFAULT_TRACK, &fileASBD);

	status = AudioUnitGetPropertyInfo(*outputUnit,
		kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output,
		0, &size, &outWritable);

	status = AudioUnitGetProperty(*outputUnit,
		kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output,
		0, &outputASBD, &size);

	if (outWritable)
	{
		outputASBD = fileASBD;

		status = AudioUnitSetProperty(*outputUnit,
			kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output,
			0, &outputASBD, size);
	}

	inputASBD = fileASBD;

	status = AudioUnitSetProperty(*outputUnit,
		kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
		0, &inputASBD, size);
	if (status != noErr)
	{
		fprintf(stderr, "Could not set input stream format.\n");
		exit(EXIT_FAILURE);
	}

	/*
		Set the render callback to a procedure which will
		read from the file.
	*/
	renderCallback.inputProc = fileRenderProc;
	renderCallback.inputProcRefCon = file;

	status = AudioUnitSetProperty(*outputUnit,
		kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0,
		&renderCallback, sizeof (AURenderCallbackStruct));
	if (status != noErr)
	{
		fprintf(stderr, "Could not set render callback.\n");
		exit(EXIT_FAILURE);
	}

	return status;
}
Example #13
VeSound *veSoundLoadFile_AudioFile(char *file) {
  AFfilehandle fh;
  VeSound *snd;
  int nchan, nframe;
  int fsz, i, n;
  float *buf;
  
  if (!(fh = afOpenFile(file,"r",0))) {
    veError(MODULE,"audiofile library could not open sound file '%s'",
	    file);
    return NULL;
  }
  afSetVirtualSampleFormat(fh,AF_DEFAULT_TRACK,AF_SAMPFMT_FLOAT,32); /* width is in bits */
  // Hack. 
  // afSetVirtualRate(fh,AF_DEFAULT_TRACK,(double)veAudioGetSampFreq());
  nchan = afGetChannels(fh,AF_DEFAULT_TRACK);
  if (nchan < 1) {
    veError(MODULE,"sound file has no tracks?");
    afCloseFile(fh);
    return NULL;
  }
  if (nchan > 2) {
    veError(MODULE,"sound file has more than 2 tracks - rejecting it");
    afCloseFile(fh);
    return NULL;
  }
  nframe = afGetFrameCount(fh,AF_DEFAULT_TRACK);
  fsz = veAudioGetFrameSize();

  snd = veAllocObj(VeSound);
  snd->nframes = nframe/fsz;
  if (nframe % fsz)
    snd->nframes++;
  snd->data = veAlloc(snd->nframes*sizeof(float)*fsz,0);
  buf = veAlloc(2*fsz*sizeof(float),0);

  for (i = 0; i < snd->nframes; i++) {
    memset(buf,0,nchan*fsz*sizeof(float)); /* zero the portion that will be read */
    n = afReadFrames(fh,AF_DEFAULT_TRACK,buf,fsz);
    if (n == -1) {
      veError(MODULE,"sound file read failed for '%s': %s",
	      file, strerror(errno));
      afCloseFile(fh);
      veFree(buf);
      veFree(snd->data);
      veFree(snd);
      return NULL;
    }
    if (nchan == 1)
      /* remember: snd->data is a (float *) */
      memcpy(snd->data+fsz*i,buf,fsz*sizeof(float));
    else {
      /* downmix stereo to mono (trivial) */
      int k;
      for (k = 0; k < fsz; k++)
	snd->data[fsz*i+k] = (buf[2*k] + buf[2*k+1])/2.0;
    }
  }

  afCloseFile(fh);
  veFree(buf);

  snd->file = veDupString(file);
  /* initialize other fields */
  snd->id = -1;
  snd->name = NULL;

  return snd;
}