Example 1
template <typename T>
void testChannelMatrixReading(int sampleFormat, int sampleWidth)
{
	// Create test file.
	const int channelCount = 2;
	const int frameCount = 10;
	const T samples[channelCount * frameCount] =
	{
		2, 3, 5, 7, 11,
		13, 17, 19, 23, 29,
		31, 37, 41, 43, 47,
		53, 59, 61, 67, 71
	};
	AFfilesetup setup = afNewFileSetup();
	afInitFileFormat(setup, AF_FILE_AIFFC);
	afInitChannels(setup, AF_DEFAULT_TRACK, 2);
	afInitSampleFormat(setup, AF_DEFAULT_TRACK, sampleFormat, sampleWidth);
	AFfilehandle file = afOpenFile(kTestFileName, "w", setup);
	afFreeFileSetup(setup);
	EXPECT_TRUE(file);

	AFframecount framesWritten = afWriteFrames(file, AF_DEFAULT_TRACK,
		samples, frameCount);
	EXPECT_EQ(framesWritten, frameCount);

	EXPECT_EQ(afCloseFile(file), 0);

	// Open file for reading and read data using different channel matrices.
	file = afOpenFile(kTestFileName, "r", NULL);
	EXPECT_TRUE(file);

	EXPECT_EQ(afGetChannels(file, AF_DEFAULT_TRACK), 2);
	EXPECT_EQ(afGetFrameCount(file, AF_DEFAULT_TRACK), frameCount);

	afSetVirtualChannels(file, AF_DEFAULT_TRACK, 1);

	for (int c=0; c<2; c++)
	{
		double channelMatrix[2] = { 0, 0 };
		channelMatrix[c] = 1;
		afSetChannelMatrix(file, AF_DEFAULT_TRACK, channelMatrix);

		EXPECT_EQ(afSeekFrame(file, AF_DEFAULT_TRACK, 0), 0);

		T *readSamples = new T[frameCount]; 
		AFframecount framesRead = afReadFrames(file, AF_DEFAULT_TRACK,
			readSamples, frameCount);
		EXPECT_EQ(framesRead, frameCount);

		for (int i=0; i<frameCount; i++)
			EXPECT_EQ(readSamples[i], samples[2*i + c]);

		delete [] readSamples;
	}

	EXPECT_EQ(afCloseFile(file), 0);

	::unlink(kTestFileName);
}
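The function above is a template over the sample type T; the EXPECT_* macros and kTestFileName are assumed to come from the surrounding test suite. A minimal sketch of how it might be instantiated for two common formats (the actual call sites in the original suite may differ):

	testChannelMatrixReading<int16_t>(AF_SAMPFMT_TWOSCOMP, 16);	// 16-bit signed integer samples
	testChannelMatrixReading<float>(AF_SAMPFMT_FLOAT, 32);	// 32-bit floating-point samples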
Example 2
static enum audiotap_status audiofile_read_init(struct audiotap **audiotap,
                                                const char *file,
                                                struct tapenc_params *params,
                                                uint8_t machine,
                                                uint8_t videotype,
                                                uint8_t *halfwaves){
  uint32_t freq;
  enum audiotap_status error = AUDIOTAP_LIBRARY_ERROR;
  AFfilehandle fh;

  if (status.audiofile_init_status != LIBRARY_OK
   || status.tapencoder_init_status != LIBRARY_OK)
    return AUDIOTAP_LIBRARY_UNAVAILABLE;
  fh=afOpenFile(file,"r", NULL);
  if (fh == AF_NULL_FILEHANDLE)
    return AUDIOTAP_LIBRARY_ERROR;
  do{
    if ( (freq=(uint32_t)afGetRate(fh, AF_DEFAULT_TRACK)) == -1)
      break;
    if (afSetVirtualChannels(fh, AF_DEFAULT_TRACK, 1) == -1)
      break;
    if (afSetVirtualSampleFormat(fh, AF_DEFAULT_TRACK, AF_SAMPFMT_TWOSCOMP, 32) == -1)
      break;
    if (afGetVirtualFrameSize(fh, AF_DEFAULT_TRACK, 0) != 4)
      break;
    error = AUDIOTAP_OK;
  }while(0);
  if(error != AUDIOTAP_OK){
    afCloseFile(fh);
    return error;
  }
  *halfwaves = 1;
  return audio2tap_audio_open_common(audiotap,
                                     freq,
                                     params,
                                     machine,
                                     videotype,
                                     &audiofile_read_functions,
                                     fh);
}
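The handle prepared above is configured for one virtual channel of 32-bit two's complement samples (and the code verifies that a virtual frame is 4 bytes), so later reads pull int32_t values with afReadFrames. A minimal sketch, using a hypothetical helper name that is not part of audiotap:

  /* Illustrative only: read up to count mono 32-bit samples from the handle
     set up by audiofile_read_init(); returns the number read, or -1 on error. */
  static int32_t read_sample_block(AFfilehandle fh, int32_t *buffer, int32_t count){
    AFframecount got = afReadFrames(fh, AF_DEFAULT_TRACK, buffer, count);
    return got < 0 ? -1 : (int32_t)got;
  }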
Example 3
int main (int argc, char **argv)
{
	int	i = 1;
	char	*infilename, *outfilename;
	int	fileFormat, outFileFormat = AF_FILE_UNKNOWN;

	AFfilehandle	infile, outfile;
	AFfilesetup	outfilesetup;
	int		sampleFormat, sampleWidth, channelCount;
	double		sampleRate;
	int		outSampleFormat = -1, outSampleWidth = -1,
			outChannelCount = -1;
	double		outMaxAmp = 1.0;

	AFframecount	totalFrames;

	if (argc == 2)
	{
		if (!strcmp(argv[1], "--version") || !strcmp(argv[1], "-v"))
		{
			printversion();
			exit(EXIT_SUCCESS);
		}

		if (!strcmp(argv[1], "--help") || !strcmp(argv[1], "-h"))
		{
			printusage();
			exit(EXIT_SUCCESS);
		}
	}

	if (argc < 3)
		usageerror();

	infilename = argv[1];
	outfilename = argv[2];

	i = 3;

	while (i < argc)
	{
		if (!strcmp(argv[i], "format"))
		{
			if (i + 1 >= argc)
				usageerror();
			if (!strcmp(argv[i+1], "aiff"))
				outFileFormat = AF_FILE_AIFF;
			else if (!strcmp(argv[i+1], "aifc"))
				outFileFormat = AF_FILE_AIFFC;
			else if (!strcmp(argv[i+1], "wave"))
				outFileFormat = AF_FILE_WAVE;
			else if (!strcmp(argv[i+1], "next"))
				outFileFormat = AF_FILE_NEXTSND;
			else if (!strcmp(argv[i+1], "bics"))
				outFileFormat = AF_FILE_BICSF;
			else if (!strcmp(argv[i+1], "voc"))
				outFileFormat = AF_FILE_VOC;
			else if (!strcmp(argv[i+1], "nist"))
				outFileFormat = AF_FILE_NIST_SPHERE;
			else if (!strcmp(argv[i+1], "caf"))
				outFileFormat = AF_FILE_CAF;
			else
			{
				fprintf(stderr, "sfconvert: Unknown format %s.\n", argv[i+1]);
				exit(EXIT_FAILURE);
			}

			/* Increment for argument. */
			i++;
		}
		else if (!strcmp(argv[i], "channels"))
		{
			if (i + 1 >= argc)
				usageerror();

			outChannelCount = atoi(argv[i+1]);
			if (outChannelCount < 1)
				usageerror();

			/* Increment for argument. */
			i++;
		}
		else if (!strcmp(argv[i], "float"))
		{
			if (i + 1 >= argc)
				usageerror();

			outSampleFormat = AF_SAMPFMT_FLOAT;
			outSampleWidth = 32;
			outMaxAmp = atof(argv[i+1]);

			/* Increment for argument. */
			i++;
		}
		else if (!strcmp(argv[i], "integer"))
		{
			if (i + 2 >= argc)
				usageerror();

			outSampleWidth = atoi(argv[i+1]);
			if (outSampleWidth < 1 || outSampleWidth > 32)
				usageerror();

			if (!strcmp(argv[i+2], "2scomp"))
				outSampleFormat = AF_SAMPFMT_TWOSCOMP;
			else if (!strcmp(argv[i+2], "unsigned"))
				outSampleFormat = AF_SAMPFMT_UNSIGNED;
			else
				usageerror();

			/* Increment for arguments. */
			i += 2;
		}
		else
		{
			printf("Unrecognized command %s\n", argv[i]);
		}

		i++;
	}

	infile = afOpenFile(infilename, "r", AF_NULL_FILESETUP);
	if (infile == AF_NULL_FILEHANDLE)
	{
		printf("Could not open file '%s' for reading.\n", infilename);
		return 1;
	}

	/* Get audio format parameters from input file. */
	fileFormat = afGetFileFormat(infile, NULL);
	totalFrames = afGetFrameCount(infile, AF_DEFAULT_TRACK);
	channelCount = afGetChannels(infile, AF_DEFAULT_TRACK);
	sampleRate = afGetRate(infile, AF_DEFAULT_TRACK);
	afGetSampleFormat(infile, AF_DEFAULT_TRACK, &sampleFormat, &sampleWidth);

	/* Initialize output audio format parameters. */
	outfilesetup = afNewFileSetup();

	if (outFileFormat == -1)
		outFileFormat = fileFormat;

	if (outSampleFormat == -1 || outSampleWidth == -1)
	{
		outSampleFormat = sampleFormat;
		outSampleWidth = sampleWidth;
	}

	if (outChannelCount == -1)
		outChannelCount = channelCount;

	afInitFileFormat(outfilesetup, outFileFormat);
	afInitSampleFormat(outfilesetup, AF_DEFAULT_TRACK, outSampleFormat,
		outSampleWidth);
	afInitChannels(outfilesetup, AF_DEFAULT_TRACK, outChannelCount);
	afInitRate(outfilesetup, AF_DEFAULT_TRACK, sampleRate);

	outfile = afOpenFile(outfilename, "w", outfilesetup);
	if (outfile == AF_NULL_FILEHANDLE)
	{
		printf("Could not open file '%s' for writing.\n", outfilename);
		return 1;
	}

	/*
		Set the output file's virtual audio format parameters
		to match the audio format parameters of the input file.
	*/
	afSetVirtualChannels(outfile, AF_DEFAULT_TRACK, channelCount);
	afSetVirtualSampleFormat(outfile, AF_DEFAULT_TRACK, sampleFormat,
		sampleWidth);

	afFreeFileSetup(outfilesetup);

	copyaudiodata(infile, outfile, AF_DEFAULT_TRACK, totalFrames);

	afCloseFile(infile);
	afCloseFile(outfile);

	printfileinfo(infilename);
	putchar('\n');
	printfileinfo(outfilename);

	return EXIT_SUCCESS;
}
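copyaudiodata() is called above but not shown in this example. A minimal sketch of such a helper, assuming <stdlib.h> is included, copying blocks of frames in the input file's virtual format (illustrative only, not sfconvert's actual implementation):

	static void copyaudiodata (AFfilehandle infile, AFfilehandle outfile,
		int track, AFframecount totalFrames)
	{
		/* Size of one virtual frame of the input file, in bytes. */
		int frameSize = (int) afGetVirtualFrameSize(infile, track, 1);
		const AFframecount bufferFrames = 4096;
		char *buffer = malloc(bufferFrames * frameSize);
		AFframecount framesLeft = totalFrames;

		while (framesLeft > 0)
		{
			AFframecount framesToRead = framesLeft < bufferFrames ?
				framesLeft : bufferFrames;
			AFframecount framesRead = afReadFrames(infile, track, buffer,
				framesToRead);
			if (framesRead <= 0)
				break;
			afWriteFrames(outfile, track, buffer, framesRead);
			framesLeft -= framesRead;
		}

		free(buffer);
	}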
Example 4
libspectrum_error
libspectrum_wav_read( libspectrum_tape *tape, const char *filename )
{
  libspectrum_byte *buffer; size_t length;
  libspectrum_byte *tape_buffer; size_t tape_length;
  size_t data_length;
  libspectrum_tape_block *block = NULL;
  int frames;

  /* Our filehandle from libaudiofile */
  AFfilehandle handle;

  /* The track we're using in the file */
  int track = AF_DEFAULT_TRACK; 

  if( !filename ) {
    libspectrum_print_error(
      LIBSPECTRUM_ERROR_LOGIC,
      "libspectrum_wav_read: no filename provided - wav files can only be loaded from a file"
    );
    return LIBSPECTRUM_ERROR_LOGIC;
  }

  handle = afOpenFile( filename, "r", NULL );
  if( handle == AF_NULL_FILEHANDLE ) {
    libspectrum_print_error(
      LIBSPECTRUM_ERROR_LOGIC,
      "libspectrum_wav_read: audiofile failed to open file:%s", filename
    );
    return LIBSPECTRUM_ERROR_LOGIC;
  }

  if( afSetVirtualSampleFormat( handle, track, AF_SAMPFMT_UNSIGNED, 8 ) ) {
    afCloseFile( handle );
    libspectrum_print_error(
      LIBSPECTRUM_ERROR_LOGIC,
      "libspectrum_wav_read: audiofile failed to set virtual sample format"
    );
    return LIBSPECTRUM_ERROR_LOGIC;
  }

  if( afSetVirtualChannels( handle, track, 1 ) ) {
    afCloseFile( handle );
    libspectrum_print_error(
      LIBSPECTRUM_ERROR_LOGIC,
      "libspectrum_wav_read: audiofile failed to set virtual channel count"
    );
    return LIBSPECTRUM_ERROR_LOGIC;
  }

  length = afGetFrameCount( handle, track );

  tape_length = length;
  if( tape_length%8 ) tape_length += 8 - (tape_length%8);

  buffer = libspectrum_new0( libspectrum_byte,
			     tape_length * afGetChannels(handle, track) );

  frames = afReadFrames( handle, track, buffer, length );
  if( frames == -1 ) {
    libspectrum_free( buffer );
    afCloseFile( handle );
    libspectrum_print_error(
      LIBSPECTRUM_ERROR_CORRUPT,
      "libspectrum_wav_read: can't calculate number of frames in audio file"
    );
    return LIBSPECTRUM_ERROR_CORRUPT;
  }

  if( !length ) {
    libspectrum_free( buffer );
    afCloseFile( handle );
    libspectrum_print_error(
      LIBSPECTRUM_ERROR_CORRUPT,
      "libspectrum_wav_read: empty audio file, nothing to load"
    );
    return LIBSPECTRUM_ERROR_CORRUPT;
  }

  if( frames != length ) {
    libspectrum_free( buffer );
    afCloseFile( handle );
    libspectrum_print_error(
      LIBSPECTRUM_ERROR_CORRUPT,
      "libspectrum_wav_read: read %d frames, but expected %lu\n", frames,
      (unsigned long)length
    );
    return LIBSPECTRUM_ERROR_CORRUPT;
  }

  block = libspectrum_tape_block_alloc( LIBSPECTRUM_TAPE_BLOCK_RAW_DATA );

  /* 44100 Hz -> 79 T-states per sample, 22050 Hz -> 158 T-states per sample */
  libspectrum_tape_block_set_bit_length( block,
                                         3500000/afGetRate( handle, track ) );
  libspectrum_set_pause_ms( block, 0 );
  libspectrum_tape_block_set_bits_in_last_byte( block,
              length % LIBSPECTRUM_BITS_IN_BYTE ?
                length % LIBSPECTRUM_BITS_IN_BYTE : LIBSPECTRUM_BITS_IN_BYTE );
  data_length = tape_length / LIBSPECTRUM_BITS_IN_BYTE;
  libspectrum_tape_block_set_data_length( block, data_length );

  tape_buffer = libspectrum_new0( libspectrum_byte, data_length );

  libspectrum_byte *from = buffer;
  libspectrum_byte *to = tape_buffer;
  length = tape_length;
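  /*
    Pack eight consecutive 8-bit samples into one tape byte, most significant
    bit first: any sample above 127 sets the corresponding bit; e.g. the
    samples 200, 10, 180, 10, 200, 10, 10, 200 pack to 0xA9.
  */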
  do {
    libspectrum_byte val = 0;
    int i;
    for( i = 7; i >= 0; i-- ) {
      if( *from++ > 127 ) val |= 1 << i;
    }
    *to++ = val;
  } while ((length -= 8) > 0);

  libspectrum_tape_block_set_data( block, tape_buffer );

  libspectrum_tape_append_block( tape, block );

  if( afCloseFile( handle ) ) {
    libspectrum_free( buffer );
    libspectrum_print_error(
      LIBSPECTRUM_ERROR_UNKNOWN,
      "libspectrum_wav_read: failed to close audio file"
    );
    return LIBSPECTRUM_ERROR_UNKNOWN;
  }

  libspectrum_free( buffer );

  /* Successful completion */
  return LIBSPECTRUM_ERROR_NONE;
}