Example no. 1
static int
oss_configure_device(int fd, audio_format *ifmt, audio_format *ofmt)
{
	/* Configure the audio device. This function is called for both read */
	/* and write file descriptors, so the setup must not break if done   */
	/* twice on the same device.                                         */
	int  	mode, stereo, speed;

	mode = deve2oss(ifmt->encoding);
	if (ioctl(fd, SNDCTL_DSP_SETFMT, &mode) == -1) {
		if (ifmt->encoding == DEV_S16) {
			/* Fall back to 8-bit PCMU if 16-bit linear audio is rejected. */
			audio_format_change_encoding(ifmt, DEV_PCMU);
			audio_format_change_encoding(ofmt, DEV_PCMU);
			mode = deve2oss(ifmt->encoding);
			if (ioctl(fd, SNDCTL_DSP_SETFMT, &mode) == -1) {
				return FALSE;
			}
			debug_msg("device doesn't support 16 bit audio, using 8 bit PCMU\n");
		} else {
			return FALSE;
		}
	}

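	/* OSS encodes the channel count as a stereo flag: 0 = mono, 1 = stereo. */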
	stereo = ifmt->channels - 1; 
	assert(stereo == 0 || stereo == 1);
	if ((ioctl(fd, SNDCTL_DSP_STEREO, &stereo) == -1) || (stereo != (ifmt->channels - 1))) {
		debug_msg("device doesn't support %d channels!\n", ifmt->channels);
		return FALSE;
	}

	speed = ifmt->sample_rate;
	if (ioctl(fd, SNDCTL_DSP_SPEED, &speed) == -1) {
		debug_msg("device doesn't support %dHz sampling rate in full duplex!\n", ifmt->sample_rate);
		return FALSE;
	}
	return TRUE;
}
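
A minimal usage sketch for the routine above (not from the original source): it assumes an OSS device node such as /dev/dsp and the audio_format type and helpers used in the example.

/* Hypothetical caller: open the OSS device full duplex and configure it. */
#include <fcntl.h>
#include <unistd.h>

static int
oss_open_example(audio_format *ifmt, audio_format *ofmt)
{
	int fd = open("/dev/dsp", O_RDWR);	/* device path is an assumption */

	if (fd < 0) {
		return -1;
	}
	if (!oss_configure_device(fd, ifmt, ofmt)) {
		close(fd);			/* configuration failed */
		return -1;
	}
	return fd;				/* ready for read()/write() */
}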
Example no. 2
int macosx_audio_open(audio_desc_t ad, audio_format* ifmt, audio_format *ofmt)
{
	OSStatus err = noErr;
	UInt32   propertySize;
	Boolean  writable;
	obtained_ = false;
	add = ad;
	//dev[0] = devices[ad];
	UNUSED(ofmt);

	// Get the default input device ID. 
	err = AudioHardwareGetPropertyInfo(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &writable);              
	if (err != noErr) {
		return 0;
	}
	err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &(devices[ad].inputDeviceID_));
	if (err != noErr) {
		debug_msg("error kAudioHardwarePropertyDefaultInputDevice");
		return 0;
	}
	if (devices[ad].inputDeviceID_ == kAudioDeviceUnknown) {
		debug_msg("error kAudioDeviceUnknown");
		return 0;
	}
	// Get the input stream description.
	err = AudioDeviceGetPropertyInfo(devices[ad].inputDeviceID_, 0, true, kAudioDevicePropertyStreamFormat, &propertySize, &writable);
	if (err != noErr) {
		debug_msg("error AudioDeviceGetPropertyInfo");
		return 0;
	}
	err = AudioDeviceGetProperty(devices[ad].inputDeviceID_, 0, true, kAudioDevicePropertyStreamFormat, &propertySize, &(devices[ad].inputStreamBasicDescription_));
	//printf("inputStreamBasicDescription_.mBytesPerFrame %d\n", devices[add].inputStreamBasicDescription_);
	if (err != noErr) {
		debug_msg("error AudioDeviceGetProperty");
		return 0;
	}

	// set little endian (clear the big-endian flag)
	devices[ad].inputStreamBasicDescription_.mFormatFlags &= ~kAudioFormatFlagIsBigEndian;

	if (writable) {
	        err = AudioDeviceSetProperty(devices[ad].inputDeviceID_, NULL, 0, true, kAudioDevicePropertyStreamFormat, sizeof(AudioStreamBasicDescription), &(devices[ad].inputStreamBasicDescription_));
	        if (err != noErr) printf("err: AudioDeviceSetProperty: kAudioDevicePropertyStreamFormat\n");
	}
	
	/* set the buffer size of the device */
	
	/*
	int bufferByteSize = 8192;
	propertySize = sizeof(bufferByteSize);
	err = AudioDeviceSetProperty(devices[ad].inputDeviceID_, NULL, 0, true, kAudioDevicePropertyBufferSize, propertySize, &bufferByteSize);
	if (err != noErr) debug_msg("err: Set kAudioDevicePropertyBufferSize to %d\n", bufferByteSize);
	else debug_msg("sucessfully set kAudioDevicePropertyBufferSize to %d\n", bufferByteSize);
	*/

	// Set the device sample rate -- a temporary fix for the G5's
	// built-in audio and possibly other audio devices.
	Boolean IsInput = 0;
	int inChannel = 0;
	Float64 theAnswer = 44100;
	UInt32 theSize = sizeof(theAnswer);
	err = AudioDeviceSetProperty(devices[ad].inputDeviceID_, NULL, inChannel, IsInput,
                                kAudioDevicePropertyNominalSampleRate, theSize, &theAnswer);

	if (err != noErr) {
		debug_msg("error AudioDeviceSetProperty\n");
		return 0;
	}
	debug_msg("Sample rate, %f\n", theAnswer);
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
	err = AudioDeviceCreateIOProcID(devices[ad].inputDeviceID_, audioIOProc, (void*)NULL, &devices[ad].inputDeviceProcID_);
	if (err != noErr) {
		debug_msg("error AudioDeviceCreateIOProcID, %s\n", GetMacOSStatusCommentString(err));
		return 0;
	}
	err = OpenADefaultComponent(kAudioUnitType_Output, kAudioUnitSubType_DefaultOutput, &(devices[ad].outputUnit_));
	// The HAL AudioUnit may be a better way to do this in the future...
	//err = OpenADefaultComponent(kAudioUnitType_Output, kAudioUnitSubType_HALOutput, &(devices[ad].outputUnit_));
	if (err != noErr) {
		debug_msg("error OpenADefaultComponent\n");
		return 0;
	}
#else
	// Register the AudioDeviceIOProc.
	err = AudioDeviceAddIOProc(devices[ad].inputDeviceID_, audioIOProc, NULL);
	if (err != noErr) {
		debug_msg("error AudioDeviceAddIOProc\n");
		return 0;
	}
	err = OpenDefaultAudioOutput(&(devices[ad].outputUnit_));
	if (err != noErr) {
		debug_msg("error OpenDefaultAudioOutput\n");
		return 0;
	}
#endif
	// Register a callback function to provide output data to the unit.
	devices[ad].input.inputProc = outputRenderer;
	devices[ad].input.inputProcRefCon = 0;
	/* These would be needed if the HAL output unit were used:
	 * UInt32 enableIO = 1;
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, (const void*)&enableIO, sizeof(UInt32));
	enableIO=0;
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, (const void*)&enableIO, sizeof(UInt32));
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty EnableIO with error %ld: %s\n", err, GetMacOSStatusErrorString(err));
		return 0;
	}*/
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, 0, &(devices[ad].input), sizeof(AURenderCallbackStruct));
#else
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &(devices[ad].input), sizeof(AURenderCallbackStruct));
#endif

	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty1 with error %ld: %s\n", err, GetMacOSStatusErrorString(err));
		return 0;
	}
	// Define the Mash stream description. Mash puts 20ms of data into each read
	// and write call. 20ms at 8000Hz equals 160 samples. Each sample is a u_char,
	// so that's 160 bytes. Mash uses 8-bit mu-law internally, so we need to convert
	// to 16-bit linear before using the audio data.
	devices[ad].mashStreamBasicDescription_.mSampleRate = 8000.0;
	//devices[ad].mashStreamBasicDescription_.mSampleRate = ifmt->sample_rate;
	devices[ad].mashStreamBasicDescription_.mFormatID = kAudioFormatLinearPCM;
#ifdef WORDS_BIGENDIAN
	devices[ad].mashStreamBasicDescription_.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsBigEndian | kLinearPCMFormatFlagIsPacked;
#else
	devices[ad].mashStreamBasicDescription_.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
#endif
	devices[ad].mashStreamBasicDescription_.mBytesPerPacket = 2;
	devices[ad].mashStreamBasicDescription_.mFramesPerPacket = 1;
	devices[ad].mashStreamBasicDescription_.mBytesPerFrame = 2;
	devices[ad].mashStreamBasicDescription_.mChannelsPerFrame = 1;
	devices[ad].mashStreamBasicDescription_.mBitsPerChannel = 16;

	// Inform the default output unit of our source format.
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &(devices[ad].mashStreamBasicDescription_), sizeof(AudioStreamBasicDescription));
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty2");
		printf("error setting output unit source format\n");
		return 0;
	}

	// check the stream format
	err = AudioUnitGetPropertyInfo(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &propertySize, &writable);
	if (err != noErr) debug_msg("err getting propert info for kAudioUnitProperty_StreamFormat\n");

	err = AudioUnitGetProperty(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamdesc_, &propertySize);
	if (err != noErr) debug_msg("err getting values for kAudioUnitProperty_StreamFormat\n");
	
	char name[128];
	audio_format_name(ifmt, name, 128);
	debug_msg("Requested ifmt %s\n",name);
	debug_msg("ifmt bytes pre block: %d\n",ifmt->bytes_per_block);

	// handle the requested format
	if (ifmt->encoding != DEV_S16) {
		audio_format_change_encoding(ifmt, DEV_S16);
		audio_format_name(ifmt, name, 128);
		debug_msg("Requested ifmt changed to %s\n", name);
		debug_msg("ifmt bytes per block: %d\n", ifmt->bytes_per_block);
	}

	audio_format_name(ofmt, name, 128);
	debug_msg("Requested ofmt %s\n",name);
	debug_msg("ofmt bytes pre block: %d\n",ofmt->bytes_per_block);
	
	// Allocate the read buffer and Z delay line.
	//readBufferSize_ = 8192;
	readBufferSize_ = ifmt->bytes_per_block * ringBufferFactor_;
	//readBufferSize_ = 320;
	//printf("readBufferSize_ %d\n", readBufferSize_);
	readBuffer_ = malloc(sizeof(u_char)*readBufferSize_);
	bzero(readBuffer_, readBufferSize_ * sizeof(u_char));
	//memset(readBuffer_, PCMU_AUDIO_ZERO, readBufferSize_);
	//inputReadIndex_ = -1; 
	inputReadIndex_ = 0; inputWriteIndex_ = 0;
	zLine_ = malloc(sizeof(double)*DECIM441_LENGTH / 80);
	availableInput_ = 0;

	// Allocate the write buffer.
	//writeBufferSize_ = 8000;
	writeBufferSize_ = ofmt->bytes_per_block * ringBufferFactor_;
	writeBuffer_ = malloc(sizeof(SInt16)*writeBufferSize_);
	bzero(writeBuffer_, writeBufferSize_ * sizeof(SInt16));
	outputReadIndex_ = 0; outputWriteIndex_ = 0;
	//outputWriteIndex_ = -1;
    	// Start audio processing.
	err = AudioUnitInitialize(devices[ad].outputUnit_);
	if (err != noErr) {
		debug_msg("error AudioUnitInitialize\n");
		return 0;
	}
	err = AudioDeviceStart(devices[ad].inputDeviceID_, audioIOProc);
	if (err != noErr) {
		fprintf(stderr, "Input device error: AudioDeviceStart\n");
		return 0;
	}
	err = AudioOutputUnitStart(devices[ad].outputUnit_);
	if (err != noErr) {
		fprintf(stderr, "Output device error: AudioOutputUnitStart\n");
		return 0;
	}
	// Inform the default output unit of our source format.
	/*
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &(devices[ad].mashStreamBasicDescription_), sizeof(AudioStreamBasicDescription));
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty3");
		return 0;
	}
	*/
	return 1;
}
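
A hedged sketch of the matching teardown (not part of the original source), assuming the same devices[] bookkeeping and module-level buffers; it simply mirrors the start-up calls made in macosx_audio_open above.

/* Hypothetical close path mirroring macosx_audio_open(). */
void macosx_audio_close_example(audio_desc_t ad)
{
	AudioOutputUnitStop(devices[ad].outputUnit_);			/* stop the output unit */
	AudioUnitUninitialize(devices[ad].outputUnit_);
	AudioDeviceStop(devices[ad].inputDeviceID_, audioIOProc);	/* stop input callbacks */
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
	AudioDeviceDestroyIOProcID(devices[ad].inputDeviceID_, devices[ad].inputDeviceProcID_);
#else
	AudioDeviceRemoveIOProc(devices[ad].inputDeviceID_, audioIOProc);
#endif
	free(readBuffer_);  readBuffer_  = NULL;			/* release the ring buffers */
	free(writeBuffer_); writeBuffer_ = NULL;
	free(zLine_);       zLine_       = NULL;
}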
Example no. 3
/* Returns the audio file descriptor if ok, FALSE otherwise. */
int
atm_audio_open(audio_desc_t ad, audio_format* ifmt, audio_format* ofmt)
{

    socklen_t len;
    struct sockaddr_atmpvc vcc_address;
    FILE *f = fopen("atm_socket", "r");

    if (f == NULL) {
        debug_msg("ATM socket file not found");
        return FALSE;
    }

    if (atm_audio_supports(ifmt) == FALSE) {
        /* Should never get here */
        debug_msg("ATM input format not supported\n");
        return FALSE;
    }

    if (atm_audio_supports(ofmt) == FALSE) {
        /* Should never get here */
        debug_msg("ATM output format not supported\n");
        return FALSE;
    }

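    /* Read the descriptor of the pre-established ATM PVC socket from the "atm_socket" file. */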
    fread(&atm_socket, sizeof(atm_socket), 1, f);
    fclose(f);

    len = sizeof(vcc_address);

    if (audio_fd != -1) {
        debug_msg("Device already open!");
        atm_audio_close(ad);
        return FALSE;
    }

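    /* Query the PVC address so the VPI/VCI can be packed into the ATM cell header below. */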
    if (getsockopt(atm_socket, SOL_ATM, SO_ATMPVC, (char *)&vcc_address, &len) >= 0) {
        unsigned gfc=0, vpi, vci, type=0, clp=0;
        vpi = vcc_address.sap_addr.vpi;
        vci = vcc_address.sap_addr.vci;
        audio_fd = atm_socket;
        dev_info.atmhdr = (gfc << ATM_HDR_GFC_SHIFT) | (vpi << ATM_HDR_VPI_SHIFT) |
                          (vci << ATM_HDR_VCI_SHIFT) | (type << ATM_HDR_PTI_SHIFT) | clp;

        dev_info.txresidue_bytes    = 0;
        dev_info.rxresidue_bytes    = 0;
        dev_info.rxresidue          = dev_info.rxresidue_buf;
        dev_info.seqno              = 0;
        memset(dev_info.txresidue, 0, sizeof(dev_info.txresidue));
        memset(dev_info.rxresidue, 0, sizeof(dev_info.txresidue));

        dev_info.monitor_gain       = 0;
        dev_info.output_muted       = 0; /* 0==not muted */
        dev_info.play.port          = AUDIO_LINE_OUT;
        dev_info.record.port        = AUDIO_LINE_IN;

        audio_format_change_encoding(ifmt, DEV_PCMA);
        audio_format_change_encoding(ofmt, DEV_PCMA);

        debug_msg("ATM audio device open (fd=%d, vpi=%d, vci=%d)\n", audio_fd, vpi, vci);
        atm_audio_drain(ad);

        return audio_fd;
    } else {
        debug_msg("ATM socket descriptor is invalid");
        return FALSE;
    }
}