示例#1
0
int
sound_lowlevel_init( const char *device, int *freqptr, int *stereoptr )
{
  /* The Wii AI hardware supports only 32 kHz and 48 kHz output. */
  switch(*freqptr) {
  case 32000:
    samplerate = AI_SAMPLERATE_32KHZ;
    break;
  case 48000:
    samplerate = AI_SAMPLERATE_48KHZ;
    break;
  default:
    printf("Sample rate %d not supported on Wii\n", *freqptr);
    return 1;
  }

  /* BUGFIX: the fifo allocation result was previously ignored; a failed
     init would leave sound_fifo unusable while reporting success. */
  if( sfifo_init( &sound_fifo, BUFSIZE ) ) {
    printf("Failed to initialise sound fifo\n");
    return 1;
  }
  *stereoptr = 1;    /* we always output stereo */

  AUDIO_Init( NULL );
  AUDIO_SetDSPSampleRate( samplerate );

#ifndef DISPLAY_AUDIO
  AUDIO_RegisterDMACallback( sound_dmacallback );
  memset( dmabuf, 0, BUFSIZE );
  AUDIO_InitDMA( (u32)dmabuf, BUFSIZE );
  /* BUGFIX: flush the whole BUFSIZE region we just zeroed and handed to
     the DMA engine; flushing dmalen (which may still be zero during
     init) would leave stale cache lines for the DMA to read. */
  DCFlushRange( dmabuf, BUFSIZE );
  AUDIO_StartDMA();
#endif

  return 0;
}
示例#2
0
/* Set up an active-mode data connection back to the client.
   Returns 0 on success (or when passive mode makes this a no-op),
   1 on any allocation/initialisation failure (client is sent msg451). */
static int open_dataconnection(struct tcp_pcb *pcb, struct ftpd_msgstate *fsm)
{
	/* In passive mode the listening data pcb was already created. */
	if (fsm->passive)
		return 0;

	/* Allocate memory for the structure that holds the state of the
	   connection. */
	fsm->datafs = malloc(sizeof(struct ftpd_datastate));

	if (fsm->datafs == NULL) {
		send_msg(pcb, fsm, msg451);
		return 1;
	}
	memset(fsm->datafs, 0, sizeof(struct ftpd_datastate));
	fsm->datafs->msgfs = fsm;
	fsm->datafs->msgpcb = pcb;

	/* BUGFIX: sfifo_init can fail; the result was previously ignored. */
	if (sfifo_init(&fsm->datafs->fifo, 2000) != 0) {
		free(fsm->datafs);
		fsm->datafs = NULL;
		send_msg(pcb, fsm, msg451);
		return 1;
	}

	fsm->datapcb = tcp_new();
	/* BUGFIX: tcp_new() returns NULL when lwIP is out of pcbs; the
	   original dereferenced it unconditionally via tcp_arg(). */
	if (fsm->datapcb == NULL) {
		sfifo_close(&fsm->datafs->fifo);
		free(fsm->datafs);
		fsm->datafs = NULL;
		send_msg(pcb, fsm, msg451);
		return 1;
	}

	/* Tell TCP that this is the structure we wish to be passed for our
	   callbacks. */
	tcp_arg(fsm->datapcb, fsm->datafs);
	ip_addr_t dataip;
	IP_SET_TYPE_VAL(dataip, IPADDR_TYPE_V4);
	ip4_addr_copy(*ip_2_ip4(&dataip), fsm->dataip);
	tcp_connect(fsm->datapcb, &dataip, fsm->dataport, ftpd_dataconnected);

	return 0;
}
示例#3
0
/* lwIP accept callback for the FTP control connection: allocate and
   initialise per-connection state, register the TCP callbacks and send
   the 220 greeting. Returns ERR_OK, ERR_MEM on allocation failure, or
   ERR_CLSD when the virtual filesystem cannot be opened. */
static err_t ftpd_msgaccept(void *arg, struct tcp_pcb *pcb, err_t err)
{
	struct ftpd_msgstate *fsm;

	/* Allocate memory for the structure that holds the state of the
	   connection. */
	fsm = malloc(sizeof(struct ftpd_msgstate));

	if (fsm == NULL) {
		dbg_printf("ftpd_msgaccept: Out of memory\n");
		return ERR_MEM;
	}
	memset(fsm, 0, sizeof(struct ftpd_msgstate));

	/* Initialize the structure. */
	/* BUGFIX: sfifo_init can fail; the result was previously ignored. */
	if (sfifo_init(&fsm->fifo, 2000) != 0) {
		free(fsm);
		return ERR_MEM;
	}
	fsm->state = FTPD_IDLE;
	fsm->vfs = vfs_openfs();
	if (!fsm->vfs) {
		/* BUGFIX: the fifo buffer was leaked on this error path. */
		sfifo_close(&fsm->fifo);
		free(fsm);
		return ERR_CLSD;
	}

	/* Tell TCP that this is the structure we wish to be passed for our
	   callbacks. */
	tcp_arg(pcb, fsm);

	/* Tell TCP that we wish to be informed of incoming data by a call
	   to the http_recv() function. */
	tcp_recv(pcb, ftpd_msgrecv);

	/* Tell TCP that we wish be to informed of data that has been
	   successfully sent by a call to the ftpd_sent() function. */
	tcp_sent(pcb, ftpd_msgsent);

	tcp_err(pcb, ftpd_msgerr);

	tcp_poll(pcb, ftpd_msgpoll, 1);

	send_msg(pcb, fsm, msg220);

	return ERR_OK;
}
示例#4
0
/* EEL binding: (re)configure the send-buffer size (and optionally the
   poll period) of a net2 socket, restarting the sender thread. */
static EEL_xno n2_tcp_setbuf(EEL_vm *vm)
{
	EEL_value *args = vm->heap + vm->argv;
	EB_socket *ebs;

	if(EEL_TYPE(args) != md.net2_socket_cid)
		return EEL_XWRONGTYPE;
	ebs = o2EB_socket(args->objref.v);
	if(!ebs->rs)
		return EEL_XDEVICECLOSED;

	if(ebs->rs->sender)
	{
		/*
		 * Change buffer size!
		 * We need to close the sender thread, buffer etc,
		 * and set up new gear to do this safely. To avoid
		 * mixed up data, we wait for the old thread to
		 * finish before moving on.
		 */
		ebs->rs->closed = 2;
		SDL_WaitThread(ebs->rs->sender, NULL);
		/* BUGFIX: clear the handle so that a failure below cannot
		   leave a dangling pointer to the joined thread. */
		ebs->rs->sender = NULL;
		sfifo_close(&ebs->rs->fifo);
		/* NOTE(review): 'closed' is left at 2 here; presumably the
		   new sender thread resets or ignores it — verify against
		   n2s_sender_thread. */
	}

	ebs->rs->fifosize = eel_v2l(args + 1);

	if(vm->argc >= 3)
		ebs->rs->pollperiod = eel_v2l(args + 2);

	if(sfifo_init(&ebs->rs->fifo, ebs->rs->fifosize) != 0)
		return EEL_XMEMORY;
	ebs->rs->sender = SDL_CreateThread(n2s_sender_thread, ebs->rs);
	if(!ebs->rs->sender)
	{
		sfifo_close(&ebs->rs->fifo);
		return EEL_XTHREADCREATE;
	}
	return 0;
}
示例#5
0
/* Open the SDL audio device and allocate the ring buffer feeding the
   SDL callback. Returns 0 on success, -1 on failure. */
static int open_sdl(out123_handle *ao)
{
	sfifo_t *fifo = (sfifo_t*)ao->userptr;
	
	/* Open an audio I/O stream. */
	if (ao->rate > 0 && ao->channels >0 ) {
		size_t ringbuffer_len;
		SDL_AudioSpec wanted;
	
		/* L16 uncompressed audio data, using 16-bit signed representation in twos 
		   complement notation - system endian-ness. */
		wanted.format = AUDIO_S16SYS;
		wanted.samples = 1024;  /* Good low-latency value for callback */ 
		wanted.callback = audio_callback_sdl; 
		wanted.userdata = ao; 
		wanted.channels = ao->channels; 
		wanted.freq = ao->rate; 

		/* Open the audio device, forcing the desired format
		   Actually, it is still subject to constraints by hardware.
		   Need to have sample rate checked beforehand! SDL will
		   happily play 22 kHz files with 44 kHz hardware rate!
		   Same with channel count. No conversion. The manual is a bit
		   misleading on that (only talking about sample format, I guess). */
		if ( SDL_OpenAudio(&wanted, NULL) )
		{
			if(!AOQUIET)
				error1("Couldn't open SDL audio: %s\n", SDL_GetError());
			return -1;
		}
		
		/* Initialise FIFO */
		ringbuffer_len = ao->rate * FIFO_DURATION * SAMPLE_SIZE *ao->channels;
		debug2( "Allocating %d byte ring-buffer (%f seconds)", (int)ringbuffer_len, (float)FIFO_DURATION);
		if (sfifo_init( fifo, ringbuffer_len ))
		{
			/* BUGFIX: the original tied error reporting to AOQUIET and
			   then fell through to return 0, leaving the device open
			   with an unusable FIFO. Report, clean up and fail. */
			if(!AOQUIET)
				error1( "Failed to initialise FIFO of size %d bytes", (int)ringbuffer_len );
			SDL_CloseAudio();
			return -1;
		}
	}
	
	return(0);
}
示例#6
0
/* Initialise CoreAudio output: query the default output device, set up
   the stream format and render callback, and allocate the sound fifo.
   Returns 0 on success, 1 on any failure (reported via ui_error). */
int
sound_lowlevel_init( const char *dev, int *freqptr, int *stereoptr )
{
  OSStatus err = kAudioHardwareNoError;
  UInt32 count;
  AudioDeviceID device = kAudioDeviceUnknown; /* the default device */
  UInt32 deviceBufferSize;  /* bufferSize returned by
                               kAudioDevicePropertyBufferSize */
  int error;
  float hz;
  int sound_framesiz;

  /* get the default output device for the HAL */
  count = sizeof( device );
  err = AudioHardwareGetProperty( kAudioHardwarePropertyDefaultOutputDevice,
                                  &count, (void *)&device );
  if ( err != kAudioHardwareNoError ) {
    ui_error( UI_ERROR_ERROR,
              "get kAudioHardwarePropertyDefaultOutputDevice error %ld",
              err );
    return 1;
  }

  /* get the buffersize that the default device uses for IO */
  count = sizeof( deviceBufferSize );
  err = AudioDeviceGetProperty( device, 0, false, kAudioDevicePropertyBufferSize,
                                &count, &deviceBufferSize );
  if( err != kAudioHardwareNoError ) {
    ui_error( UI_ERROR_ERROR, "get kAudioDevicePropertyBufferSize error %ld",
              err );
    return 1;
  }

  /* get a description of the data format used by the default device */
  count = sizeof( deviceFormat );
  err = AudioDeviceGetProperty( device, 0, false,
                                kAudioDevicePropertyStreamFormat, &count,
                                &deviceFormat );
  if( err != kAudioHardwareNoError ) {
    ui_error( UI_ERROR_ERROR,
              "get kAudioDevicePropertyStreamFormat error %ld", err );
    return 1;
  }

  /* Report the device's native rate back to the caller. */
  *freqptr = deviceFormat.mSampleRate;

  /* 16-bit signed packed PCM, native endianness, mono or stereo as
     requested by the caller. */
  deviceFormat.mFormatFlags =  kLinearPCMFormatFlagIsSignedInteger
#ifdef WORDS_BIGENDIAN
                    | kLinearPCMFormatFlagIsBigEndian
#endif      /* #ifdef WORDS_BIGENDIAN */
                    | kLinearPCMFormatFlagIsPacked;
  deviceFormat.mBytesPerPacket = *stereoptr ? 4 : 2;
  deviceFormat.mFramesPerPacket = 1;
  deviceFormat.mBytesPerFrame = *stereoptr ? 4 : 2;
  deviceFormat.mBitsPerChannel = 16;
  deviceFormat.mChannelsPerFrame = *stereoptr ? 2 : 1;

  /* Open the default output unit */
  ComponentDescription desc;
  desc.componentType = kAudioUnitType_Output;
  desc.componentSubType = kAudioUnitSubType_DefaultOutput;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  desc.componentFlags = 0;
  desc.componentFlagsMask = 0;

  Component comp = FindNextComponent( NULL, &desc );
  if( comp == NULL ) {
    ui_error( UI_ERROR_ERROR, "FindNextComponent" );
    return 1;
  }

  err = OpenAComponent( comp, &gOutputUnit );
  /* BUGFIX: the original re-tested 'comp == NULL' here (copy-paste from
     the check above), so OpenAComponent failures were silently ignored
     and gOutputUnit could be used uninitialised. Test the status code. */
  if( err ) {
    ui_error( UI_ERROR_ERROR, "OpenAComponent=%ld", err );
    return 1;
  }

  /* Set up a callback function to generate output to the output unit */
  AURenderCallbackStruct input;
  input.inputProc = coreaudiowrite;
  input.inputProcRefCon = NULL;

  err = AudioUnitSetProperty( gOutputUnit,                       
                              kAudioUnitProperty_SetRenderCallback,
                              kAudioUnitScope_Input,
                              0,
                              &input,
                              sizeof( input ) );
  if( err ) {
    ui_error( UI_ERROR_ERROR, "AudioUnitSetProperty-CB=%ld", err );
    return 1;
  }

  err = AudioUnitSetProperty( gOutputUnit,
                              kAudioUnitProperty_StreamFormat,
                              kAudioUnitScope_Input,
                              0,
                              &deviceFormat,
                              sizeof( AudioStreamBasicDescription ) );
  if( err ) {
    ui_error( UI_ERROR_ERROR, "AudioUnitSetProperty-SF=%4.4s, %ld", (char*)&err, err );
    return 1;
  }

  err = AudioUnitInitialize( gOutputUnit );
  if( err ) {
    ui_error( UI_ERROR_ERROR, "AudioUnitInitialize=%ld", err );
    return 1;
  }

  /* Emulated frames per second, from the emulated machine's timings. */
  hz = (float)machine_current->timings.processor_speed /
              machine_current->timings.tstates_per_frame;
  sound_framesiz = deviceFormat.mSampleRate / hz;

  if( ( error = sfifo_init( &sound_fifo, NUM_FRAMES
                                         * deviceFormat.mBytesPerFrame
                                         * deviceFormat.mChannelsPerFrame
                                         * sound_framesiz + 1 ) ) ) {
    ui_error( UI_ERROR_ERROR, "Problem initialising sound fifo: %s",
              strerror ( error ) );
    return 1;
  }

  /* wait to run sound until we have some sound to play */
  audio_output_started = 0;

  return 0;
}
示例#7
0
static void cmd_pasv(const char *arg, struct tcp_pcb *pcb, struct ftpd_msgstate *fsm)
{
	static u16_t port = 4096;
	static u16_t start_port = 4096;
	struct tcp_pcb *temppcb;

	/* Allocate memory for the structure that holds the state of the
	   connection. */
	fsm->datafs = malloc(sizeof(struct ftpd_datastate));

	if (fsm->datafs == NULL) {
		send_msg(pcb, fsm, msg451);
		return;
	}
	memset(fsm->datafs, 0, sizeof(struct ftpd_datastate));

	fsm->datapcb = tcp_new();
	if (!fsm->datapcb) {
		free(fsm->datafs);
		send_msg(pcb, fsm, msg451);
		return;
	}

	sfifo_init(&fsm->datafs->fifo, 2000);

	start_port = port;

	while (1) {
		err_t err;

		if(++port > 0x7fff)
			port = 4096;

		fsm->dataport = port;
		err = tcp_bind(fsm->datapcb, (ip_addr_t*)&pcb->local_ip, fsm->dataport);
		if (err == ERR_OK)
			break;
		if (start_port == port)
			err = ERR_CLSD;
		if (err == ERR_USE) {
			continue;
		} else {
			ftpd_dataclose(fsm->datapcb, fsm->datafs);
			fsm->datapcb = NULL;
			fsm->datafs = NULL;
			return;
		}
	}

	fsm->datafs->msgfs = fsm;

	temppcb = tcp_listen(fsm->datapcb);
	if (!temppcb) {
		ftpd_dataclose(fsm->datapcb, fsm->datafs);
		fsm->datapcb = NULL;
		fsm->datafs = NULL;
		return;
	}
	fsm->datapcb = temppcb;

	fsm->passive = 1;
	fsm->datafs->connected = 0;
	fsm->datafs->msgpcb = pcb;

	/* Tell TCP that this is the structure we wish to be passed for our
	   callbacks. */
	tcp_arg(fsm->datapcb, fsm->datafs);

	tcp_accept(fsm->datapcb, ftpd_dataaccept);
	send_msg(pcb, fsm, msg227, ip4_addr1(ip_2_ip4(&pcb->local_ip)), ip4_addr2(ip_2_ip4(&pcb->local_ip)), ip4_addr3(ip_2_ip4(&pcb->local_ip)), ip4_addr4(ip_2_ip4(&pcb->local_ip)), (fsm->dataport >> 8) & 0xff, (fsm->dataport) & 0xff);
}
示例#8
0
/* Open CoreAudio output: find the default output unit, configure the
   input/output stream formats, install the render callback and set up
   the converter plus FIFO. Returns 0 on success, -1 on failure. */
static int open_coreaudio(audio_output_t *ao)
{
	mpg123_coreaudio_t* ca = (mpg123_coreaudio_t*)ao->userptr;
	UInt32 size;
	ComponentDescription desc;
	Component comp;
	AudioStreamBasicDescription inFormat;
	AudioStreamBasicDescription outFormat;
	AURenderCallbackStruct  renderCallback;
	Boolean outWritable;
	
	/* Initialize our environment */
	ca->play = 0;
	ca->buffer = NULL;
	ca->buffer_size = 0;
	ca->last_buffer = 0;
	ca->play_done = 0;
	ca->decode_done = 0;

	
	/* Get the default audio output unit */
	desc.componentType = kAudioUnitType_Output; 
	desc.componentSubType = kAudioUnitSubType_DefaultOutput;
	desc.componentManufacturer = kAudioUnitManufacturer_Apple;
	desc.componentFlags = 0;
	desc.componentFlagsMask = 0;
	comp = FindNextComponent(NULL, &desc);
	if(comp == NULL) {
		error("FindNextComponent failed");
		return(-1);
	}
	
	if(OpenAComponent(comp, &(ca->outputUnit)))  {
		error("OpenAComponent failed");
		return (-1);
	}
	
	if(AudioUnitInitialize(ca->outputUnit)) {
		error("AudioUnitInitialize failed");
		return (-1);
	}
	
	/* Specify the output PCM format */
	AudioUnitGetPropertyInfo(ca->outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &size, &outWritable);
	if(AudioUnitGetProperty(ca->outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &outFormat, &size)) {
		error("AudioUnitGetProperty(kAudioUnitProperty_StreamFormat) failed");
		return (-1);
	}
	
	if(AudioUnitSetProperty(ca->outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &outFormat, size)) {
		error("AudioUnitSetProperty(kAudioUnitProperty_StreamFormat) failed");
		return (-1);
	}
	
	/* Specify the input PCM format */
	ca->channels = ao->channels;
	inFormat.mSampleRate = ao->rate;
	inFormat.mChannelsPerFrame = ao->channels;
	inFormat.mFormatID = kAudioFormatLinearPCM;
#ifdef _BIG_ENDIAN
	inFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsBigEndian;
#else
	inFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked;
#endif
	
	/* Map the mpg123 encoding to sample width and signed/float flags. */
	switch(ao->format)
	{
		case MPG123_ENC_SIGNED_16:
			inFormat.mFormatFlags |= kLinearPCMFormatFlagIsSignedInteger;
			ca->bps = 2;
			break;
		case MPG123_ENC_SIGNED_8:
			inFormat.mFormatFlags |= kLinearPCMFormatFlagIsSignedInteger;
			ca->bps = 1;
			break;
		case MPG123_ENC_UNSIGNED_8:
			ca->bps = 1;
			break;
		case MPG123_ENC_SIGNED_32:
			inFormat.mFormatFlags |= kLinearPCMFormatFlagIsSignedInteger;
			ca->bps = 4;
			break;
		case MPG123_ENC_FLOAT_32:
			inFormat.mFormatFlags |= kLinearPCMFormatFlagIsFloat;
			ca->bps = 4;
			break;
		case MPG123_ENC_FLOAT_64:
			inFormat.mFormatFlags |= kLinearPCMFormatFlagIsFloat;
			/* NOTE(review): bps=4 for a 64-bit float encoding looks
			   suspicious (8 expected) — kept as-is, verify upstream. */
			ca->bps = 4;
			break;
		default:
			/* BUGFIX: the original had no default, so an unknown
			   encoding left ca->bps uninitialized (undefined
			   behaviour) and carried on with a garbage format. */
			error("Unsupported sample format");
			return(-1);
	}
	
	inFormat.mBitsPerChannel = ca->bps << 3;
	inFormat.mBytesPerPacket = ca->bps*inFormat.mChannelsPerFrame;
	inFormat.mFramesPerPacket = 1;
	inFormat.mBytesPerFrame = ca->bps*inFormat.mChannelsPerFrame;
	
	/* Add our callback - but don't start it yet */
	memset(&renderCallback, 0, sizeof(AURenderCallbackStruct));
	renderCallback.inputProc = convertProc;
	renderCallback.inputProcRefCon = ao->userptr;
	if(AudioUnitSetProperty(ca->outputUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &renderCallback, sizeof(AURenderCallbackStruct))) {
		error("AudioUnitSetProperty(kAudioUnitProperty_SetRenderCallback) failed");
		return(-1);
	}
	
	
	/* Open an audio I/O stream and create converter */
	if (ao->rate > 0 && ao->channels >0 ) {
		int ringbuffer_len;

		if(AudioConverterNew(&inFormat, &outFormat, &(ca->converter))) {
			error("AudioConverterNew failed");
			return(-1);
		}
		if(ao->channels == 1) {
			SInt32 channelMap[2] = { 0, 0 };
			if(AudioConverterSetProperty(ca->converter, kAudioConverterChannelMap, sizeof(channelMap), channelMap)) {
				error("AudioConverterSetProperty(kAudioConverterChannelMap) failed");
				return(-1);
			}
		}
		
		/* Initialise FIFO */
		ringbuffer_len = ao->rate * FIFO_DURATION * ca->bps * ao->channels;
		debug2( "Allocating %d byte ring-buffer (%f seconds)", ringbuffer_len, (float)FIFO_DURATION);
		/* BUGFIX: sfifo_init failure was silently ignored. */
		if(sfifo_init( &ca->fifo, ringbuffer_len )) {
			error("Failed to initialise FIFO");
			return(-1);
		}
	}
	
	return(0);
}
示例#9
0
/* Open CoreAudio output via the AudioComponent API: find the default
   output unit, match its output format, install the render callback and
   set up the converter plus FIFO. Returns 0 on success, -1 on failure. */
static int open_coreaudio(audio_output_t *ao)
{
    mpg123_coreaudio_t* ca = (mpg123_coreaudio_t*)ao->userptr;
    UInt32 size;
    AudioComponentDescription desc;
    AudioComponent comp;
    AudioStreamBasicDescription inFormat;
    AudioStreamBasicDescription outFormat;
    AURenderCallbackStruct  renderCallback;
    Boolean outWritable;

    /* Initialize our environment */
    ca->play = 0;
    ca->buffer = NULL;
    ca->buffer_size = 0;
    ca->last_buffer = 0;
    ca->play_done = 0;
    ca->decode_done = 0;


    /* Get the default audio output unit */
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_DefaultOutput;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    comp = AudioComponentFindNext(NULL, &desc);
    if(comp == NULL) {
        return -1;
    }

    if(AudioComponentInstanceNew(comp, &(ca->outputUnit)))  {
        return -1;
    }

    if(AudioUnitInitialize(ca->outputUnit)) {
        return -1;
    }

    /* Specify the output PCM format */
    AudioUnitGetPropertyInfo(ca->outputUnit,
                             kAudioUnitProperty_StreamFormat,
                             kAudioUnitScope_Output,
                             0,
                             &size,
                             &outWritable);
    if(AudioUnitGetProperty(ca->outputUnit,
                            kAudioUnitProperty_StreamFormat,
                            kAudioUnitScope_Output,
                            0,
                            &outFormat,
                            &size)) {
        return -1;
    }

    if(AudioUnitSetProperty(ca->outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &outFormat, size)) {
        return -1;
    }

    /* Specify the input PCM format */
    ca->channels = ao->channels;
    inFormat.mSampleRate = ao->rate;
    inFormat.mChannelsPerFrame = ao->channels;
    inFormat.mFormatID = kAudioFormatLinearPCM;
#ifdef _BIG_ENDIAN
    inFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsBigEndian;
#else
    inFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked;
#endif

    if (ao->signed_samples) {
        inFormat.mFormatFlags |= kLinearPCMFormatFlagIsSignedInteger;
    }

    ca->bps = ao->bytes_per_sample;

    inFormat.mBitsPerChannel = ca->bps << 3;
    inFormat.mBytesPerPacket = ca->bps*inFormat.mChannelsPerFrame;
    inFormat.mFramesPerPacket = 1;
    inFormat.mBytesPerFrame = ca->bps*inFormat.mChannelsPerFrame;

    /* Add our callback - but don't start it yet */
    memset(&renderCallback, 0, sizeof(AURenderCallbackStruct));
    renderCallback.inputProc = convertProc;
    renderCallback.inputProcRefCon = ao->userptr;
    if(AudioUnitSetProperty(ca->outputUnit,
                            kAudioUnitProperty_SetRenderCallback,
                            kAudioUnitScope_Input,
                            0,
                            &renderCallback,
                            sizeof(AURenderCallbackStruct))) {
        return -1;
    }


    /* Open an audio I/O stream and create converter */
    if (ao->rate > 0 && ao->channels >0 ) {
        int ringbuffer_len;

        if(AudioConverterNew(&inFormat, &outFormat, &(ca->converter))) {
            return -1;
        }
        if(ao->channels == 1) {
            /* Duplicate the mono channel to both output channels. */
            SInt32 channelMap[2] = { 0, 0 };
            if(AudioConverterSetProperty(ca->converter, kAudioConverterChannelMap, sizeof(channelMap), channelMap)) {
                return -1;
            }
        }

        /* Initialise FIFO */
        ringbuffer_len = ((int)ao->rate *
                          FIFO_DURATION *
                          ca->bps *
                          ao->channels);
        /* BUGFIX: sfifo_init failure was silently ignored, letting the
           function report success with an unusable ring buffer. */
        if(sfifo_init( &ca->fifo, ringbuffer_len ) != 0) {
            return -1;
        }
    }

    return(0);
}
}